path (string, length 7–265) | concatenated_notebook (string, length 46–17M)
---|---|
PythonJupyterNotebooks/Week12-Day3-Activity6-ByDave-ClasssInstructorcorrelating_returns.ipynb | ###Markdown
Correlating Returns
###Code
import os
import pandas as pd
from datetime import datetime, timedelta
from dotenv import load_dotenv
#import alpaca_trade_api as alpaca
from newsapi.newsapi_client import NewsApiClient
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import alpaca_trade_api as trade_api
from alpaca_trade_api.rest import REST#, TimeFrame
import yfinance as yf
import datetime as dt
###Output
_____no_output_____
###Markdown
Load API Keys from Environment Variables
###Code
# Load .env environment variables
load_dotenv()
# Set News API Key
newsapi = NewsApiClient(api_key=os.environ["NEWS_API_KEY"])
# Set Alpaca API key and secret
alpaca_api_key = os.getenv("ALPACA_API_KEY")
alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY")
api = trade_api.REST(alpaca_api_key, alpaca_secret_key, api_version='v2')
alpaca_api_key
###Output
_____no_output_____
###Markdown
Get AAPL Returns for Past Month
###Code
# Set the ticker
#ticker = "AAPL"
# Set timeframe to '1D'
#timeframe = "1D"
# Set current date and the date from one month ago using the ISO format
#current_date = pd.Timestamp(datetime.now(), tz="America/New_York").isoformat()
#past_date = pd.Timestamp(datetime.now()- timedelta(30), tz="America/New_York").isoformat()
#print(past_date)
# Get 4 weeks worth of historical data for AAPL
#df = api.get_bars(
# ticker,
# TimeFrame.Day,
#limit=None,
# '2022-04-05',
# '2022-05-10',
#after=None,
#until=None,
# adjustment='raw'
#).df
#yearly = api.get_bars(tickers, TimeFrame.Day, "2021-03-20", "2022-03-20", adjustment='raw').df
# Display data
#df.head()
# Set the AAPL ticker
ticker = ["AAPL"]
# Set timeframe to "1Day"
timeframe = "1Day"
# Set start and end datetimes for roughly one month of history using the ISO format
start_date = pd.Timestamp("2022-04-11", tz="America/New_York").isoformat()
end_date = pd.Timestamp("2022-05-11", tz="America/New_York").isoformat()
# Get one month's worth of historical data for AAPL via Alpaca (kept commented out for reference)
#df = api.get_bars(
# ticker,
# timeframe,
# limit=None,
# start=past_date,
# end=current_date,
#after=None,
#until=None,
#).df
start = dt.datetime.today()-dt.timedelta(35)
end = dt.datetime.today()
ticker = 'AAPL'
df = yf.download(ticker, start, end)
df.tail()
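# Hypothetical guard (not part of the original activity): yf.download returns an empty
# DataFrame when the ticker or date range is bad, which would make every step below fail,
# so stop early with a clear message instead.
if df.empty:
    raise ValueError(f"No price data returned for {ticker} between {start} and {end}")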
# Drop Outer Table Level
#df = df.droplevel(axis=1, level=0)
#print(df)
# Use the drop function to drop extra columns
df = df.drop(["Open", "High", "Low", "Volume","Adj Close"],axis=1)
# Since this is daily data, we can keep only the date (remove the time) component of the data
#df.index = df.index.date
# Display sample data
df.head()
df.to_excel('AAPLRaw.xlsx')
# Use the `pct_change` function to calculate 3-day forward returns of AAPL (shift(-3) aligns each return with its start date)
aapl_returns = df.pct_change(3).shift(-3).dropna()
# Display sample data
aapl_returns.to_excel('aapl.xlsx')
aapl_returns.tail()
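# Hypothetical sanity check (not in the original notebook): pct_change(3).shift(-3) gives the
# forward-looking 3-day return for each date, which is what gets correlated with same-day
# headline sentiment later. A toy series makes the alignment easy to verify by hand.
toy_prices = pd.Series([100, 101, 102, 103, 104, 105], name="Close")
print(toy_prices.pct_change(3).shift(-3))  # row 0 holds (103 - 100) / 100, row 1 holds (104 - 101) / 101, ...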
# Use newsapi client to get the most relevant 20 headlines per day in the past month
# NOTE: current_date and past_date are referenced inside get_headlines() but were only
# defined in the commented-out Alpaca cell above, so define the one-month window here.
current_date = pd.Timestamp(datetime.now(), tz="America/New_York").isoformat()
past_date = pd.Timestamp(datetime.now() - timedelta(30), tz="America/New_York").isoformat()
def get_headlines(keyword):
all_headlines = []
all_dates = []
date = datetime.strptime(current_date[:10], "%Y-%m-%d")
end_date = datetime.strptime(past_date[:10], "%Y-%m-%d")
print(f"Fetching news about '{keyword}'")
print("*" * 30)
while date > end_date:
print(f"retrieving news from: {date}")
articles = newsapi.get_everything(
q=keyword,
from_param=str(date)[:10],
to=str(date)[:10],
language="en",
sort_by="relevancy",
page=1,
)
headlines = []
for i in range(0, len(articles["articles"])):
headlines.append(articles["articles"][i]["title"])
all_headlines.append(headlines)
all_dates.append(date)
date = date - timedelta(days=1)
return all_headlines, all_dates
###Output
_____no_output_____
###Markdown
Note: Be aware that running the 3 requests below will only work once within a 24-hour period due to the request limits imposed by the API provider.
###Code
# Get first topic
aapl_headlines, dates = get_headlines("aapl")
# Get second topic
#print(aapl_headlines)
trade_headlines, _ = get_headlines("trade")
# Get third topic
economy_headlines, _ = get_headlines("economy")
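# Optional caching step (hypothetical; the file name is illustrative): since the note above says
# these requests only work once per 24 hours, persist the fetched headlines to disk so the rest
# of the notebook can be re-run without hitting the News API again.
import pickle
with open("headlines_cache.pkl", "wb") as cache_file:
    pickle.dump(
        {"aapl": aapl_headlines, "trade": trade_headlines, "economy": economy_headlines, "dates": dates},
        cache_file,
    )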
# Instantiate SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
# Create function that computes average compound sentiment of headlines for each day
def headline_sentiment_summarizer_avg(headlines):
sentiment = []
for day in headlines:
day_score = []
for h in day:
if h == None:
continue
else:
day_score.append(sid.polarity_scores(h)["compound"])
sentiment.append(sum(day_score) / len(day_score))
return sentiment
# Get averages of each topic's sentiment
aapl_avg = headline_sentiment_summarizer_avg(aapl_headlines)
trade_avg = headline_sentiment_summarizer_avg(trade_headlines)
economy_avg = headline_sentiment_summarizer_avg(economy_headlines)
# Combine Sentiment Averages into DataFrame
topic_sentiments = pd.DataFrame(
{
"aapl_avg": aapl_avg,
"trade_avg": trade_avg,
"economy_avg": economy_avg,
}
)
topic_sentiments
#aapl_returns
# Set the index value of the sentiment averages DataFrame to be the series of dates.
topic_sentiments.index = pd.to_datetime(dates)
#topic_sentiments
# Merge with AAPL returns
topic_sentiments = aapl_returns.join(topic_sentiments).dropna(how="any")
# Display data
#display(topic_sentiments)
topic_sentiments.to_excel("topicSent.xlsx")
topic_sentiments
# Correlate the headlines' sentiment to returns
topic_sentiments.corr().style.background_gradient()
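# Hypothetical follow-up (assumes the price column kept from yfinance is named "Close"):
# pull out the single number of interest -- the correlation between the 3-day forward AAPL
# return and same-day AAPL headline sentiment.
correlation_matrix = topic_sentiments.corr()
print(correlation_matrix.loc["Close", "aapl_avg"])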
###Output
_____no_output_____ |
examples/metrics/example_01_BinaryClassificationMetrics.ipynb | ###Markdown
Example 01: General Use of BinaryClassificationMetrics [Open in Colab](https://colab.research.google.com/github/slickml/slick-ml/blob/master/examples/metrics/example_01_BinaryClassificationMetrics.ipynb)
Google Colab Configuration
###Code
# !git clone https://github.com/slickml/slick-ml.git
# %cd slick-ml
# !pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Local Environment Configuration
###Code
# Change path to project root
%cd ../..
###Output
/Users/atahmassebi/Desktop/AmirStuff/GitHub/slick-ml
###Markdown
Import Python Libraries
###Code
%load_ext autoreload
# widen the screen
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# change the path and loading class
import os, sys
import pandas as pd
import numpy as np
import seaborn as sns
%autoreload
from slickml.metrics import BinaryClassificationMetrics
###Output
_____no_output_____
###Markdown
_____ BinaryClassificationMetrics Docstring
###Code
help(BinaryClassificationMetrics)
###Output
Help on class BinaryClassificationMetrics in module slickml.metrics:
class BinaryClassificationMetrics(builtins.object)
| BinaryClassificationMetrics(y_true, y_pred_proba, threshold=None, average_method=None, precision_digits=None, display_df=True)
|
| Binary Classification Metrics.
| This is wrapper to calculate all the binary classification
| metrics with both arbitrary and three computed methods for
| calculating the thresholds. Threshold computations including:
| 1) Youden Index: (https://en.wikipedia.org/wiki/Youden%27s_J_statistic).
| 2) Maximizing Precision-Recall.
| 3) Maximizing Sensitivity-Specificity.
|
| Parameters
| ----------
| y_true: numpy.array[int] or list[int]
| List of ground truth binary values [0, 1]
|
| y_pred_proba: numpy.array[float] or list[float]
| List of predicted probability for the positive class
| (class=1 or y_pred_proba[:, 1] in scikit-learn)
|
| threshold: float, optional (default=0.5)
| Threshold value for mapping y_pred_prob to y_pred
| Note that for threshold ">" is used instead of ">="
|
| average_method: str, optional (default="binary")
| Method to calculate the average of the metric. Possible values are
| "micro", "macro", "weighted", "binary"
|
| precision_digits: int, optional (default=3)
| The number of precision digits to format the scores' dataframe
|
| display_df: boolean, optional (default=True)
| Flag to display the formatted scores' dataframe
|
| Attributes
| ----------
| y_pred_: numpy.array(int) or list[int]
| Predicted class based on the threshold.
| Positive class for y_pred_proba >= threshold and
| negative for else.
|
| accuracy_: float value between 0. and 1.
| Classification accuracy based on threshold value
|
| balanced_accuracy_: float value between 0. and 1.
| Balanced classification accuracy based on threshold value
| considering the prevalence of the classes
|
| fpr_list_: numpy.array[float] or list[float]
| List of calculated false-positive-rates based on roc_thresholds.
| This can be used for ROC curve plotting
|
| tpr_list_: numpy.array[float] or list[float]
| List of calculated true-positive-rates based on roc_thresholds
| This can be used for ROC curve plotting
|
| roc_thresholds_: numpy.array[float] or list[float]
| List of thresholds value to calculate fpr_list_ and tpr_list_
|
| auc_roc_: float value between 0. and 1.
| Area under ROC curve
|
| precision_list_: numpy.array[float] or list[float]
| List of calculated precision based on pr_thresholds
| This can be used for ROC curve plotting
|
| recall_list_: numpy.array[float] or list[float]
| List of calculated recall based on pr_thresholds
| This can be used for ROC curve plotting
|
| pr_thresholds_: numpy.array[float] or list[float]
| List of thresholds value to calculate precision_list_ and recall_list_
|
| auc_pr_: float value between 0. and 1.
| Area under Precision-Recall curve
|
| precision_: float value between 0. and 1.
| Precision based on threshold value
|
| recall_: float value between 0. and 1.
| Recall based on threshold value
|
| f1_: float value between 0. and 1.
| F1-score based on threshold value (beta=1.0)
|
| f2_: float value between 0. and 1.
| F2-score based on threshold value (beta=2.0)
|
| f05_: float value between 0. and 1.
| F(1/2)-score based on threshold value (beta=0.5)
|
| average_precision_: float value between 0. and 1.
| Avearge precision based on threshold value and class prevalence
|
| tn_: integer
| True negative counts based on threshold value
|
| fp_: integer
| False positive counts based on threshold value
|
| fn_: integer
| False negative counts based on threshold value
|
| tp_: integer
| True positive counts based on threshold value
|
| threat_score_: float value between 0. and 1.
| Threat score based on threshold value
|
| youden_threshold_: float value between 0. and 1.
| Threshold calculated based on Youden Index
|
| sens_spec_threshold_: float value between 0. and 1.
| Threshold calculated based on maximized sensitivity-specificity
|
| prec_rec_threshold_: float value between 0. and 1.
| Threshold calculated based on maximized precision-recall
|
| thresholds_dict_: dict()
| Dictionary of all calculated thresholds
|
| metrics_dict_: dict()
| Dictionary of all calculated metrics
|
| metrics_df_: pandas.DataFrame
| Pandas DataFrame of all calculated metrics with threshold as index
|
| average_methods_: list[str]
| List of all possible average methods
|
| plotting_dict_: dict()
| Plotting object as a dictionary consists of all
| calculated metrics which was used to plot the thresholds
|
| Methods defined here:
|
| __init__(self, y_true, y_pred_proba, threshold=None, average_method=None, precision_digits=None, display_df=True)
| Initialize self. See help(type(self)) for accurate signature.
|
| plot(self, figsize=None, save_path=None)
| Function to plot binary classification metrics.
| This function is a helper function based on the plotting_dict
| attribute of the BinaryClassificationMetrics class.
|
| Parameters
| ----------
| figsize: tuple, optional, (default=(12, 12))
| Figure size
|
| save_path: str, optional (default=None)
| The full or relative path to save the plot including the image format.
| For example "myplot.png" or "../../myplot.pdf"
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Example 1
###Code
# y_true values
y_true = [0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1,
1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1,
1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0,
1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,
1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,
1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0,
1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1]
# Y_pred_proba values
y_pred_proba = [0. , 0.12, 0.78, 0.07, 1. , 0.05, 1. , 0. , 1. , 0. , 1. ,
0.99, 0.93, 0.88, 0.86, 1. , 0.99, 1. , 1. , 0.74, 0. , 1. ,
1. , 0.79, 1. , 0.58, 1. , 0.95, 1. , 1. , 1. , 0.38, 1. ,
0.94, 1. , 1. , 1. , 0.01, 0.81, 1. , 0.99, 1. , 0.4 , 1. ,
1. , 1. , 0.9 , 0.06, 0. , 0.02, 0.99, 0.45, 1. , 1. , 0.52,
0.99, 0.02, 0. , 1. , 0.04, 0.19, 0.99, 0. , 0. , 0.11, 1. ,
1. , 0.31, 1. , 0.25, 0. , 0. , 0.99, 1. , 0.01, 0.09, 0. ,
1. , 0.98, 0. , 0.6 , 0.1 , 1. , 1. , 0. , 1. , 0.96, 0.02,
1. , 0.84, 1. , 0.97, 0.01, 0.99, 0.4 , 0. , 0.18, 1. , 1. ,
1. , 0.96, 0.04, 1. , 0.17, 1. , 0.96, 1. , 0. , 1. , 0.06,
1. , 0.75, 0.64, 0.74, 0.5 , 0.97, 0.11, 0.9 , 0. , 0.15, 1. ,
0.11, 1. , 0.02, 1. , 0.27, 0.95, 0.91, 0.99, 0. , 1. , 0.79,
1. , 1. , 0.87, 1. , 1. , 0. , 0.73, 0.97, 1. , 0.82, 0.3 ,
0. , 0.09, 1. , 1. , 1. , 1. , 1. , 0.76, 0.75, 0.99, 0.99,
0.96, 0.01, 0.08, 0.98, 1. , 0. , 1. , 1. , 0.82, 0.04, 0.98,
0. , 1. , 1. , 0.02, 0. , 1. , 0.99, 1. , 0.96, 0. , 0. ,
1. , 0. , 1. , 1. , 0. , 0.83, 0. , 0.15, 1. , 0.98, 0.98,
1. ]
example1 = BinaryClassificationMetrics(y_true, y_pred_proba, precision_digits=3)
example1.plot(figsize=(12, 12),
save_path=None)
###Output
_____no_output_____
###Markdown
Example 2
###Code
example = BinaryClassificationMetrics(y_true, y_pred_proba, display_df=False)
print(F"Accuracy = {example.accuracy_}")
print(F"Balanced Accuracy = {example.balanced_accuracy_}")
print(F"AUC ROC = {example.auc_roc_}")
print(F"AUC PR = {example.auc_pr_}")
print(F"Precision = {example.precision_}")
print(F"Recall = {example.recall_}")
print(F"F1-Score = {example.f1_}")
print(F"F2-Score = {example.f2_}")
print(F"F0.5-Score = {example.f05_}")
print(F"Average Precision = {example.average_precision_}")
print(F"Threat Score = {example.threat_score_}")
print(F"Metrics Dict = {example.metrics_dict_}")
print(F"Thresholds Dict = {example.thresholds_dict_}")
example.plot()
thresholds = example.thresholds_dict_
methods = example.average_methods_
frames = []
for method in methods:
for threshold in thresholds:
ex = BinaryClassificationMetrics(y_true, y_pred_proba, threshold=thresholds[threshold], average_method=method, display_df=False)
frames.append(ex.metrics_df_)
df_to_show = pd.concat(frames)
# Set CSS properties
th_props = [("font-size", "12px"),
("text-align", "left"),
("font-weight", "bold")]
td_props = [("font-size", "12px"),
("text-align", "center")]
# Set table styles
styles = [dict(selector = "th", props = th_props),
dict(selector = "td", props = td_props)]
cm = sns.light_palette("blue", as_cmap = True)
display(df_to_show.style.background_gradient(cmap = cm) \
.set_table_styles(styles))
###Output
_____no_output_____
###Markdown
Example 3
###Code
# loading data from slick-ml/data
data = pd.read_csv("./data/clf_data.csv")
data.head()
# setting up the X, y
y = data["CLASS"].values
X = data.drop(["CLASS"], axis=1)
# train-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y)
# train a classifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred_proba = clf.predict_proba(X_test)
example3 = BinaryClassificationMetrics(y_test, y_pred_proba[:,1])
example3.plot()
thresholds = example3.thresholds_dict_
methods = example3.average_methods_
frames = []
for method in methods:
for threshold in thresholds:
ex = BinaryClassificationMetrics(y_test, y_pred_proba[:,1], threshold=thresholds[threshold], average_method=method, display_df=False)
frames.append(ex.metrics_df_)
df_to_show = pd.concat(frames)
# Set CSS properties
th_props = [("font-size", "12px"),
("text-align", "left"),
("font-weight", "bold")]
td_props = [("font-size", "12px"),
("text-align", "center")]
# Set table styles
styles = [dict(selector = "th", props = th_props),
dict(selector = "td", props = td_props)]
cm = sns.light_palette("blue", as_cmap = True)
display(df_to_show.round(decimals=3).style.background_gradient(cmap = cm).set_table_styles(styles))
###Output
_____no_output_____
###Markdown
Example 4
###Code
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
X = data.data
y = data.target
# train-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y)
# train a classifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred_proba = clf.predict_proba(X_test)[:, 1]
example4 = BinaryClassificationMetrics(y_test, y_pred_proba)
example4.plot()
###Output
_____no_output_____
###Markdown
Example 01: General Use of BinaryClassificationMetrics [Open in Colab](https://colab.research.google.com/github/slickml/slick-ml/blob/master/examples/metrics/example_01_BinaryClassificationMetrics.ipynb)
Google Colab Configuration
###Code
# !git clone https://github.com/slickml/slick-ml.git
# %cd slick-ml
# !pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Local Environment Configuration
###Code
# Change path to project root
%cd ../..
###Output
/home/amirhessam/Documents/GitHub/slick-ml
###Markdown
Import Python Libraries
###Code
%load_ext autoreload
# widen the screen
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# change the path and loading class
import os, sys
import pandas as pd
import numpy as np
import seaborn as sns
%autoreload
from slickml.metrics import BinaryClassificationMetrics
###Output
_____no_output_____
###Markdown
_____ BinaryClassificationMetrics Docstring
###Code
help(BinaryClassificationMetrics)
###Output
Help on class BinaryClassificationMetrics in module slickml.metrics:
class BinaryClassificationMetrics(builtins.object)
| BinaryClassificationMetrics(y_true, y_pred_proba, threshold=None, average_method=None, precision_digits=None, display_df=True)
|
| Binary Classification Metrics.
| This is wrapper to calculate all the binary classification
| metrics with both arbitrary and three computed methods for
| calculating the thresholds. Threshold computations including:
| 1) Youden Index: (https://en.wikipedia.org/wiki/Youden%27s_J_statistic).
| 2) Maximizing Precision-Recall.
| 3) Maximizing Sensitivity-Specificity.
|
| Parameters
| ----------
| y_true: numpy.array[int] or list[int]
| List of ground truth binary values [0, 1]
|
| y_pred_proba: numpy.array[float] or list[float]
| List of predicted probability for the positive class
| (class=1 or y_pred_proba[:, 1] in scikit-learn)
|
| threshold: float, optional (default=0.5)
| Threshold value for mapping y_pred_prob to y_pred
| Note that for threshold ">" is used instead of ">="
|
| average_method: str, optional (default="binary")
| Method to calculate the average of the metric. Possible values are
| "micro", "macro", "weighted", "binary"
|
| precision_digits: int, optional (default=3)
| The number of precision digits to format the scores' dataframe
|
| display_df: boolean, optional (default=True)
| Flag to display the formatted scores' dataframe
|
| Attributes
| ----------
| y_pred_: numpy.array(int) or list[int]
| Predicted class based on the threshold.
| Positive class for y_pred_proba >= threshold and
| negative for else.
|
| accuracy_: float value between 0. and 1.
| Classification accuracy based on threshold value
|
| balanced_accuracy_: float value between 0. and 1.
| Balanced classification accuracy based on threshold value
| considering the prevalence of the classes
|
| fpr_list_: numpy.array[float] or list[float]
| List of calculated false-positive-rates based on roc_thresholds.
| This can be used for ROC curve plotting
|
| tpr_list_: numpy.array[float] or list[float]
| List of calculated true-positive-rates based on roc_thresholds
| This can be used for ROC curve plotting
|
| roc_thresholds_: numpy.array[float] or list[float]
| List of thresholds value to calculate fpr_list_ and tpr_list_
|
| auc_roc_: float value between 0. and 1.
| Area under ROC curve
|
| precision_list_: numpy.array[float] or list[float]
| List of calculated precision based on pr_thresholds
| This can be used for ROC curve plotting
|
| recall_list_: numpy.array[float] or list[float]
| List of calculated recall based on pr_thresholds
| This can be used for ROC curve plotting
|
| pr_thresholds_: numpy.array[float] or list[float]
| List of thresholds value to calculate precision_list_ and recall_list_
|
| auc_pr_: float value between 0. and 1.
| Area under Precision-Recall curve
|
| precision_: float value between 0. and 1.
| Precision based on threshold value
|
| recall_: float value between 0. and 1.
| Recall based on threshold value
|
| f1_: float value between 0. and 1.
| F1-score based on threshold value (beta=1.0)
|
| f2_: float value between 0. and 1.
| F2-score based on threshold value (beta=2.0)
|
| f05_: float value between 0. and 1.
| F(1/2)-score based on threshold value (beta=0.5)
|
| average_precision_: float value between 0. and 1.
| Avearge precision based on threshold value and class prevalence
|
| tn_: integer
| True negative counts based on threshold value
|
| fp_: integer
| False positive counts based on threshold value
|
| fn_: integer
| False negative counts based on threshold value
|
| tp_: integer
| True positive counts based on threshold value
|
| threat_score_: float value between 0. and 1.
| Threat score based on threshold value
|
| youden_threshold_: float value between 0. and 1.
| Threshold calculated based on Youden Index
|
| sens_spec_threshold_: float value between 0. and 1.
| Threshold calculated based on maximized sensitivity-specificity
|
| prec_rec_threshold_: float value between 0. and 1.
| Threshold calculated based on maximized precision-recall
|
| thresholds_dict_: dict()
| Dictionary of all calculated thresholds
|
| metrics_dict_: dict()
| Dictionary of all calculated metrics
|
| metrics_df_: pandas.DataFrame
| Pandas DataFrame of all calculated metrics with threshold as index
|
| average_methods_: list[str]
| List of all possible average methods
|
| plotting_dict_: dict()
| Plotting object as a dictionary consists of all
| calculated metrics which was used to plot the thresholds
|
| Methods defined here:
|
| __init__(self, y_true, y_pred_proba, threshold=None, average_method=None, precision_digits=None, display_df=True)
| Initialize self. See help(type(self)) for accurate signature.
|
| plot(self, figsize=None, save_path=None)
| Function to plot binary classification metrics.
| This function is a helper function based on the plotting_dict
| attribute of the BinaryClassificationMetrics class.
|
| Parameters
| ----------
| figsize: tuple, optional, (default=(12, 12))
| Figure size
|
| save_path: str, optional (default=None)
| The full or relative path to save the plot including the image format.
| For example "myplot.png" or "../../myplot.pdf"
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
###Markdown
Example 1
###Code
# y_true values
y_true = [0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1,
1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1,
1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0,
1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0,
1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1,
1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0,
1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1]
# Y_pred_proba values
y_pred_proba = [0. , 0.12, 0.78, 0.07, 1. , 0.05, 1. , 0. , 1. , 0. , 1. ,
0.99, 0.93, 0.88, 0.86, 1. , 0.99, 1. , 1. , 0.74, 0. , 1. ,
1. , 0.79, 1. , 0.58, 1. , 0.95, 1. , 1. , 1. , 0.38, 1. ,
0.94, 1. , 1. , 1. , 0.01, 0.81, 1. , 0.99, 1. , 0.4 , 1. ,
1. , 1. , 0.9 , 0.06, 0. , 0.02, 0.99, 0.45, 1. , 1. , 0.52,
0.99, 0.02, 0. , 1. , 0.04, 0.19, 0.99, 0. , 0. , 0.11, 1. ,
1. , 0.31, 1. , 0.25, 0. , 0. , 0.99, 1. , 0.01, 0.09, 0. ,
1. , 0.98, 0. , 0.6 , 0.1 , 1. , 1. , 0. , 1. , 0.96, 0.02,
1. , 0.84, 1. , 0.97, 0.01, 0.99, 0.4 , 0. , 0.18, 1. , 1. ,
1. , 0.96, 0.04, 1. , 0.17, 1. , 0.96, 1. , 0. , 1. , 0.06,
1. , 0.75, 0.64, 0.74, 0.5 , 0.97, 0.11, 0.9 , 0. , 0.15, 1. ,
0.11, 1. , 0.02, 1. , 0.27, 0.95, 0.91, 0.99, 0. , 1. , 0.79,
1. , 1. , 0.87, 1. , 1. , 0. , 0.73, 0.97, 1. , 0.82, 0.3 ,
0. , 0.09, 1. , 1. , 1. , 1. , 1. , 0.76, 0.75, 0.99, 0.99,
0.96, 0.01, 0.08, 0.98, 1. , 0. , 1. , 1. , 0.82, 0.04, 0.98,
0. , 1. , 1. , 0.02, 0. , 1. , 0.99, 1. , 0.96, 0. , 0. ,
1. , 0. , 1. , 1. , 0. , 0.83, 0. , 0.15, 1. , 0.98, 0.98,
1. ]
example1 = BinaryClassificationMetrics(y_true, y_pred_proba, precision_digits=3)
example1.plot(figsize=(12, 12),
save_path=None)
###Output
_____no_output_____
###Markdown
Example 2
###Code
example = BinaryClassificationMetrics(y_true, y_pred_proba, display_df=False)
print(F"Accuracy = {example.accuracy_}")
print(F"Balanced Accuracy = {example.balanced_accuracy_}")
print(F"AUC ROC = {example.auc_roc_}")
print(F"AUC PR = {example.auc_pr_}")
print(F"Precision = {example.precision_}")
print(F"Recall = {example.recall_}")
print(F"F1-Score = {example.f1_}")
print(F"F2-Score = {example.f2_}")
print(F"F0.5-Score = {example.f05_}")
print(F"Average Precision = {example.average_precision_}")
print(F"Threat Score = {example.threat_score_}")
print(F"Metrics Dict = {example.metrics_dict_}")
print(F"Thresholds Dict = {example.thresholds_dict_}")
example.plot()
thresholds = example.thresholds_dict_
methods = example.average_methods_
frames = []
for method in methods:
for threshold in thresholds:
ex = BinaryClassificationMetrics(y_true, y_pred_proba, threshold=thresholds[threshold], average_method=method, display_df=False)
frames.append(ex.metrics_df_)
df_to_show = pd.concat(frames)
# Set CSS properties
th_props = [("font-size", "12px"),
("text-align", "left"),
("font-weight", "bold")]
td_props = [("font-size", "12px"),
("text-align", "center")]
# Set table styles
styles = [dict(selector = "th", props = th_props),
dict(selector = "td", props = td_props)]
cm = sns.light_palette("blue", as_cmap = True)
display(df_to_show.style.background_gradient(cmap = cm) \
.set_table_styles(styles))
###Output
_____no_output_____
###Markdown
Example 3
###Code
# loading data from slick-ml/data
data = pd.read_csv("./data/clf_data.csv")
data.head()
# setting up the X, y
y = data["CLASS"].values
X = data.drop(["CLASS"], axis=1)
# train-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y)
# train a classifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred_proba = clf.predict_proba(X_test)
example3 = BinaryClassificationMetrics(y_test, y_pred_proba[:,1])
example3.plot()
thresholds = example3.thresholds_dict_
methods = example3.average_methods_
frames = []
for method in methods:
for threshold in thresholds:
ex = BinaryClassificationMetrics(y_test, y_pred_proba[:,1], threshold=thresholds[threshold], average_method=method, display_df=False)
frames.append(ex.metrics_df_)
df_to_show = pd.concat(frames)
# Set CSS properties
th_props = [("font-size", "12px"),
("text-align", "left"),
("font-weight", "bold")]
td_props = [("font-size", "12px"),
("text-align", "center")]
# Set table styles
styles = [dict(selector = "th", props = th_props),
dict(selector = "td", props = td_props)]
cm = sns.light_palette("blue", as_cmap = True)
display(df_to_show.round(decimals=3).style.background_gradient(cmap = cm).set_table_styles(styles))
###Output
_____no_output_____
###Markdown
Example 4
###Code
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
X = data.data
y = data.target
# train-test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y)
# train a classifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred_proba = clf.predict_proba(X_test)[:, 1]
example4 = BinaryClassificationMetrics(y_test, y_pred_proba)
example4.plot()
###Output
_____no_output_____ |
interactive-demos/shap.ipynb | ###Markdown
SHAP
This example interactively demonstrates SHAP using nnabla's pre-trained model.
[Scott M Lundberg and Su-In Lee. A Unified Approach to Interpreting Model Predictions. In Advances in Neural Information Processing Systems 30, pages 4768–4. 2017.](https://proceedings.neurips.cc/paper/2017/hash/8a20a8621978632d76c43dfd28b67767-Abstract.html)
Preparation
Let's start by installing nnabla and accessing [nnabla-examples repository](https://github.com/sony/nnabla-examples). If you're running on Colab, make sure that your Runtime setting is set as GPU, which can be set up from the top menu (Runtime → change runtime type), and make sure to click **Connect** on the top right-hand side of the screen before you start.
###Code
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla-examples.git
%cd nnabla-examples/responsible_ai/shap
###Output
_____no_output_____
###Markdown
Import dependencies.
###Code
import os
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
import nnabla as nn
from nnabla.utils.image_utils import imread
from nnabla.models.imagenet import VGG16
from utils import shap_computation
###Output
_____no_output_____
###Markdown
Image Preparation Prepare the image to which SHAP will be applied.
###Code
url = 'https://upload.wikimedia.org/wikipedia/commons/4/4e/A_crab_spider_on_a_flower_preying_upon_a_euglossine_bee%2C_while_a_butterfly_looks_for_nectar.jpg'
target_img_path = 'input_flower_moth_spider.jpg'
if not os.path.isfile(target_img_path):
tgt = urllib.request.urlopen(url).read()
with open(target_img_path, mode='wb') as f:
f.write(tgt)
###Output
_____no_output_____
###Markdown
Take a look at what the image looks like. We can see a flower in the middle on which a butterfly, called a long-tailed skipper, rests.
###Code
img = imread(target_img_path, size=(224, 224), channel_first=True)
plt.imshow(img.transpose(1,2,0))
plt.show()
###Output
_____no_output_____
###Markdown
Then we prepare the images to use when computing SHAP.
###Code
file_path = "imagenet50"
if not os.path.isdir(file_path):
!unzip "imagenet50.zip"
calc_images = []
for calc_img_path in os.listdir(file_path):
calc_img = imread(file_path + "/" + calc_img_path, size=(224, 224), channel_first=True)
calc_images.append(calc_img)
calc_images = np.array(calc_images)
###Output
_____no_output_____
###Markdown
Network Definition
Loading the model is very simple. You can choose other models such as `VGG11`, `VGG13`, by specifying the model's name as an argument. Of course, you can choose other pretrained models as well. See the [Docs](https://nnabla.readthedocs.io/en/latest/python/api/models/imagenet.html).
**NOTE**: If you use the `VGG16` for the first time, nnabla will automatically download the weights from `https://nnabla.org` and it may take up to a few minutes.
###Code
model = VGG16()
batch_size = 1
x = nn.Variable((batch_size,) + model.input_shape)
# set training True since gradient of variable is necessary for SHAP
vgg = model(x, training=True, returns_net=True)
vgg_variables = vgg.variables
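# Hypothetical alternative (kept commented out so no extra weights are downloaded): per the
# markdown note above, other pre-trained ImageNet models can be swapped in the same way, e.g.
# from nnabla.models.imagenet import VGG11
# model = VGG11()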
###Output
_____no_output_____
###Markdown
We now define the input and extract the necessary outputs. `pred`: the final output of the model.
###Code
target_label_indices = {
'butterfly': 326,# lycaenid, lycaenid butterfly
'flower': 985,# daisy
'spider': 74,# garden spider
}
input_name = list(vgg.inputs.keys())[0]
vgg_variables[input_name].d = img
pred = vgg_variables["VGG16/Affine_3"]
selected = pred[:, target_label_indices['butterfly']]
###Output
_____no_output_____
###Markdown
Let's see how the model predicted the image. The model classifies the image as we expect: butterfly-related labels rank high, and the flower is also recognized, although only as the 14th-ranked probability.
###Code
selected.forward()
predicted_labels = np.argsort(-pred.d[0])
for i, predicted_label in enumerate(predicted_labels[:15]):
print(f'Top {i+1}, Label index: {predicted_label}, Label name: {model.category_names[predicted_label]}')
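# Hypothetical extra step (not in the original notebook): convert the raw logits in pred.d
# into softmax probabilities to compare the butterfly and daisy scores on a 0-1 scale.
logits = pred.d[0]
probabilities = np.exp(logits - np.max(logits))
probabilities = probabilities / probabilities.sum()
print(f"butterfly: {probabilities[target_label_indices['butterfly']]:.3f}, "
      f"daisy: {probabilities[target_label_indices['flower']]:.3f}")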
###Output
_____no_output_____
###Markdown
SHAP Computation Now we compute SHAP for the butterfly and then for the flower, saving the resulting images.
###Code
shap_computation(
model_graph=vgg,
X=img,
label=target_label_indices['butterfly'],
output="img_butterfly.png",
interim_layer_index=10,
num_samples=50,
dataset=calc_images,
batch_size=25
)
shap_computation(
model_graph=vgg,
X=img,
label=target_label_indices['flower'],
output="img_flower.png",
interim_layer_index=10,
num_samples=50,
dataset=calc_images,
batch_size=25
)
###Output
_____no_output_____
###Markdown
Visualization Finally, compare the images in one line to see the differences clearly. The **red** pixels represent places where SHAP judges a **positive** influence on the prediction. On the other hand, **blue** pixels represent places where SHAP judges a **negative** influence on the prediction. Middle image (prediction for butterfly): the edge of the **butterfly** appears **red**, and the edge of the **flower** appears **blue**. Right image (prediction for flower): the edge of the **butterfly** appears **blue**, and the edge of the **flower** appears **red**.
###Code
img_butterfly = imread("img_butterfly.png", size=(224, 224),channel_first=True)
img_flower = imread("img_flower.png", size=(224, 224), channel_first=True)
images = {
'original': img.transpose(1,2,0),
'butterfly': img_butterfly.transpose(1,2,0),
'flower': img_flower.transpose(1,2,0),
}
row = 1
col = len(images)
fig, axes = plt.subplots(row, col, figsize=(15,15))
for i, (k, v) in enumerate(images.items()):
axes[i].axis("off")
axes[i].imshow(v)
axes[i].set_title(k)
###Output
_____no_output_____
###Markdown
SHAP
This example interactively demonstrates SHAP using nnabla's pre-trained model.
SHAP: A Unified Approach to Interpreting Model Predictions, Scott M. Lundberg and Su-In Lee, arXiv (2017) https://arxiv.org/abs/1705.07874
Preparation
Let's start by installing nnabla and accessing [nnabla-examples repository](https://github.com/sony/nnabla-examples).
###Code
!pip install nnabla-ext-cuda100
!git clone https://github.com/sony/nnabla-examples.git
%cd nnabla-examples/responsible_ai/shap
###Output
Collecting nnabla-ext-cuda100
Downloading nnabla_ext_cuda100-1.20.1-cp37-cp37m-manylinux1_x86_64.whl (42.9 MB)
[K |████████████████████████████████| 42.9 MB 63 kB/s
[?25hRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from nnabla-ext-cuda100) (57.2.0)
Collecting nnabla==1.20.1
Downloading nnabla-1.20.1-cp37-cp37m-manylinux1_x86_64.whl (18.3 MB)
[K |████████████████████████████████| 18.3 MB 65 kB/s
[?25hCollecting pynvml
Downloading pynvml-11.0.0-py3-none-any.whl (46 kB)
[K |████████████████████████████████| 46 kB 4.7 MB/s
[?25hRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (1.15.0)
Requirement already satisfied: pyyaml in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (3.13)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (1.19.5)
Requirement already satisfied: imageio in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (2.4.1)
Collecting boto3
Downloading boto3-1.18.2-py3-none-any.whl (131 kB)
[K |████████████████████████████████| 131 kB 66.1 MB/s
[?25hRequirement already satisfied: protobuf>=3.6 in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (3.17.3)
Collecting configparser
Downloading configparser-5.0.2-py3-none-any.whl (19 kB)
Requirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (7.1.2)
Requirement already satisfied: h5py<=3.1.0 in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (3.1.0)
Requirement already satisfied: contextlib2 in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (0.5.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (1.4.1)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (4.41.1)
Requirement already satisfied: Cython in /usr/local/lib/python3.7/dist-packages (from nnabla==1.20.1->nnabla-ext-cuda100) (0.29.23)
Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py<=3.1.0->nnabla==1.20.1->nnabla-ext-cuda100) (1.5.2)
Collecting s3transfer<0.6.0,>=0.5.0
Downloading s3transfer-0.5.0-py3-none-any.whl (79 kB)
[K |████████████████████████████████| 79 kB 9.4 MB/s
[?25hCollecting botocore<1.22.0,>=1.21.2
Downloading botocore-1.21.2-py3-none-any.whl (7.7 MB)
[K |████████████████████████████████| 7.7 MB 52.8 MB/s
[?25hCollecting jmespath<1.0.0,>=0.7.1
Downloading jmespath-0.10.0-py2.py3-none-any.whl (24 kB)
Requirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.7/dist-packages (from botocore<1.22.0,>=1.21.2->boto3->nnabla==1.20.1->nnabla-ext-cuda100) (2.8.1)
Collecting urllib3<1.27,>=1.25.4
Downloading urllib3-1.26.6-py2.py3-none-any.whl (138 kB)
[K |████████████████████████████████| 138 kB 73.1 MB/s
[?25hInstalling collected packages: urllib3, jmespath, botocore, s3transfer, configparser, boto3, pynvml, nnabla, nnabla-ext-cuda100
Attempting uninstall: urllib3
Found existing installation: urllib3 1.24.3
Uninstalling urllib3-1.24.3:
Successfully uninstalled urllib3-1.24.3
[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
requests 2.23.0 requires urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1, but you have urllib3 1.26.6 which is incompatible.
datascience 0.10.6 requires folium==0.2.1, but you have folium 0.8.3 which is incompatible.[0m
Successfully installed boto3-1.18.2 botocore-1.21.2 configparser-5.0.2 jmespath-0.10.0 nnabla-1.20.1 nnabla-ext-cuda100-1.20.1 pynvml-11.0.0 s3transfer-0.5.0 urllib3-1.26.6
Cloning into 'nnabla-examples'...
remote: Enumerating objects: 5117, done.[K
remote: Counting objects: 100% (882/882), done.[K
remote: Compressing objects: 100% (540/540), done.[K
remote: Total 5117 (delta 413), reused 697 (delta 324), pack-reused 4235[K
Receiving objects: 100% (5117/5117), 185.05 MiB | 40.65 MiB/s, done.
Resolving deltas: 100% (2621/2621), done.
/content/nnabla-examples/responsible_ai/shap
###Markdown
Import dependencies.
###Code
import os
import urllib.request
import numpy as np
import matplotlib.pyplot as plt
import nnabla as nn
from nnabla.utils.image_utils import imread
from nnabla.models.imagenet import VGG16
from utils import shap_computation
###Output
2021-07-20 11:57:46,359 [nnabla][INFO]: Initializing CPU extension...
###Markdown
Image Preparation Prepare the image to which SHAP will be applied.
###Code
url = 'https://upload.wikimedia.org/wikipedia/commons/4/4e/A_crab_spider_on_a_flower_preying_upon_a_euglossine_bee%2C_while_a_butterfly_looks_for_nectar.jpg'
target_img_path = 'input_flower_moth_spider.jpg'
if not os.path.isfile(target_img_path):
tgt = urllib.request.urlopen(url).read()
with open(target_img_path, mode='wb') as f:
f.write(tgt)
###Output
_____no_output_____
###Markdown
Take a look at what the image looks like. We can see a flower in the middle on which a butterfly, called a long-tailed skipper, rests.
###Code
img = imread(target_img_path, size=(224, 224), channel_first=True)
plt.imshow(img.transpose(1,2,0))
plt.show()
###Output
_____no_output_____
###Markdown
Then we prepare the images to use when computing SHAP.
###Code
file_path = "imagenet50"
if not os.path.isdir(file_path):
!unzip "imagenet50.zip"
calc_images = []
for calc_img_path in os.listdir(file_path):
calc_img = imread(file_path + "/" + calc_img_path, size=(224, 224), channel_first=True)
calc_images.append(calc_img)
calc_images = np.array(calc_images)
###Output
Archive: imagenet50.zip
creating: imagenet50/
inflating: imagenet50/sim_n03029197_6381.jpg
inflating: imagenet50/sim_n02010272_1613.jpg
inflating: imagenet50/sim_n03061345_7223.jpg
inflating: imagenet50/sim_n03523987_7290.jpg
inflating: imagenet50/sim_n01687978_7467.jpg
inflating: imagenet50/sim_n10374849_811.jpg
inflating: imagenet50/sim_n12560282_16028.jpg
inflating: imagenet50/sim_n09691729_2568.jpg
inflating: imagenet50/sim_n04404412_26251.jpg
inflating: imagenet50/sim_n07714571_3105.jpg
inflating: imagenet50/sim_n04583212_9954.jpg
inflating: imagenet50/sim_n02011805_6117.jpg
inflating: imagenet50/sim_n03518943_2240.jpg
inflating: imagenet50/sim_n09406793_39048.jpg
inflating: imagenet50/sim_n01887474_9375.jpg
inflating: imagenet50/sim_n02190166_6252.jpg
inflating: imagenet50/sim_n03235180_7802.jpg
inflating: imagenet50/sim_n02395694_14311.jpg
inflating: imagenet50/sim_n07560652_7278.jpg
inflating: imagenet50/sim_n03245889_16148.jpg
inflating: imagenet50/sim_n07581931_2919.jpg
inflating: imagenet50/sim_n07697313_11105.jpg
inflating: imagenet50/sim_n07663899_6767.jpg
inflating: imagenet50/sim_n03118969_4898.jpg
inflating: imagenet50/sim_n01642257_3703.jpg
inflating: imagenet50/sim_n09988703_14533.jpg
inflating: imagenet50/sim_n01549053_4208.jpg
inflating: imagenet50/sim_n12828791_5672.jpg
inflating: imagenet50/sim_n04288533_9519.jpg
inflating: imagenet50/sim_n03822767_7285.jpg
inflating: imagenet50/sim_n02382204_7427.jpg
inflating: imagenet50/sim_n12709103_15667.jpg
inflating: imagenet50/sim_n11769176_1973.jpg
inflating: imagenet50/sim_n03488887_15199.jpg
inflating: imagenet50/sim_n11508382_9183.jpg
inflating: imagenet50/sim_n01624305_3109.jpg
inflating: imagenet50/sim_n04260364_2410.jpg
inflating: imagenet50/sim_n04564581_8170.jpg
inflating: imagenet50/sim_n07930554_10266.jpg
inflating: imagenet50/sim_n02033561_2663.jpg
inflating: imagenet50/sim_n07910656_7115.jpg
inflating: imagenet50/sim_n02138441_3552.jpg
inflating: imagenet50/sim_n03268918_3662.jpg
inflating: imagenet50/sim_n09706255_4386.jpg
inflating: imagenet50/sim_n09906704_22866.jpg
inflating: imagenet50/sim_n13052670_6077.jpg
inflating: imagenet50/sim_n02968333_22100.jpg
inflating: imagenet50/sim_n03809603_3531.jpg
inflating: imagenet50/sim_n03049924_13.jpg
inflating: imagenet50/sim_n03433637_2002.jpg
###Markdown
Network Definition
Loading the model is very simple. You can choose other models such as `VGG11`, `VGG13`, by specifying the model's name as an argument. Of course, you can choose other pretrained models as well. See the [Docs](https://nnabla.readthedocs.io/en/latest/python/api/models/imagenet.html).
**NOTE**: If you use the `VGG16` for the first time, nnabla will automatically download the weights from `https://nnabla.org` and it may take up to a few minutes.
###Code
model = VGG16()
batch_size = 1
x = nn.Variable((batch_size,) + model.input_shape)
# set training True since gradient of variable is necessary for SHAP
vgg = model(x, training=True, returns_net=True)
vgg_variables = vgg.variables
###Output
_____no_output_____
###Markdown
We now define the input and extract the necessary outputs. `pred`: the final output of the model.
###Code
target_label_indices = {
'butterfly': 326,# lycaenid, lycaenid butterfly
'flower': 985,# daisy
'spider': 74,# garden spider
}
input_name = list(vgg.inputs.keys())[0]
vgg_variables[input_name].d = img
pred = vgg_variables["VGG16/Affine_3"]
selected = pred[:, target_label_indices['butterfly']]
###Output
_____no_output_____
###Markdown
Let's see how the model predicted the image. The model classifies the image as we expect: butterfly-related labels rank high, and the flower is also recognized, although only as the 14th-ranked probability.
###Code
selected.forward()
predicted_labels = np.argsort(-pred.d[0])
for i, predicted_label in enumerate(predicted_labels[:15]):
print(f'Top {i+1}, Label index: {predicted_label}, Label name: {model.category_names[predicted_label]}')
###Output
Top 1, Label index: 326, Label name: lycaenid, lycaenid butterfly
Top 2, Label index: 716, Label name: picket fence, paling
Top 3, Label index: 321, Label name: admiral
Top 4, Label index: 318, Label name: lacewing, lacewing fly
Top 5, Label index: 323, Label name: monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
Top 6, Label index: 325, Label name: sulphur butterfly, sulfur butterfly
Top 7, Label index: 953, Label name: pineapple, ananas
Top 8, Label index: 985, Label name: daisy
Top 9, Label index: 322, Label name: ringlet, ringlet butterfly
Top 10, Label index: 94, Label name: hummingbird
Top 11, Label index: 658, Label name: mitten
Top 12, Label index: 304, Label name: leaf beetle, chrysomelid
Top 13, Label index: 904, Label name: window screen
Top 14, Label index: 738, Label name: pot, flowerpot
Top 15, Label index: 309, Label name: bee
###Markdown
SHAP Computation Now we compute SHAP for the butterfly and then for the flower, saving the resulting images.
###Code
shap_computation(
model_graph=vgg,
X=img,
label=target_label_indices['butterfly'],
output="img_butterfly.png",
interim_layer_index=10,
num_samples=50,
dataset=calc_images,
batch_size=25
)
shap_computation(
model_graph=vgg,
X=img,
label=target_label_indices['flower'],
output="img_flower.png",
interim_layer_index=10,
num_samples=50,
dataset=calc_images,
batch_size=25
)
###Output
_____no_output_____
###Markdown
Visualization Finally, compare the images in one line to see the differences clearly. The **red** pixels represent places where SHAP judges a **positive** influence on the prediction. On the other hand, **blue** pixels represent places where SHAP judges a **negative** influence on the prediction. Middle image (prediction for butterfly): the edge of the **butterfly** appears **red**, and the edge of the **flower** appears **blue**. Right image (prediction for flower): the edge of the **butterfly** appears **blue**, and the edge of the **flower** appears **red**.
###Code
img_butterfly = imread("img_butterfly.png", size=(224, 224),channel_first=True)
img_flower = imread("img_flower.png", size=(224, 224), channel_first=True)
images = {
'original': img.transpose(1,2,0),
'butterfly': img_butterfly.transpose(1,2,0),
'flower': img_flower.transpose(1,2,0),
}
row = 1
col = len(images)
fig, axes = plt.subplots(row, col, figsize=(15,15))
for i, (k, v) in enumerate(images.items()):
axes[i].axis("off")
axes[i].imshow(v)
axes[i].set_title(k)
###Output
_____no_output_____ |
evaluate/evaluate_data_set_coding.ipynb | ###Markdown
Table of Contents
1 Setup
1.1 Setup - imports
1.2 Setup - output
1.2.1 function make_path
1.2.2 set up output configuration
1.3 Setup - Functions
1.3.1 function plot_precision_recall_n
1.3.2 function threshold_at_k
1.3.3 function precision_at_k
1.3.4 function recall_at_k
1.3.5 function accuracy_at_k
1.3.6 function precision_recall_f1
2 class CitationCodingEvaluation
3 Load JSON files
4 Evaluate all publications
4.1 Process JSON
4.1.1 Filter publications?
4.1.1.1 django init
4.1.1.2 get IDs of included publications
4.1.2 process JSON files
4.2 precision, recall, and accuracy
4.3 precision, recall, and accuracy per publication
4.3.1 false positives (FP)
4.3.2 false negatives (FN)
4.3.3 output all publication-citation pairs
4.4 graph precision and recall at n
4.5 output results to file
5 Evaluate only publications with citations
5.1 Process JSON
5.1.1 Filter publications?
5.1.1.1 django init
5.1.1.2 get IDs of included publications
5.1.2 process JSON files
5.2 precision, recall, and accuracy
5.3 precision, recall, and accuracy per publication
5.3.1 false positives (FP)
5.3.2 false negatives (FN)
5.3.3 output all publication-citation pairs
5.4 graph precision and recall at n
5.5 output results to file
Setup - Back to [Table of Contents](Table-of-Contents)
###Code
# DEBUG
debug_flag = False
###Output
_____no_output_____
###Markdown
Setup - imports- Back to [Table of Contents](Table-of-Contents)
###Code
# imports
import datetime
import json
import matplotlib
import matplotlib.pyplot
import numpy
import os
import pandas as pd
import six
# scikit-learn
import sklearn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
GradientBoostingClassifier,
AdaBoostClassifier)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
###Output
_____no_output_____
###Markdown
Setup - output- Back to [Table of Contents](Table-of-Contents) function make_path- Back to [Table of Contents](Table-of-Contents)
###Code
# function to create a path if it does not exist
def make_path(path_to_make):
if not os.path.exists(path_to_make):
print("Creating path {}".format(path_to_make))
os.makedirs(path_to_make)
else:
print("{} already exists".format(path_to_make))
return path_to_make
###Output
_____no_output_____
###Markdown
set up output configuration- Back to [Table of Contents](Table-of-Contents)
###Code
TEAM_NAME = "rcc-14"
SUBMISSION_FOLDER = "2019.01.24"
# DEBUG
debug_flag = False
# file name variables
file_name_prefix = "{}-{}-".format( TEAM_NAME, SUBMISSION_FOLDER )
file_name_suffix = ""
# output_to_file flag
output_to_file = True
line_list = None
output_string = None
#output_folder_path = "/data/output"
#output_folder_path = "."
output_folder_path = "/work/evaluate/{}/{}/evaluate/holdout".format( TEAM_NAME, SUBMISSION_FOLDER )
make_path( output_folder_path )
results_file_path = "{}/{}evaluation_results{}.txt".format( output_folder_path, file_name_prefix, file_name_suffix )
precision_recall_graph_path = "{}/{}precision_recall_graph{}.pdf".format( output_folder_path, file_name_prefix, file_name_suffix )
# if we are outputting to file, start line list.
if ( output_to_file == True ):
# put a list in line_list
line_list = []
#-- END init line list --#
###Output
_____no_output_____
###Markdown
Setup - Functions- Back to [Table of Contents](Table-of-Contents) function plot_precision_recall_n- Back to [Table of Contents](Table-of-Contents)
###Code
def plot_precision_recall_n(y_true, y_prob, model_name, output_path_IN = None ):
"""
y_true: ls
ls of ground truth labels
y_prob: ls
ls of predic proba from model
model_name: str
str of model name (e.g, LR_123)
"""
# imports
from sklearn.metrics import precision_recall_curve
# return reference
details_OUT = {}
# declare variables
me = "plot_precision_recall_n"
y_score = None
precision_curve = None
recall_curve = None
pr_thresholds = None
num_above_thresh = None
pct_above_thresh = None
pct_above_per_thresh = None
current_score = None
above_threshold_list = None
above_threshold_count = -1
fig = None
ax1 = None
ax2 = None
# store the raw scores in y_score
y_score = y_prob
# calculate precision-recall curve
# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
# Returns:
# - precision_curve - Precison values such that element i is the precision of predictions where cutoff is score >= thresholds[ i ] and the last element is 1.
# - recall_curve - Recall values such that element i is the recall of predictions where cutoff is score >= thresholds[ i ] and the last element is 0.
# - pr_thresholds - Increasing thresholds on the decision function used to decide 1 or 0, used to calculate precision and recall (looks like it is the set of unique values in the predicted value set).
precision_curve, recall_curve, pr_thresholds = precision_recall_curve( y_true, y_score )
# get all but the last precision score (1).
precision_curve = precision_curve[ : -1 ]
# print( "precision_curve: {}".format( precision_curve ) )
# get all but the last recall score (0).
recall_curve = recall_curve[ : -1 ]
# print( "recall_curve: {}".format( recall_curve ) )
# store details
details_OUT[ "precision" ] = precision_curve
details_OUT[ "recall" ] = recall_curve
details_OUT[ "threshold" ] = pr_thresholds
# init loop over thresholds
pct_above_per_thresh = []
number_scored = len(y_score)
# loop over thresholds
for value in pr_thresholds:
# at each threshold, calculate the percent of rows above the threshold.
above_threshold_list = []
above_threshold_count = -1
for current_score in y_score:
# is it at or above threshold?
if ( current_score >= value ):
# it is either at or above threshold - add to list.
above_threshold_list.append( current_score )
#-- END check to see if at or above threshold? --#
#-- END loop over scores. --#
# how many above threshold?
#num_above_thresh = len(y_score[y_score>=value])
above_threshold_count = len( above_threshold_list )
num_above_thresh = above_threshold_count
# percent above threshold
pct_above_thresh = num_above_thresh / float( number_scored )
# add to list.
pct_above_per_thresh.append( pct_above_thresh )
#-- END loop over thresholds --#
details_OUT[ "percent_above" ] = pct_above_per_thresh
# convert to numpy array
pct_above_per_thresh = numpy.array(pct_above_per_thresh)
# init matplotlib
matplotlib.pyplot.clf()
fig, ax1 = matplotlib.pyplot.subplots()
# plot % above threshold line
ax1.plot( pr_thresholds, pct_above_per_thresh, 'y')
ax1.set_xlabel('threshold values')
matplotlib.pyplot.xticks( [ 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1 ] )
ax1.set_ylabel('% above threshold', color='y')
ax1.set_ylim(0,1.05)
# plot precision line
ax2 = ax1.twinx()
ax2.plot( pr_thresholds, precision_curve, 'b')
ax2.set_ylabel('precision', color='b')
ax2.set_ylim(0,1.05)
# plot recall line
ax3 = ax2.twinx()
ax3.plot( pr_thresholds, recall_curve, 'r')
ax3.set_ylabel('recall', color='r')
ax3.set_ylim(0,1.05)
# finish off graph
name = model_name
matplotlib.pyplot.title(name)
# is there an output path?
if ( ( output_path_IN is not None ) and ( output_path_IN != "" ) ):
# save the figure to file.
matplotlib.pyplot.savefig( output_path_IN )
print( "In {}: figure output to {}".format( me, output_path_IN ) )
#-- END check to see if we output to disk. --#
matplotlib.pyplot.show()
# clear plot.
matplotlib.pyplot.clf()
return details_OUT
#-- END function plot_precision_recall_n() --#
print( "function plot_precision_recall_n() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function threshold_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def threshold_at_k( y_scores, k ):
# return reference
value_OUT = None
# declare variables
value_list = None
threshold_index = -1
# sort values
value_list = numpy.sort( y_scores )
# reverse order of list
value_list = value_list[ : : -1 ]
# calculate index of value that is k% of the way through the sorted distribution of scores
threshold_index = int( k * len( y_scores ) )
# get value that is k% of the way through the sorted distribution of scores
value_OUT = value_list[ threshold_index ]
print( "Threshold: {}".format( value_OUT ) )
return value_OUT
#-- END function threshold_at_k() --#
print( "function threshold_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function precision_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def precision_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate precision
value_OUT = precision_score( y_true, y_pred )
return value_OUT
#-- END function precision_at_k() --#
print( "function precision_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function recall_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def recall_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate recall
value_OUT = recall_score( y_true, y_pred )
return value_OUT
#-- END function recall_at_k() --#
print( "function recall_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function accuracy_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def accuracy_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate accuracy
value_OUT = accuracy_score( y_true, y_pred )
return value_OUT
#-- END function accuracy_at_k() --#
print( "function accuracy_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
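###Markdown
To see the three *_at_k helpers side by side, here is a short synthetic example (labels, scores, and k values are made up; the sklearn imports from Setup are assumed to be in scope). Each call re-derives its own cutoff via threshold_at_k(), so that function's "Threshold:" lines are printed along the way.
###Code
# minimal sketch: precision/recall/accuracy at k on synthetic data (illustrative values only).
example_y_true = [ 0, 0, 1, 0, 1, 0, 1, 1, 0, 1 ]
example_y_scores = [ 0.05, 0.20, 0.30, 0.40, 0.55, 0.60, 0.70, 0.80, 0.85, 0.95 ]
for example_k in [ 0.2, 0.5 ]:
    example_precision = precision_at_k( example_y_true, example_y_scores, example_k )
    example_recall = recall_at_k( example_y_true, example_y_scores, example_k )
    example_accuracy = accuracy_at_k( example_y_true, example_y_scores, example_k )
    print( "k = {}: precision = {}, recall = {}, accuracy = {}".format( example_k, example_precision, example_recall, example_accuracy ) )
#-- END loop over example k values --#
###Output
_____no_output_____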
###Markdown
function precision_recall_f1- Back to [Table of Contents](Table-of-Contents)
###Code
# calculation methods
CALCULATION_METHOD_DEFAULT = "default"
CALCULATION_METHOD_BINARY = "binary"
CALCULATION_METHOD_MACRO = "macro"
CALCULATION_METHOD_MICRO = "micro"
CALCULATION_METHOD_WEIGHTED = "weighted"
# return items
RETURN_CONFUSION_MATRIX = "confusion_matrix"
RETURN_METHOD_TO_RESULT_MAP = "method_to_result_map"
RETURN_LINE_LIST = "line_list"
def precision_recall_f1( baseline_list_IN, predicted_list_IN, calculation_methods_list_IN, do_print_IN = True, output_to_file_IN = output_to_file ):
# return reference
output_dict_OUT = {}
# declare variables
output_string = None
my_line_list = None
calculation_methods = None
cm = None
method_to_result_map = None
calculation_method = None
precision = None
recall = None
accuracy = None
F1 = None
support = None
# declare variables - default algorithm
default_evaluation = None
default_precision_list = None
default_recall_list = None
default_F1_list = None
default_support_list = None
precision_list_length = None
recall_list_length = None
F1_list_length = None
# init
my_line_list = []
# init - calculation methods to include and lists
calculation_methods = calculation_methods_list_IN
baseline_list = baseline_list_IN
derived_binary_list = predicted_list_IN
# confusion matrix
cm = metrics.confusion_matrix( baseline_list, derived_binary_list )
# RETURN - store confusion matrix
output_dict_OUT[ RETURN_CONFUSION_MATRIX ] = cm
# output
output_string = "\nConfusion matrix:\n{}\n\nBinary Key:\n[[ TN, FP ]\n [ FN, TP ]]".format( cm )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output to file...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
# loop over calculation methods
method_to_result_map = {}
for calculation_method in calculation_methods:
# RETURN - create map for method
# output
output_string = "\n==> {}".format( calculation_method )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output to file...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
# binary? If so, do basic calculations as sanity check.
if ( calculation_method == CALCULATION_METHOD_BINARY ):
# calculate precision, recall, accuracy...
# ==> precision
precision = metrics.precision_score( baseline_list, derived_binary_list )
# output
output_string = "\n- {} metrics.precision_score = {}".format( calculation_method, precision )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
# ==> recall
recall = metrics.recall_score( baseline_list, derived_binary_list )
# output
output_string = "- {} metrics.recall_score = {}".format( calculation_method, recall )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
# ==> accuracy
accuracy = metrics.accuracy_score( baseline_list, derived_binary_list )
# output
output_string = "- {} metrics.accuracy_score = {}".format( calculation_method, accuracy )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
#-- END check to see if CALCULATION_METHOD_BINARY --#
# calculate based on calculation method.
# default?
if ( calculation_method == CALCULATION_METHOD_DEFAULT ):
# default metrics and F-Score - default returns a list for each of
# the scores per label, so get list and output, don't pick one or
# another value.
default_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list )
default_precision_list = default_evaluation[ 0 ]
default_recall_list = default_evaluation[ 1 ]
default_F1_list = default_evaluation[ 2 ]
default_support_list = default_evaluation[ 3 ]
# output lists
output_string = "\ndefault lists:"
output_string += "\n- precision list = {}".format( default_precision_list )
output_string += "\n- recall list = {}".format( default_recall_list )
output_string += "\n- F1 list = {}".format( default_F1_list )
output_string += "\n- support list = {}".format( default_support_list )
# add to results map
method_to_result_map[ calculation_method ] = default_evaluation
# look at length of lists (should all be the same).
precision_list_length = len( default_precision_list )
recall_list_length = len( default_recall_list )
F1_list_length = len( default_F1_list )
output_string += "\n\nlist lengths: {}".format( precision_list_length )
if ( precision_list_length > 2 ):
# binary, but list is greater than 2, not binary - output message.
output_string += "\n- NOTE: default output lists have more than two entries - your data is not binary."
#-- END check to see if list length greater than 2 --#
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
# all others are just argument to "average" parameter, result in one number per
# derived score. For now, implement them the same.
else:
# F-Score
evaluation_tuple = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list, average = calculation_method )
precision = evaluation_tuple[ 0 ]
recall = evaluation_tuple[ 1 ]
F1 = evaluation_tuple[ 2 ]
support = evaluation_tuple[ 3 ]
# add to results map
method_to_result_map[ calculation_method ] = evaluation_tuple
# output
output_string = "\n{}: precision = {}, recall = {}, F1 = {}, support = {}".format( calculation_method, precision, recall, F1, support )
if ( do_print_IN == True ):
print( output_string )
#-- END if do_print_IN --#
# if output to file...
if ( output_to_file_IN == True ):
# store line for output
my_line_list.append( output_string )
#-- END if output... --#
#-- END default F-Score --#
#-- END loop over calculation_methods --#
# RETURN - method-to-result map
output_dict_OUT[ RETURN_METHOD_TO_RESULT_MAP ] = method_to_result_map
# RETURN - store line_list
output_dict_OUT[ RETURN_LINE_LIST ] = my_line_list
return output_dict_OUT
#-- END function precision_recall_f1() --#
###Output
_____no_output_____
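###Markdown
Before applying precision_recall_f1() to the real citation lists below, a tiny smoke test on hand-made binary lists (illustrative values only). It passes output_to_file_IN = False explicitly so the sketch does not depend on the output_to_file flag from Setup, then reads the confusion matrix and the binary-method tuple back out of the returned dictionary.
###Code
# minimal sketch: precision_recall_f1() on toy binary lists (illustrative values only).
toy_baseline = [ 1, 0, 1, 1, 0, 1, 0, 0 ]
toy_predicted = [ 1, 0, 0, 1, 0, 1, 1, 0 ]
toy_output = precision_recall_f1( toy_baseline, toy_predicted, [ CALCULATION_METHOD_BINARY ], do_print_IN = True, output_to_file_IN = False )
# pull the pieces back out of the returned dictionary.
toy_cm = toy_output.get( RETURN_CONFUSION_MATRIX, None )
toy_binary_tuple = toy_output.get( RETURN_METHOD_TO_RESULT_MAP, {} ).get( CALCULATION_METHOD_BINARY, None )
print( "toy confusion matrix:\n{}".format( toy_cm ) )
print( "toy ( precision, recall, F1, support ) = {}".format( toy_binary_tuple ) )
###Output
_____no_output_____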
###Markdown
class CitationCodingEvaluation- Back to [Table of Contents](Table-of-Contents)
###Code
from citation_coding_evaluation import CitationCodingEvaluation
###Output
_____no_output_____
###Markdown
Load JSON files- Back to [Table of Contents](Table-of-Contents)
###Code
# file paths
# for in-repo development
#baseline_json_path = "/work/evaluate/data/holdout/data/input/data_set_citations.json"
# set to "../../data/output" for running against in-repo code development
#derived_prefix = "../../data/output"
# evaluating models against phase 1 holdout
baseline_json_path = "/work/evaluate/data/holdout/data/input/data_set_citations.json"
derived_prefix = "/work/evaluate/{}/{}/results/holdout/output".format( TEAM_NAME, SUBMISSION_FOLDER )
derived_json_path = "{}/data_set_citations.json".format( derived_prefix )
# load the baseline JSON
baseline_json_file = None
baseline_json = None
# if output...
output_string = "Reading baseline/ground_truth file: {}".format( baseline_json_path )
print( output_string )
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output to file... --#
# baseline
with open( baseline_json_path ) as baseline_json_file:
# load the JSON from the file.
baseline_json = json.load( baseline_json_file )
#-- END with...as --#
# load the derived JSON
derived_json_file = None
derived_json = None
# if output...
output_string = "Reading derived/predicted file: {}".format( derived_json_path )
print( output_string )
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output to file... --#
# baseline
with open( derived_json_path ) as derived_json_file:
# load the JSON from the file.
derived_json = json.load( derived_json_file )
#-- END with...as --#
baseline_json
derived_json
###Output
_____no_output_____
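###Markdown
Optional quick check (an assumption about the files, not something the evaluation requires): both files should parse to a collection of citation records, so len() gives a rough sense of how many records each side contains before CitationCodingEvaluation processes them.
###Code
# minimal sketch: compare raw record counts of the two JSON files (schema details are handled by CitationCodingEvaluation).
print( "baseline records: {}".format( len( baseline_json ) ) )
print( "derived records.: {}".format( len( derived_json ) ) )
###Output
_____no_output_____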
###Markdown
Evaluate all publications- Back to [Table of Contents](Table-of-Contents) Process JSON- Back to [Table of Contents](Table-of-Contents)
###Code
# init class to handle evaluation
coding_evaluator = CitationCodingEvaluation()
coding_evaluator.debug_flag = debug_flag
###Output
_____no_output_____
###Markdown
Filter publications?- Back to [Table of Contents](Table-of-Contents)If we want to run on only a subset of publications, we need to use some django code. django init- Back to [Table of Contents](Table-of-Contents)
###Code
path_to_django_init = "/home/context/django_init.py"
%run $path_to_django_init
###Output
_____no_output_____
###Markdown
get IDs of included publications- Back to [Table of Contents](Table-of-Contents)
###Code
from sourcenet.models import Article
###Output
_____no_output_____
###Markdown
**Working with django-taggit**- django-taggit documentation: https://github.com/alex/django-taggit. Adding tags to a model: from django.db import models from taggit.managers import TaggableManager class Food(models.Model): ... fields here tags = TaggableManager() Interacting with a model that has tags: >>> apple = Food.objects.create(name="apple") >>> apple.tags.add("red", "green", "delicious") >>> apple.tags.all() [<Tag: red>, <Tag: green>, <Tag: delicious>] >>> apple.tags.remove("green") >>> apple.tags.all() [<Tag: red>, <Tag: delicious>] >>> Food.objects.filter(tags__name__in=["red"]) [<Food: apple>, <Food: pear>] include only those with certain tags. tags_in_list = [ "prelim_unit_test_001", "prelim_unit_test_002", "prelim_unit_test_003", "prelim_unit_test_004", "prelim_unit_test_005", "prelim_unit_test_006", "prelim_unit_test_007" ] tags_in_list = [ "grp_month", ] if ( len( tags_in_list ) > 0 ): # filter print( "filtering to just articles with tags: " + str( tags_in_list ) ) grp_article_qs = grp_article_qs.filter( tags__name__in = tags_in_list ) #-- END check to see if we have a specific list of tags we want to include --#
###Code
# declare variables
article_tag_include_list = None
article_tag_exclude_list = None
article_qs = None
article_id_include_list = None
article_instance = None
# set tags we are filtering on.
# include...
article_tag_include_list = []
article_tag_include_list.append( "holdout" )
# exclude...
article_tag_exclude_list = []
article_tag_exclude_list.append( "no_data" )
# filter Article QuerySet
article_qs = Article.objects.filter( tags__name__in = article_tag_include_list )
article_qs = article_qs.exclude( tags__name__in = article_tag_exclude_list )
# make list of IDs
article_id_include_list = []
for article_instance in article_qs:
# get ID
article_id = article_instance.id
# add to list
article_id_include_list.append( article_id )
#-- END loop over matching articles. --#
# store details in coding_evaluator
coding_evaluator.set_excluded_article_tag_list( article_tag_exclude_list )
coding_evaluator.set_included_article_tag_list( article_tag_include_list )
coding_evaluator.set_included_article_id_list( article_id_include_list )
print( "Including {} publications (include tags: {}; exclude tags {}).".format( len( article_id_include_list ), article_tag_include_list, article_tag_exclude_list ) )
###Output
_____no_output_____
###Markdown
process JSON files- Back to [Table of Contents](Table-of-Contents)
###Code
# process baseline JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_BASELINE
citation_json = baseline_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# process derived JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_DERIVED
citation_json = derived_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# create lists
coding_evaluator.debug_flag = False
details = coding_evaluator.create_evaluation_lists()
status = details
baseline_list = coding_evaluator.get_baseline_list()
derived_raw_list = coding_evaluator.get_derived_raw_list()
derived_binary_list = coding_evaluator.get_derived_binary_list()
publication_id_per_citation_list = coding_evaluator.get_publication_id_list()
data_set_id_per_citation_list = coding_evaluator.get_data_set_id_list()
###Output
_____no_output_____
###Markdown
precision, recall, and accuracy- Back to [Table of Contents](Table-of-Contents)
###Code
# calculation methods to include
calculation_methods = []
calculation_methods.append( CALCULATION_METHOD_DEFAULT )
calculation_methods.append( CALCULATION_METHOD_BINARY )
#calculation_methods.append( CALCULATION_METHOD_MACRO )
#calculation_methods.append( CALCULATION_METHOD_MICRO )
#calculation_methods.append( CALCULATION_METHOD_WEIGHTED )
# call function to do work.
output_dictionary = precision_recall_f1( baseline_list, derived_binary_list, calculation_methods )
# add lines from output to line_list
line_list = line_list + output_dictionary.get( "line_list", [] )
line_list.append( "\n" )
print( "----> output dictionary: {}".format( output_dictionary ) )
# check for excluded articles
excluded_article_count = details.get( CitationCodingEvaluation.DETAILS_EXCLUDED_ARTICLE_COUNT, None )
excluded_article_id_list = details.get( CitationCodingEvaluation.DETAILS_EXCLUDED_ARTICLE_ID_LIST, None )
if ( ( excluded_article_count is not None ) and ( excluded_article_count > 0 ) ):
# add excluded articles details:
line_list.append( "\n" )
line_string = "{} excluded publications: {} ".format( excluded_article_count, excluded_article_id_list )
line_list.append( line_string )
print( line_string )
#-- END check for excluded publications. --#
###Output
_____no_output_____
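###Markdown
If the headline numbers are wanted downstream (for example, to compare this all-publications run against the cited-only run later in the notebook), they can be lifted out of the dictionary returned above. This is a hedged convenience sketch, not part of the original evaluation flow.
###Code
# minimal sketch: lift the binary-method results out of output_dictionary for later reuse.
all_pubs_method_map = output_dictionary.get( RETURN_METHOD_TO_RESULT_MAP, {} )
all_pubs_binary_tuple = all_pubs_method_map.get( CALCULATION_METHOD_BINARY, None )
if ( all_pubs_binary_tuple is not None ):
    print( "all publications - precision = {}, recall = {}, F1 = {}".format( all_pubs_binary_tuple[ 0 ], all_pubs_binary_tuple[ 1 ], all_pubs_binary_tuple[ 2 ] ) )
#-- END check for binary results --#
###Output
_____no_output_____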
###Markdown
precision, recall, and accuracy per publication- Back to [Table of Contents](Table-of-Contents)
###Code
# declare variables
pub_debug_flag = False
publication_to_lists_map = coding_evaluator.get_lists_by_publication()
pub_publication_id_list = []
pub_false_positive_list = []
pub_false_negative_list = []
pub_calculation_methods = []
#calculation_methods.append( CALCULATION_METHOD_DEFAULT )
pub_calculation_methods.append( CALCULATION_METHOD_BINARY )
#pub_calculation_methods.append( CALCULATION_METHOD_MACRO )
#calculation_methods.append( CALCULATION_METHOD_MICRO )
#calculation_methods.append( CALCULATION_METHOD_WEIGHTED )
pub_output_dictionary = None
pub_confusion_matrix = None
pub_false_positive_count = None
pub_false_negative_count = None
output_string = None
# declare variables - precision/recall/F1
pub_method_to_result_map = None
pub_evaluation_tuple = None
pub_precision = None
pub_recall = None
pub_F1 = None
pub_precision_list = []
pub_recall_list = []
pub_F1_list = []
# loop over publications
for publication_id in six.iterkeys( publication_to_lists_map ):
if ( debug_flag == True ):
print( "Publication ID: {}".format( publication_id ) )
#-- END debug --#
# get lists
pub_list_dictionary = publication_to_lists_map.get( publication_id, None )
pub_baseline_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_BASELINE, None )
pub_derived_binary_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DERIVED_BINARY, None )
pub_derived_raw_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DERIVED_RAW, None )
pub_publication_id_per_citation_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_PUBLICATION_ID, None )
pub_data_set_id_per_citation_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DATA_SET_ID, None )
if ( debug_flag == True ):
# print lists:
print( "====> baseline......: {}".format( pub_baseline_list ) )
print( "====> derived_binary: {}".format( pub_derived_binary_list ) )
print( "====> derived_raw...: {}".format( pub_derived_raw_list ) )
print( "====> publication_id: {}".format( pub_publication_id_per_citation_list ) )
print( "====> data_set_id...: {}".format( pub_data_set_id_per_citation_list ) )
#-- END debug --#
# call the precision and recall function
pub_output_dictionary = precision_recall_f1( pub_baseline_list, pub_derived_binary_list, pub_calculation_methods, do_print_IN = pub_debug_flag )
if ( debug_flag == True ):
print( "----> pub output dictionary: {}".format( pub_output_dictionary ) )
#-- END debug --#
# get confusion matrix
pub_confusion_matrix = pub_output_dictionary.get( RETURN_CONFUSION_MATRIX, None )
if ( debug_flag == True ):
print( "Confusion Matrix: {}".format( pub_confusion_matrix ) )
#-- END debug --#
if ( pub_confusion_matrix is not None ):
# try to get false positives (cm[ 0 ][ 1 ]). If exception, is 0.
try:
pub_false_positive_count = pub_confusion_matrix[ 0 ][ 1 ]
if ( debug_flag == True ):
print( "found FP!" )
#-- END debug --#
except:
if ( debug_flag == True ):
print( "no FP!" )
#-- END debug --#
# index doesn't exist. Set to 0.
pub_false_positive_count = 0
#-- END try...except. --#
# try to get false negatives (cm[ 1 ][ 0 ]). If exception, is 0.
try:
pub_false_negative_count = pub_confusion_matrix[ 1 ][ 0 ]
if ( debug_flag == True ):
print( "found FN!" )
#-- END debug --#
except:
if ( debug_flag == True ):
print( "no FN!" )
#-- END debug --#
# index doesn't exist. Set to 0.
pub_false_negative_count = 0
#-- END try...except. --#
# add id and count to list.
pub_publication_id_list.append( publication_id )
pub_false_positive_list.append( pub_false_positive_count )
pub_false_negative_list.append( pub_false_negative_count )
else:
# no confusion matrix
print( "ERROR - no confusion matrix!" )
#-- END check to see if confusion matrix --#
# get results...
pub_precision = -1
pub_recall = -1
pub_F1 = -1
pub_method_to_result_map = pub_output_dictionary.get( RETURN_METHOD_TO_RESULT_MAP, None )
if ( pub_method_to_result_map is not None ):
# get results... for binary calculation method
pub_evaluation_tuple = pub_method_to_result_map.get( CALCULATION_METHOD_BINARY, None )
if ( pub_evaluation_tuple is not None ):
# get results
pub_precision = pub_evaluation_tuple[ 0 ]
pub_recall = pub_evaluation_tuple[ 1 ]
pub_F1 = pub_evaluation_tuple[ 2 ]
else:
# no results for binary calculation method
print( "ERROR - no results for binary calculation method!" )
#-- END check to see if confusion matrix --#
else:
# no results for binary calculation method
print( "ERROR - no results!" )
#-- END check to see if confusion matrix --#
# add to lists
pub_precision_list.append( pub_precision )
pub_recall_list.append( pub_recall )
pub_F1_list.append( pub_F1 )
#-- END loop over per-publication lists --#
if ( debug_flag == True ):
print( pub_publication_id_list )
print( pub_false_positive_list )
print( pub_false_negative_list )
#-- END debug --#
###Output
_____no_output_____
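###Markdown
To make the per-publication lists built above easier to scan, the sketch below collects them into a pandas DataFrame. It assumes pandas is available as pd (it is imported in a Setup cell of this notebook) and that the lists are parallel, which holds whenever every publication produced a confusion matrix in the loop above.
###Code
# minimal sketch: per-publication summary table (assumes the lists above are parallel).
pub_summary_df = pd.DataFrame(
    {
        "publication_id": pub_publication_id_list,
        "precision": pub_precision_list,
        "recall": pub_recall_list,
        "F1": pub_F1_list,
        "false_positives": pub_false_positive_list,
        "false_negatives": pub_false_negative_list
    }
)
# lowest-F1 publications first, since those are the ones worth reviewing by hand.
pub_summary_df = pub_summary_df.sort_values( "F1" )
pub_summary_df.head( 10 )
###Output
_____no_output_____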
###Markdown
false positives (FP)- Back to [Table of Contents](Table-of-Contents)
###Code
# summarize
output_string = "\n========================================\nFalse Positives (FP):"
print( output_string )
line_list.append( output_string )
# declare variables
pub_false_positive_array = None
pub_false_positive_mean = None
pub_fp_pub_id_list = []
pub_fp_count_list = []
item_index = None
current_count = None
output_string = None
zipped_fp_lists = None
fp_row = None
# convert false positive list to a numpy array and get the mean
pub_false_positive_array = numpy.array( pub_false_positive_list )
pub_false_positive_mean = numpy.mean( pub_false_positive_array )
# loop over items, flag any that are over mean
item_index = -1
for current_count in pub_false_positive_list:
# increment index
item_index += 1
# get publication ID
publication_id = pub_publication_id_list[ item_index ]
# is count greater than mean?
if ( current_count > pub_false_positive_mean ):
# add to list
pub_fp_pub_id_list.append( publication_id )
pub_fp_count_list.append( current_count )
else:
if ( debug_flag == True ):
print( "- pub {} FP {} <= mean ( {} )".format( publication_id, current_count, pub_false_positive_mean ) )
#-- END debug --#
#-- END loop over per-publication false positive counts. --#
# zip up the two lists (one list of pairs of values, rather than two lists).
# ID order
zipped_fp_lists = list( zip( pub_fp_count_list, pub_fp_pub_id_list ) )
# convert to ordered by count, then ID, largest to smallest.
zipped_fp_lists.sort( reverse = True )
# anything in the list?
fp_count = len( zipped_fp_lists )
if( fp_count > 0 ):
output_string = "\n==> {} False Positives (FP) above mean ( {} ):".format( fp_count, pub_false_positive_mean )
print( output_string )
line_list.append( output_string )
# output for review
for fp_row in zipped_fp_lists:
# summarize
output_string = "- pub {} FP {} > mean ( {} )".format( fp_row[ 1 ], fp_row[ 0 ], pub_false_positive_mean )
print( output_string )
line_list.append( output_string )
#-- END loop over items --#
#-- END check to see if anything in list. --#
###Output
_____no_output_____
###Markdown
false negatives (FN)- Back to [Table of Contents](Table-of-Contents)
###Code
# summarize
output_string = "\n========================================\nFalse Negatives (FN):"
print( output_string )
line_list.append( output_string )
# declare variables
pub_false_negative_array = None
pub_false_negative_mean = None
pub_fn_pub_id_list = []
pub_fn_count_list = []
item_index = None
current_count = None
output_string = None
zipped_fn_lists = None
fn_row = None
# convert false negative list to a numpy array and get the mean
pub_false_negative_array = numpy.array( pub_false_negative_list )
pub_false_negative_mean = numpy.mean( pub_false_negative_array )
# loop over items, flag any that are over mean
item_index = -1
for current_count in pub_false_negative_list:
# increment index
item_index += 1
# get publication ID
publication_id = pub_publication_id_list[ item_index ]
# is count greater than mean?
if ( current_count > pub_false_negative_mean ):
# add to list
pub_fn_pub_id_list.append( publication_id )
pub_fn_count_list.append( current_count )
else:
if ( debug_flag == True ):
print( "- pub {} FN {} <= mean ( {} )".format( publication_id, current_count, pub_false_negative_mean ) )
#-- END debug --#
#-- END loop over per-publication false negative counts. --#
# zip up the two lists (one list of pairs of values, rather than two lists).
# ID order
zipped_fn_lists = list( zip( pub_fn_count_list, pub_fn_pub_id_list ) )
# convert to ordered by count, then ID, largest to smallest.
zipped_fn_lists.sort( reverse = True )
# anything in the list?
fn_count = len( zipped_fn_lists )
if( fn_count > 0 ):
output_string = "\n==> {} False Negatives (FN) above mean ( {} ):".format( fn_count, pub_false_negative_mean )
print( output_string )
line_list.append( output_string )
# output for review
for fn_row in zipped_fn_lists:
# summarize
output_string = "- pub {} FN {} > mean ( {} )".format( fn_row[ 1 ], fn_row[ 0 ], pub_false_negative_mean )
print( output_string )
line_list.append( output_string )
#-- END loop over items --#
#-- END check to see if anything in list. --#
###Output
_____no_output_____
###Markdown
output all publication-citation pairs- Back to [Table of Contents](Table-of-Contents)
###Code
# output all the full lists
current_index = -1
for item in publication_id_per_citation_list:
# increment index
current_index += 1
# get current values.
baseline_value = baseline_list[ current_index ]
derived_raw_value = derived_raw_list[ current_index ]
derived_binary_value = derived_binary_list[ current_index ]
pub_id_value = publication_id_per_citation_list[ current_index ]
data_set_id_value = data_set_id_per_citation_list[ current_index ]
print( "{}: pub ID {} - data set ID {} - baseline {} - binary {} - raw {}".format( current_index, pub_id_value, data_set_id_value, baseline_value, derived_binary_value, derived_raw_value ) )
#-- END loop over full lists. --#
###Output
_____no_output_____
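###Markdown
Instead of (or in addition to) printing every pair, the same parallel lists can be dropped into a DataFrame and written to disk for review in a spreadsheet. This is a hedged sketch: it assumes pandas is available as pd and uses output_folder_path from Setup, and the CSV file name is a made-up example rather than an output the rest of the notebook expects.
###Code
# minimal sketch: export the per-citation lists to CSV (the file name is illustrative only).
pairs_df = pd.DataFrame(
    {
        "publication_id": publication_id_per_citation_list,
        "data_set_id": data_set_id_per_citation_list,
        "baseline": baseline_list,
        "derived_binary": derived_binary_list,
        "derived_raw": derived_raw_list
    }
)
pairs_csv_path = "{}/publication_citation_pairs.csv".format( output_folder_path )
pairs_df.to_csv( pairs_csv_path, index = False )
print( "pairs output to {}".format( pairs_csv_path ) )
###Output
_____no_output_____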
###Markdown
graph precision and recall at n- Back to [Table of Contents](Table-of-Contents)
###Code
# set precision_recall_graph_path
#precision_recall_graph_path = "{}/{}precision_recall_graph{}.pdf".format( output_folder_path, file_name_prefix, file_name_suffix )
# declare variables
plot_details = None
# output to file?
if ( output_to_file == True ):
# output figure to file
plot_details = plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation", output_path_IN = precision_recall_graph_path )
else:
# no output path - just display the plot inline.
plot_details = plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation" )
#-- END check to see if output graph to file --#
# DEBUG?
if ( debug_flag == True ):
# summarize
output_string = "- plot details: {}".format( plot_details )
print( output_string )
#line_list.append( output_string )
#-- END DEBUG --#
###Output
_____no_output_____
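###Markdown
The curve above can also be summarized at a few fixed k values using the precision_at_k() and recall_at_k() helpers defined in Setup; the k values below are arbitrary examples, not thresholds the evaluation depends on.
###Code
# minimal sketch: spot-check precision and recall at a few k values (k values are illustrative only).
for example_k in [ 0.1, 0.25, 0.5 ]:
    example_precision = precision_at_k( baseline_list, derived_raw_list, example_k )
    example_recall = recall_at_k( baseline_list, derived_raw_list, example_k )
    print( "k = {}: precision = {}, recall = {}".format( example_k, example_precision, example_recall ) )
#-- END loop over example k values --#
###Output
_____no_output_____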
###Markdown
output results to file- Back to [Table of Contents](Table-of-Contents)
###Code
# set results file path:
#results_file_path = "{}/{}evaluation_results{}.txt".format( output_folder_path, file_name_prefix, file_name_suffix )
# declare variables
results_file = None
line_list_string = None
# do we output to file?
if ( output_to_file == True ):
if ( debug_flag == True ):
print( line_list )
#-- END check to see if debug --#
# yes. open output file.
with open( results_file_path, mode = "w" ) as results_file:
# join line list with "\n", then write.
line_list_string = "\n".join( line_list )
results_file.write( line_list_string )
#-- END with...as --#
print( "results output to {}".format( results_file_path ) )
#-- END check to see if we output to file --#
###Output
_____no_output_____
###Markdown
Evaluate only publications with citations- Back to [Table of Contents](Table-of-Contents)Now, try just doing this on publications that have citations.
###Code
# reset line_list.
line_list = []
# if output...
output_string = "Using baseline/ground_truth file: {}".format( baseline_json_path )
print( output_string )
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output to file... --#
###Output
_____no_output_____
###Markdown
Process JSON- Back to [Table of Contents](Table-of-Contents)
###Code
# init class to handle evaluation
coding_evaluator = CitationCodingEvaluation()
coding_evaluator.debug_flag = debug_flag
###Output
_____no_output_____
###Markdown
Filter publications?- Back to [Table of Contents](Table-of-Contents)If we want to run on only a subset of publications, we need to use some django code. django init- Back to [Table of Contents](Table-of-Contents)
###Code
path_to_django_init = "/home/context/django_init.py"
%run $path_to_django_init
###Output
_____no_output_____
###Markdown
get IDs of included publications- Back to [Table of Contents](Table-of-Contents)
###Code
from sourcenet.models import Article
###Output
_____no_output_____
###Markdown
**Working with django-taggit**- django-taggit documentation: https://github.com/alex/django-taggit. Adding tags to a model: from django.db import models from taggit.managers import TaggableManager class Food(models.Model): ... fields here tags = TaggableManager() Interacting with a model that has tags: >>> apple = Food.objects.create(name="apple") >>> apple.tags.add("red", "green", "delicious") >>> apple.tags.all() [<Tag: red>, <Tag: green>, <Tag: delicious>] >>> apple.tags.remove("green") >>> apple.tags.all() [<Tag: red>, <Tag: delicious>] >>> Food.objects.filter(tags__name__in=["red"]) [<Food: apple>, <Food: pear>] include only those with certain tags. tags_in_list = [ "prelim_unit_test_001", "prelim_unit_test_002", "prelim_unit_test_003", "prelim_unit_test_004", "prelim_unit_test_005", "prelim_unit_test_006", "prelim_unit_test_007" ] tags_in_list = [ "grp_month", ] if ( len( tags_in_list ) > 0 ): # filter print( "filtering to just articles with tags: " + str( tags_in_list ) ) grp_article_qs = grp_article_qs.filter( tags__name__in = tags_in_list ) #-- END check to see if we have a specific list of tags we want to include --#
###Code
# filter to just those in holdout that have one or more related citations.
# declare variables
article_tag_include_list = None
article_tag_exclude_list = None
article_qs = None
# include...
article_tag_include_list = []
article_tag_include_list.append( "holdout" )
# exclude...
article_tag_exclude_list = []
article_tag_exclude_list.append( "wc_holdout" )
# get all articles
article_qs = Article.objects.all()
# include tags?
if ( ( article_tag_include_list is not None ) and ( len( article_tag_include_list ) > 0 ) ):
# yes.
article_qs = article_qs.filter( tags__name__in = article_tag_include_list )
#-- END check to see if include tags? --#
# exclude tags?
if ( ( article_tag_exclude_list is not None ) and ( len( article_tag_exclude_list ) > 0 ) ):
# yes.
article_qs = article_qs.exclude( tags__name__in = article_tag_exclude_list )
#-- END check to see if include tags? --#
# filter down to just those where related data set citation id is > 0.
print( "Filtered to {} Article instances.".format( article_qs.count() ) )
# declare variables
article_instance = None
citation_count = None
article_with_citations_id_list = None
# now, loop over the publications
article_with_citations_id_list = []
for article_instance in article_qs:
# see how many citations.
citation_count = 0
citation_count = article_instance.datasetcitation_set.all().count()
if ( citation_count > 0 ):
# add to id list.
article_with_citations_id_list.append( article_instance.id )
#-- END check to see if has citations --#
#-- END loop over articles. --#
# re-do article_qs, limiting to IDs in our list.
article_qs = Article.objects.filter( id__in = article_with_citations_id_list )
print( "Filtered to {} Article instances.".format( article_qs.count() ) )
# declare variables
article_id_include_list = None
article_instance = None
# make list of IDs
article_id_include_list = []
for article_instance in article_qs:
# get ID
article_id = article_instance.id
# add to list
article_id_include_list.append( article_id )
#-- END loop over matching articles. --#
# store details in coding_evaluator
coding_evaluator.set_excluded_article_tag_list( article_tag_exclude_list )
coding_evaluator.set_included_article_tag_list( article_tag_include_list )
coding_evaluator.set_included_article_id_list( article_id_include_list )
print( "Including {} publications (include tags: {}; exclude tags {}).".format( len( article_id_include_list ), article_tag_include_list, article_tag_exclude_list ) )
###Output
_____no_output_____
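###Markdown
The per-article loop above works fine; for reference, the same "has at least one citation" filter can usually be expressed as a single annotated query. This is an untested sketch that assumes the reverse relation is named datasetcitation (as the datasetcitation_set accessor above suggests); Count( ..., distinct = True ) and .distinct() guard against row duplication introduced by the tag joins.
###Code
# minimal sketch: equivalent single-query filter using annotation (untested alternative to the loop above).
from django.db.models import Count

annotated_article_qs = Article.objects.filter( tags__name__in = article_tag_include_list )
annotated_article_qs = annotated_article_qs.exclude( tags__name__in = article_tag_exclude_list )
annotated_article_qs = annotated_article_qs.annotate( citation_count = Count( "datasetcitation", distinct = True ) )
annotated_article_qs = annotated_article_qs.filter( citation_count__gt = 0 ).distinct()
print( "annotated query finds {} Article instances.".format( annotated_article_qs.count() ) )
###Output
_____no_output_____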
###Markdown
process JSON files- Back to [Table of Contents](Table-of-Contents)
###Code
# process baseline JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_BASELINE
citation_json = baseline_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# process derived JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_DERIVED
citation_json = derived_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# create lists
coding_evaluator.debug_flag = False
details = coding_evaluator.create_evaluation_lists()
status = details
baseline_list = coding_evaluator.get_baseline_list()
derived_raw_list = coding_evaluator.get_derived_raw_list()
derived_binary_list = coding_evaluator.get_derived_binary_list()
publication_id_per_citation_list = coding_evaluator.get_publication_id_list()
data_set_id_per_citation_list = coding_evaluator.get_data_set_id_list()
###Output
_____no_output_____
###Markdown
precision, recall, and accuracy- Back to [Table of Contents](Table-of-Contents)
###Code
# calculation methods to include
calculation_methods = []
calculation_methods.append( CALCULATION_METHOD_DEFAULT )
calculation_methods.append( CALCULATION_METHOD_BINARY )
#calculation_methods.append( CALCULATION_METHOD_MACRO )
#calculation_methods.append( CALCULATION_METHOD_MICRO )
#calculation_methods.append( CALCULATION_METHOD_WEIGHTED )
# call function to do work.
output_dictionary = precision_recall_f1( baseline_list, derived_binary_list, calculation_methods )
# add lines from output to line_list
line_list = line_list + output_dictionary.get( "line_list", [] )
line_list.append( "\n" )
print( "----> output dictionary: {}".format( output_dictionary ) )
# check for excluded articles
excluded_article_count = details.get( CitationCodingEvaluation.DETAILS_EXCLUDED_ARTICLE_COUNT, None )
excluded_article_id_list = details.get( CitationCodingEvaluation.DETAILS_EXCLUDED_ARTICLE_ID_LIST, None )
if ( ( excluded_article_count is not None ) and ( excluded_article_count > 0 ) ):
# add excluded articles details:
line_list.append( "\n" )
line_string = "{} excluded publications: {} ".format( excluded_article_count, excluded_article_id_list )
line_list.append( line_string )
print( line_string )
#-- END check for excluded publications. --#
###Output
_____no_output_____
###Markdown
precision, recall, and accuracy per publication- Back to [Table of Contents](Table-of-Contents)
###Code
# declare variables
pub_debug_flag = False
publication_to_lists_map = coding_evaluator.get_lists_by_publication()
pub_publication_id_list = []
pub_false_positive_list = []
pub_false_negative_list = []
pub_calculation_methods = []
#calculation_methods.append( CALCULATION_METHOD_DEFAULT )
pub_calculation_methods.append( CALCULATION_METHOD_BINARY )
#pub_calculation_methods.append( CALCULATION_METHOD_MACRO )
#calculation_methods.append( CALCULATION_METHOD_MICRO )
#calculation_methods.append( CALCULATION_METHOD_WEIGHTED )
pub_output_dictionary = None
pub_confusion_matrix = None
pub_false_positive_count = None
pub_false_negative_count = None
output_string = None
# declare variables - precision/recall/F1
pub_method_to_result_map = None
pub_evaluation_tuple = None
pub_precision = None
pub_recall = None
pub_F1 = None
pub_precision_list = []
pub_recall_list = []
pub_F1_list = []
# loop over publications
for publication_id in six.iterkeys( publication_to_lists_map ):
if ( debug_flag == True ):
print( "Publication ID: {}".format( publication_id ) )
#-- END debug --#
# get lists
pub_list_dictionary = publication_to_lists_map.get( publication_id, None )
pub_baseline_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_BASELINE, None )
pub_derived_binary_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DERIVED_BINARY, None )
pub_derived_raw_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DERIVED_RAW, None )
pub_publication_id_per_citation_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_PUBLICATION_ID, None )
pub_data_set_id_per_citation_list = pub_list_dictionary.get( coding_evaluator.LIST_TYPE_DATA_SET_ID, None )
if ( debug_flag == True ):
# print lists:
print( "====> baseline......: {}".format( pub_baseline_list ) )
print( "====> derived_binary: {}".format( pub_derived_binary_list ) )
print( "====> derived_raw...: {}".format( pub_derived_raw_list ) )
print( "====> publication_id: {}".format( pub_publication_id_per_citation_list ) )
print( "====> data_set_id...: {}".format( pub_data_set_id_per_citation_list ) )
#-- END debug --#
# call the precision and recall function
pub_output_dictionary = precision_recall_f1( pub_baseline_list, pub_derived_binary_list, pub_calculation_methods, do_print_IN = pub_debug_flag )
if ( debug_flag == True ):
print( "----> pub output dictionary: {}".format( pub_output_dictionary ) )
#-- END debug --#
# get confusion matrix
pub_confusion_matrix = pub_output_dictionary.get( RETURN_CONFUSION_MATRIX, None )
if ( debug_flag == True ):
print( "Confusion Matrix: {}".format( pub_confusion_matrix ) )
#-- END debug --#
if ( pub_confusion_matrix is not None ):
# try to get false positives (cm[ 0 ][ 1 ]). If exception, is 0.
try:
pub_false_positive_count = pub_confusion_matrix[ 0 ][ 1 ]
if ( debug_flag == True ):
print( "found FP!" )
#-- END debug --#
except:
if ( debug_flag == True ):
print( "no FP!" )
#-- END debug --#
# index doesn't exist. Set to 0.
pub_false_positive_count = 0
#-- END try...except. --#
# try to get false negatives (cm[ 1 ][ 0 ]). If exception, is 0.
try:
pub_false_negative_count = pub_confusion_matrix[ 1 ][ 0 ]
if ( debug_flag == True ):
print( "found FN!" )
#-- END debug --#
except:
if ( debug_flag == True ):
print( "no FN!" )
#-- END debug --#
# index doesn't exist. Set to 0.
pub_false_negative_count = 0
#-- END try...except. --#
# add id and count to list.
pub_publication_id_list.append( publication_id )
pub_false_positive_list.append( pub_false_positive_count )
pub_false_negative_list.append( pub_false_negative_count )
else:
# no confusion matrix
print( "ERROR - no confusion matrix!" )
#-- END check to see if confusion matrix --#
# get results...
pub_precision = -1
pub_recall = -1
pub_F1 = -1
pub_method_to_result_map = pub_output_dictionary.get( RETURN_METHOD_TO_RESULT_MAP, None )
if ( pub_method_to_result_map is not None ):
# get results... for binary calculation method
pub_evaluation_tuple = pub_method_to_result_map.get( CALCULATION_METHOD_BINARY, None )
if ( pub_evaluation_tuple is not None ):
# get results
pub_precision = pub_evaluation_tuple[ 0 ]
pub_recall = pub_evaluation_tuple[ 1 ]
pub_F1 = pub_evaluation_tuple[ 2 ]
else:
# no results for binary calculation method
print( "ERROR - no results for binary calculation method!" )
#-- END check to see if confusion matrix --#
else:
# no results for binary calculation method
print( "ERROR - no results!" )
#-- END check to see if confusion matrix --#
# add to lists
pub_precision_list.append( pub_precision )
pub_recall_list.append( pub_recall )
pub_F1_list.append( pub_F1 )
#-- END loop over per-publication lists --#
if ( debug_flag == True ):
print( pub_publication_id_list )
print( pub_false_positive_list )
print( pub_false_negative_list )
#-- END debug --#
###Output
_____no_output_____
###Markdown
false positives (FP)- Back to [Table of Contents](Table-of-Contents)
###Code
# summarize
output_string = "\n========================================\nFalse Positives (FP):"
print( output_string )
line_list.append( output_string )
# declare variables
pub_false_positive_array = None
pub_false_positive_mean = None
pub_fp_pub_id_list = []
pub_fp_count_list = []
item_index = None
current_count = None
output_string = None
zipped_fp_lists = None
fp_row = None
# convert false positive list to a numpy array and get the mean
pub_false_positive_array = numpy.array( pub_false_positive_list )
pub_false_positive_mean = numpy.mean( pub_false_positive_array )
# loop over items, flag any that are over mean
item_index = -1
for current_count in pub_false_positive_list:
# increment index
item_index += 1
# get publication ID
publication_id = pub_publication_id_list[ item_index ]
# is count greater than mean?
if ( current_count > pub_false_positive_mean ):
# add to list
pub_fp_pub_id_list.append( publication_id )
pub_fp_count_list.append( current_count )
else:
if ( debug_flag == True ):
print( "- pub {} FP {} <= mean ( {} )".format( publication_id, current_count, pub_false_positive_mean ) )
#-- END debug --#
#-- END loop over per-publication false positive counts. --#
# zip up the two lists (one list of pairs of values, rather than two lists).
# ID order
zipped_fp_lists = list( zip( pub_fp_count_list, pub_fp_pub_id_list ) )
# convert to ordered by count, then ID, largest to smallest.
zipped_fp_lists.sort( reverse = True )
# anything in the list?
fp_count = len( zipped_fp_lists )
if( fp_count > 0 ):
output_string = "\n==> {} False Positives (FP) above mean ( {} ):".format( fp_count, pub_false_positive_mean )
print( output_string )
line_list.append( output_string )
# output for review
for fp_row in zipped_fp_lists:
# summarize
output_string = "- pub {} FP {} > mean ( {} )".format( fp_row[ 1 ], fp_row[ 0 ], pub_false_positive_mean )
print( output_string )
line_list.append( output_string )
#-- END loop over items --#
#-- END check to see if anything in list. --#
###Output
_____no_output_____
###Markdown
false negatives (FN)- Back to [Table of Contents](Table-of-Contents)
###Code
# summarize
output_string = "\n========================================\nFalse Negatives (FN):"
print( output_string )
line_list.append( output_string )
# declare variables
pub_false_negative_array = None
pub_false_negative_mean = None
pub_fn_pub_id_list = []
pub_fn_count_list = []
item_index = None
current_count = None
output_string = None
zipped_fn_lists = None
fn_row = None
# convert false negative list to a numpy array and get the mean
pub_false_negative_array = numpy.array( pub_false_negative_list )
pub_false_negative_mean = numpy.mean( pub_false_negative_array )
# loop over items, flag any that are over mean
item_index = -1
for current_count in pub_false_negative_list:
# increment index
item_index += 1
# get publication ID
publication_id = pub_publication_id_list[ item_index ]
# is count greater than mean?
if ( current_count > pub_false_negative_mean ):
# add to list
pub_fn_pub_id_list.append( publication_id )
pub_fn_count_list.append( current_count )
else:
if ( debug_flag == True ):
print( "- pub {} FN {} <= mean ( {} )".format( publication_id, current_count, pub_false_negative_mean ) )
#-- END debug --#
#-- END loop over per-publication false negative counts. --#
# zip up the two lists (one list of pairs of values, rather than two lists).
# ID order
zipped_fn_lists = list( zip( pub_fn_count_list, pub_fn_pub_id_list ) )
# convert to ordered by count, then ID, largest to smallest.
zipped_fn_lists.sort( reverse = True )
# anything in the list?
fn_count = len( zipped_fn_lists )
if( fn_count > 0 ):
output_string = "\n==> {} False Negatives (FN) above mean ( {} ):".format( fn_count, pub_false_negative_mean )
print( output_string )
line_list.append( output_string )
# output for review
for fn_row in zipped_fn_lists:
# summarize
output_string = "- pub {} FN {} > mean ( {} )".format( fn_row[ 1 ], fn_row[ 0 ], pub_false_negative_mean )
print( output_string )
line_list.append( output_string )
#-- END loop over items --#
#-- END check to see if anything in list. --#
###Output
_____no_output_____
###Markdown
output all publication-citation pairs- Back to [Table of Contents](Table-of-Contents)
###Code
# output all the full lists
current_index = -1
for item in publication_id_per_citation_list:
# increment index
current_index += 1
# get current values.
baseline_value = baseline_list[ current_index ]
derived_raw_value = derived_raw_list[ current_index ]
derived_binary_value = derived_binary_list[ current_index ]
pub_id_value = publication_id_per_citation_list[ current_index ]
data_set_id_value = data_set_id_per_citation_list[ current_index ]
print( "{}: pub ID {} - data set ID {} - baseline {} - binary {} - raw {}".format( current_index, pub_id_value, data_set_id_value, baseline_value, derived_binary_value, derived_raw_value ) )
#-- END loop over full lists. --#
###Output
_____no_output_____
###Markdown
graph precision and recall at n- Back to [Table of Contents](Table-of-Contents)
###Code
# set new precision_recall_graph_path
precision_recall_graph_path = "{}/{}precision_recall_graph-cited{}.pdf".format( output_folder_path, file_name_prefix, file_name_suffix )
# declare variables
plot_details = None
# output to file?
if ( output_to_file == True ):
# output figure to file
plot_details = plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation", output_path_IN = precision_recall_graph_path )
else:
# no output path - just display the plot inline.
plot_details = plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation" )
#-- END check to see if output graph to file --#
# DEBUG?
if ( debug_flag == True ):
# summarize
output_string = "- plot details: {}".format( plot_details )
print( output_string )
#line_list.append( output_string )
#-- END DEBUG --#
###Output
_____no_output_____
###Markdown
output results to file- Back to [Table of Contents](Table-of-Contents)
###Code
# set results file path:
results_file_path = "{}/{}evaluation_results-cited{}.txt".format( output_folder_path, file_name_prefix, file_name_suffix )
# declare variables
results_file = None
line_list_string = None
# do we output to file?
if ( output_to_file == True ):
if ( debug_flag == True ):
print( line_list )
#-- END check to see if debug --#
# yes. open output file.
with open( results_file_path, mode = "w" ) as results_file:
# join line list with "\n", then write.
line_list_string = "\n".join( line_list )
results_file.write( line_list_string )
#-- END with...as --#
print( "results output to {}".format( results_file_path ) )
#-- END check to see if we output to file --#
###Output
_____no_output_____
###Markdown
Table of Contents: 1 Setup; 1.1 Setup - imports; 1.2 Setup - Functions; 1.2.1 function plot_precision_recall_n; 1.2.2 function threshold_at_k; 1.2.3 function precision_at_k; 1.2.4 function recall_at_k; 1.2.5 function accuracy_at_k; 1.3 Setup - output; 2 class CitationCodingEvaluation; 3 Load JSON files; 4 Process JSON; 5 Evaluate; 5.1 precision, recall, and accuracy; 5.2 graph precision and recall at n; 5.3 output results to file. Setup- Back to [Table of Contents](Table-of-Contents) Setup - imports- Back to [Table of Contents](Table-of-Contents)
###Code
# imports
import datetime
import json
import matplotlib
import matplotlib.pyplot
import numpy
import pandas as pd
import six
# scikit-learn
import sklearn
from sklearn import metrics
from sklearn.metrics import precision_recall_curve, auc
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
GradientBoostingClassifier,
AdaBoostClassifier)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
###Output
_____no_output_____
###Markdown
Setup - Functions- Back to [Table of Contents](Table-of-Contents) function plot_precision_recall_n- Back to [Table of Contents](Table-of-Contents)
###Code
def plot_precision_recall_n(y_true, y_prob, model_name, output_path_IN = None ):
"""
y_true: list
list of ground truth labels (0/1)
y_prob: list
list of predicted probabilities (scores) from the model
model_name: str
model name used as the plot title (e.g., LR_123)
"""
# imports
from sklearn.metrics import precision_recall_curve
# return reference
details_OUT = {}
# declare variables
y_score = None
precision_curve = None
recall_curve = None
pr_thresholds = None
num_above_thresh = None
pct_above_thresh = None
pct_above_per_thresh = None
current_score = None
above_threshold_list = None
above_threshold_count = -1
# store the raw scores in y_score
y_score = y_prob
# calculate precision-recall curve
# http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html
# Returns:
# - precision_curve - Precision values such that element i is the precision of predictions where cutoff is score >= thresholds[ i ] and the last element is 1.
# - recall_curve - Recall values such that element i is the recall of predictions where cutoff is score >= thresholds[ i ] and the last element is 0.
# - pr_thresholds - Increasing thresholds on the decision function used to decide 1 or 0, used to calculate precision and recall (looks like it is the set of unique values in the predicted value set).
precision_curve, recall_curve, pr_thresholds = precision_recall_curve( y_true, y_score )
# get all but the last precision score (1).
precision_curve = precision_curve[ : -1 ]
# print( "precision_curve: {}".format( precision_curve ) )
# get all but the last recall score (0).
recall_curve = recall_curve[ : -1 ]
# print( "recall_curve: {}".format( recall_curve ) )
# store details
details_OUT[ "precision" ] = precision_curve
details_OUT[ "recall" ] = recall_curve
details_OUT[ "threshold" ] = pr_thresholds
# init loop over thresholds
pct_above_per_thresh = []
number_scored = len(y_score)
# loop over thresholds
for value in pr_thresholds:
# at each threshold, calculate the percent of rows above the threshold.
above_threshold_list = []
above_threshold_count = -1
for current_score in y_score:
# is it at or above threshold?
if ( current_score >= value ):
# it is either at or above threshold - add to list.
above_threshold_list.append( current_score )
#-- END check to see if at or above threshold? --#
#-- END loop over scores. --#
# how many above threshold?
#num_above_thresh = len(y_score[y_score>=value])
above_threshold_count = len( above_threshold_list )
num_above_thresh = above_threshold_count
# percent above threshold
pct_above_thresh = num_above_thresh / float( number_scored )
# add to list.
pct_above_per_thresh.append( pct_above_thresh )
#-- END loop over thresholds --#
details_OUT[ "percent_above" ] = pct_above_per_thresh
# convert to numpy array
pct_above_per_thresh = numpy.array(pct_above_per_thresh)
# init matplotlib
matplotlib.pyplot.clf()
fig, ax1 = matplotlib.pyplot.subplots()
# plot precision line
ax1.plot(pct_above_per_thresh, precision_curve, 'b')
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax1.set_ylim(0,1.05)
# plot recall line
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax2.set_ylim(0,1.05)
# finish off graph
name = model_name
matplotlib.pyplot.title(name)
# is there an output path?
if ( ( output_path_IN is not None ) and ( output_path_IN != "" ) ):
# save the figure to file.
matplotlib.pyplot.savefig( output_path_IN )
#-- END check to see if we output to disk. --#
matplotlib.pyplot.show()
# clear plot.
matplotlib.pyplot.clf()
return details_OUT
#-- END function plot_precision_recall_n() --#
print( "function plot_precision_recall_n() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function threshold_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def threshold_at_k( y_scores, k ):
# return reference
value_OUT = None
# declare variables
value_list = None
threshold_index = -1
# sort values
value_list = numpy.sort( y_scores )
# reverse order of list
value_list = value_list[ : : -1 ]
# calculate index of value that is k% of the way through the sorted distribution of scores
threshold_index = int( k * len( y_scores ) )
# get value that is k% of the way through the sorted distribution of scores
value_OUT = value_list[ threshold_index ]
print( "Threshold: {}".format( value_OUT ) )
return value_OUT
#-- END function threshold_at_k() --#
print( "function threshold_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function precision_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def precision_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate precision
value_OUT = precision_score( y_true, y_pred )
return value_OUT
#-- END function precision_at_k() --#
print( "function precision_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function recall_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def recall_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate recall
value_OUT = recall_score( y_true, y_pred )
return value_OUT
#-- END function recall_at_k() --#
print( "function recall_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
function accuracy_at_k- Back to [Table of Contents](Table-of-Contents)
###Code
def accuracy_at_k( y_true, y_scores, k ):
# return reference
value_OUT = None
# declare variables
threshold = None
# get threshold index
threshold = threshold_at_k( y_scores, k )
# use threshold to generate predicted scores
y_pred = numpy.asarray( [ 1 if i >= threshold else 0 for i in y_scores ] )
# calculate accuracy
value_OUT = accuracy_score( y_true, y_pred )
return value_OUT
#-- END function accuracy_at_k() --#
print( "function accuracy_at_k() defined at {}".format( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
Setup - output- Back to [Table of Contents](Table-of-Contents)
###Code
# output_to_file flag
output_to_file = True
line_list = None
output_string = None
#output_folder_path = "/data/output"
output_folder_path = "."
results_file_path = "{}/evaluation_results.txt".format( output_folder_path )
precision_recall_graph_path = "{}/precision_recall_graph.pdf".format( output_folder_path )
# if we are outputting to file, start line list.
if ( output_to_file == True ):
# put a list in line_list
line_list = []
#-- END init line list --#
###Output
_____no_output_____
###Markdown
class CitationCodingEvaluation- Back to [Table of Contents](Table-of-Contents)
###Code
from citation_coding_evaluation import CitationCodingEvaluation
###Output
_____no_output_____
###Markdown
Load JSON files- Back to [Table of Contents](Table-of-Contents)
###Code
# file paths
baseline_json_path = "./data_set_citations.json"
derived_prefix = ""
# set to ".." for running against in-repo code development
#derived_prefix = ".."
derived_json_path = "{}/data/output/data_set_citations.json".format( derived_prefix )
# load the baseline JSON
baseline_json_file = None
baseline_json = None
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( "Reading baseline file: {}".format( baseline_json_path ) )
#-- END if output... --#
# baseline
with open( baseline_json_path ) as baseline_json_file:
# load the JSON from the file.
baseline_json = json.load( baseline_json_file )
#-- END with...as --#
# load the derived JSON
derived_json_file = None
derived_json = None
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( "Reading derived file: {}".format( derived_json_path ) )
#-- END if output... --#
# baseline
with open( derived_json_path ) as derived_json_file:
# load the JSON from the file.
derived_json = json.load( derived_json_file )
#-- END with...as --#
baseline_json
derived_json
###Output
_____no_output_____
###Markdown
Process JSON- Back to [Table of Contents](Table-of-Contents)
###Code
# init class to handle evaluation
coding_evaluator = CitationCodingEvaluation()
# process baseline JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_BASELINE
citation_json = baseline_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# process derived JSON
result_type = CitationCodingEvaluation.RESULT_TYPE_DERIVED
citation_json = derived_json
status = coding_evaluator.process_citation_json( citation_json, result_type )
# output
output_string = "Processing status for {} (None = Success!): \"{}\"".format( result_type, status )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
###Output
_____no_output_____
###Markdown
Evaluate- Back to [Table of Contents](Table-of-Contents)
###Code
# create lists
status = coding_evaluator.create_evaluation_lists()
baseline_list = coding_evaluator.get_baseline_list()
derived_raw_list = coding_evaluator.get_derived_raw_list()
derived_binary_list = coding_evaluator.get_derived_binary_list()
###Output
_____no_output_____
###Markdown
precision, recall, and accuracy- Back to [Table of Contents](Table-of-Contents)
###Code
# calculation methods to include
calculation_methods = []
calculation_methods.append( "binary" )
calculation_methods.append( "macro" )
calculation_methods.append( "micro" )
calculation_methods.append( "weighted" )
# ==> basic binary scores
if ( "binary" in calculation_methods ):
# confusion matrix
cm = metrics.confusion_matrix( baseline_list, derived_binary_list )
print( cm )
# output
output_string = "Confusion matrix: {}".format( cm )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# calculate precision, recall, accuracy...
# ==> precision
precision = metrics.precision_score( baseline_list, derived_binary_list )
# output
output_string = "precision = {}".format( precision )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# ==> recall
recall = metrics.recall_score( baseline_list, derived_binary_list )
# output
output_string = "recall = {}".format( recall )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# ==> accuracy
accuracy = metrics.accuracy_score( baseline_list, derived_binary_list )
# output
output_string = "accuracy = {}".format( accuracy )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
# F-Score
binary_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list )
binary_precision_list = binary_evaluation[ 0 ]
binary_precision = binary_precision_list[ 0 ]
binary_recall_list = binary_evaluation[ 1 ]
binary_recall = binary_recall_list[ 0 ]
binary_F1_list = binary_evaluation[ 2 ]
binary_F1 = binary_F1_list[ 0 ]
# output
output_string = "binary: precision = {}, recall = {}, F1 = {}".format( binary_precision, binary_recall, binary_F1 )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END binary F-Score --#
# ==> macro F-Score
if ( "macro" in calculation_methods ):
macro_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list, average = 'macro' )
macro_precision = macro_evaluation[ 0 ]
macro_recall = macro_evaluation[ 1 ]
macro_F1 = macro_evaluation[ 2 ]
# output
output_string = "macro-average: precision = {}, recall = {}, F1 = {}".format( macro_precision, macro_recall, macro_F1 )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END macro F-Score --#
# ==> micro F-Score
if ( "micro" in calculation_methods ):
micro_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list, average = 'micro' )
micro_precision = micro_evaluation[ 0 ]
micro_recall = micro_evaluation[ 1 ]
micro_F1 = micro_evaluation[ 2 ]
# output
output_string = "micro-average: precision = {}, recall = {}, F1 = {}".format( micro_precision, micro_recall, micro_F1 )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END micro F-Score --#
# ==> weighted F-Score
if ( "weighted" in calculation_methods ):
weighted_evaluation = metrics.precision_recall_fscore_support( baseline_list, derived_binary_list, average = 'weighted' )
weighted_precision = weighted_evaluation[ 0 ]
weighted_recall = weighted_evaluation[ 1 ]
weighted_F1 = weighted_evaluation[ 2 ]
# output
output_string = "weighted-average: precision = {}, recall = {}, F1 = {}".format( weighted_precision, weighted_recall, weighted_F1 )
print( output_string )
# if output...
if ( output_to_file == True ):
# store line for output
line_list.append( output_string )
#-- END if output... --#
#-- END weighted F-Score --#
###Output
_____no_output_____
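###Markdown
The macro-, micro-, and weighted-averaged scores above can differ substantially when the classes are imbalanced. The cell below is a small illustration added for clarity (toy labels only, not the citation data): macro-averaging weights each class equally, while micro-averaging pools all decisions, so the majority class dominates it.
###Code
# toy illustration of macro vs. micro averaging (made-up labels, not the citation data)
toy_true = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 ]
toy_pred = [ 0, 0, 0, 0, 0, 0, 0, 1, 1, 0 ]
toy_macro = metrics.precision_recall_fscore_support( toy_true, toy_pred, average = 'macro' )
toy_micro = metrics.precision_recall_fscore_support( toy_true, toy_pred, average = 'micro' )
# macro averages the per-class precision/recall/F1 equally;
# micro pools all predictions first, so here it equals plain accuracy.
print( "toy macro: precision = {}, recall = {}, F1 = {}".format( toy_macro[ 0 ], toy_macro[ 1 ], toy_macro[ 2 ] ) )
print( "toy micro: precision = {}, recall = {}, F1 = {}".format( toy_micro[ 0 ], toy_micro[ 1 ], toy_micro[ 2 ] ) )
###Output
_____no_output_____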
###Markdown
graph precision and recall at n- Back to [Table of Contents](Table-of-Contents)
###Code
# output to file?
if ( output_to_file == True ):
# output figure to file
plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation", output_path_IN = precision_recall_graph_path )
else:
# just output to standard out (as is possible)
plot_precision_recall_n( baseline_list, derived_raw_list, "evaluation" )
#-- END check to see if output graph to file --#
###Output
_____no_output_____
###Markdown
output results to file- Back to [Table of Contents](Table-of-Contents)
###Code
# declare variables
results_file = None
line_list_string = None
# do we output to file?
if ( output_to_file == True ):
# yes. open output file.
with open( results_file_path, mode = "w" ) as results_file:
        # join line list with "\n", then write.
line_list_string = "\n".join( line_list )
results_file.write( line_list_string )
#-- END with...as --#
#-- END check to see if we output to file --#
###Output
_____no_output_____ |
TrainingSony_Dark_vision-Pyramid-SWA.ipynb | ###Markdown
LOAD DATASET
###Code
# get train IDs
train_fns = glob.glob(gt_dir + '0*.ARW')
train_ids = [int(os.path.basename(train_fn)[0:5]) for train_fn in train_fns]
ps = 512 # patch size for training
save_freq = 500
DEBUG = 0
if DEBUG == 1:
save_freq = 2
train_ids = train_ids[0:5]
###Output
_____no_output_____
###Markdown
UNET MODULES
###Code
class conv_lrelu(nn.Module):
def __init__(self, in_ch, out_ch):
super(conv_lrelu, self).__init__()
self.conv = nn.Sequential(nn.Conv2d(in_ch,out_ch,3, padding = 1),nn.LeakyReLU())
def forward(self, x):
x = self.conv(x)
return x
class down(nn.Module):
def __init__(self, in_ch, out_ch):
super(down, self).__init__()
self.conv1 = conv_lrelu(in_ch,out_ch)
self.conv2 = conv_lrelu(out_ch,out_ch)
self.down = nn.MaxPool2d((2,2))
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.down(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch):
super(up, self).__init__()
self.up = nn.UpsamplingBilinear2d(scale_factor = 2)
self.conv1 = conv_lrelu(in_ch,out_ch)
self.conv2 = conv_lrelu(out_ch,out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
x = torch.cat([x2, x1], dim=1)
x = self.conv1(x)
x = self.conv2(x)
return x
class UNet(nn.Module):
def __init__(self, in_ch = 4, CH_PER_SCALE = [32,64,128,256,512], out_ch = 12):
super(UNet, self).__init__()
self.inc = conv_lrelu(in_ch, CH_PER_SCALE[0])
self.inc2 = conv_lrelu(CH_PER_SCALE[0], CH_PER_SCALE[0])
self.down1 = down(CH_PER_SCALE[0], CH_PER_SCALE[1])
self.down2 = down(CH_PER_SCALE[1],CH_PER_SCALE[2])
self.down3 = down(CH_PER_SCALE[2],CH_PER_SCALE[3])
self.down4 = down(CH_PER_SCALE[3],CH_PER_SCALE[4])
self.up1 = up(CH_PER_SCALE[4]+CH_PER_SCALE[3],CH_PER_SCALE[3])
self.up2 = up(CH_PER_SCALE[3]+CH_PER_SCALE[2],CH_PER_SCALE[2])
self.up3 = up(CH_PER_SCALE[2]+CH_PER_SCALE[1],CH_PER_SCALE[1])
self.up4 = up(CH_PER_SCALE[1]+CH_PER_SCALE[0],CH_PER_SCALE[0])
self.outc = nn.Conv2d(CH_PER_SCALE[0], out_ch, 1, padding = 0)
def forward(self, x):
x0 = self.inc(x)
x0 = self.inc2(x0)
x1 = self.down1(x0)
x2 = self.down2(x1)
x3 = self.down3(x2)
x4 = self.down4(x3)
x3_up = self.up1(x4,x3)
x2_up = self.up2(x3_up,x2)
x1_up = self.up3(x2_up,x1)
out = self.up4(x1_up,x0)
out = self.outc(out)
        # out = F.pixel_shuffle(out,2) ## Paper final step rearranges 12 channels to 3 RGB channels
        # out = F.hardtanh(out, min_val=0, max_val=1) # Clamp the output to [0, 1] since pixel values can only lie in this range
return out
class PRIDNet(nn.Module):
def __init__(self, in_ch = 4, out_ch = 12):
super(PRIDNet, self).__init__()
self.feature_extraction = nn.Sequential(conv_lrelu(in_ch, 32), *[conv_lrelu(32, 32) for i in range(3)])
self.unet0 = UNet(in_ch = 32, out_ch = 12)
self.unet1 = UNet(in_ch = 32, out_ch = 12)
self.unet2 = UNet(in_ch = 32, out_ch = 12)
self.unet3 = UNet(in_ch = 32, out_ch = 12)
self.unet4 = UNet(in_ch = 32, out_ch = 12)
self.avgpool1 = nn.AvgPool2d((2,2))
self.avgpool2 = nn.AvgPool2d((4,4))
self.avgpool3 = nn.AvgPool2d((8,8))
self.avgpool4 = nn.AvgPool2d((16,16))
self.up4 = nn.UpsamplingBilinear2d(scale_factor = 16)
self.up3 = nn.UpsamplingBilinear2d(scale_factor = 8)
self.up2 = nn.UpsamplingBilinear2d(scale_factor = 4)
self.up1 = nn.UpsamplingBilinear2d(scale_factor = 2)
self.out = nn.Conv2d(32+12*5, out_ch, 1, padding = 0)
def forward(self, x):
x_feat = self.feature_extraction(x)
x0 = self.unet0(x_feat)
x1 = self.up1(self.unet1(self.avgpool1(x_feat)))
x2 = self.up2(self.unet2(self.avgpool2(x_feat)))
x3 = self.up3(self.unet3(self.avgpool3(x_feat)))
x4 = self.up4(self.unet4(self.avgpool4(x_feat)))
x_unet_all = torch.cat([x_feat,x0,x1,x2,x3,x4], axis = 1)
out = self.out(x_unet_all)
        out = F.pixel_shuffle(out,2) ## Paper final step rearranges 12 channels to 3 RGB channels
        out = F.hardtanh(out, min_val=0, max_val=1) # Clamp the output to [0, 1] since pixel values can only lie in this range
return out
def load_my_state_dict(self, state_dict):
own_state = self.state_dict()
for name, param in state_dict.items():
if name not in own_state:
continue
#if isinstance(param, self.Parameter):
else:
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
###Output
_____no_output_____
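###Markdown
A quick sanity check of the modules above, added for illustration (the tensor size below is arbitrary; it only needs to be divisible by 256 because of the 4 pyramid poolings times 4 U-Net downsamplings): a packed 4-channel input of size H x W should come out of PRIDNet as a 3-channel image of size 2H x 2W after the final pixel_shuffle.
###Code
# illustrative shape check only -- not part of the training loop
check_model = PRIDNet()
dummy_input = torch.randn(1, 4, 256, 256)  # (batch, packed Bayer channels, H, W)
with torch.no_grad():
    dummy_output = check_model(dummy_input)
print(dummy_input.shape, '->', dummy_output.shape)  # expect torch.Size([1, 3, 512, 512])
###Output
_____no_output_____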
###Markdown
Helper Functions for packing raw and saving images
###Code
def pack_raw(raw):
# pack Bayer image to 4 channels
im = raw.raw_image_visible.astype(np.float32)
im = np.maximum(im - 512, 0) / (16383 - 512) # subtract the black level
im = np.expand_dims(im, axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
out = np.concatenate((im[0:H:2, 0:W:2, :],
im[0:H:2, 1:W:2, :],
im[1:H:2, 1:W:2, :],
im[1:H:2, 0:W:2, :]), axis=2)
return out
# Raw data takes long time to load. Keep them in memory after loaded.
gt_images = [None] * 6000
input_images = {}
input_images['300'] = [None] * len(train_ids)
input_images['250'] = [None] * len(train_ids)
input_images['100'] = [None] * len(train_ids)
g_loss = np.zeros((5000, 1))
allfolders = glob.glob(result_dir + '*0')
lastepoch = 0
for folder in allfolders:
    lastepoch = np.maximum(lastepoch, int(folder[-4:]))
###Output
_____no_output_____
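###Markdown
A small illustration of what pack_raw does, added for clarity (it uses a dummy stand-in object instead of a real rawpy file): the H x W Bayer mosaic becomes an (H/2) x (W/2) x 4 array, with one channel per position in the 2x2 Bayer pattern, after black-level subtraction and scaling.
###Code
# illustrative only: a dummy object with the attribute pack_raw expects
class DummyRaw:
    def __init__(self, h, w):
        self.raw_image_visible = np.random.randint(512, 16383, size=(h, w)).astype(np.float32)
packed = pack_raw(DummyRaw(8, 8))
print(packed.shape)  # expect (4, 4, 4): half the height, half the width, 4 channels
print(packed.min() >= 0.0, packed.max() <= 1.0)  # values are scaled into [0, 1]
###Output
_____no_output_____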
###Markdown
Training
###Code
model = PRIDNet()
model = model.cuda()
model = model.train()
def process_img(input_raw_img, model, ratio):
## Process image(s) using the given model
# input_raw_img: numpy array, dimension: (Batch,Height,Width,Channel)
# ratio: numpy array, dimension: (Batch,)
model.eval();
model.to(deviceTag)
ratio = ratio.reshape(ratio.shape[0],1,1,1)
input_raw_img = np.transpose(input_raw_img, [0,3,1,2]).astype('float32')*ratio
input_tensor = torch.from_numpy(input_raw_img.copy()).float().to(deviceTag)
with torch.no_grad():
output_tensor = model(input_tensor)
output_img = output_tensor.cpu().numpy()
output_img = np.transpose(output_img, [0,2,3,1])
return output_img
def validate(model, input_list, gt_list, block_size = None, batch_size = 8):
assert len(input_list) == len(gt_list)
model.eval();
PSNR_list = []
for i in range(len(input_list)//batch_size):
if i%10 == 0:
print(i)
input_raw_img_batch = []
gt_img_batch = []
ratio_batch = []
for b in range(batch_size):
if i*batch_size+b < len(input_list):
in_path = input_list[i*batch_size+b]
gt_path = gt_list[i*batch_size+b]
else:
break
in_fn = os.path.basename(in_path)
gt_fn = os.path.basename(gt_path)
in_exposure = float(in_fn[9:-5])
gt_exposure = float(gt_fn[9:-5])
ratio = min(gt_exposure / in_exposure, 300)
raw = rawpy.imread(in_path)
input_raw_img = pack_raw(raw)
gt_raw = rawpy.imread(gt_path)
gt_img = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_img = np.float32(gt_img / 65535.0)
if block_size is not None:
i_cut, j_cut = np.random.randint(0,input_raw_img.shape[0]-block_size), np.random.randint(0,input_raw_img.shape[1]-block_size)
gt_img = gt_img[i_cut*2:i_cut*2+block_size*2, j_cut*2:j_cut*2+block_size*2, :]
input_raw_img = input_raw_img[i_cut:i_cut+block_size, j_cut:j_cut+block_size, :]
ratio_batch.append(ratio)
input_raw_img_batch.append(input_raw_img)
gt_img_batch.append(gt_img)
input_raw_img_batch = np.array(input_raw_img_batch)
ratio_batch = np.array(ratio_batch)
gt_img_batch = np.array(gt_img_batch)
output_img_batch = process_img(input_raw_img_batch, model, ratio_batch)
plt.figure()
plt.imshow(gt_img_batch[0,:,:,:])
plt.title("Ground Truth")
plt.figure()
plt.imshow(output_img_batch[0,:,:,:])
plt.title("Predicted patch")
MSE = np.mean((output_img_batch.reshape(output_img_batch.shape[0],-1) - gt_img_batch.reshape(gt_img_batch.shape[0],-1))**2, axis = 1)
PSNR_batch = 10*np.log10(1/MSE)
PSNR_list.append(list(PSNR_batch))
Val_PSNR = np.mean(PSNR_list)
return Val_PSNR
learning_rate = 1e-4
batch_num = 4;
base_opt = optim.SGD(model.parameters(), lr=learning_rate)
optimizer = SWA(base_opt, swa_start=1000, swa_freq=10, swa_lr=0.05)
scheduler = optim.lr_scheduler.StepLR(base_opt, step_size=2000, gamma=0.1) #Step Scheduler.
criterion = nn.MSELoss()
Start_epoch = 0
epochs = 4000
TrainingLossData = np.zeros(epochs)
for epoch in range(Start_epoch, Start_epoch+epochs):
print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
epoch_loss = 0 ## Set Epoch Loss
count = 0;
batches_processed = 0
##This version has a batch size of 1. In the future conside increasing batchsize
for ind in np.random.permutation(len(train_ids)):
# get the path from image id
train_id = train_ids[ind]
in_files = glob.glob(input_dir + '%05d_00*.ARW' % train_id)
        in_path = in_files[np.random.randint(0, len(in_files))]
in_fn = os.path.basename(in_path)
gt_files = glob.glob(gt_dir + '%05d_00*.ARW' % train_id)
gt_path = gt_files[0]
gt_fn = os.path.basename(gt_path)
in_exposure = float(in_fn[9:-5])
gt_exposure = float(gt_fn[9:-5])
ratio = min(gt_exposure / in_exposure, 300)
st = time.time()
if input_images[str(ratio)[0:3]][ind] is None:
raw = rawpy.imread(in_path)
input_images[str(ratio)[0:3]][ind] = np.expand_dims(pack_raw(raw), axis=0) * ratio
gt_raw = rawpy.imread(gt_path)
im = gt_raw.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_images[ind] = np.expand_dims(np.float32(im / 65535.0), axis=0)
# crop
H = input_images[str(ratio)[0:3]][ind].shape[1]
W = input_images[str(ratio)[0:3]][ind].shape[2]
xx = np.random.randint(0, W - ps)
yy = np.random.randint(0, H - ps)
input_patch = input_images[str(ratio)[0:3]][ind][:, yy:yy + ps, xx:xx + ps, :]
gt_patch = gt_images[ind][:, yy * 2:yy * 2 + ps * 2, xx * 2:xx * 2 + ps * 2, :]
if np.random.randint(2, size=1)[0] == 1: # random flip
input_patch = np.flip(input_patch, axis=1)
gt_patch = np.flip(gt_patch, axis=1)
if np.random.randint(2, size=1)[0] == 1:
input_patch = np.flip(input_patch, axis=2)
gt_patch = np.flip(gt_patch, axis=2)
if np.random.randint(2, size=1)[0] == 1: # random transpose
input_patch = np.transpose(input_patch, (0, 2, 1, 3))
gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
#(1, 512, 512, 4)
#(1, 1024, 1024, 3)
input_patch = np.transpose(input_patch, (0,3,1,2))
input_patch = torch.from_numpy(input_patch.copy()).cuda()
gt_patch = np.transpose(gt_patch, (0,3,1,2))
gt_patch = torch.from_numpy(gt_patch.copy()).cuda()
##Batch concatenation
if count%(batch_num)==0:
input_patch_all = input_patch
gt_patch_all = gt_patch
else:
input_patch_all = torch.cat([input_patch_all, input_patch], dim=0)
gt_patch_all = torch.cat([gt_patch_all, gt_patch], dim=0)
##Every N batches we ship it back
if count%(batch_num)==batch_num-1:
#print(input_patch_all.shape)
img_pred = model.forward(input_patch_all)
loss = criterion(img_pred, gt_patch_all)
epoch_loss += loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
batches_processed += 1
##print(epoch_loss/count);
count = count +1
scheduler.step()
print('Epoch finished ! Loss: {}'.format(epoch_loss / batches_processed))
if epoch == 0:
trainF= open(result_dir+"TrainingEpoch.txt","w+")
trainF.write('Epoch,Train_loss\n')
trainF.write('{},{}\n'.format(epoch, epoch_loss / batches_processed))
trainF.close()
else:
trainF= open(result_dir+"TrainingEpoch.txt","a")
trainF.write('{},{}\n'.format(epoch, epoch_loss / batches_processed))
trainF.close()
TrainingLossData[epoch] = epoch_loss / batches_processed ## Save for plotting
################################################ [TODO] ###################################################
# Perform validation with eval_net() on the validation data
# Save the model after every 10 epochs. This save our Memory on HPC.
##Save Top results after 95%
if epoch > epochs*0.95:
torch.save(model.state_dict(),model_save_path + 'sony{}.pth'.format(epoch + 1))
if epoch%99 == 0:
torch.save(model.state_dict(),model_save_path + 'sony{}.pth'.format(epoch + 1))
optimizer.swap_swa_sgd()
torch.save(model.state_dict(),model_save_path + 'sony{}.pth'.format(epoch + 1))
if os.path.isdir(model_save_path):
torch.save(model.state_dict(),model_save_path + 'sony{}.pth'.format(epoch + 1))
else:
os.makedirs(model_save_path, exist_ok=True)
torch.save(model.state_dict(),model_save_path + 'sony{}.pth'.format(epoch + 1))
###Output
_____no_output_____ |
local_ms/notebooks/matt_testing_yfinance.ipynb | ###Markdown
Documentation link: https://pythonrepo.com/repo/ranaroussi-yfinance-python-finance
###Code
msft = yf.Ticker("MSFT")
# get stock info
msft.info
# get historical market data
hist = msft.history(period="max")
# show actions (dividends, splits)
msft.actions
# show dividends
msft.dividends
# show splits
msft.splits
# show financials
msft.financials
msft.quarterly_financials
# show major holders
msft.major_holders
# show institutional holders
msft.institutional_holders
# show balance sheet
msft.balance_sheet
msft.quarterly_balance_sheet
# show cashflow
msft.cashflow
msft.quarterly_cashflow
# show earnings
msft.earnings
msft.quarterly_earnings
# show sustainability
msft.sustainability
# show analysts recommendations
msft.recommendations
# show next event (earnings, etc)
msft.calendar
# show ISIN code - *experimental*
# ISIN = International Securities Identification Number
msft.isin
# show options expirations
msft.options
# get option chain for specific expiration
#opt = msft.option_chain('YYYY-MM-DD')
opt = msft.option_chain(msft.options[0])
# data available via: opt.calls, opt.puts
print(type(opt))
#print(opt)
spy = yf.Ticker("spy")
hut_to = yf.Ticker("hut.to")
hut = yf.Ticker("hut")
aapl = yf.Ticker("aapl")
psyk_ne = yf.Ticker("psyk.ne")
tnt_un_to = yf.Ticker("tnt-un.to")
fx_usd_cad = yf.Ticker("CAD=X")
fx_cad_usd = yf.Ticker("CADUSD=X")
fx_usd_btc = yf.Ticker("BTC-USD")
fx_usd_eth = yf.Ticker("ETH-USD")
tickers = [aapl, msft, hut_to, hut, psyk_ne, spy, tnt_un_to, fx_usd_cad, fx_cad_usd, fx_usd_btc, fx_usd_eth]
print(spy.info['symbol'])
###Output
SPY
###Markdown
**list of ticker attributes (e.g. ticker.info)**
###Code
tt = fx_usd_cad
print(type(tt))
for elem in dir(tt):
print(elem)
print(tt.ticker)
sub_obj = fx_usd_cad.info
print(type(sub_obj))
print(sub_obj)
#for elem in dir(sub_obj):
# print(elem)
###Output
<class 'dict'>
{'exchange': 'CCY', 'shortName': 'USD/CAD', 'exchangeTimezoneName': 'Europe/London', 'exchangeTimezoneShortName': 'GMT', 'isEsgPopulated': False, 'gmtOffSetMilliseconds': '0', 'quoteType': 'CURRENCY', 'symbol': 'CAD=X', 'messageBoardId': 'finmb_CAD_X', 'market': 'ccy_market', 'previousClose': 1.2638, 'regularMarketOpen': 1.2638, 'twoHundredDayAverage': 1.243744, 'trailingAnnualDividendYield': None, 'payoutRatio': None, 'volume24Hr': None, 'regularMarketDayHigh': 1.26825, 'navPrice': None, 'averageDailyVolume10Day': 0, 'totalAssets': None, 'regularMarketPreviousClose': 1.2638, 'fiftyDayAverage': 1.2460346, 'trailingAnnualDividendRate': None, 'open': 1.2638, 'toCurrency': None, 'averageVolume10days': 0, 'expireDate': None, 'yield': None, 'algorithm': None, 'dividendRate': None, 'exDividendDate': None, 'beta': None, 'circulatingSupply': None, 'startDate': None, 'regularMarketDayLow': 1.2623, 'priceHint': 4, 'currency': 'CAD', 'regularMarketVolume': 0, 'lastMarket': None, 'maxSupply': None, 'openInterest': None, 'marketCap': None, 'volumeAllCurrencies': None, 'strikePrice': None, 'averageVolume': 0, 'priceToSalesTrailing12Months': None, 'dayLow': 1.2623, 'ask': 1.2682, 'ytdReturn': None, 'askSize': 0, 'volume': 0, 'fiftyTwoWeekHigh': 1.31115, 'forwardPE': None, 'maxAge': 1, 'fromCurrency': None, 'fiveYearAvgDividendYield': None, 'fiftyTwoWeekLow': 1.2008, 'bid': 1.2676, 'tradeable': False, 'dividendYield': None, 'bidSize': 0, 'dayHigh': 1.26825, 'regularMarketPrice': 1.26785, 'preMarketPrice': None, 'logo_url': ''}
###Markdown
**ticker.info** - dictionary, lots of keys
###Code
print(len(fx_cad_usd.info.items()))
for k,v in fx_cad_usd.info.items():
print(type(v), '\t', k, '\t', v)
print(len(fx_usd_btc.info.items()))
for k,v in fx_usd_btc.info.items():
print(type(v), '\t', k, '\t', v)
strs = ['symbol', 'exchange', 'currency', 'market', 'quoteType', 'shortName', 'longName']
for t in tickers:
print(len(t.info.keys()), end=', ')
print()
for s in strs:
for t in tickers:
if s == 'longName':
try:
print(t.info[s], end=', ')
except KeyError:
print(t.info['shortName'], end=', ')
else:
print(t.info[s], end=', ')
print()
print()
#print(t.info['currency'], end=', ')
#print()
#print(t.info[], end=', ')
#print()
#print(t.info['market'], end=', ')
#print()
###Output
152, 152, 153, 153, 47, 114, 153, 65, 68, 69,
AAPL, MSFT, HUT.TO, HUT, PSYK.NE, SPY, TNT-UN.TO, CAD=X, BTC-USD, ETH-USD,
NMS, NMS, TOR, NMS, NEO, PCX, TOR, CCY, CCC, CCC,
USD, USD, CAD, USD, CAD, USD, CAD, CAD, USD, USD,
us_market, us_market, ca_market, us_market, ca_market, us_market, ca_market, ccy_market, ccc_market, ccc_market,
EQUITY, EQUITY, EQUITY, EQUITY, ETF, ETF, EQUITY, CURRENCY, CRYPTOCURRENCY, CRYPTOCURRENCY,
Apple Inc., Microsoft Corporation, HUT 8 MINING CORP, Hut 8 Mining Corp., HORIZONS PSYCHEDELIC STOCK INDE, SPDR S&P 500, TRUE NORTH COMMERCIAL REIT, USD/CAD, Bitcoin USD, Ethereum USD,
Apple Inc., Microsoft Corporation, Hut 8 Mining Corp., Hut 8 Mining Corp., Horizons Psychedelic Stock Index ETF, SPDR S&P 500 ETF Trust, True North Commercial Real Estate Investment Trust, USD/CAD, Bitcoin USD, Ethereum USD,
###Markdown
**ticker.history method**e.g. `hist = msft.history(period="max")`
###Code
hist = msft.history(period="max")
print(hist)
print(spy.history(interval='1d', period='7d', prepost=True))
days7 = spy.history(interval='1m', period='1d', prepost=False, actions=False)
print(days7)
days7
days7new = days7.reset_index(inplace=False)
days7new['Datetime'] = days7new['Datetime'].astype(str)
days7new['Datetime']
days7new.values.tolist()
daysAll = spy.history(interval='1d', period='max', prepost=False, actions=False)
print(daysAll)
daysAllNew = daysAll.reset_index(inplace=False)
daysAllNew['Date'] = daysAllNew['Date'].astype(str)
days7new
uc = yf.Ticker("CAD=X")
print(uc.isin)
hist = msft.history(period="7d", interval='1m')
start='2021-11-15'
end='2021-11-22'
print(hist)
from datetime import datetime, timedelta
# date object of today's date
today = datetime.today()
print(today)
datetime.today() - timedelta(weeks=42)
date_end = datetime.today()
for idx in range(4):
date_start = date_end - timedelta(weeks=1)
hist = msft.history(interval='1m', start=date_start, end=date_end)
date_end = date_start
print(hist)
print()
###Output
Open High Low Close \
Datetime
2021-11-15 11:57:00-05:00 334.859985 334.980011 334.799988 334.980011
2021-11-15 11:58:00-05:00 334.959991 335.059998 334.940002 334.971405
2021-11-15 11:59:00-05:00 334.989410 335.040009 334.910004 334.980011
2021-11-15 12:00:00-05:00 335.000000 335.200012 334.950012 335.109985
2021-11-15 12:01:00-05:00 335.100006 335.239990 335.100006 335.160004
... ... ... ... ...
2021-11-22 11:52:00-05:00 344.609985 344.609985 343.989990 344.006989
2021-11-22 11:53:00-05:00 343.959991 344.069885 343.690002 343.690002
2021-11-22 11:54:00-05:00 343.690002 343.959991 343.671295 343.869995
2021-11-22 11:55:00-05:00 343.829987 344.000000 343.820007 343.920013
2021-11-22 11:56:28-05:00 343.850006 343.850006 343.850006 343.850006
Volume Dividends Stock Splits
Datetime
2021-11-15 11:57:00-05:00 0 0.0 0
2021-11-15 11:58:00-05:00 25994 0.0 0
2021-11-15 11:59:00-05:00 16750 0.0 0
2021-11-15 12:00:00-05:00 26653 0.0 0
2021-11-15 12:01:00-05:00 17890 0.0 0
... ... ... ...
2021-11-22 11:52:00-05:00 132176 0.0 0
2021-11-22 11:53:00-05:00 91956 0.0 0
2021-11-22 11:54:00-05:00 213993 0.0 0
2021-11-22 11:55:00-05:00 104528 0.0 0
2021-11-22 11:56:28-05:00 0 0.0 0
[1950 rows x 7 columns]
Open High Low Close \
Datetime
2021-11-08 11:57:00-05:00 334.739990 334.869995 334.739990 334.850006
2021-11-08 11:58:00-05:00 334.859985 334.929993 334.790009 334.859985
2021-11-08 11:59:00-05:00 334.859985 334.970001 334.829987 334.910004
2021-11-08 12:00:00-05:00 334.910004 334.959991 334.829987 334.859985
2021-11-08 12:01:00-05:00 334.859985 334.929993 334.859985 334.890015
... ... ... ... ...
2021-11-15 11:51:00-05:00 334.309998 334.420013 334.225006 334.380005
2021-11-15 11:52:00-05:00 334.420013 334.440002 334.260010 334.399994
2021-11-15 11:53:00-05:00 334.429993 334.600006 334.269989 334.519989
2021-11-15 11:54:00-05:00 334.500000 334.725006 334.500000 334.640015
2021-11-15 11:55:00-05:00 334.649994 334.950012 334.649994 334.885010
Volume Dividends Stock Splits
Datetime
2021-11-08 11:57:00-05:00 0 0 0
2021-11-08 11:58:00-05:00 25555 0 0
2021-11-08 11:59:00-05:00 22154 0 0
2021-11-08 12:00:00-05:00 34907 0 0
2021-11-08 12:01:00-05:00 15491 0 0
... ... ... ...
2021-11-15 11:51:00-05:00 55516 0 0
2021-11-15 11:52:00-05:00 29300 0 0
2021-11-15 11:53:00-05:00 49073 0 0
2021-11-15 11:54:00-05:00 18109 0 0
2021-11-15 11:55:00-05:00 25339 0 0
[1948 rows x 7 columns]
- MSFT: 1m data not available for startTime=1635782188 and endTime=1636390588. Only 7 days worth of 1m granularity data are allowed to be fetched per request.
Empty DataFrame
Columns: [Open, High, Low, Close, Adj Close, Volume]
Index: []
Open High Low Close \
Datetime
2021-10-25 11:57:00-04:00 308.119995 308.140015 308.010010 308.035004
2021-10-25 11:58:00-04:00 308.040009 308.086212 308.000000 308.019989
2021-10-25 11:59:00-04:00 308.040009 308.119995 308.019989 308.065002
2021-10-25 12:00:00-04:00 308.079987 308.179810 308.070007 308.160004
2021-10-25 12:01:00-04:00 308.160004 308.329987 308.109985 308.314392
... ... ... ... ...
2021-11-01 11:51:00-04:00 327.934998 327.959991 327.760010 327.779999
2021-11-01 11:52:00-04:00 327.768311 327.779999 327.739990 327.744995
2021-11-01 11:53:00-04:00 327.845001 327.899994 327.839996 327.869995
2021-11-01 11:54:00-04:00 327.859985 327.890015 327.760010 327.890015
2021-11-01 11:55:00-04:00 327.859985 327.890411 327.753510 327.769989
Volume Dividends Stock Splits
Datetime
2021-10-25 11:57:00-04:00 0 0 0
2021-10-25 11:58:00-04:00 17231 0 0
2021-10-25 11:59:00-04:00 26275 0 0
2021-10-25 12:00:00-04:00 24523 0 0
2021-10-25 12:01:00-04:00 61897 0 0
... ... ... ...
2021-11-01 11:51:00-04:00 47518 0 0
2021-11-01 11:52:00-04:00 6637 0 0
2021-11-01 11:53:00-04:00 67649 0 0
2021-11-01 11:54:00-04:00 32653 0 0
2021-11-01 11:55:00-04:00 31623 0 0
[1949 rows x 7 columns]
|
Become a Data Scientist/Codes/Association Analysis with Apriori in Python.ipynb | ###Markdown
Association Analysis with apriori
###Code
item_supports, rules = apriori.run_apriori("groceries.csv", min_confidence=0.05)
###Output
_____no_output_____
###Markdown
Support
###Code
for items, support in item_supports[: 5]:
print("{0} - {1:.2f}".format(", ".join(items), support))
###Output
whole milk - 0.26
other vegetables - 0.19
rolls/buns - 0.18
soda - 0.17
yogurt - 0.14
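###Markdown
To make the support values above concrete, here is a small hand computation on a toy transaction list, added for illustration only (it does not use the apriori module's internals): support is the fraction of transactions containing an itemset, and confidence of a rule A -> B is support(A and B) / support(A).
###Code
# toy illustration of support and confidence (not the groceries data)
toy_transactions = [
    {"whole milk", "rolls/buns"},
    {"whole milk", "yogurt"},
    {"soda"},
    {"whole milk", "rolls/buns", "yogurt"},
]
def toy_support(itemset):
    # fraction of transactions that contain every item in the itemset
    return sum(itemset <= t for t in toy_transactions) / len(toy_transactions)
print("support(whole milk) = {:.2f}".format(toy_support({"whole milk"})))
print("confidence(whole milk -> yogurt) = {:.2f}".format(
    toy_support({"whole milk", "yogurt"}) / toy_support({"whole milk"})))
###Output
_____no_output_____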
|
examples/notebooks/19_map_to_html.ipynb | ###Markdown
[](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/19_map_to_html.ipynb)[](https://gishub.org/leafmap-binder)**Saving maps as a html file**Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
###Code
# !pip install leafmap
import leafmap.foliumap as leafmap
###Output
_____no_output_____
###Markdown
Create an interactive map.
###Code
m = leafmap.Map()
m.add_basemap("HYBRID")
m
###Output
_____no_output_____
###Markdown
Specify the output HTML file name to save the map as a web page.
###Code
m.to_html("mymap.html")
###Output
_____no_output_____
###Markdown
If the output HTML file name is not provided, the function will return a string containing the source code of the HTML file.
###Code
html = m.to_html()
# print(html)
###Output
_____no_output_____
###Markdown
[](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/19_map_to_html.ipynb)[](https://gishub.org/leafmap-pangeo)Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
###Code
# !pip install leafmap
import leafmap.foliumap as leafmap
# leafmap.update_package()
m = leafmap.Map()
m.add_basemap("HYBRID")
m
m.to_html("mymap.html")
html = m.to_html()
# print(html)
###Output
_____no_output_____
###Markdown
[](https://githubtocolab.com/giswqs/leafmap/blob/master/examples/notebooks/19_map_to_html.ipynb)[](https://gishub.org/leafmap-pangeo)**Saving maps as a html file**Uncomment the following line to install [leafmap](https://leafmap.org) if needed.
###Code
# !pip install leafmap
import leafmap.foliumap as leafmap
###Output
_____no_output_____
###Markdown
Create an interactive map.
###Code
m = leafmap.Map()
m.add_basemap("HYBRID")
m
###Output
_____no_output_____
###Markdown
Specify the output HTML file name to save the map as a web page.
###Code
m.to_html("mymap.html")
###Output
_____no_output_____
###Markdown
If the output HTML file name is not provided, the function will return a string containing the source code of the HTML file.
###Code
html = m.to_html()
# print(html)
###Output
_____no_output_____ |
src/Neural Network.ipynb | ###Markdown
Combining Neurons into a Neural Network
###Code
class OurNeuralNetwork:
'''
A neural network with:
- 2 inputs
- a hidden layer with 2 neurons (h1, h2)
- an output layer with 1 neuron (o1)
Each neuron has the same weights and bias:
- w = [0, 1]
- b = 0
'''
def __init__(self):
weights = np.array([0,1])
bias = 0
# The Neuron class here is from the previous section
self.h1 = Neuron(weights, bias)
self.h2 = Neuron(weights, bias)
self.o1 = Neuron(weights, bias)
def feedforward(self, x):
out_h1 = self.h1.feedforward(x)
out_h2 = self.h2.feedforward(x)
# The inputs for o1 are the outputs from h1 and h2
out_o1 = self.o1.feedforward(np.array([out_h1, out_h2]))
return out_o1
network = OurNeuralNetwork()
x = np.array([2, 3])
print(network.feedforward(x))
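# Worked check of the call above, added for illustration: with w = [0, 1] and b = 0,
# h1 = h2 = sigmoid(0*2 + 1*3 + 0) = sigmoid(3) ~= 0.9526, and
# o1 = sigmoid(0*h1 + 1*h2) = sigmoid(0.9526) ~= 0.7216, which should match the printed value.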
def mse_loss(y_true, y_pred):
# y_true and y_pred are numpy arrays of the same length.
return ((y_true - y_pred) ** 2).mean()
y_true = np.array([1, 0, 0, 1])
y_pred = np.array([0, 0, 0, 0])
print(mse_loss(y_true, y_pred))
def deriv_sigmoid(x):
fx = sigmoid(x)
return fx * (1 - fx)
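# Note added for clarity: for f(x) = 1 / (1 + e^(-x)),
# f'(x) = e^(-x) / (1 + e^(-x))^2 = f(x) * (1 - f(x)),
# so the derivative can be computed from the sigmoid value itself, which is what deriv_sigmoid does.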
class NeuralNetwork:
def __init__(self):
# Weights
self.w1 = np.random.normal()
self.w2 = np.random.normal()
self.w3 = np.random.normal()
self.w4 = np.random.normal()
self.w5 = np.random.normal()
self.w6 = np.random.normal()
# Biases
self.b1 = np.random.normal()
self.b2 = np.random.normal()
self.b3 = np.random.normal()
def feedforward(self, x):
# x is a numpy array with 2 elements.
h1 = sigmoid(self.w1 * x[0] + self.w2 * x[1] + self.b1)
h2 = sigmoid(self.w3 * x[0] + self.w4 * x[1] + self.b2)
o1 = sigmoid(self.w5 * h1 + self.w6 * h2 + self.b3)
return o1
def train(self, data, all_y_trues):
'''
- data is a (n x 2) numpy array, n = # of samples in the dataset.
- all_y_trues is a numpy array with n elements.
Elements in all_y_trues correspond to those in data.
'''
learn_rate = 0.1
epochs = 1000 # number of times to loop through the entire dataset
for epoch in range(epochs):
for x, y_true in zip(data, all_y_trues):
# --- Do a feedforward (we'll need these values later)
sum_h1 = self.w1 * x[0] + self.w2 * x[1] + self.b1
h1 = sigmoid(sum_h1)
sum_h2 = self.w3 * x[0] + self.w4 * x[1] + self.b2
h2 = sigmoid(sum_h2)
sum_o1 = self.w5 * h1 + self.w6 * h2 + self.b3
o1 = sigmoid(sum_o1)
y_pred = o1
# --- Calculate partial derivatives.
# --- Naming: d_L_d_w1 represents "partial L / partial w1"
d_L_d_ypred = -2 * (y_true - y_pred)
# Neuron o1
d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)
d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)
d_ypred_d_b3 = deriv_sigmoid(sum_o1)
d_ypred_d_h1 = self.w5 * deriv_sigmoid(sum_o1)
d_ypred_d_h2 = self.w6 * deriv_sigmoid(sum_o1)
# Neuron h1
d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)
d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)
d_h1_d_b1 = deriv_sigmoid(sum_h1)
# Neuron h2
d_h2_d_w3 = x[0] * deriv_sigmoid(sum_h2)
d_h2_d_w4 = x[1] * deriv_sigmoid(sum_h2)
d_h2_d_b2 = deriv_sigmoid(sum_h2)
# --- Update weights and biases
# Neuron h1
self.w1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1
self.w2 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2
self.b1 -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1
# Neuron h2
self.w3 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3
self.w4 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4
self.b2 -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2
# Neuron o1
self.w5 -= learn_rate * d_L_d_ypred * d_ypred_d_w5
self.w6 -= learn_rate * d_L_d_ypred * d_ypred_d_w6
self.b3 -= learn_rate * d_L_d_ypred * d_ypred_d_b3
# --- Calculate total loss at the end of each epoch
if epoch % 10 == 0:
y_preds = np.apply_along_axis(self.feedforward, 1, data)
loss = mse_loss(all_y_trues, y_preds)
print("Epoch %d loss: %.3f" % (epoch, loss))
# Define dataset
data = np.array([
[-2, -1], # Alice
[25, 6], # Bob
[17, 4], # Charlie
[-15, -6], # Diana
])
all_y_trues = np.array([
1, # Alice
0, # Bob
0, # Charlie
1, # Diana
])
# Train our neural network!
network = NeuralNetwork()
network.train(data, all_y_trues)
# Make some predictions
emily = np.array([-7, -3]) # 128 pounds, 63 inches
frank = np.array([20, 2]) # 155 pounds, 68 inches
print("Emily: %.3f" % network.feedforward(emily)) # 0.951 - F
print("Frank: %.3f" % network.feedforward(frank)) # 0.039 - M
###Output
Emily: 0.968
Frank: 0.038
|
notebooks/milestone3/Milestone3-Task3.ipynb | ###Markdown
Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import boto3
import awswrangler as wr
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models.
###Code
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
## using boto3 via awswrangler since I could not connect to s3 with pandas
df = wr.s3.read_csv(path="s3://mds-s3-student96/ml_data_SYD.csv", boto3_session= session,
index_col=0, parse_dates=True)
df.head()
df = df.dropna()
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)
# EDA
observed = train_df['Observed']
observed = observed.resample('Y').mean()
# Plot Annual Mean Rainfall
observed.plot.line(xlabel="Year", ylabel="Observed Rainfall", legend=False)
plt.title('Annual Mean Rainfall')
plt.show()
# Examine Descriptive Statistics
train_df.describe()
# Training Ensemble Method
ensemble = RandomForestRegressor()
X_train = train_df.drop('Observed', axis = 1)
y_train = train_df['Observed']
ensemble.fit(X_train, y_train)
# Evaluating Results
X_test = test_df.drop('Observed', axis = 1)
y_test = test_df['Observed']
test_predict = ensemble.predict(X_test)
# Check RMSE of test_predict
mean_squared_error(y_test, test_predict, squared=False)
# Calculate RMSE of individual model in the test set
models = X_test.columns
scores = []
for m in models:
score = mean_squared_error(y_test, X_test[m], squared=False)
scores.append(score)
test_results = pd.DataFrame({'Models':models, 'RMSE':scores})
test_results
###Output
_____no_output_____
###Markdown
Given that the test RMSE of the ensemble method is lower than the test-set RMSE of every individual model, we can conclude that we are getting better results with the ensemble method. Part 2: Preparation for deploying model next week Complete task 4 from the milestone3 before coming here We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from the task 4 from milestone3), here we then use the same hyperparameters to train a scikit-learn model.
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)  # use the tuned settings stated above
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
DSCI 525 - Web and Cloud Computing Project: Daily Rainfall Over NSW, Australia Milestone 3: Setup Spark Cluster and Develop Machine Learning Authors: Group 24 Huanhuan Li, Nash Makhija and Nicholas Wu Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models. Step 1. Read the data CSV from your S3 bucket.
###Code
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
#aws_credentials = {"key": "","secret": ""}
df = pd.read_csv("s3://mds-s3-student82/output/ml_data_SYD.csv", index_col=0, parse_dates=True)
df
###Output
_____no_output_____
###Markdown
Step 2. Drop rows with nans.
###Code
df = df.dropna()
df
###Output
_____no_output_____
###Markdown
Step 3. Split the data into train (80%) and test (20%) portions with random_state=123.
###Code
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)
###Output
_____no_output_____
###Markdown
Step 4. Carry out EDA of your choice on the train split.
###Code
train_df.describe(include="all")
train_df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 36791 entries, 1953-10-26 to 1932-01-31
Data columns (total 26 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ACCESS-CM2 36791 non-null float64
1 ACCESS-ESM1-5 36791 non-null float64
2 AWI-ESM-1-1-LR 36791 non-null float64
3 BCC-CSM2-MR 36791 non-null float64
4 BCC-ESM1 36791 non-null float64
5 CMCC-CM2-HR4 36791 non-null float64
6 CMCC-CM2-SR5 36791 non-null float64
7 CMCC-ESM2 36791 non-null float64
8 CanESM5 36791 non-null float64
9 EC-Earth3-Veg-LR 36791 non-null float64
10 FGOALS-g3 36791 non-null float64
11 GFDL-CM4 36791 non-null float64
12 INM-CM4-8 36791 non-null float64
13 INM-CM5-0 36791 non-null float64
14 KIOST-ESM 36791 non-null float64
15 MIROC6 36791 non-null float64
16 MPI-ESM-1-2-HAM 36791 non-null float64
17 MPI-ESM1-2-HR 36791 non-null float64
18 MPI-ESM1-2-LR 36791 non-null float64
19 MRI-ESM2-0 36791 non-null float64
20 NESM3 36791 non-null float64
21 NorESM2-LM 36791 non-null float64
22 NorESM2-MM 36791 non-null float64
23 SAM0-UNICON 36791 non-null float64
24 TaiESM1 36791 non-null float64
25 Observed 36791 non-null float64
dtypes: float64(26)
memory usage: 7.6 MB
###Markdown
Step 5. Train ensemble machine learning model using RandomForestRegressor and evaluate with RMSE by considering Observed as the target column.
###Code
X_train = train_df.drop(columns=["Observed"])
y_train = train_df["Observed"]
X_test = test_df.drop(columns=["Observed"])
y_test = test_df["Observed"]
model = RandomForestRegressor(random_state=123)
model.fit(X_train, y_train)
result = {}
result["RandomForestRegressor"] = [round(mean_squared_error(y_train, model.predict(X_train), squared=False), 6),
round(mean_squared_error(y_test, model.predict(X_test), squared=False), 6)]
pd.DataFrame(result, index=["train_error", "test_error"])
###Output
_____no_output_____
###Markdown
Step 6. Discuss results. Are we getting better results with ensemble models compared to the individual climate models?
###Code
#Calculate the train and test score on individual climate models:
individual_model_names = list(train_df.columns)
individual_model_names.remove("Observed")
for name in individual_model_names:
result[name] = [round(mean_squared_error(y_train, train_df[name], squared=False), 6),
round(mean_squared_error(y_test, test_df[name], squared=False), 6)]
scores = pd.DataFrame(result, index=["train_error", "test_error"]).T.sort_values("test_error")
scores
###Output
_____no_output_____
###Markdown
Discussion1. The ensemble Random Forest Regressor model has a 3.11 RMSE train error and 8.84 test error. 2. The ensemble Random Forest Regressor seems to perform the best, with the lowest train and test error compared with all the individual models. This is expected because each individual model has different areas of inaccuracy in its predictions; by combining them, the ensemble model is able to outperform the individual models. 3. However, the gap between train and test error is greater than for the individual models, so there seems to be a bigger overfitting problem in our ensemble model than in the individual models. 4. Hence, we will perform hyperparameter optimization to improve our ensemble model. Part 2: Preparation for deploying model next week We’ve found ```n_estimators=100, max_depth=5, bootstrap=False``` to be the best hyperparameter settings with MLlib (from the task 4 from milestone3), here we then use the same hyperparameters to train a scikit-learn model.
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5, bootstrap=False)
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.ensemble import RandomForestRegressor
from lightgbm.sklearn import LGBMRegressor, LGBMClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.metrics import make_scorer
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
import calendar
from pathlib import Path
import s3fs
import sys
plt.style.use('dark_background')
plt.rcParams.update({'font.size': 14, 'axes.labelweight': 'normal', 'figure.figsize': (8, 6)})
# make sure root dir is added to sys.path for our imports from /src and reading files to work
root_name = 'rainfall_group22'
p_root = Path.cwd()
while not p_root.name == root_name:
p_root = p_root.parent
sys.path.append(str(p_root))
# need to be in the cloned repo to import our custom functions
from src import sklearn_helper_funcs as sf
bucket = 's3://mds-s3-student19'
aws_credentials = pd \
.read_csv(p_root / 'student19_accessKeys.csv') \
.iloc[0].to_dict()
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models.
###Code
# read data from milestone2 from s3
df = pd.read_csv(
f'{bucket}/output/ml_data_SYD.csv',
index_col=0,
parse_dates=True,
storage_options=aws_credentials) \
.dropna() \
.pipe(sf.lower_cols)
df_orig = df.copy()
df
# rainfall hist
ax = df.observed_rain.plot(
kind='hist',
bins=50,
title='Rainfall Distribution')
ax.set(xlabel='Observed Rainfall (mm)');
# show mean rainfall per year
df[['observed_rain']].groupby(pd.Grouper(freq='Y')) \
.mean() \
.plot(
title='Mean Rainfall per Year',
xlabel='Year',
ylabel='Mean Rainfall (mm)');
df.observed_rain.groupby(lambda x: x.month) \
.mean() \
.rename_axis('month') \
.reset_index() \
.assign(month=lambda x: x.month.apply(lambda x: calendar.month_abbr[x])) \
.set_index('month') \
.plot(
title='Mean Rainfall per Month',
xlabel='Month',
ylabel='Mean Rainfall (mm)');
target = 'observed_rain'
features = dict(target=[target])
features['numeric'] = sf.all_except(df, features.values())
encoders = dict(
numeric=MinMaxScaler(feature_range=(0, 1)))
smape_scorer = make_scorer(sf.smape, greater_is_better=False)
scoring = dict(
rmse='neg_root_mean_squared_error',
mape=smape_scorer)
cv_args = dict(cv=5, n_jobs=-2, return_train_score=True, scoring=scoring)
mm = sf.ModelManager(
scoring=scoring,
cv_args=cv_args,
features=features,
encoders=encoders,
random_state=123)
x_train, y_train, x_test, y_test = mm.make_train_test(df, target=target, train_size=0.8)
mm.show_ct()
# LGBMRegressor gives better results and trains much faster, will use lgbm going forward
models = dict(
rnd=RandomForestRegressor(max_depth=3, n_estimators=50),
lgbm=LGBMRegressor(max_depth=3, n_estimators=50))
mm.cross_val(models=models, lower_better=True)
mm.shap_plot('lgbm')
# compare RMSE of each individual model vs ensemble
model = models['lgbm']
model.fit(x_train, y_train)
df_test = mm.df_test.copy()
df_test['lgbm_ensemble'] = model.predict(x_test)
m_rmse = {}
for col in df_test.drop(columns=[target]).columns:
m_rmse[col] = mean_squared_error(
y_pred=df_test[col].values,
y_true=df_test.observed_rain.values,
squared=False)
pd.DataFrame \
.from_dict(m_rmse, orient='index', columns=['RMSE']) \
.rename_axis('model') \
.sort_values('RMSE') \
.style \
.pipe(sf.bg, rev=False) \
.format('{:.3f}')
###Output
_____no_output_____
###Markdown
- The ensemble model seems to have ~20% better scores than the mean of all individual models. Part 2: Preparation for deploying model next week Complete task 4 from the milestone3 before coming here We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from the task 4 from milestone3), here we then use the same hyperparameters to train a scikit-learn model.
###Code
# using lgbm model from previous step (faster and better scores)
# model = RandomForestRegressor(n_estimators=100, max_depth=20)
# model.fit(x_train, y_train.values.ravel())
print(f'Train RMSE: {mean_squared_error(y_train, model.predict(x_train), squared=False):.2f}')
print(f'Test RMSE: {mean_squared_error(y_test, model.predict(x_test), squared=False):.2f}')
# ready to deploy
model_name = 'model.joblib'
dump(model, model_name)
###Output
_____no_output_____
###Markdown
***Upload model.joblib to s3. You choose how you want to upload it.***
###Code
import subprocess
source = p_root / model_name
args = ['aws', 's3' , 'cp', str(source), f'{bucket}/{model_name}']
subprocess.run(args);
###Output
_____no_output_____
###Markdown
Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
## add any other additional packages that you need. You are free to use any packages for vizualization.
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models. Step 1: Read the data from S3
###Code
df = pd.read_csv("s3://mds-s3-001/output/ml_data_SYD.csv", index_col=0, parse_dates=True)
#df = pd.read_csv("ml_data_SYD.csv", index_col=0, parse_dates=True)
df.shape
df.head()
###Output
_____no_output_____
###Markdown
Step 2: Drop rows with nans
###Code
df = df.dropna()
df.shape
###Output
_____no_output_____
###Markdown
Step 3: Train test Split
###Code
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)
X_train, y_train = train_df.drop(columns="observed_rainfall"), train_df["observed_rainfall"]
X_test, y_test = test_df.drop(columns="observed_rainfall"), test_df["observed_rainfall"]
###Output
_____no_output_____
###Markdown
Step 4: EDA and observations
###Code
train_df.shape
###Output
_____no_output_____
###Markdown
**Observation:**There are 36,791 rows in the train data frame, with 25 features (individual climate models) for predicting rainfall in Sydney, and one target, which is the observed rainfall.
###Code
train_df.describe().T
###Output
_____no_output_____
###Markdown
**Observation:**Across different models, the predicted rainfall has similar mean values and standard deviations. Also, all minimum values are quite close to 0. However, the maximum values vary a lot between models: some models go as high as about 167, while others top out at around 74.
###Code
train_df.loc[:, train_df.columns != 'observed_rainfall'].mean()
###Output
_____no_output_____
###Markdown
**Observations:**I took the average of predicted rainfall for different models. As we can see above, model INM-CM5-0 returns the highest average amount of rainfall, while model MPI-ESM1-2-HR returns the lowest average amount of rainfall.
###Code
plt.hist(train_df['observed_rainfall'], bins = 100)
plt.xlabel('Actual amount of rainfall')
plt.ylabel('Counts')
plt.title('Distribution of observed rainfall (target)');
###Output
_____no_output_____
###Markdown
**Observation:**This is a significantly right-skewed distribution. It illustrates that Sydney rarely gets rain: 0 observed rainfall is the most common value and accounts for a much higher proportion of days than any other amount. This may indicate that the individual models also predict no rainfall much more frequently than any other amount.
###Code
plt.hist(train_df.loc[:, train_df.columns != 'observed_rainfall'], bins = 100)
plt.legend(train_df.loc[:, train_df.columns != 'observed_rainfall'].columns.values.tolist(),
loc='center left',
bbox_to_anchor=(1, 0.5),
prop={'size': 8})
plt.xlabel('Predicted rainfall')
plt.ylabel('Counts')
plt.title('Distribution of predicted rainfall');
###Output
_____no_output_____
###Markdown
**Observation:**As we guessed before, the individual models predict 0 rainfall as their most frequent value. Step 5: Train and evaluate the machine learning model
###Code
model_untuned = RandomForestRegressor(random_state=123)
model_untuned.fit(X_train, y_train)
data = {'Model': ['Ensembled Model'], 'RMSE': [mean_squared_error(y_test, model_untuned.predict(X_test), squared=False)]}
result_df = pd.DataFrame(data).reset_index(drop=True)
result_df
###Output
_____no_output_____
###Markdown
Step 6: Discuss results
###Code
all_results = {}
for col in X_test.columns:
RMSE = mean_squared_error(y_test, X_test[col], squared=False)
all_results[col] = RMSE
model_results_df = pd.DataFrame(all_results.values(), index=all_results.keys(), columns=['RMSE']).sort_values('RMSE', ascending=False)
model_results_df
###Output
_____no_output_____
###Markdown
**Observation:**In order to make sure the number of instances is equal for comparison purposes, I used the test data frame to calculate the RMSE for each individual model. By comparing the RMSE of our trained ensemble regressor with each individual model's performance, we can conclude that our model outperforms all individual models, since our ensemble model's RMSE is the lowest. Part 2: Preparation for deploying model next week ***NOTE: Complete task 4 from the milestone3 before coming here*** We’ve found the best hyperparameter settings with MLlib (from the task 4 from milestone3), here we then use the same hyperparameters to train a scikit-learn model.
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")
plt.rcParams.update(
{"font.size": 16, "axes.labelweight": "bold", "figure.figsize": (8, 6)}
)
###Output
Matplotlib created a temporary config/cache directory at /tmp/matplotlib-oj_is30w because the default path (/home/jupyter-student92/.cache/matplotlib) is not a writable directory; it is highly recommended to set the MPLCONFIGDIR environment variable to a writable directory, in particular to speed up the import of Matplotlib and to better support multiprocessing.
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models. **1. Read the data CSV**
###Code
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
# aws_credentials = {
# "key": "",
# "secret": "",
# } # removed secret and key when submitting the notebook
df = pd.read_csv(
"s3://mds-s3-student96/ml_data_SYD.csv",
index_col=0,
parse_dates=True,
storage_options=aws_credentials,
)
###Output
_____no_output_____
###Markdown
**2. Drop rows with nans**
###Code
df = df.dropna()
###Output
_____no_output_____
###Markdown
**3. Split the data**
###Code
train, test = train_test_split(df, test_size=0.2, random_state=123)
###Output
_____no_output_____
###Markdown
**4. Carry out EDA on the train split**
###Code
train.head()
train.describe()
train.info()
train.iloc[:10, :2]
correlations = train.corr()
plt.figure(figsize=(15, 15))
sns.heatmap(correlations, vmax=0.1)
plt.title("Correlation Between Model Predictions")
plt.xlabel("Model IDs")
plt.ylabel("Model IDs");
###Output
_____no_output_____
###Markdown
**5. Train ensemble machine learning model and evaluate with RMSE**
###Code
X_train, y_train = train.drop(columns=["Observed"]), train["Observed"]
X_test, y_test = test.drop(columns=["Observed"]), test["Observed"]
model = RandomForestRegressor()
model.fit(X_train, y_train)
print(
f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}"
)
print(
f"Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}"
)
models = {}
for column in X_train:
models[column] = mean_squared_error(y_train, X_train[column], squared=False)
model_rmse = pd.DataFrame(models.items(), columns=["Model", "RMSE"])
model_rmse.set_index("Model")
###Output
_____no_output_____
###Markdown
**6. Discuss results** > - The `RandomForestRegressor` ensemble model with default arguments gets a decent RMSE score of 3.13 on the train dataset. Comparing this with the individual climate models, we find that no individual model gets an RMSE less than 9. Therefore, we can say that our ensemble model with default arguments gives better results than the individual climate models.> - However, by comparing the train RMSE with the test RMSE of the `RandomForestRegressor` ensemble model, we can see that the model with default hyperparameters is clearly overfitting the training dataset. In this case, we need to tune hyperparameters next in Task 4. Part 2: Preparation for deploying model next week Complete task 4 from the milestone3 before coming here We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from task 4 of milestone3); here we use the same hyperparameters to train a scikit-learn model.
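The actual tuning in Task 4 is done with MLlib, but for reference, here is a minimal scikit-learn sketch of the same idea; the grid values are illustrative assumptions, not the milestone's settings:

```python
# Sketch: randomized search over a small illustrative grid, scored by RMSE.
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor

param_distributions = {"n_estimators": [50, 100, 200], "max_depth": [5, 10, 20, None]}
search = RandomizedSearchCV(
    RandomForestRegressor(random_state=123),
    param_distributions=param_distributions,
    n_iter=5,
    scoring="neg_root_mean_squared_error",
    cv=3,
    random_state=123,
)
search.fit(X_train, y_train)
print(search.best_params_)
```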
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
print(
f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}"
)
print(
f"Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}"
)
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
DSCI 525 - Web and Cloud Computing Group 04: Heidi Ye, Junting He, Kamal Moravej, Tanmay Sharma Date: 23-04-2021 Repo Link: https://github.com/UBC-MDS/group4-525 Milestone 3: Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import plotly.express as px
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models. Part 1.1 Read the data CSV from your s3 bucket.
###Code
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
aws_credentials ={"key": "","secret": ""}
df = pd.read_csv("s3://mds-s3-student47/output/ml_data_SYD.csv", index_col=0, parse_dates=True, storage_options=aws_credentials)
df
## Use your ML skills to get from step 1 to step 6
###Output
_____no_output_____
###Markdown
Part 1.2 Drop rows with nans.
###Code
#Check the number of NaNs in the data
df.isnull().sum().sum()
#Drop rows with NaNs
df = df.dropna()
print(f"Now there are {df.isnull().sum().sum()} NaN values in the dataframe.")
###Output
Now there are 0 NaN values in the dataframe.
###Markdown
Part 1.3 Split the data into train (80%) and test (20%) portions with random_state=123.
###Code
#Splitting data into train and test splits
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)
X_train, y_train = train_df.drop(columns=["Observed"]), train_df["Observed"]
X_test, y_test = test_df.drop(columns=["Observed"]), test_df["Observed"]
###Output
_____no_output_____
###Markdown
Part 1.4 Carry out EDA of your choice on the train split.
###Code
#Simple EDA on the data
train_df.describe()
###Output
_____no_output_____
###Markdown
Part 1.5 Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering "Observed" as the target column.
###Code
#Train ensemble machine learning model using RandomForestRegressor
model = RandomForestRegressor(random_state=123)
model.fit(X_train, y_train)
#Evaluating the model
train_RMSE_rf = mean_squared_error(y_train, model.predict(X_train), squared=False)
test_RMSE_rf = mean_squared_error(y_test, model.predict(X_test), squared=False)
print(f"Train RMSE of ensemble model: {train_RMSE_rf:.4f}")
print(f"Test RMSE of ensemble model: {test_RMSE_rf:.4f}")
print(f"Train RMSE - Test RMSE of ensemble model: {train_RMSE_rf - test_RMSE_rf:.4f}")
###Output
Train RMSE of ensemble model: 3.1081
Test RMSE of ensemble model: 8.8443
Train RMSE - Test RMSE of ensemble model: -5.7362
###Markdown
Part 1.6 Discuss your results. Are you getting better results with ensemble models compared to the individual climate models?
###Code
# Calculating RMSE for each of the individual models
train_RMSE_all = []
test_RMSE_all = []
num_models = X_train.shape[1]
for index in range(num_models):
train_RMSE = round(mean_squared_error(y_train, X_train.iloc[:, index], squared=False), 4)
test_RMSE = round(mean_squared_error(y_test, X_test.iloc[:, index], squared=False), 4)
train_RMSE_all.append(train_RMSE)
test_RMSE_all.append(test_RMSE)
#list of all models
models = X_train.columns
# DataFrame of RMSE for all models
results_all = pd.DataFrame({"Model": models,
"Train_RMSE": train_RMSE_all,
"Test_RMSE": test_RMSE_all,
"Train_Test_RMSE_Delta": [train - test for train,test in zip(train_RMSE_all, test_RMSE_all)]} )
results_all.sort_values("Test_RMSE").reset_index(drop=True)
###Output
_____no_output_____
###Markdown
Discussion:- The training (3.11) and test (8.85) RMSEs for the ensemble machine learning model using `RandomForestRegressor` are lower than those for any of the 25 individual models.- However, the training-test RMSE gap is much larger (5.73) for the ensemble machine learning model than for any of the 25 individual models, wherein this delta is in the range of 0.4 to 0.6.- Hence, we can say that the individual models are generalizing better than the ensemble model.- The best performing individual model was `KIOST-ESM` with a train RMSE of 9.20 and a test RMSE of 9.60. Part 2: Preparation for deploying model next week Complete task 4 from the milestone3 before coming here We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from task 4 of milestone3); here we use the same hyperparameters to train a scikit-learn model.
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)  # hyperparameters from the MLlib tuning in milestone3 task 4
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
Task 3 Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (8,6)})
## add any other additional packages that you need. You are free to use any packages for vizualization.
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models.
###Code
# df = pd.read_csv("ml_data_SYD.csv", index_col=0, parse_dates=True)
## Depending on the permissions that you provided to your bucket you might need to provide your aws credentials
## to read from the bucket, if so provide with your credentials and pass as storage_options=aws_credentials
# aws_credentials = {"key": "","secret": "","token":""}
aws_credentials = {"key": "ASIA6GBAHHZH4ZOEGCVK","secret": "fPuqpGDCgg2hnnZtHWmsqLvRjSBPu7GxnRLsaD9x", "token": "FwoGZXIvYXdzELf//////////wEaDD65jIpKf242x8J88iLGAQp4utwkdejKJJ8atkFlAlfhFd/STs9CLSUmLNhYVJ1hzy2nQ1kPax8OptFtgL67BcFBqAB5r56RU3WJoJONFJoMvymI70MGtFbEiM6fvO8EyDKEHIiErdCWQZOeSv1QwBFhtXRVYrvdZgCAbaHyLi6iJm4BIeMuWUOkJpltqKyXtHHQI8x89Ue5/N0iFRY3ifIfWbJV/9kCwBdqH2OSibWHiQ8bQGL0UnxlOBriuhT85xf7G8zUjs/FPyd3osaOjqspwSmX7yiL69eSBjItMohigSM9MoTejBwMNgHmtZZZ7v3IfW5o12CjO3diMwIVMdAgv87vqj5dPSSh"}
## here 100 data points for testing the code
df = pd.read_csv("s3://mds-s3-28/output/ml_data_SYD.csv",
storage_options=aws_credentials,
index_col=0,
parse_dates=True)
# Drop NA
df = df.dropna()
print(df.shape)
df.head()
# Split train and test
train_df, test_df = train_test_split(df, test_size=0.2, random_state=123)
train_df.index.sort_values()
# EDA train data
plt.style.use('ggplot')
plt.rcParams["figure.figsize"] = (12, 6)
for i in train_df.columns:
x = train_df.index.sort_values()
y = train_df[i]
plt.plot(x, y,label =i)
plt.legend(train_df.columns,
loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("Time")
plt.ylabel("Predicted rainfall (mm/day)")
plt.show()
# Train Randomforest model
X_train = train_df.drop(columns=["observed"])
y_train = train_df["observed"]
X_test = test_df.drop(columns=["observed"])
y_test = test_df["observed"]
model = RandomForestRegressor()
model.fit(X_train, y_train)
# Predict y_test and evaluation
y_hat = model.predict(X_test)
results = {}
results["Model"] = ["Ensemble"]
results["RMSE"] = [mean_squared_error(y_test, y_hat, squared=False)]
pd.DataFrame(results).set_index("Model")
#Comparing the RMSE with individaul model
for col in X_test.columns:
results["Model"].append(col)
results["RMSE"].append(mean_squared_error(y_test, X_test[col], squared=False))
results_df = pd.DataFrame(results).set_index("Model").sort_values("RMSE")
results_df
###Output
_____no_output_____
###Markdown
Compared to the individual climate models, the ensemble model performs best, with the lowest RMSE on the test set. Part 2: Preparation for deploying model next week ***NOTE: Complete task 4 from milestone3 before coming here*** We’ve found the best hyperparameter settings with MLlib (from task 4 of milestone3); here we use the same hyperparameters to train a scikit-learn model.
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____
###Markdown
Task 3 *By Group III: Mitchie, Jianru, Aishwarya, Aditya* *Date: April 24, 2021* Imports
###Code
import numpy as np
import pandas as pd
from joblib import dump, load
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
plt.style.use(plt.style.available[4])
plt.rcParams.update({'font.size': 16, 'axes.labelweight': 'bold', 'figure.figsize': (20,6)})
###Output
_____no_output_____
###Markdown
Part 1: Recall as a final goal of this project. We want to build and deploy ensemble machine learning models in the cloud, where features are outputs of different climate models and the target is the actual rainfall observation. In this milestone, you'll actually build these ensemble machine learning models in the cloud. **Your tasks:**1. Read the data CSV from your s3 bucket. 2. Drop rows with nans. 3. Split the data into train (80%) and test (20%) portions with `random_state=123`. 4. Carry out EDA of your choice on the train split. 5. Train ensemble machine learning model using `RandomForestRegressor` and evaluate with metric of your choice (e.g., `RMSE`) by considering `Observed` as the target column. 6. Discuss your results. Are you getting better results with ensemble models compared to the individual climate models? > Recall that individual columns in the data are predictions of different climate models.
###Code
%%time
## You could download it from your bucket, or you can use the file that I have in my bucket.
## You should be able to access it from my bucket using your key and secret
aws_credentials ={"key": " ": " "} ## dont include you secret and key when submitting the notebook
df = pd.read_csv("s3://mds-s3-student96/ml_data_SYD.csv", index_col=0, parse_dates=True, storage_options=aws_credentials)
###Output
CPU times: user 858 ms, sys: 128 ms, total: 986 ms
Wall time: 1.56 s
###Markdown
1.1 Preliminary EDA
###Code
df.shape
df.head()
# There are missing values in some cols
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 46020 entries, 1889-01-01 to 2014-12-31
Data columns (total 26 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ACCESS-CM2 46020 non-null float64
1 ACCESS-ESM1-5 46020 non-null float64
2 AWI-ESM-1-1-LR 46020 non-null float64
3 BCC-CSM2-MR 45990 non-null float64
4 BCC-ESM1 45990 non-null float64
5 CMCC-CM2-HR4 45990 non-null float64
6 CMCC-CM2-SR5 45990 non-null float64
7 CMCC-ESM2 45990 non-null float64
8 CanESM5 45990 non-null float64
9 EC-Earth3-Veg-LR 46020 non-null float64
10 FGOALS-g3 45990 non-null float64
11 GFDL-CM4 45990 non-null float64
12 INM-CM4-8 45990 non-null float64
13 INM-CM5-0 45990 non-null float64
14 KIOST-ESM 45990 non-null float64
15 MIROC6 46020 non-null float64
16 MPI-ESM-1-2-HAM 46020 non-null float64
17 MPI-ESM1-2-HR 46020 non-null float64
18 MPI-ESM1-2-LR 46020 non-null float64
19 MRI-ESM2-0 46020 non-null float64
20 NESM3 46020 non-null float64
21 NorESM2-LM 45990 non-null float64
22 NorESM2-MM 45990 non-null float64
23 SAM0-UNICON 45989 non-null float64
24 TaiESM1 45990 non-null float64
25 Observed 46020 non-null float64
dtypes: float64(26)
memory usage: 9.5 MB
###Markdown
1.2 Drop rows with NaNs
###Code
df=df.dropna()
df.info()
X, y = df.drop(columns = ['Observed']), df.Observed
print(f"Shape of X: {X.shape}\nShape of labels y: {y.shape}")
###Output
Shape of X: (45989, 25)
Shape of labels y: (45989,)
###Markdown
1.3 Split the data 80/20
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=123)
print(f"Shape of X_train: {X_train.shape}\nShape of y_train: {y_train.shape}\nShape of X_test: {X_test.shape}\nShape of y_test: {y_test.shape}")
###Output
Shape of X_train: (36791, 25)
Shape of y_train: (36791,)
Shape of X_test: (9198, 25)
Shape of y_test: (9198,)
###Markdown
1.4 EDA on the train set
###Code
X_train_yr = X_train.sort_index().resample('10Y').mean()
y_train_yr = y_train.sort_index().resample('10Y').mean()
ax1 = X_train_yr.plot.line()
ax2 = y_train_yr.plot.line(ax=ax1, linestyle='--', marker='o', linewidth=5, markersize=11)
plt.legend(loc=8, ncol=6,bbox_to_anchor=(0.5, -.6, 0, 0))
plt.title('Rainfall amount predicted by 25 models during 1889-2019')
plt.xlabel('Year')
plt.ylabel('Rainfall (mm/day)')
plt.show()
###Output
_____no_output_____
###Markdown
1.5 Train ensemble ML model using `RandomForestRegressor`.
###Code
%%time
model_rf = RandomForestRegressor()
model_rf.fit(X_train, y_train)
print(f"Training on RandomForestRegressor is DONE! 🌲")
print('----------------------------------------------')
model_df=None
model_df = pd.DataFrame(columns=["train_rmse", "test_rmse"])
for model in X_train.columns:
model_df.at[model, 'train_rmse'] = mean_squared_error(y_train, X_train[model], squared=False)
model_df.at[model, 'test_rmse'] = mean_squared_error(y_test, X_test[model], squared=False)
model_df.sort_values('test_rmse');
###Output
_____no_output_____
###Markdown
1.6 Evaluate the ensemble model by `RMSE`
###Code
model_df.at['ensemble_rfr', 'train_rmse'] = mean_squared_error(y_train, model_rf.predict(X_train), squared=False)
model_df.at['ensemble_rfr', 'test_rmse'] = mean_squared_error(y_test, model_rf.predict(X_test), squared=False)
model_df.sort_values('test_rmse')
model_weights = pd.DataFrame(model_rf.feature_importances_, index=X_train.columns, columns=['model_weight'])
model_weights.sort_values('model_weight', ascending=False)
###Output
_____no_output_____
###Markdown
1.7 Visualization of the performance by the ensemble model
###Code
plt.figure(figsize=(8, 8))
plt.scatter(y_train, model_rf.predict(X_train), alpha=0.5, label='Train set')
plt.scatter(y_test, model_rf.predict(X_test), alpha=0.5, label='Test set')
plt.xlabel('Observed rainfall (mm/day)')
plt.ylabel('Predicted rainfall by ensemble model')
plt.xlim(0, 260)
plt.ylim(0, 260)
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
1.8 Discussion - Based on the `RMSE` metric alone, the `RandomForestRegressor` ensemble model had the best performance among all 25 models, ending up with a test RMSE of 8.85.- Looking further into the predicted rainfall values, it is noticeable that the predictions tend to be much lower than the observations in both the train and test sets. When the rainfall amount is within 10 mm/day, the predictions from the ensemble model tend to be more accurate. - The three most heavily weighted models identified by `RandomForestRegressor` are `NorESM2-MM, NorESM2-LM, INM-CM4-8`. Interestingly, they do not show the top performance when predicting on their own. Part 2: Preparation for deploying model next week Complete task 4 from the milestone3 before coming here We’ve found ```n_estimators=100, max_depth=5``` to be the best hyperparameter settings with MLlib (from task 4 of milestone3); here we use the same hyperparameters to train a scikit-learn model.
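The under-prediction tendency mentioned above can be quantified directly; a small sketch, assuming `model_rf`, `X_test` and `y_test` from the cells above:

```python
# Sketch: how often and by how much the ensemble under-predicts on the test set.
import numpy as np

pred_test = model_rf.predict(X_test)
print(f"Fraction of test days under-predicted: {np.mean(pred_test < y_test):.2f}")
print(f"Mean residual (observed - predicted): {np.mean(y_test - pred_test):.2f} mm/day")
```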
###Code
model = RandomForestRegressor(n_estimators=100, max_depth=5)
model.fit(X_train, y_train)
print(f"Train RMSE: {mean_squared_error(y_train, model.predict(X_train), squared=False):.2f}")
print(f" Test RMSE: {mean_squared_error(y_test, model.predict(X_test), squared=False):.2f}")
# ready to deploy
dump(model, "model.joblib")
###Output
_____no_output_____ |
Run deepOrganoid.ipynb | ###Markdown
deepOrganoid: Brightfield viability assay for matrix-embedded organoids Select the first box and press SHIFT+ENTER. This will run that block and advance to the next.
###Code
from __future__ import print_function
import os
import sys
import time
import yaml
import pickle
import traceback
import pandas as pd
import numpy as np
import scipy.misc
import scipy.io.wavfile
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from importlib.machinery import SourceFileLoader
from multiprocessing import cpu_count
from matplotlib.pyplot import imread
from PIL import Image
print("All libraries have been loaded")
###Output
All libraries have been loaded
###Markdown
**The subsequent box should match the following:** Tensorflow: 2.1.0; nvcc: NVIDIA (R) Cuda compiler driver; Copyright (c) 2005-2020 NVIDIA Corporation; Built on Thu_Jun_11_22:26:48_Pacific_Daylight_Time_2020; Cuda compilation tools, release 11.0, V11.0.194; Build cuda_11.0_bu.relgpu_drvr445TC445_37.28540450_0
###Code
print("Tensorflow: ",tf.__version__)
!nvcc --version
###Output
Tensorflow: 2.1.0
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2020 NVIDIA Corporation
Built on Thu_Jun_11_22:26:48_Pacific_Daylight_Time_2020
Cuda compilation tools, release 11.0, V11.0.194
Build cuda_11.0_bu.relgpu_drvr445TC445_37.28540450_0
###Markdown
User inputs
###Code
#Must download models first
#Options: AutoML, Lite_vr1, Lite_vr2, Lite_vr3, Final. Refer to text for differences
rel_model_path = 'Models/AutoML'
#Base directory with the images
#WILL NEED TO BE EDITED, For windows remember to use either \\ or / as path deliminters
rel_Input_path = 'Example_data'
#The enumerated GPU device you wish to use 0,1,nGPU
cuda_device = 0
###Output
_____no_output_____
###Markdown
deepOrganoid code
###Code
Base = os.getcwd()
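# NOTE: the hard-coded path below overrides os.getcwd(); edit it to match your own system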
Base = 'Q:/CTCR_DeepHTS/deepOrganoid/'
model_path = os.path.join(Base,rel_model_path)
Input_path = os.path.join(Base,rel_Input_path)
if os.path.exists(model_path):
print("Found: ",model_path)
os.chdir(model_path)
model_name = os.path.basename(model_path)
else:
print('Model path not found')
if os.path.exists(Input_path):
print("Found: ",Input_path)
else:
print('Image directory not found')
os.environ["MKL_NUM_THREADS"] = str(cpu_count())
os.environ["MKL_NUM_THREADS"]
###Output
_____no_output_____
###Markdown
Import inference functions
###Code
def doResize(options):
resize = None
if options and 'Resize' in options and options['Resize'] == True:
resize = (int(options['Width']), int(options['Height']))
return resize
def col_pre_process(data, options):
if len(options.keys()) == 0:
return data
else:
if "Scaling" in options and float(options["Scaling"]) != 0 and float(options["Scaling"]) != 1:
data = data / float(options["Scaling"])
if 'Normalization' in options and options['Normalization'] == True:
mean = np.mean(data)
std = np.std(data)
data = data - mean
data = data / std
return data
return data
def process_test_input(base_dir, test_raw, data_mapping):
test_data = []
le = None
from tensorflow.keras import backend as K
if K.backend() == 'theano' or K.backend() == 'mxnet':
K.set_image_data_format('channels_first')
else:
K.set_image_data_format('channels_last')
# determine the shape of the data to feed into the network
for i in range(len(data_mapping['inputs'])):
inp_port = data_mapping['inputs']['InputPort' + str(i)]
if inp_port['details'][0]['type'] == 'Image':
col_name = inp_port['details'][0]['name']
if 'options' in inp_port['details'][0]:
options = inp_port['details'][0]['options']
else:
options = {}
resize = doResize(options)
img = imread(test_raw[col_name][0])
input_shape = img.shape
num_channels = 1
if resize:
width, height = resize
if len(input_shape) == 3:
num_channels = 3
else:
if len(input_shape) == 2:
width, height = input_shape
else:
width, height, num_channels = input_shape
test_data.append(np.ndarray((len(test_raw),) +
(num_channels, width, height), dtype=np.float32))
for j, filename in enumerate(test_raw[col_name]):
img = imread(filename)
if resize:
img = np.array(Image.fromarray(img.astype(np.uint8)).resize(resize))
if num_channels != 1:
img = np.transpose(img, (2, 0, 1))
test_data[i][j] = img
if K.image_data_format() == 'channels_last':
test_data[i] = np.transpose(test_data[i], (0, 2, 3, 1))
test_data[i] = col_pre_process(test_data[i], options)
# assuming single output, generate labelEncoder
out_port = data_mapping['outputs']['OutputPort0']
if out_port['details'][0]['type'] == 'Categorical':
le = LabelEncoder()
le.fit(out_port['details'][0]['categories'])
return test_data, le
def customPredict(test_data, config, modelFile):
res = None
loss_func = config['params']['loss_func']
if 'is_custom_loss' in config['params']:
isCustomLoss = config['params']['is_custom_loss']
else:
isCustomLoss = False
if isCustomLoss:
customLoss = SourceFileLoader(
"customLoss", 'customLoss.py').load_module()
loss_function = eval('customLoss.' + loss_func)
mod = load_model(modelFile, custom_objects={loss_func: loss_function})
else:
mod = load_model(modelFile)
if os.environ.get("GPU_ENABLED", "0") == "1":
mod.compile (loss='categorical_crossentropy', optimizer='adam', context=["GPU("+str(cuda_device)+")"])
with tf.device("GPU:"+str(cuda_device)):
ress = mod.predict(test_data,batch_size=4)
return ress
def test_model(input_file):
try:
if os.path.exists('model.h5') and os.path.exists('mapping.pkl'):
with open('mapping.pkl', 'rb') as f:
data_mapping = pickle.load(f)
test_raw = pd.read_csv(input_file)
test_data, le = process_test_input(
os.path.dirname(input_file), test_raw, data_mapping)
currentDir = os.getcwd()
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
models = []
models.append(currentDir + '/model.h5')
result = np.array([])
for modelFile in models:
res = customPredict(test_data, config, modelFile)
if result.size != 0:
result = res + result
else:
result = res
res = result / len(models)
out_type = data_mapping['outputs']['OutputPort0']['details'][0]['type']
num_samples = len(test_raw)
if num_samples != 0:
out_dir = "./"
if not os.path.exists(out_dir + "output/"):
os.makedirs(out_dir + "output/")
if out_type == "Numpy":
if not os.path.exists(out_dir + "output/"):
os.makedirs(out_dir + "output/")
temp = np.ndarray((res.shape[0],), dtype=np.object_)
for i in range(res.shape[0]):
filename = "./output/" + str(i) + ".npy"
np.save(out_dir + filename, res[i])
temp[i] = filename
test_raw['predictions'] = temp
test_raw.to_csv('test_result.csv', index=False)
else:
print('model or data mapping does not exist... try downloading again!')
except Exception as e:
print("aborting due to exception... Please check input file format!")
traceback.print_exc()
###Output
_____no_output_____
###Markdown
Run classification by batch (sub-directory)
###Code
import shutil
import timeit
from tensorflow.keras.models import load_model
if os.path.exists('model.h5') and os.path.exists('mapping.pkl'):
with open('mapping.pkl', 'rb') as f:
data_mapping = pickle.load(f)
for i in range(len(data_mapping['inputs'])):
inp_port = data_mapping['inputs']['InputPort' + str(i)]
if inp_port['details'][0]['type'] == 'Image':
Column_name = inp_port['details'][0]['name']
df = pd.DataFrame()
for all_proj in os.listdir(Input_path):
if '_Proj' in all_proj: ##These conditional statments may need to be edited for new applications
start_time = timeit.default_timer()
active_dir = os.path.join(Input_path,all_proj)
name = model_name+".csv"
skip = os.path.isfile(os.path.join(active_dir,name))
if skip:
print("Skipping",active_dir)
else:
print("Building input: ",active_dir)
df = pd.DataFrame()
for root, dirs, files in os.walk(active_dir):
for name in files:
if '.tif' in name and 'ch01' in name:
active = pd.Series(os.path.join(root, name),name = Column_name)
df = df.append(active,ignore_index = True)
df = df.rename(columns={0:Column_name})
DLS_input = os.path.join(active_dir,'test.csv')
df.to_csv(DLS_input, index=False)
print("Running deepOrganoid")
test_model(DLS_input)
name = model_name+".csv"
shutil.move(os.path.join(model_path,"test_result.csv"),os.path.join(active_dir,name))
os.remove(DLS_input)
elapsed = timeit.default_timer() - start_time
print("Data has been saved at: ",all_proj,"\nTime to completion: ",elapsed/60,"minutes")
print("All tasks completed, have yourself a drink")
###Output
Building input: Q:/CTCR_DeepHTS/deepOrganoid/Example_data\2019-04-29_Proj
Running deepOrganoid
Data has been saved at: 2019-04-29_Proj
Time to completion: 0.786632441666734 minutes
All tasks completed, have yourself a drink
###Markdown
Release/Clean GPU VRAM
###Code
#Clear GPU RAM
from numba import cuda
for index, device in enumerate(cuda.gpus):
cuda.select_device(index)
device = cuda.get_current_device()
device.reset()
###Output
_____no_output_____ |
doc/caret2sql-xgboost-iris.ipynb | ###Markdown
Build a Model
###Code
## multiclass classification in iris dataset:
set.seed(1960)
dataset = as.matrix(iris[, -5])
create_model = function() {
formula <- as.formula(Species ~.)
model <- train(Species ~ ., data = iris, method = "xgbTree")
return(model)
}
model = create_model()
# cat(model$feature_names)
pred <- predict(model, as.matrix(iris[, -5]) , type="prob")
pred_labels <- predict(model, as.matrix(iris[, -5]) , type="raw")
sum(pred_labels != iris$Species)/length(pred_labels)
###Output
_____no_output_____
###Markdown
SQL Code Generation
###Code
test_ws_sql_gen = function(mod) {
WS_URL = "https://sklearn2sql.herokuapp.com/model"
WS_URL = "http://localhost:1888/model"
model_serialized <- serialize(mod, NULL)
b64_data = base64encode(model_serialized)
data = list(Name = "xgboost_test_model", SerializedModel = b64_data , SQLDialect = "postgresql" , Mode="caret")
r = POST(WS_URL, body = data, encode = "json")
# print(r)
content = content(r)
# print(content)
lSQL = content$model$SQLGenrationResult[[1]]$SQL # content["model"]["SQLGenrationResult"][0]["SQL"]
return(lSQL);
}
lModelSQL = test_ws_sql_gen(model)
cat(lModelSQL)
###Output
WITH "XGB_0" AS
(WITH "DT_node_lookup" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.430622011 AS "Score" UNION ALL SELECT 2 AS nid, -0.220048919 AS "Score") AS "Values"),
"DT_Output" AS
(SELECT "DT_node_lookup"."KEY" AS "KEY", "DT_node_lookup".node_id_2 AS node_id_2, "DT_node_data".nid AS nid, "DT_node_data"."Score" AS "Score"
FROM "DT_node_lookup" LEFT OUTER JOIN "DT_node_data" ON "DT_node_lookup".node_id_2 = "DT_node_data".nid),
"XGB_Model_0_0" AS
(SELECT "DT_Output"."KEY" AS "KEY", "DT_Output"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output"),
"DT_node_lookup_1" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_1" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.215311036 AS "Score" UNION ALL SELECT 5 AS nid, 0.389552265 AS "Score" UNION ALL SELECT 6 AS nid, 0.163636357 AS "Score" UNION ALL SELECT 7 AS nid, -0.0972973183 AS "Score" UNION ALL SELECT 8 AS nid, -0.212727293 AS "Score") AS "Values"),
"DT_Output_1" AS
(SELECT "DT_node_lookup_1"."KEY" AS "KEY", "DT_node_lookup_1".node_id_2 AS node_id_2, "DT_node_data_1".nid AS nid, "DT_node_data_1"."Score" AS "Score"
FROM "DT_node_lookup_1" LEFT OUTER JOIN "DT_node_data_1" ON "DT_node_lookup_1".node_id_2 = "DT_node_data_1".nid),
"XGB_Model_1_0" AS
(SELECT "DT_Output_1"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_1"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_1"),
"DT_node_lookup_2" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_2" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.402985066 AS "Score" UNION ALL SELECT 3 AS nid, -0.219899267 AS "Score" UNION ALL SELECT 4 AS nid, 0.217241377 AS "Score") AS "Values"),
"DT_Output_2" AS
(SELECT "DT_node_lookup_2"."KEY" AS "KEY", "DT_node_lookup_2".node_id_2 AS node_id_2, "DT_node_data_2".nid AS nid, "DT_node_data_2"."Score" AS "Score"
FROM "DT_node_lookup_2" LEFT OUTER JOIN "DT_node_data_2" ON "DT_node_lookup_2".node_id_2 = "DT_node_data_2".nid),
"XGB_Model_2_0" AS
(SELECT "DT_Output_2"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_2"."Score" AS "Score_virginica"
FROM "DT_Output_2"),
"DT_node_lookup_3" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_3" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.295082778 AS "Score" UNION ALL SELECT 2 AS nid, -0.196979225 AS "Score") AS "Values"),
"DT_Output_3" AS
(SELECT "DT_node_lookup_3"."KEY" AS "KEY", "DT_node_lookup_3".node_id_2 AS node_id_2, "DT_node_data_3".nid AS nid, "DT_node_data_3"."Score" AS "Score"
FROM "DT_node_lookup_3" LEFT OUTER JOIN "DT_node_data_3" ON "DT_node_lookup_3".node_id_2 = "DT_node_data_3".nid),
"XGB_Model_0_1" AS
(SELECT "DT_Output_3"."KEY" AS "KEY", "DT_Output_3"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_3"),
"DT_node_lookup_4" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_4" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.191612214 AS "Score" UNION ALL SELECT 5 AS nid, 0.291078657 AS "Score" UNION ALL SELECT 6 AS nid, 0.0804788619 AS "Score" UNION ALL SELECT 7 AS nid, -0.0038400169 AS "Score" UNION ALL SELECT 8 AS nid, -0.179641828 AS "Score") AS "Values"),
"DT_Output_4" AS
(SELECT "DT_node_lookup_4"."KEY" AS "KEY", "DT_node_lookup_4".node_id_2 AS node_id_2, "DT_node_data_4".nid AS nid, "DT_node_data_4"."Score" AS "Score"
FROM "DT_node_lookup_4" LEFT OUTER JOIN "DT_node_data_4" ON "DT_node_lookup_4".node_id_2 = "DT_node_data_4".nid),
"XGB_Model_1_1" AS
(SELECT "DT_Output_4"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_4"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_4"),
"DT_node_lookup_5" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 7 ELSE 8 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 9 ELSE 10 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_5" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.196446657 AS "Score" UNION ALL SELECT 4 AS nid, -0.000935714925 AS "Score" UNION ALL SELECT 7 AS nid, -0.108741924 AS "Score" UNION ALL SELECT 8 AS nid, 0.13706477 AS "Score" UNION ALL SELECT 9 AS nid, 0.0685180724 AS "Score" UNION ALL SELECT 10 AS nid, 0.298936576 AS "Score") AS "Values"),
"DT_Output_5" AS
(SELECT "DT_node_lookup_5"."KEY" AS "KEY", "DT_node_lookup_5".node_id_2 AS node_id_2, "DT_node_data_5".nid AS nid, "DT_node_data_5"."Score" AS "Score"
FROM "DT_node_lookup_5" LEFT OUTER JOIN "DT_node_data_5" ON "DT_node_lookup_5".node_id_2 = "DT_node_data_5".nid),
"XGB_Model_2_1" AS
(SELECT "DT_Output_5"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_5"."Score" AS "Score_virginica"
FROM "DT_Output_5"),
"DT_node_lookup_6" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_6" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.236277431 AS "Score" UNION ALL SELECT 2 AS nid, -0.181653887 AS "Score") AS "Values"),
"DT_Output_6" AS
(SELECT "DT_node_lookup_6"."KEY" AS "KEY", "DT_node_lookup_6".node_id_2 AS node_id_2, "DT_node_data_6".nid AS nid, "DT_node_data_6"."Score" AS "Score"
FROM "DT_node_lookup_6" LEFT OUTER JOIN "DT_node_data_6" ON "DT_node_lookup_6".node_id_2 = "DT_node_data_6".nid),
"XGB_Model_0_2" AS
(SELECT "DT_Output_6"."KEY" AS "KEY", "DT_Output_6"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_6"),
"DT_node_lookup_7" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_7" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.175510198 AS "Score" UNION ALL SELECT 5 AS nid, 0.233353898 AS "Score" UNION ALL SELECT 6 AS nid, -0.0104013886 AS "Score" UNION ALL SELECT 7 AS nid, 0.0186489262 AS "Score" UNION ALL SELECT 8 AS nid, -0.177015856 AS "Score") AS "Values"),
"DT_Output_7" AS
(SELECT "DT_node_lookup_7"."KEY" AS "KEY", "DT_node_lookup_7".node_id_2 AS node_id_2, "DT_node_data_7".nid AS nid, "DT_node_data_7"."Score" AS "Score"
FROM "DT_node_lookup_7" LEFT OUTER JOIN "DT_node_data_7" ON "DT_node_lookup_7".node_id_2 = "DT_node_data_7".nid),
"XGB_Model_1_2" AS
(SELECT "DT_Output_7"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_7"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_7"),
"DT_node_lookup_8" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.3499999) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_8" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.230453059 AS "Score" UNION ALL SELECT 3 AS nid, -0.179413989 AS "Score" UNION ALL SELECT 5 AS nid, 0.125372291 AS "Score" UNION ALL SELECT 6 AS nid, -0.119990066 AS "Score") AS "Values"),
"DT_Output_8" AS
(SELECT "DT_node_lookup_8"."KEY" AS "KEY", "DT_node_lookup_8".node_id_2 AS node_id_2, "DT_node_data_8".nid AS nid, "DT_node_data_8"."Score" AS "Score"
FROM "DT_node_lookup_8" LEFT OUTER JOIN "DT_node_data_8" ON "DT_node_lookup_8".node_id_2 = "DT_node_data_8".nid),
"XGB_Model_2_2" AS
(SELECT "DT_Output_8"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_8"."Score" AS "Score_virginica"
FROM "DT_Output_8"),
"DT_node_lookup_9" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_9" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.203858107 AS "Score" UNION ALL SELECT 2 AS nid, -0.170715541 AS "Score") AS "Values"),
"DT_Output_9" AS
(SELECT "DT_node_lookup_9"."KEY" AS "KEY", "DT_node_lookup_9".node_id_2 AS node_id_2, "DT_node_data_9".nid AS nid, "DT_node_data_9"."Score" AS "Score"
FROM "DT_node_lookup_9" LEFT OUTER JOIN "DT_node_data_9" ON "DT_node_lookup_9".node_id_2 = "DT_node_data_9".nid),
"XGB_Model_0_3" AS
(SELECT "DT_Output_9"."KEY" AS "KEY", "DT_Output_9"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_9")
SELECT "XGB_esu_0"."KEY", "XGB_esu_0"."Score_setosa", "XGB_esu_0"."Score_versicolor", "XGB_esu_0"."Score_virginica"
FROM (SELECT "XGB_Model_0_0"."KEY" AS "KEY", CAST("XGB_Model_0_0"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_0"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_0"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_0" UNION ALL SELECT "XGB_Model_1_0"."KEY" AS "KEY", CAST("XGB_Model_1_0"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_0"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_0"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_0" UNION ALL SELECT "XGB_Model_2_0"."KEY" AS "KEY", CAST("XGB_Model_2_0"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_0"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_0"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_0" UNION ALL SELECT "XGB_Model_0_1"."KEY" AS "KEY", CAST("XGB_Model_0_1"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_1"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_1"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_1" UNION ALL SELECT "XGB_Model_1_1"."KEY" AS "KEY", CAST("XGB_Model_1_1"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_1"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_1"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_1" UNION ALL SELECT "XGB_Model_2_1"."KEY" AS "KEY", CAST("XGB_Model_2_1"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_1"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_1"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_1" UNION ALL SELECT "XGB_Model_0_2"."KEY" AS "KEY", CAST("XGB_Model_0_2"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_2"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_2"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_2" UNION ALL SELECT "XGB_Model_1_2"."KEY" AS "KEY", CAST("XGB_Model_1_2"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_2"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_2"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_2" UNION ALL SELECT "XGB_Model_2_2"."KEY" AS "KEY", CAST("XGB_Model_2_2"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_2"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_2"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_2" UNION ALL SELECT "XGB_Model_0_3"."KEY" AS "KEY", CAST("XGB_Model_0_3"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_3"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_3"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_3") AS "XGB_esu_0"),
"XGB_1" AS
(WITH "DT_node_lookup_10" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_0" < 5.05000019) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_10" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.163605481 AS "Score" UNION ALL SELECT 5 AS nid, 0.04853056 AS "Score" UNION ALL SELECT 6 AS nid, 0.181296661 AS "Score" UNION ALL SELECT 7 AS nid, -0.0337444134 AS "Score" UNION ALL SELECT 8 AS nid, -0.164137065 AS "Score") AS "Values"),
"DT_Output_10" AS
(SELECT "DT_node_lookup_10"."KEY" AS "KEY", "DT_node_lookup_10".node_id_2 AS node_id_2, "DT_node_data_10".nid AS nid, "DT_node_data_10"."Score" AS "Score"
FROM "DT_node_lookup_10" LEFT OUTER JOIN "DT_node_data_10" ON "DT_node_lookup_10".node_id_2 = "DT_node_data_10".nid),
"XGB_Model_1_3" AS
(SELECT "DT_Output_10"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_10"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_10"),
"DT_node_lookup_11" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 7 ELSE 8 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 9 ELSE 10 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_11" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.16977559 AS "Score" UNION ALL SELECT 4 AS nid, 0.0224882942 AS "Score" UNION ALL SELECT 7 AS nid, -0.0978921279 AS "Score" UNION ALL SELECT 8 AS nid, 0.0969267786 AS "Score" UNION ALL SELECT 9 AS nid, 0.0326717049 AS "Score" UNION ALL SELECT 10 AS nid, 0.206036448 AS "Score") AS "Values"),
"DT_Output_11" AS
(SELECT "DT_node_lookup_11"."KEY" AS "KEY", "DT_node_lookup_11".node_id_2 AS node_id_2, "DT_node_data_11".nid AS nid, "DT_node_data_11"."Score" AS "Score"
FROM "DT_node_lookup_11" LEFT OUTER JOIN "DT_node_data_11" ON "DT_node_lookup_11".node_id_2 = "DT_node_data_11".nid),
"XGB_Model_2_3" AS
(SELECT "DT_Output_11"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_11"."Score" AS "Score_virginica"
FROM "DT_Output_11"),
"DT_node_lookup_12" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_12" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.1834407 AS "Score" UNION ALL SELECT 2 AS nid, -0.16252622 AS "Score") AS "Values"),
"DT_Output_12" AS
(SELECT "DT_node_lookup_12"."KEY" AS "KEY", "DT_node_lookup_12".node_id_2 AS node_id_2, "DT_node_data_12".nid AS nid, "DT_node_data_12"."Score" AS "Score"
FROM "DT_node_lookup_12" LEFT OUTER JOIN "DT_node_data_12" ON "DT_node_lookup_12".node_id_2 = "DT_node_data_12".nid),
"XGB_Model_0_4" AS
(SELECT "DT_Output_12"."KEY" AS "KEY", "DT_Output_12"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_12"),
"DT_node_lookup_13" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_13" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.154141307 AS "Score" UNION ALL SELECT 5 AS nid, 0.190102503 AS "Score" UNION ALL SELECT 6 AS nid, -0.0594451167 AS "Score" UNION ALL SELECT 7 AS nid, -0.0339091681 AS "Score" UNION ALL SELECT 8 AS nid, -0.153364062 AS "Score") AS "Values"),
"DT_Output_13" AS
(SELECT "DT_node_lookup_13"."KEY" AS "KEY", "DT_node_lookup_13".node_id_2 AS node_id_2, "DT_node_data_13".nid AS nid, "DT_node_data_13"."Score" AS "Score"
FROM "DT_node_lookup_13" LEFT OUTER JOIN "DT_node_data_13" ON "DT_node_lookup_13".node_id_2 = "DT_node_data_13".nid),
"XGB_Model_1_4" AS
(SELECT "DT_Output_13"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_13"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_13"),
"DT_node_lookup_14" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_14" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.162489891 AS "Score" UNION ALL SELECT 4 AS nid, 0.0767956674 AS "Score" UNION ALL SELECT 6 AS nid, 0.185022578 AS "Score" UNION ALL SELECT 7 AS nid, 0.121625312 AS "Score" UNION ALL SELECT 8 AS nid, -0.0476637408 AS "Score") AS "Values"),
"DT_Output_14" AS
(SELECT "DT_node_lookup_14"."KEY" AS "KEY", "DT_node_lookup_14".node_id_2 AS node_id_2, "DT_node_data_14".nid AS nid, "DT_node_data_14"."Score" AS "Score"
FROM "DT_node_lookup_14" LEFT OUTER JOIN "DT_node_data_14" ON "DT_node_lookup_14".node_id_2 = "DT_node_data_14".nid),
"XGB_Model_2_4" AS
(SELECT "DT_Output_14"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_14"."Score" AS "Score_virginica"
FROM "DT_Output_14"),
"DT_node_lookup_15" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_15" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.169323787 AS "Score" UNION ALL SELECT 2 AS nid, -0.155848682 AS "Score") AS "Values"),
"DT_Output_15" AS
(SELECT "DT_node_lookup_15"."KEY" AS "KEY", "DT_node_lookup_15".node_id_2 AS node_id_2, "DT_node_data_15".nid AS nid, "DT_node_data_15"."Score" AS "Score"
FROM "DT_node_lookup_15" LEFT OUTER JOIN "DT_node_data_15" ON "DT_node_lookup_15".node_id_2 = "DT_node_data_15".nid),
"XGB_Model_0_5" AS
(SELECT "DT_Output_15"."KEY" AS "KEY", "DT_Output_15"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_15"),
"DT_node_lookup_16" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_0" < 5.05000019) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_16" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.146020249 AS "Score" UNION ALL SELECT 5 AS nid, 0.0163165089 AS "Score" UNION ALL SELECT 6 AS nid, 0.154481009 AS "Score" UNION ALL SELECT 7 AS nid, -0.0333782472 AS "Score" UNION ALL SELECT 8 AS nid, -0.152288616 AS "Score") AS "Values"),
"DT_Output_16" AS
(SELECT "DT_node_lookup_16"."KEY" AS "KEY", "DT_node_lookup_16".node_id_2 AS node_id_2, "DT_node_data_16".nid AS nid, "DT_node_data_16"."Score" AS "Score"
FROM "DT_node_lookup_16" LEFT OUTER JOIN "DT_node_data_16" ON "DT_node_lookup_16".node_id_2 = "DT_node_data_16".nid),
"XGB_Model_1_5" AS
(SELECT "DT_Output_16"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_16"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_16"),
"DT_node_lookup_17" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_17" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.155230343 AS "Score" UNION ALL SELECT 4 AS nid, 0.061719548 AS "Score" UNION ALL SELECT 6 AS nid, 0.170175523 AS "Score" UNION ALL SELECT 7 AS nid, 0.108573265 AS "Score" UNION ALL SELECT 8 AS nid, -0.0493961759 AS "Score") AS "Values"),
"DT_Output_17" AS
(SELECT "DT_node_lookup_17"."KEY" AS "KEY", "DT_node_lookup_17".node_id_2 AS node_id_2, "DT_node_data_17".nid AS nid, "DT_node_data_17"."Score" AS "Score"
FROM "DT_node_lookup_17" LEFT OUTER JOIN "DT_node_data_17" ON "DT_node_lookup_17".node_id_2 = "DT_node_data_17".nid),
"XGB_Model_2_5" AS
(SELECT "DT_Output_17"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_17"."Score" AS "Score_virginica"
FROM "DT_Output_17"),
"DT_node_lookup_18" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_18" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.158738256 AS "Score" UNION ALL SELECT 2 AS nid, -0.150402635 AS "Score") AS "Values"),
"DT_Output_18" AS
(SELECT "DT_node_lookup_18"."KEY" AS "KEY", "DT_node_lookup_18".node_id_2 AS node_id_2, "DT_node_data_18".nid AS nid, "DT_node_data_18"."Score" AS "Score"
FROM "DT_node_lookup_18" LEFT OUTER JOIN "DT_node_data_18" ON "DT_node_lookup_18".node_id_2 = "DT_node_data_18".nid),
"XGB_Model_0_6" AS
(SELECT "DT_Output_18"."KEY" AS "KEY", "DT_Output_18"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_18"),
"DT_node_lookup_19" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_0" < 5.05000019) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_19" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.138532251 AS "Score" UNION ALL SELECT 5 AS nid, 0.00739950361 AS "Score" UNION ALL SELECT 6 AS nid, 0.139471993 AS "Score" UNION ALL SELECT 7 AS nid, -0.0240194462 AS "Score" UNION ALL SELECT 8 AS nid, -0.144811302 AS "Score") AS "Values"),
"DT_Output_19" AS
(SELECT "DT_node_lookup_19"."KEY" AS "KEY", "DT_node_lookup_19".node_id_2 AS node_id_2, "DT_node_data_19".nid AS nid, "DT_node_data_19"."Score" AS "Score"
FROM "DT_node_lookup_19" LEFT OUTER JOIN "DT_node_data_19" ON "DT_node_lookup_19".node_id_2 = "DT_node_data_19".nid),
"XGB_Model_1_6" AS
(SELECT "DT_Output_19"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_19"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_19")
SELECT "XGB_esu_1"."KEY", "XGB_esu_1"."Score_setosa", "XGB_esu_1"."Score_versicolor", "XGB_esu_1"."Score_virginica"
FROM (SELECT "XGB_Model_1_3"."KEY" AS "KEY", CAST("XGB_Model_1_3"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_3"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_3"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_3" UNION ALL SELECT "XGB_Model_2_3"."KEY" AS "KEY", CAST("XGB_Model_2_3"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_3"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_3"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_3" UNION ALL SELECT "XGB_Model_0_4"."KEY" AS "KEY", CAST("XGB_Model_0_4"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_4"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_4"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_4" UNION ALL SELECT "XGB_Model_1_4"."KEY" AS "KEY", CAST("XGB_Model_1_4"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_4"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_4"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_4" UNION ALL SELECT "XGB_Model_2_4"."KEY" AS "KEY", CAST("XGB_Model_2_4"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_4"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_4"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_4" UNION ALL SELECT "XGB_Model_0_5"."KEY" AS "KEY", CAST("XGB_Model_0_5"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_5"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_5"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_5" UNION ALL SELECT "XGB_Model_1_5"."KEY" AS "KEY", CAST("XGB_Model_1_5"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_5"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_5"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_5" UNION ALL SELECT "XGB_Model_2_5"."KEY" AS "KEY", CAST("XGB_Model_2_5"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_5"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_5"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_5" UNION ALL SELECT "XGB_Model_0_6"."KEY" AS "KEY", CAST("XGB_Model_0_6"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_6"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_6"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_6" UNION ALL SELECT "XGB_Model_1_6"."KEY" AS "KEY", CAST("XGB_Model_1_6"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_6"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_6"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_6") AS "XGB_esu_1"),
"XGB_2" AS
(WITH "DT_node_lookup_20" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.3499999) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 7 ELSE 8 END END ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_20" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.145790786 AS "Score" UNION ALL SELECT 5 AS nid, 0.0368432552 AS "Score" UNION ALL SELECT 6 AS nid, 0.161826119 AS "Score" UNION ALL SELECT 7 AS nid, 0.112252824 AS "Score" UNION ALL SELECT 8 AS nid, -0.0754162893 AS "Score") AS "Values"),
"DT_Output_20" AS
(SELECT "DT_node_lookup_20"."KEY" AS "KEY", "DT_node_lookup_20".node_id_2 AS node_id_2, "DT_node_data_20".nid AS nid, "DT_node_data_20"."Score" AS "Score"
FROM "DT_node_lookup_20" LEFT OUTER JOIN "DT_node_data_20" ON "DT_node_lookup_20".node_id_2 = "DT_node_data_20".nid),
"XGB_Model_2_6" AS
(SELECT "DT_Output_20"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_20"."Score" AS "Score_virginica"
FROM "DT_Output_20"),
"DT_node_lookup_21" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_21" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.150172532 AS "Score" UNION ALL SELECT 2 AS nid, -0.145584792 AS "Score") AS "Values"),
"DT_Output_21" AS
(SELECT "DT_node_lookup_21"."KEY" AS "KEY", "DT_node_lookup_21".node_id_2 AS node_id_2, "DT_node_data_21".nid AS nid, "DT_node_data_21"."Score" AS "Score"
FROM "DT_node_lookup_21" LEFT OUTER JOIN "DT_node_data_21" ON "DT_node_lookup_21".node_id_2 = "DT_node_data_21".nid),
"XGB_Model_0_7" AS
(SELECT "DT_Output_21"."KEY" AS "KEY", "DT_Output_21"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_21"),
"DT_node_lookup_22" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_22" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.131217241 AS "Score" UNION ALL SELECT 5 AS nid, 0.140719801 AS "Score" UNION ALL SELECT 6 AS nid, -0.0163263753 AS "Score" UNION ALL SELECT 7 AS nid, 0.0292934608 AS "Score" UNION ALL SELECT 8 AS nid, -0.148598656 AS "Score") AS "Values"),
"DT_Output_22" AS
(SELECT "DT_node_lookup_22"."KEY" AS "KEY", "DT_node_lookup_22".node_id_2 AS node_id_2, "DT_node_data_22".nid AS nid, "DT_node_data_22"."Score" AS "Score"
FROM "DT_node_lookup_22" LEFT OUTER JOIN "DT_node_data_22" ON "DT_node_lookup_22".node_id_2 = "DT_node_data_22".nid),
"XGB_Model_1_7" AS
(SELECT "DT_Output_22"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_22"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_22"),
"DT_node_lookup_23" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_23" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.144370124 AS "Score" UNION ALL SELECT 4 AS nid, 0.0533284321 AS "Score" UNION ALL SELECT 6 AS nid, 0.152582586 AS "Score" UNION ALL SELECT 7 AS nid, 0.0910344645 AS "Score" UNION ALL SELECT 8 AS nid, -0.0538599752 AS "Score") AS "Values"),
"DT_Output_23" AS
(SELECT "DT_node_lookup_23"."KEY" AS "KEY", "DT_node_lookup_23".node_id_2 AS node_id_2, "DT_node_data_23".nid AS nid, "DT_node_data_23"."Score" AS "Score"
FROM "DT_node_lookup_23" LEFT OUTER JOIN "DT_node_data_23" ON "DT_node_lookup_23".node_id_2 = "DT_node_data_23".nid),
"XGB_Model_2_7" AS
(SELECT "DT_Output_23"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_23"."Score" AS "Score_virginica"
FROM "DT_Output_23"),
"DT_node_lookup_24" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_24" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.142601699 AS "Score" UNION ALL SELECT 2 AS nid, -0.140719742 AS "Score") AS "Values"),
"DT_Output_24" AS
(SELECT "DT_node_lookup_24"."KEY" AS "KEY", "DT_node_lookup_24".node_id_2 AS node_id_2, "DT_node_data_24".nid AS nid, "DT_node_data_24"."Score" AS "Score"
FROM "DT_node_lookup_24" LEFT OUTER JOIN "DT_node_data_24" ON "DT_node_lookup_24".node_id_2 = "DT_node_data_24".nid),
"XGB_Model_0_8" AS
(SELECT "DT_Output_24"."KEY" AS "KEY", "DT_Output_24"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_24"),
"DT_node_lookup_25" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_25" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.123814851 AS "Score" UNION ALL SELECT 5 AS nid, 0.129827946 AS "Score" UNION ALL SELECT 6 AS nid, -0.00924589485 AS "Score" UNION ALL SELECT 7 AS nid, 0.00442052027 AS "Score" UNION ALL SELECT 8 AS nid, -0.138096854 AS "Score") AS "Values"),
"DT_Output_25" AS
(SELECT "DT_node_lookup_25"."KEY" AS "KEY", "DT_node_lookup_25".node_id_2 AS node_id_2, "DT_node_data_25".nid AS nid, "DT_node_data_25"."Score" AS "Score"
FROM "DT_node_lookup_25" LEFT OUTER JOIN "DT_node_data_25" ON "DT_node_lookup_25".node_id_2 = "DT_node_data_25".nid),
"XGB_Model_1_8" AS
(SELECT "DT_Output_25"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_25"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_25"),
"DT_node_lookup_26" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_26" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.138808116 AS "Score" UNION ALL SELECT 4 AS nid, 0.0451260768 AS "Score" UNION ALL SELECT 6 AS nid, 0.145031452 AS "Score" UNION ALL SELECT 7 AS nid, 0.0826190412 AS "Score" UNION ALL SELECT 8 AS nid, -0.0548939444 AS "Score") AS "Values"),
"DT_Output_26" AS
(SELECT "DT_node_lookup_26"."KEY" AS "KEY", "DT_node_lookup_26".node_id_2 AS node_id_2, "DT_node_data_26".nid AS nid, "DT_node_data_26"."Score" AS "Score"
FROM "DT_node_lookup_26" LEFT OUTER JOIN "DT_node_data_26" ON "DT_node_lookup_26".node_id_2 = "DT_node_data_26".nid),
"XGB_Model_2_8" AS
(SELECT "DT_Output_26"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_26"."Score" AS "Score_virginica"
FROM "DT_Output_26"),
"DT_node_lookup_27" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_27" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.135521978 AS "Score" UNION ALL SELECT 2 AS nid, -0.135857046 AS "Score") AS "Values"),
"DT_Output_27" AS
(SELECT "DT_node_lookup_27"."KEY" AS "KEY", "DT_node_lookup_27".node_id_2 AS node_id_2, "DT_node_data_27".nid AS nid, "DT_node_data_27"."Score" AS "Score"
FROM "DT_node_lookup_27" LEFT OUTER JOIN "DT_node_data_27" ON "DT_node_lookup_27".node_id_2 = "DT_node_data_27".nid),
"XGB_Model_0_9" AS
(SELECT "DT_Output_27"."KEY" AS "KEY", "DT_Output_27"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_27"),
"DT_node_lookup_28" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_28" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.124956153 AS "Score" UNION ALL SELECT 3 AS nid, -0.116202153 AS "Score" UNION ALL SELECT 5 AS nid, 0.114220828 AS "Score" UNION ALL SELECT 6 AS nid, -0.026405761 AS "Score") AS "Values"),
"DT_Output_28" AS
(SELECT "DT_node_lookup_28"."KEY" AS "KEY", "DT_node_lookup_28".node_id_2 AS node_id_2, "DT_node_data_28".nid AS nid, "DT_node_data_28"."Score" AS "Score"
FROM "DT_node_lookup_28" LEFT OUTER JOIN "DT_node_data_28" ON "DT_node_lookup_28".node_id_2 = "DT_node_data_28".nid),
"XGB_Model_1_9" AS
(SELECT "DT_Output_28"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_28"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_28"),
"DT_node_lookup_29" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_29" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.133166224 AS "Score" UNION ALL SELECT 4 AS nid, 0.0384888016 AS "Score" UNION ALL SELECT 6 AS nid, 0.1380326 AS "Score" UNION ALL SELECT 7 AS nid, 0.077675432 AS "Score" UNION ALL SELECT 8 AS nid, -0.0557836629 AS "Score") AS "Values"),
"DT_Output_29" AS
(SELECT "DT_node_lookup_29"."KEY" AS "KEY", "DT_node_lookup_29".node_id_2 AS node_id_2, "DT_node_data_29".nid AS nid, "DT_node_data_29"."Score" AS "Score"
FROM "DT_node_lookup_29" LEFT OUTER JOIN "DT_node_data_29" ON "DT_node_lookup_29".node_id_2 = "DT_node_data_29".nid),
"XGB_Model_2_9" AS
(SELECT "DT_Output_29"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_29"."Score" AS "Score_virginica"
FROM "DT_Output_29")
SELECT "XGB_esu_2"."KEY", "XGB_esu_2"."Score_setosa", "XGB_esu_2"."Score_versicolor", "XGB_esu_2"."Score_virginica"
FROM (SELECT "XGB_Model_2_6"."KEY" AS "KEY", CAST("XGB_Model_2_6"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_6"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_6"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_6" UNION ALL SELECT "XGB_Model_0_7"."KEY" AS "KEY", CAST("XGB_Model_0_7"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_7"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_7"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_7" UNION ALL SELECT "XGB_Model_1_7"."KEY" AS "KEY", CAST("XGB_Model_1_7"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_7"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_7"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_7" UNION ALL SELECT "XGB_Model_2_7"."KEY" AS "KEY", CAST("XGB_Model_2_7"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_7"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_7"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_7" UNION ALL SELECT "XGB_Model_0_8"."KEY" AS "KEY", CAST("XGB_Model_0_8"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_8"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_8"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_8" UNION ALL SELECT "XGB_Model_1_8"."KEY" AS "KEY", CAST("XGB_Model_1_8"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_8"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_8"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_8" UNION ALL SELECT "XGB_Model_2_8"."KEY" AS "KEY", CAST("XGB_Model_2_8"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_8"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_8"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_8" UNION ALL SELECT "XGB_Model_0_9"."KEY" AS "KEY", CAST("XGB_Model_0_9"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_9"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_9"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_9" UNION ALL SELECT "XGB_Model_1_9"."KEY" AS "KEY", CAST("XGB_Model_1_9"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_9"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_9"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_9" UNION ALL SELECT "XGB_Model_2_9"."KEY" AS "KEY", CAST("XGB_Model_2_9"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_9"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_9"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_9") AS "XGB_esu_2"),
"XGB_3" AS
(WITH "DT_node_lookup_30" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_30" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.128596887 AS "Score" UNION ALL SELECT 2 AS nid, -0.130853787 AS "Score") AS "Values"),
"DT_Output_30" AS
(SELECT "DT_node_lookup_30"."KEY" AS "KEY", "DT_node_lookup_30".node_id_2 AS node_id_2, "DT_node_data_30".nid AS nid, "DT_node_data_30"."Score" AS "Score"
FROM "DT_node_lookup_30" LEFT OUTER JOIN "DT_node_data_30" ON "DT_node_lookup_30".node_id_2 = "DT_node_data_30".nid),
"XGB_Model_0_10" AS
(SELECT "DT_Output_30"."KEY" AS "KEY", "DT_Output_30"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_30"),
"DT_node_lookup_31" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_31" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.118401945 AS "Score" UNION ALL SELECT 3 AS nid, -0.108389668 AS "Score" UNION ALL SELECT 5 AS nid, 0.0950700864 AS "Score" UNION ALL SELECT 6 AS nid, -0.0345255099 AS "Score") AS "Values"),
"DT_Output_31" AS
(SELECT "DT_node_lookup_31"."KEY" AS "KEY", "DT_node_lookup_31".node_id_2 AS node_id_2, "DT_node_data_31".nid AS nid, "DT_node_data_31"."Score" AS "Score"
FROM "DT_node_lookup_31" LEFT OUTER JOIN "DT_node_data_31" ON "DT_node_lookup_31".node_id_2 = "DT_node_data_31".nid),
"XGB_Model_1_10" AS
(SELECT "DT_Output_31"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_31"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_31"),
"DT_node_lookup_32" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 7 ELSE 8 END END ELSE CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 9 ELSE 10 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_32" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.132060915 AS "Score" UNION ALL SELECT 6 AS nid, 0.137775511 AS "Score" UNION ALL SELECT 7 AS nid, -0.0133806318 AS "Score" UNION ALL SELECT 8 AS nid, 0.0999987796 AS "Score" UNION ALL SELECT 9 AS nid, -0.00475717895 AS "Score" UNION ALL SELECT 10 AS nid, 0.0865700617 AS "Score") AS "Values"),
"DT_Output_32" AS
(SELECT "DT_node_lookup_32"."KEY" AS "KEY", "DT_node_lookup_32".node_id_2 AS node_id_2, "DT_node_data_32".nid AS nid, "DT_node_data_32"."Score" AS "Score"
FROM "DT_node_lookup_32" LEFT OUTER JOIN "DT_node_data_32" ON "DT_node_lookup_32".node_id_2 = "DT_node_data_32".nid),
"XGB_Model_2_10" AS
(SELECT "DT_Output_32"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_32"."Score" AS "Score_virginica"
FROM "DT_Output_32"),
"DT_node_lookup_33" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_33" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.121591054 AS "Score" UNION ALL SELECT 2 AS nid, -0.125550315 AS "Score") AS "Values"),
"DT_Output_33" AS
(SELECT "DT_node_lookup_33"."KEY" AS "KEY", "DT_node_lookup_33".node_id_2 AS node_id_2, "DT_node_data_33".nid AS nid, "DT_node_data_33"."Score" AS "Score"
FROM "DT_node_lookup_33" LEFT OUTER JOIN "DT_node_data_33" ON "DT_node_lookup_33".node_id_2 = "DT_node_data_33".nid),
"XGB_Model_0_11" AS
(SELECT "DT_Output_33"."KEY" AS "KEY", "DT_Output_33"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_33"),
"DT_node_lookup_34" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.8499999) THEN CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_34" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.100486428 AS "Score" UNION ALL SELECT 4 AS nid, -0.100176238 AS "Score" UNION ALL SELECT 5 AS nid, 0.0193595029 AS "Score" UNION ALL SELECT 6 AS nid, 0.148525387 AS "Score") AS "Values"),
"DT_Output_34" AS
(SELECT "DT_node_lookup_34"."KEY" AS "KEY", "DT_node_lookup_34".node_id_2 AS node_id_2, "DT_node_data_34".nid AS nid, "DT_node_data_34"."Score" AS "Score"
FROM "DT_node_lookup_34" LEFT OUTER JOIN "DT_node_data_34" ON "DT_node_lookup_34".node_id_2 = "DT_node_data_34".nid),
"XGB_Model_1_11" AS
(SELECT "DT_Output_34"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_34"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_34"),
"DT_node_lookup_35" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 7 ELSE 8 END END ELSE CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 9 ELSE 10 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_35" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.126148537 AS "Score" UNION ALL SELECT 6 AS nid, 0.128563508 AS "Score" UNION ALL SELECT 7 AS nid, -0.0119296303 AS "Score" UNION ALL SELECT 8 AS nid, 0.0929059535 AS "Score" UNION ALL SELECT 9 AS nid, 0.0574249029 AS "Score" UNION ALL SELECT 10 AS nid, 0.0111032212 AS "Score") AS "Values"),
"DT_Output_35" AS
(SELECT "DT_node_lookup_35"."KEY" AS "KEY", "DT_node_lookup_35".node_id_2 AS node_id_2, "DT_node_data_35".nid AS nid, "DT_node_data_35"."Score" AS "Score"
FROM "DT_node_lookup_35" LEFT OUTER JOIN "DT_node_data_35" ON "DT_node_lookup_35".node_id_2 = "DT_node_data_35".nid),
"XGB_Model_2_11" AS
(SELECT "DT_Output_35"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_35"."Score" AS "Score_virginica"
FROM "DT_Output_35"),
"DT_node_lookup_36" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_36" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.114515401 AS "Score" UNION ALL SELECT 2 AS nid, -0.120699108 AS "Score") AS "Values"),
"DT_Output_36" AS
(SELECT "DT_node_lookup_36"."KEY" AS "KEY", "DT_node_lookup_36".node_id_2 AS node_id_2, "DT_node_data_36".nid AS nid, "DT_node_data_36"."Score" AS "Score"
FROM "DT_node_lookup_36" LEFT OUTER JOIN "DT_node_data_36" ON "DT_node_lookup_36".node_id_2 = "DT_node_data_36".nid),
"XGB_Model_0_12" AS
(SELECT "DT_Output_36"."KEY" AS "KEY", "DT_Output_36"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_36"),
"DT_node_lookup_37" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_37" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.103598572 AS "Score" UNION ALL SELECT 3 AS nid, -0.0926524177 AS "Score" UNION ALL SELECT 5 AS nid, -0.0100903679 AS "Score" UNION ALL SELECT 6 AS nid, 0.12229614 AS "Score") AS "Values"),
"DT_Output_37" AS
(SELECT "DT_node_lookup_37"."KEY" AS "KEY", "DT_node_lookup_37".node_id_2 AS node_id_2, "DT_node_data_37".nid AS nid, "DT_node_data_37"."Score" AS "Score"
FROM "DT_node_lookup_37" LEFT OUTER JOIN "DT_node_data_37" ON "DT_node_lookup_37".node_id_2 = "DT_node_data_37".nid),
"XGB_Model_1_12" AS
(SELECT "DT_Output_37"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_37"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_37"),
"DT_node_lookup_38" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.3499999) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_38" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.107482918 AS "Score" UNION ALL SELECT 4 AS nid, -0.0376043208 AS "Score" UNION ALL SELECT 5 AS nid, 0.0357078426 AS "Score" UNION ALL SELECT 6 AS nid, 0.123302825 AS "Score") AS "Values"),
"DT_Output_38" AS
(SELECT "DT_node_lookup_38"."KEY" AS "KEY", "DT_node_lookup_38".node_id_2 AS node_id_2, "DT_node_data_38".nid AS nid, "DT_node_data_38"."Score" AS "Score"
FROM "DT_node_lookup_38" LEFT OUTER JOIN "DT_node_data_38" ON "DT_node_lookup_38".node_id_2 = "DT_node_data_38".nid),
"XGB_Model_2_12" AS
(SELECT "DT_Output_38"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_38"."Score" AS "Score_virginica"
FROM "DT_Output_38"),
"DT_node_lookup_39" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_39" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.107602976 AS "Score" UNION ALL SELECT 2 AS nid, -0.116029181 AS "Score") AS "Values"),
"DT_Output_39" AS
(SELECT "DT_node_lookup_39"."KEY" AS "KEY", "DT_node_lookup_39".node_id_2 AS node_id_2, "DT_node_data_39".nid AS nid, "DT_node_data_39"."Score" AS "Score"
FROM "DT_node_lookup_39" LEFT OUTER JOIN "DT_node_data_39" ON "DT_node_lookup_39".node_id_2 = "DT_node_data_39".nid),
"XGB_Model_0_13" AS
(SELECT "DT_Output_39"."KEY" AS "KEY", "DT_Output_39"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_39")
SELECT "XGB_esu_3"."KEY", "XGB_esu_3"."Score_setosa", "XGB_esu_3"."Score_versicolor", "XGB_esu_3"."Score_virginica"
FROM (SELECT "XGB_Model_0_10"."KEY" AS "KEY", CAST("XGB_Model_0_10"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_10"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_10"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_10" UNION ALL SELECT "XGB_Model_1_10"."KEY" AS "KEY", CAST("XGB_Model_1_10"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_10"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_10"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_10" UNION ALL SELECT "XGB_Model_2_10"."KEY" AS "KEY", CAST("XGB_Model_2_10"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_10"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_10"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_10" UNION ALL SELECT "XGB_Model_0_11"."KEY" AS "KEY", CAST("XGB_Model_0_11"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_11"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_11"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_11" UNION ALL SELECT "XGB_Model_1_11"."KEY" AS "KEY", CAST("XGB_Model_1_11"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_11"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_11"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_11" UNION ALL SELECT "XGB_Model_2_11"."KEY" AS "KEY", CAST("XGB_Model_2_11"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_11"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_11"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_11" UNION ALL SELECT "XGB_Model_0_12"."KEY" AS "KEY", CAST("XGB_Model_0_12"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_12"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_12"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_12" UNION ALL SELECT "XGB_Model_1_12"."KEY" AS "KEY", CAST("XGB_Model_1_12"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_12"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_12"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_12" UNION ALL SELECT "XGB_Model_2_12"."KEY" AS "KEY", CAST("XGB_Model_2_12"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_12"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_12"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_12" UNION ALL SELECT "XGB_Model_0_13"."KEY" AS "KEY", CAST("XGB_Model_0_13"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_13"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_13"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_13") AS "XGB_esu_3"),
"XGB_4" AS
(WITH "DT_node_lookup_40" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN CASE WHEN ("ADS"."Feature_0" < 4.94999981) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 7 ELSE 8 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_40" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0980459675 AS "Score" UNION ALL SELECT 5 AS nid, -0.0931193605 AS "Score" UNION ALL SELECT 6 AS nid, -0.0185350645 AS "Score" UNION ALL SELECT 7 AS nid, 0.0990569592 AS "Score" UNION ALL SELECT 8 AS nid, -0.00604510913 AS "Score") AS "Values"),
"DT_Output_40" AS
(SELECT "DT_node_lookup_40"."KEY" AS "KEY", "DT_node_lookup_40".node_id_2 AS node_id_2, "DT_node_data_40".nid AS nid, "DT_node_data_40"."Score" AS "Score"
FROM "DT_node_lookup_40" LEFT OUTER JOIN "DT_node_data_40" ON "DT_node_lookup_40".node_id_2 = "DT_node_data_40".nid),
"XGB_Model_1_13" AS
(SELECT "DT_Output_40"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_40"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_40"),
"DT_node_lookup_41" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 7 ELSE 8 END ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_41" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.11717876 AS "Score" UNION ALL SELECT 4 AS nid, 0.0469515584 AS "Score" UNION ALL SELECT 6 AS nid, 0.116294377 AS "Score" UNION ALL SELECT 7 AS nid, 0.048174724 AS "Score" UNION ALL SELECT 8 AS nid, 0.00156639761 AS "Score") AS "Values"),
"DT_Output_41" AS
(SELECT "DT_node_lookup_41"."KEY" AS "KEY", "DT_node_lookup_41".node_id_2 AS node_id_2, "DT_node_data_41".nid AS nid, "DT_node_data_41"."Score" AS "Score"
FROM "DT_node_lookup_41" LEFT OUTER JOIN "DT_node_data_41" ON "DT_node_lookup_41".node_id_2 = "DT_node_data_41".nid),
"XGB_Model_2_13" AS
(SELECT "DT_Output_41"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_41"."Score" AS "Score_virginica"
FROM "DT_Output_41"),
"DT_node_lookup_42" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_42" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.101478003 AS "Score" UNION ALL SELECT 2 AS nid, -0.110535346 AS "Score") AS "Values"),
"DT_Output_42" AS
(SELECT "DT_node_lookup_42"."KEY" AS "KEY", "DT_node_lookup_42".node_id_2 AS node_id_2, "DT_node_data_42".nid AS nid, "DT_node_data_42"."Score" AS "Score"
FROM "DT_node_lookup_42" LEFT OUTER JOIN "DT_node_data_42" ON "DT_node_lookup_42".node_id_2 = "DT_node_data_42".nid),
"XGB_Model_0_14" AS
(SELECT "DT_Output_42"."KEY" AS "KEY", "DT_Output_42"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_42"),
"DT_node_lookup_43" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_43" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0896863416 AS "Score" UNION ALL SELECT 3 AS nid, -0.0700609013 AS "Score" UNION ALL SELECT 5 AS nid, 0.0782628804 AS "Score" UNION ALL SELECT 6 AS nid, -0.0241376031 AS "Score") AS "Values"),
"DT_Output_43" AS
(SELECT "DT_node_lookup_43"."KEY" AS "KEY", "DT_node_lookup_43".node_id_2 AS node_id_2, "DT_node_data_43".nid AS nid, "DT_node_data_43"."Score" AS "Score"
FROM "DT_node_lookup_43" LEFT OUTER JOIN "DT_node_data_43" ON "DT_node_lookup_43".node_id_2 = "DT_node_data_43".nid),
"XGB_Model_1_14" AS
(SELECT "DT_Output_43"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_43"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_43"),
"DT_node_lookup_44" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 3.1500001) THEN CASE WHEN ("ADS"."Feature_0" < 6.55000019) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_44" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0954309031 AS "Score" UNION ALL SELECT 4 AS nid, -0.0523391366 AS "Score" UNION ALL SELECT 5 AS nid, 0.091995813 AS "Score" UNION ALL SELECT 6 AS nid, -0.0021055711 AS "Score") AS "Values"),
"DT_Output_44" AS
(SELECT "DT_node_lookup_44"."KEY" AS "KEY", "DT_node_lookup_44".node_id_2 AS node_id_2, "DT_node_data_44".nid AS nid, "DT_node_data_44"."Score" AS "Score"
FROM "DT_node_lookup_44" LEFT OUTER JOIN "DT_node_data_44" ON "DT_node_lookup_44".node_id_2 = "DT_node_data_44".nid),
"XGB_Model_2_14" AS
(SELECT "DT_Output_44"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_44"."Score" AS "Score_virginica"
FROM "DT_Output_44"),
"DT_node_lookup_45" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_45" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0953521058 AS "Score" UNION ALL SELECT 2 AS nid, -0.105807476 AS "Score") AS "Values"),
"DT_Output_45" AS
(SELECT "DT_node_lookup_45"."KEY" AS "KEY", "DT_node_lookup_45".node_id_2 AS node_id_2, "DT_node_data_45".nid AS nid, "DT_node_data_45"."Score" AS "Score"
FROM "DT_node_lookup_45" LEFT OUTER JOIN "DT_node_data_45" ON "DT_node_lookup_45".node_id_2 = "DT_node_data_45".nid),
"XGB_Model_0_15" AS
(SELECT "DT_Output_45"."KEY" AS "KEY", "DT_Output_45"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_45"),
"DT_node_lookup_46" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.8499999) THEN CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_46" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0824376494 AS "Score" UNION ALL SELECT 3 AS nid, -0.0624245554 AS "Score" UNION ALL SELECT 5 AS nid, 0.113651976 AS "Score" UNION ALL SELECT 6 AS nid, 0.00734611601 AS "Score") AS "Values"),
"DT_Output_46" AS
(SELECT "DT_node_lookup_46"."KEY" AS "KEY", "DT_node_lookup_46".node_id_2 AS node_id_2, "DT_node_data_46".nid AS nid, "DT_node_data_46"."Score" AS "Score"
FROM "DT_node_lookup_46" LEFT OUTER JOIN "DT_node_data_46" ON "DT_node_lookup_46".node_id_2 = "DT_node_data_46".nid),
"XGB_Model_1_15" AS
(SELECT "DT_Output_46"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_46"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_46"),
"DT_node_lookup_47" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_3" < 1.60000002) THEN 7 ELSE 8 END ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 9 ELSE 10 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_47" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 4 AS nid, -0.135008916 AS "Score" UNION ALL SELECT 5 AS nid, 0.123494379 AS "Score" UNION ALL SELECT 7 AS nid, -0.0914958119 AS "Score" UNION ALL SELECT 8 AS nid, 0.117312506 AS "Score" UNION ALL SELECT 9 AS nid, -0.0789289549 AS "Score" UNION ALL SELECT 10 AS nid, 0.101093598 AS "Score") AS "Values"),
"DT_Output_47" AS
(SELECT "DT_node_lookup_47"."KEY" AS "KEY", "DT_node_lookup_47".node_id_2 AS node_id_2, "DT_node_data_47".nid AS nid, "DT_node_data_47"."Score" AS "Score"
FROM "DT_node_lookup_47" LEFT OUTER JOIN "DT_node_data_47" ON "DT_node_lookup_47".node_id_2 = "DT_node_data_47".nid),
"XGB_Model_2_15" AS
(SELECT "DT_Output_47"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_47"."Score" AS "Score_virginica"
FROM "DT_Output_47"),
"DT_node_lookup_48" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_48" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0890828669 AS "Score" UNION ALL SELECT 2 AS nid, -0.100694902 AS "Score") AS "Values"),
"DT_Output_48" AS
(SELECT "DT_node_lookup_48"."KEY" AS "KEY", "DT_node_lookup_48".node_id_2 AS node_id_2, "DT_node_data_48".nid AS nid, "DT_node_data_48"."Score" AS "Score"
FROM "DT_node_lookup_48" LEFT OUTER JOIN "DT_node_data_48" ON "DT_node_lookup_48".node_id_2 = "DT_node_data_48".nid),
"XGB_Model_0_16" AS
(SELECT "DT_Output_48"."KEY" AS "KEY", "DT_Output_48"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_48"),
"DT_node_lookup_49" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_49" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0831603035 AS "Score" UNION ALL SELECT 3 AS nid, -0.055462949 AS "Score" UNION ALL SELECT 5 AS nid, -0.00624901755 AS "Score" UNION ALL SELECT 6 AS nid, 0.0915830731 AS "Score") AS "Values"),
"DT_Output_49" AS
(SELECT "DT_node_lookup_49"."KEY" AS "KEY", "DT_node_lookup_49".node_id_2 AS node_id_2, "DT_node_data_49".nid AS nid, "DT_node_data_49"."Score" AS "Score"
FROM "DT_node_lookup_49" LEFT OUTER JOIN "DT_node_data_49" ON "DT_node_lookup_49".node_id_2 = "DT_node_data_49".nid),
"XGB_Model_1_16" AS
(SELECT "DT_Output_49"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_49"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_49")
SELECT "XGB_esu_4"."KEY", "XGB_esu_4"."Score_setosa", "XGB_esu_4"."Score_versicolor", "XGB_esu_4"."Score_virginica"
FROM (SELECT "XGB_Model_1_13"."KEY" AS "KEY", CAST("XGB_Model_1_13"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_13"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_13"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_13" UNION ALL SELECT "XGB_Model_2_13"."KEY" AS "KEY", CAST("XGB_Model_2_13"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_13"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_13"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_13" UNION ALL SELECT "XGB_Model_0_14"."KEY" AS "KEY", CAST("XGB_Model_0_14"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_14"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_14"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_14" UNION ALL SELECT "XGB_Model_1_14"."KEY" AS "KEY", CAST("XGB_Model_1_14"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_14"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_14"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_14" UNION ALL SELECT "XGB_Model_2_14"."KEY" AS "KEY", CAST("XGB_Model_2_14"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_14"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_14"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_14" UNION ALL SELECT "XGB_Model_0_15"."KEY" AS "KEY", CAST("XGB_Model_0_15"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_15"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_15"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_15" UNION ALL SELECT "XGB_Model_1_15"."KEY" AS "KEY", CAST("XGB_Model_1_15"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_15"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_15"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_15" UNION ALL SELECT "XGB_Model_2_15"."KEY" AS "KEY", CAST("XGB_Model_2_15"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_15"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_15"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_15" UNION ALL SELECT "XGB_Model_0_16"."KEY" AS "KEY", CAST("XGB_Model_0_16"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_16"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_16"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_16" UNION ALL SELECT "XGB_Model_1_16"."KEY" AS "KEY", CAST("XGB_Model_1_16"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_16"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_16"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_16") AS "XGB_esu_4"),
"XGB_5" AS
(WITH "DT_node_lookup_50" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_3" < 1.60000002) THEN 7 ELSE 8 END ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 9 ELSE 10 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_50" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 4 AS nid, -0.12478856 AS "Score" UNION ALL SELECT 5 AS nid, 0.116463222 AS "Score" UNION ALL SELECT 7 AS nid, -0.086858131 AS "Score" UNION ALL SELECT 8 AS nid, 0.109670587 AS "Score" UNION ALL SELECT 9 AS nid, -0.0725010484 AS "Score" UNION ALL SELECT 10 AS nid, 0.0947115868 AS "Score") AS "Values"),
"DT_Output_50" AS
(SELECT "DT_node_lookup_50"."KEY" AS "KEY", "DT_node_lookup_50".node_id_2 AS node_id_2, "DT_node_data_50".nid AS nid, "DT_node_data_50"."Score" AS "Score"
FROM "DT_node_lookup_50" LEFT OUTER JOIN "DT_node_data_50" ON "DT_node_lookup_50".node_id_2 = "DT_node_data_50".nid),
"XGB_Model_2_16" AS
(SELECT "DT_Output_50"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_50"."Score" AS "Score_virginica"
FROM "DT_Output_50"),
"DT_node_lookup_51" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 0.800000012) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_51" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0832635835 AS "Score" UNION ALL SELECT 2 AS nid, -0.0958806723 AS "Score") AS "Values"),
"DT_Output_51" AS
(SELECT "DT_node_lookup_51"."KEY" AS "KEY", "DT_node_lookup_51".node_id_2 AS node_id_2, "DT_node_data_51".nid AS nid, "DT_node_data_51"."Score" AS "Score"
FROM "DT_node_lookup_51" LEFT OUTER JOIN "DT_node_data_51" ON "DT_node_lookup_51".node_id_2 = "DT_node_data_51".nid),
"XGB_Model_0_17" AS
(SELECT "DT_Output_51"."KEY" AS "KEY", "DT_Output_51"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_51"),
"DT_node_lookup_52" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 7 ELSE 8 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_52" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0501718856 AS "Score" UNION ALL SELECT 5 AS nid, 0.0267561898 AS "Score" UNION ALL SELECT 6 AS nid, -0.0622728802 AS "Score" UNION ALL SELECT 7 AS nid, -0.0275017917 AS "Score" UNION ALL SELECT 8 AS nid, 0.100373603 AS "Score") AS "Values"),
"DT_Output_52" AS
(SELECT "DT_node_lookup_52"."KEY" AS "KEY", "DT_node_lookup_52".node_id_2 AS node_id_2, "DT_node_data_52".nid AS nid, "DT_node_data_52"."Score" AS "Score"
FROM "DT_node_lookup_52" LEFT OUTER JOIN "DT_node_data_52" ON "DT_node_lookup_52".node_id_2 = "DT_node_data_52".nid),
"XGB_Model_1_17" AS
(SELECT "DT_Output_52"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_52"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_52"),
"DT_node_lookup_53" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_53" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.0980901048 AS "Score" UNION ALL SELECT 4 AS nid, 0.0302875228 AS "Score" UNION ALL SELECT 5 AS nid, 0.109206215 AS "Score" UNION ALL SELECT 7 AS nid, -0.0670151263 AS "Score" UNION ALL SELECT 8 AS nid, 0.0893127248 AS "Score") AS "Values"),
"DT_Output_53" AS
(SELECT "DT_node_lookup_53"."KEY" AS "KEY", "DT_node_lookup_53".node_id_2 AS node_id_2, "DT_node_data_53".nid AS nid, "DT_node_data_53"."Score" AS "Score"
FROM "DT_node_lookup_53" LEFT OUTER JOIN "DT_node_data_53" ON "DT_node_lookup_53".node_id_2 = "DT_node_data_53".nid),
"XGB_Model_2_17" AS
(SELECT "DT_Output_53"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_53"."Score" AS "Score_virginica"
FROM "DT_Output_53"),
"DT_node_lookup_54" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 2.45000005) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_54" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0780817717 AS "Score" UNION ALL SELECT 2 AS nid, -0.0911756903 AS "Score") AS "Values"),
"DT_Output_54" AS
(SELECT "DT_node_lookup_54"."KEY" AS "KEY", "DT_node_lookup_54".node_id_2 AS node_id_2, "DT_node_data_54".nid AS nid, "DT_node_data_54"."Score" AS "Score"
FROM "DT_node_lookup_54" LEFT OUTER JOIN "DT_node_data_54" ON "DT_node_lookup_54".node_id_2 = "DT_node_data_54".nid),
"XGB_Model_0_18" AS
(SELECT "DT_Output_54"."KEY" AS "KEY", "DT_Output_54"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_54"),
"DT_node_lookup_55" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.14999962) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_55" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0551870279 AS "Score" UNION ALL SELECT 4 AS nid, -0.0328219756 AS "Score" UNION ALL SELECT 5 AS nid, 0.00440375088 AS "Score" UNION ALL SELECT 6 AS nid, 0.0789479539 AS "Score") AS "Values"),
"DT_Output_55" AS
(SELECT "DT_node_lookup_55"."KEY" AS "KEY", "DT_node_lookup_55".node_id_2 AS node_id_2, "DT_node_data_55".nid AS nid, "DT_node_data_55"."Score" AS "Score"
FROM "DT_node_lookup_55" LEFT OUTER JOIN "DT_node_data_55" ON "DT_node_lookup_55".node_id_2 = "DT_node_data_55".nid),
"XGB_Model_1_18" AS
(SELECT "DT_Output_55"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_55"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_55"),
"DT_node_lookup_56" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_56" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0914660245 AS "Score" UNION ALL SELECT 3 AS nid, -0.0772953629 AS "Score" UNION ALL SELECT 5 AS nid, 0.0453824326 AS "Score" UNION ALL SELECT 6 AS nid, -0.0148755731 AS "Score") AS "Values"),
"DT_Output_56" AS
(SELECT "DT_node_lookup_56"."KEY" AS "KEY", "DT_node_lookup_56".node_id_2 AS node_id_2, "DT_node_data_56".nid AS nid, "DT_node_data_56"."Score" AS "Score"
FROM "DT_node_lookup_56" LEFT OUTER JOIN "DT_node_data_56" ON "DT_node_lookup_56".node_id_2 = "DT_node_data_56".nid),
"XGB_Model_2_18" AS
(SELECT "DT_Output_56"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_56"."Score" AS "Score_virginica"
FROM "DT_Output_56"),
"DT_node_lookup_57" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.04999995) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_57" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0570238791 AS "Score" UNION ALL SELECT 2 AS nid, -0.0825070962 AS "Score") AS "Values"),
"DT_Output_57" AS
(SELECT "DT_node_lookup_57"."KEY" AS "KEY", "DT_node_lookup_57".node_id_2 AS node_id_2, "DT_node_data_57".nid AS nid, "DT_node_data_57"."Score" AS "Score"
FROM "DT_node_lookup_57" LEFT OUTER JOIN "DT_node_data_57" ON "DT_node_lookup_57".node_id_2 = "DT_node_data_57".nid),
"XGB_Model_0_19" AS
(SELECT "DT_Output_57"."KEY" AS "KEY", "DT_Output_57"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_57"),
"DT_node_lookup_58" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.64999962) THEN 7 ELSE 8 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_58" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0438280217 AS "Score" UNION ALL SELECT 5 AS nid, 0.0186245013 AS "Score" UNION ALL SELECT 6 AS nid, -0.0549921058 AS "Score" UNION ALL SELECT 7 AS nid, -0.0116502615 AS "Score" UNION ALL SELECT 8 AS nid, 0.0863905475 AS "Score") AS "Values"),
"DT_Output_58" AS
(SELECT "DT_node_lookup_58"."KEY" AS "KEY", "DT_node_lookup_58".node_id_2 AS node_id_2, "DT_node_data_58".nid AS nid, "DT_node_data_58"."Score" AS "Score"
FROM "DT_node_lookup_58" LEFT OUTER JOIN "DT_node_data_58" ON "DT_node_lookup_58".node_id_2 = "DT_node_data_58".nid),
"XGB_Model_1_19" AS
(SELECT "DT_Output_58"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_58"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_58"),
"DT_node_lookup_59" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.1500001) THEN CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_59" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0701877698 AS "Score" UNION ALL SELECT 3 AS nid, -0.0145883719 AS "Score" UNION ALL SELECT 5 AS nid, 0.0068610874 AS "Score" UNION ALL SELECT 6 AS nid, 0.0685431734 AS "Score") AS "Values"),
"DT_Output_59" AS
(SELECT "DT_node_lookup_59"."KEY" AS "KEY", "DT_node_lookup_59".node_id_2 AS node_id_2, "DT_node_data_59".nid AS nid, "DT_node_data_59"."Score" AS "Score"
FROM "DT_node_lookup_59" LEFT OUTER JOIN "DT_node_data_59" ON "DT_node_lookup_59".node_id_2 = "DT_node_data_59".nid),
"XGB_Model_2_19" AS
(SELECT "DT_Output_59"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_59"."Score" AS "Score_virginica"
FROM "DT_Output_59")
SELECT "XGB_esu_5"."KEY", "XGB_esu_5"."Score_setosa", "XGB_esu_5"."Score_versicolor", "XGB_esu_5"."Score_virginica"
FROM (SELECT "XGB_Model_2_16"."KEY" AS "KEY", CAST("XGB_Model_2_16"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_16"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_16"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_16" UNION ALL SELECT "XGB_Model_0_17"."KEY" AS "KEY", CAST("XGB_Model_0_17"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_17"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_17"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_17" UNION ALL SELECT "XGB_Model_1_17"."KEY" AS "KEY", CAST("XGB_Model_1_17"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_17"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_17"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_17" UNION ALL SELECT "XGB_Model_2_17"."KEY" AS "KEY", CAST("XGB_Model_2_17"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_17"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_17"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_17" UNION ALL SELECT "XGB_Model_0_18"."KEY" AS "KEY", CAST("XGB_Model_0_18"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_18"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_18"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_18" UNION ALL SELECT "XGB_Model_1_18"."KEY" AS "KEY", CAST("XGB_Model_1_18"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_18"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_18"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_18" UNION ALL SELECT "XGB_Model_2_18"."KEY" AS "KEY", CAST("XGB_Model_2_18"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_18"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_18"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_18" UNION ALL SELECT "XGB_Model_0_19"."KEY" AS "KEY", CAST("XGB_Model_0_19"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_19"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_19"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_19" UNION ALL SELECT "XGB_Model_1_19"."KEY" AS "KEY", CAST("XGB_Model_1_19"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_19"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_19"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_19" UNION ALL SELECT "XGB_Model_2_19"."KEY" AS "KEY", CAST("XGB_Model_2_19"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_19"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_19"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_19") AS "XGB_esu_5"),
"XGB_6" AS
(WITH "DT_node_lookup_60" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 3.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_60" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0540493242 AS "Score" UNION ALL SELECT 2 AS nid, -0.0792427734 AS "Score") AS "Values"),
"DT_Output_60" AS
(SELECT "DT_node_lookup_60"."KEY" AS "KEY", "DT_node_lookup_60".node_id_2 AS node_id_2, "DT_node_data_60".nid AS nid, "DT_node_data_60"."Score" AS "Score"
FROM "DT_node_lookup_60" LEFT OUTER JOIN "DT_node_data_60" ON "DT_node_lookup_60".node_id_2 = "DT_node_data_60".nid),
"XGB_Model_0_20" AS
(SELECT "DT_Output_60"."KEY" AS "KEY", "DT_Output_60"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_60"),
"DT_node_lookup_61" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 7 ELSE 8 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_61" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0378667191 AS "Score" UNION ALL SELECT 5 AS nid, 0.0157628767 AS "Score" UNION ALL SELECT 6 AS nid, -0.0497770272 AS "Score" UNION ALL SELECT 7 AS nid, -0.00262441905 AS "Score" UNION ALL SELECT 8 AS nid, 0.0772111639 AS "Score") AS "Values"),
"DT_Output_61" AS
(SELECT "DT_node_lookup_61"."KEY" AS "KEY", "DT_node_lookup_61".node_id_2 AS node_id_2, "DT_node_data_61".nid AS nid, "DT_node_data_61"."Score" AS "Score"
FROM "DT_node_lookup_61" LEFT OUTER JOIN "DT_node_data_61" ON "DT_node_lookup_61".node_id_2 = "DT_node_data_61".nid),
"XGB_Model_1_20" AS
(SELECT "DT_Output_61"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_61"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_61"),
"DT_node_lookup_62" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.14999962) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 7 ELSE 8 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_62" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0839618593 AS "Score" UNION ALL SELECT 5 AS nid, -0.0245218221 AS "Score" UNION ALL SELECT 6 AS nid, 0.0942077488 AS "Score" UNION ALL SELECT 7 AS nid, -0.0924151838 AS "Score" UNION ALL SELECT 8 AS nid, -0.0187207777 AS "Score") AS "Values"),
"DT_Output_62" AS
(SELECT "DT_node_lookup_62"."KEY" AS "KEY", "DT_node_lookup_62".node_id_2 AS node_id_2, "DT_node_data_62".nid AS nid, "DT_node_data_62"."Score" AS "Score"
FROM "DT_node_lookup_62" LEFT OUTER JOIN "DT_node_data_62" ON "DT_node_lookup_62".node_id_2 = "DT_node_data_62".nid),
"XGB_Model_2_20" AS
(SELECT "DT_Output_62"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_62"."Score" AS "Score_virginica"
FROM "DT_Output_62"),
"DT_node_lookup_63" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_63" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0179066956 AS "Score") AS "Values"),
"DT_Output_63" AS
(SELECT "DT_node_lookup_63"."KEY" AS "KEY", "DT_node_lookup_63".node_id_2 AS node_id_2, "DT_node_data_63".nid AS nid, "DT_node_data_63"."Score" AS "Score"
FROM "DT_node_lookup_63" LEFT OUTER JOIN "DT_node_data_63" ON "DT_node_lookup_63".node_id_2 = "DT_node_data_63".nid),
"XGB_Model_0_21" AS
(SELECT "DT_Output_63"."KEY" AS "KEY", "DT_Output_63"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_63"),
"DT_node_lookup_64" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN CASE WHEN ("ADS"."Feature_3" < 1.25) THEN 7 ELSE 8 END ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_64" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 4 AS nid, -0.0268702079 AS "Score" UNION ALL SELECT 5 AS nid, -0.000982199796 AS "Score" UNION ALL SELECT 6 AS nid, -0.0406880565 AS "Score" UNION ALL SELECT 7 AS nid, -0.00101343798 AS "Score" UNION ALL SELECT 8 AS nid, 0.0830909014 AS "Score") AS "Values"),
"DT_Output_64" AS
(SELECT "DT_node_lookup_64"."KEY" AS "KEY", "DT_node_lookup_64".node_id_2 AS node_id_2, "DT_node_data_64".nid AS nid, "DT_node_data_64"."Score" AS "Score"
FROM "DT_node_lookup_64" LEFT OUTER JOIN "DT_node_data_64" ON "DT_node_lookup_64".node_id_2 = "DT_node_data_64".nid),
"XGB_Model_1_21" AS
(SELECT "DT_Output_64"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_64"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_64"),
"DT_node_lookup_65" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.1500001) THEN CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_65" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0619417466 AS "Score" UNION ALL SELECT 4 AS nid, 0.0941465348 AS "Score" UNION ALL SELECT 5 AS nid, 0.0434851237 AS "Score" UNION ALL SELECT 6 AS nid, -0.0493997522 AS "Score") AS "Values"),
"DT_Output_65" AS
(SELECT "DT_node_lookup_65"."KEY" AS "KEY", "DT_node_lookup_65".node_id_2 AS node_id_2, "DT_node_data_65".nid AS nid, "DT_node_data_65"."Score" AS "Score"
FROM "DT_node_lookup_65" LEFT OUTER JOIN "DT_node_data_65" ON "DT_node_lookup_65".node_id_2 = "DT_node_data_65".nid),
"XGB_Model_2_21" AS
(SELECT "DT_Output_65"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_65"."Score" AS "Score_virginica"
FROM "DT_Output_65"),
"DT_node_lookup_66" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_66" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0155692408 AS "Score") AS "Values"),
"DT_Output_66" AS
(SELECT "DT_node_lookup_66"."KEY" AS "KEY", "DT_node_lookup_66".node_id_2 AS node_id_2, "DT_node_data_66".nid AS nid, "DT_node_data_66"."Score" AS "Score"
FROM "DT_node_lookup_66" LEFT OUTER JOIN "DT_node_data_66" ON "DT_node_lookup_66".node_id_2 = "DT_node_data_66".nid),
"XGB_Model_0_22" AS
(SELECT "DT_Output_66"."KEY" AS "KEY", "DT_Output_66"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_66"),
"DT_node_lookup_67" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_67" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0449330322 AS "Score" UNION ALL SELECT 4 AS nid, -0.0250414126 AS "Score" UNION ALL SELECT 5 AS nid, 0.00143576087 AS "Score" UNION ALL SELECT 6 AS nid, 0.0741356462 AS "Score") AS "Values"),
"DT_Output_67" AS
(SELECT "DT_node_lookup_67"."KEY" AS "KEY", "DT_node_lookup_67".node_id_2 AS node_id_2, "DT_node_data_67".nid AS nid, "DT_node_data_67"."Score" AS "Score"
FROM "DT_node_lookup_67" LEFT OUTER JOIN "DT_node_data_67" ON "DT_node_lookup_67".node_id_2 = "DT_node_data_67".nid),
"XGB_Model_1_22" AS
(SELECT "DT_Output_67"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_67"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_67"),
"DT_node_lookup_68" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.1500001) THEN CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_68" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.059155751 AS "Score" UNION ALL SELECT 4 AS nid, 0.0893679634 AS "Score" UNION ALL SELECT 5 AS nid, 0.0408296585 AS "Score" UNION ALL SELECT 6 AS nid, -0.0458338223 AS "Score") AS "Values"),
"DT_Output_68" AS
(SELECT "DT_node_lookup_68"."KEY" AS "KEY", "DT_node_lookup_68".node_id_2 AS node_id_2, "DT_node_data_68".nid AS nid, "DT_node_data_68"."Score" AS "Score"
FROM "DT_node_lookup_68" LEFT OUTER JOIN "DT_node_data_68" ON "DT_node_lookup_68".node_id_2 = "DT_node_data_68".nid),
"XGB_Model_2_22" AS
(SELECT "DT_Output_68"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_68"."Score" AS "Score_virginica"
FROM "DT_Output_68"),
"DT_node_lookup_69" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_69" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.015744729 AS "Score") AS "Values"),
"DT_Output_69" AS
(SELECT "DT_node_lookup_69"."KEY" AS "KEY", "DT_node_lookup_69".node_id_2 AS node_id_2, "DT_node_data_69".nid AS nid, "DT_node_data_69"."Score" AS "Score"
FROM "DT_node_lookup_69" LEFT OUTER JOIN "DT_node_data_69" ON "DT_node_lookup_69".node_id_2 = "DT_node_data_69".nid),
"XGB_Model_0_23" AS
(SELECT "DT_Output_69"."KEY" AS "KEY", "DT_Output_69"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_69")
SELECT "XGB_esu_6"."KEY", "XGB_esu_6"."Score_setosa", "XGB_esu_6"."Score_versicolor", "XGB_esu_6"."Score_virginica"
FROM (SELECT "XGB_Model_0_20"."KEY" AS "KEY", CAST("XGB_Model_0_20"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_20"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_20"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_20" UNION ALL SELECT "XGB_Model_1_20"."KEY" AS "KEY", CAST("XGB_Model_1_20"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_20"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_20"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_20" UNION ALL SELECT "XGB_Model_2_20"."KEY" AS "KEY", CAST("XGB_Model_2_20"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_20"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_20"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_20" UNION ALL SELECT "XGB_Model_0_21"."KEY" AS "KEY", CAST("XGB_Model_0_21"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_21"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_21"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_21" UNION ALL SELECT "XGB_Model_1_21"."KEY" AS "KEY", CAST("XGB_Model_1_21"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_21"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_21"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_21" UNION ALL SELECT "XGB_Model_2_21"."KEY" AS "KEY", CAST("XGB_Model_2_21"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_21"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_21"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_21" UNION ALL SELECT "XGB_Model_0_22"."KEY" AS "KEY", CAST("XGB_Model_0_22"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_22"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_22"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_22" UNION ALL SELECT "XGB_Model_1_22"."KEY" AS "KEY", CAST("XGB_Model_1_22"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_22"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_22"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_22" UNION ALL SELECT "XGB_Model_2_22"."KEY" AS "KEY", CAST("XGB_Model_2_22"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_22"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_22"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_22" UNION ALL SELECT "XGB_Model_0_23"."KEY" AS "KEY", CAST("XGB_Model_0_23"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_23"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_23"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_23") AS "XGB_esu_6"),
"XGB_7" AS
(WITH "DT_node_lookup_70" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN CASE WHEN ("ADS"."Feature_3" < 1.25) THEN 7 ELSE 8 END ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_70" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 4 AS nid, -0.0176538341 AS "Score" UNION ALL SELECT 5 AS nid, -0.00114593399 AS "Score" UNION ALL SELECT 6 AS nid, -0.0446741246 AS "Score" UNION ALL SELECT 7 AS nid, 0.000384609273 AS "Score" UNION ALL SELECT 8 AS nid, 0.0789841935 AS "Score") AS "Values"),
"DT_Output_70" AS
(SELECT "DT_node_lookup_70"."KEY" AS "KEY", "DT_node_lookup_70".node_id_2 AS node_id_2, "DT_node_data_70".nid AS nid, "DT_node_data_70"."Score" AS "Score"
FROM "DT_node_lookup_70" LEFT OUTER JOIN "DT_node_data_70" ON "DT_node_lookup_70".node_id_2 = "DT_node_data_70".nid),
"XGB_Model_1_23" AS
(SELECT "DT_Output_70"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_70"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_70"),
"DT_node_lookup_71" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_71" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, 0.0031665361 AS "Score" UNION ALL SELECT 4 AS nid, -0.0605971441 AS "Score" UNION ALL SELECT 5 AS nid, 0.00406159461 AS "Score" UNION ALL SELECT 6 AS nid, 0.0611109696 AS "Score") AS "Values"),
"DT_Output_71" AS
(SELECT "DT_node_lookup_71"."KEY" AS "KEY", "DT_node_lookup_71".node_id_2 AS node_id_2, "DT_node_data_71".nid AS nid, "DT_node_data_71"."Score" AS "Score"
FROM "DT_node_lookup_71" LEFT OUTER JOIN "DT_node_data_71" ON "DT_node_lookup_71".node_id_2 = "DT_node_data_71".nid),
"XGB_Model_2_23" AS
(SELECT "DT_Output_71"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_71"."Score" AS "Score_virginica"
FROM "DT_Output_71"),
"DT_node_lookup_72" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_72" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0133001292 AS "Score") AS "Values"),
"DT_Output_72" AS
(SELECT "DT_node_lookup_72"."KEY" AS "KEY", "DT_node_lookup_72".node_id_2 AS node_id_2, "DT_node_data_72".nid AS nid, "DT_node_data_72"."Score" AS "Score"
FROM "DT_node_lookup_72" LEFT OUTER JOIN "DT_node_data_72" ON "DT_node_lookup_72".node_id_2 = "DT_node_data_72".nid),
"XGB_Model_0_24" AS
(SELECT "DT_Output_72"."KEY" AS "KEY", "DT_Output_72"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_72"),
"DT_node_lookup_73" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_73" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, 0.0480866693 AS "Score" UNION ALL SELECT 4 AS nid, -0.0190238114 AS "Score" UNION ALL SELECT 5 AS nid, -0.000265264971 AS "Score" UNION ALL SELECT 6 AS nid, -0.0367767699 AS "Score") AS "Values"),
"DT_Output_73" AS
(SELECT "DT_node_lookup_73"."KEY" AS "KEY", "DT_node_lookup_73".node_id_2 AS node_id_2, "DT_node_data_73".nid AS nid, "DT_node_data_73"."Score" AS "Score"
FROM "DT_node_lookup_73" LEFT OUTER JOIN "DT_node_data_73" ON "DT_node_lookup_73".node_id_2 = "DT_node_data_73".nid),
"XGB_Model_1_24" AS
(SELECT "DT_Output_73"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_73"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_73"),
"DT_node_lookup_74" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_74" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0540382154 AS "Score" UNION ALL SELECT 4 AS nid, 0.0821191147 AS "Score" UNION ALL SELECT 5 AS nid, 0.0366323479 AS "Score" UNION ALL SELECT 6 AS nid, -0.0388527885 AS "Score") AS "Values"),
"DT_Output_74" AS
(SELECT "DT_node_lookup_74"."KEY" AS "KEY", "DT_node_lookup_74".node_id_2 AS node_id_2, "DT_node_data_74".nid AS nid, "DT_node_data_74"."Score" AS "Score"
FROM "DT_node_lookup_74" LEFT OUTER JOIN "DT_node_data_74" ON "DT_node_lookup_74".node_id_2 = "DT_node_data_74".nid),
"XGB_Model_2_24" AS
(SELECT "DT_Output_74"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_74"."Score" AS "Score_virginica"
FROM "DT_Output_74"),
"DT_node_lookup_75" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_75" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00997390598 AS "Score") AS "Values"),
"DT_Output_75" AS
(SELECT "DT_node_lookup_75"."KEY" AS "KEY", "DT_node_lookup_75".node_id_2 AS node_id_2, "DT_node_data_75".nid AS nid, "DT_node_data_75"."Score" AS "Score"
FROM "DT_node_lookup_75" LEFT OUTER JOIN "DT_node_data_75" ON "DT_node_lookup_75".node_id_2 = "DT_node_data_75".nid),
"XGB_Model_0_25" AS
(SELECT "DT_Output_75"."KEY" AS "KEY", "DT_Output_75"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_75"),
"DT_node_lookup_76" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 5 ELSE 6 END ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 7 ELSE 8 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_76" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0431434438 AS "Score" UNION ALL SELECT 5 AS nid, 0.0769269913 AS "Score" UNION ALL SELECT 6 AS nid, 0.0126732504 AS "Score" UNION ALL SELECT 7 AS nid, 0.0043824818 AS "Score" UNION ALL SELECT 8 AS nid, -0.0386093259 AS "Score") AS "Values"),
"DT_Output_76" AS
(SELECT "DT_node_lookup_76"."KEY" AS "KEY", "DT_node_lookup_76".node_id_2 AS node_id_2, "DT_node_data_76".nid AS nid, "DT_node_data_76"."Score" AS "Score"
FROM "DT_node_lookup_76" LEFT OUTER JOIN "DT_node_data_76" ON "DT_node_lookup_76".node_id_2 = "DT_node_data_76".nid),
"XGB_Model_1_25" AS
(SELECT "DT_Output_76"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_76"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_76"),
"DT_node_lookup_77" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 5 ELSE 6 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_77" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 3 AS nid, -0.0818281472 AS "Score" UNION ALL SELECT 4 AS nid, 0.0207340959 AS "Score" UNION ALL SELECT 5 AS nid, 0.00212759362 AS "Score" UNION ALL SELECT 6 AS nid, 0.0531373285 AS "Score") AS "Values"),
"DT_Output_77" AS
(SELECT "DT_node_lookup_77"."KEY" AS "KEY", "DT_node_lookup_77".node_id_2 AS node_id_2, "DT_node_data_77".nid AS nid, "DT_node_data_77"."Score" AS "Score"
FROM "DT_node_lookup_77" LEFT OUTER JOIN "DT_node_data_77" ON "DT_node_lookup_77".node_id_2 = "DT_node_data_77".nid),
"XGB_Model_2_25" AS
(SELECT "DT_Output_77"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_77"."Score" AS "Score_virginica"
FROM "DT_Output_77"),
"DT_node_lookup_78" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_78" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00955263898 AS "Score") AS "Values"),
"DT_Output_78" AS
(SELECT "DT_node_lookup_78"."KEY" AS "KEY", "DT_node_lookup_78".node_id_2 AS node_id_2, "DT_node_data_78".nid AS nid, "DT_node_data_78"."Score" AS "Score"
FROM "DT_node_lookup_78" LEFT OUTER JOIN "DT_node_data_78" ON "DT_node_lookup_78".node_id_2 = "DT_node_data_78".nid),
"XGB_Model_0_26" AS
(SELECT "DT_Output_78"."KEY" AS "KEY", "DT_Output_78"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_78"),
"DT_node_lookup_79" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_79" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.04119201 AS "Score" UNION ALL SELECT 3 AS nid, 0.0539880507 AS "Score" UNION ALL SELECT 5 AS nid, 0.0263735726 AS "Score" UNION ALL SELECT 6 AS nid, -0.0261711683 AS "Score") AS "Values"),
"DT_Output_79" AS
(SELECT "DT_node_lookup_79"."KEY" AS "KEY", "DT_node_lookup_79".node_id_2 AS node_id_2, "DT_node_data_79".nid AS nid, "DT_node_data_79"."Score" AS "Score"
FROM "DT_node_lookup_79" LEFT OUTER JOIN "DT_node_data_79" ON "DT_node_lookup_79".node_id_2 = "DT_node_data_79".nid),
"XGB_Model_1_26" AS
(SELECT "DT_Output_79"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_79"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_79")
SELECT "XGB_esu_7"."KEY", "XGB_esu_7"."Score_setosa", "XGB_esu_7"."Score_versicolor", "XGB_esu_7"."Score_virginica"
FROM (SELECT "XGB_Model_1_23"."KEY" AS "KEY", CAST("XGB_Model_1_23"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_23"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_23"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_23" UNION ALL SELECT "XGB_Model_2_23"."KEY" AS "KEY", CAST("XGB_Model_2_23"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_23"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_23"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_23" UNION ALL SELECT "XGB_Model_0_24"."KEY" AS "KEY", CAST("XGB_Model_0_24"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_24"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_24"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_24" UNION ALL SELECT "XGB_Model_1_24"."KEY" AS "KEY", CAST("XGB_Model_1_24"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_24"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_24"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_24" UNION ALL SELECT "XGB_Model_2_24"."KEY" AS "KEY", CAST("XGB_Model_2_24"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_24"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_24"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_24" UNION ALL SELECT "XGB_Model_0_25"."KEY" AS "KEY", CAST("XGB_Model_0_25"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_25"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_25"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_25" UNION ALL SELECT "XGB_Model_1_25"."KEY" AS "KEY", CAST("XGB_Model_1_25"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_25"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_25"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_25" UNION ALL SELECT "XGB_Model_2_25"."KEY" AS "KEY", CAST("XGB_Model_2_25"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_25"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_25"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_25" UNION ALL SELECT "XGB_Model_0_26"."KEY" AS "KEY", CAST("XGB_Model_0_26"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_26"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_26"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_26" UNION ALL SELECT "XGB_Model_1_26"."KEY" AS "KEY", CAST("XGB_Model_1_26"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_26"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_26"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_26") AS "XGB_esu_7"),
"XGB_8" AS
(WITH "DT_node_lookup_80" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_80" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0511326827 AS "Score" UNION ALL SELECT 3 AS nid, 0.00185551844 AS "Score" UNION ALL SELECT 4 AS nid, 0.0385568812 AS "Score") AS "Values"),
"DT_Output_80" AS
(SELECT "DT_node_lookup_80"."KEY" AS "KEY", "DT_node_lookup_80".node_id_2 AS node_id_2, "DT_node_data_80".nid AS nid, "DT_node_data_80"."Score" AS "Score"
FROM "DT_node_lookup_80" LEFT OUTER JOIN "DT_node_data_80" ON "DT_node_lookup_80".node_id_2 = "DT_node_data_80".nid),
"XGB_Model_2_26" AS
(SELECT "DT_Output_80"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_80"."Score" AS "Score_virginica"
FROM "DT_Output_80"),
"DT_node_lookup_81" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_81" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00981088728 AS "Score") AS "Values"),
"DT_Output_81" AS
(SELECT "DT_node_lookup_81"."KEY" AS "KEY", "DT_node_lookup_81".node_id_2 AS node_id_2, "DT_node_data_81".nid AS nid, "DT_node_data_81"."Score" AS "Score"
FROM "DT_node_lookup_81" LEFT OUTER JOIN "DT_node_data_81" ON "DT_node_lookup_81".node_id_2 = "DT_node_data_81".nid),
"XGB_Model_0_27" AS
(SELECT "DT_Output_81"."KEY" AS "KEY", "DT_Output_81"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_81"),
"DT_node_lookup_82" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_82" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0384210199 AS "Score" UNION ALL SELECT 3 AS nid, 0.0508708805 AS "Score" UNION ALL SELECT 5 AS nid, 0.00210936577 AS "Score" UNION ALL SELECT 6 AS nid, -0.027880488 AS "Score") AS "Values"),
"DT_Output_82" AS
(SELECT "DT_node_lookup_82"."KEY" AS "KEY", "DT_node_lookup_82".node_id_2 AS node_id_2, "DT_node_data_82".nid AS nid, "DT_node_data_82"."Score" AS "Score"
FROM "DT_node_lookup_82" LEFT OUTER JOIN "DT_node_data_82" ON "DT_node_lookup_82".node_id_2 = "DT_node_data_82".nid),
"XGB_Model_1_27" AS
(SELECT "DT_Output_82"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_82"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_82"),
"DT_node_lookup_83" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 3.04999995) THEN CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_83" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0464496911 AS "Score" UNION ALL SELECT 4 AS nid, 0.0466180742 AS "Score" UNION ALL SELECT 5 AS nid, -0.03792914 AS "Score" UNION ALL SELECT 6 AS nid, 0.0333756544 AS "Score") AS "Values"),
"DT_Output_83" AS
(SELECT "DT_node_lookup_83"."KEY" AS "KEY", "DT_node_lookup_83".node_id_2 AS node_id_2, "DT_node_data_83".nid AS nid, "DT_node_data_83"."Score" AS "Score"
FROM "DT_node_lookup_83" LEFT OUTER JOIN "DT_node_data_83" ON "DT_node_lookup_83".node_id_2 = "DT_node_data_83".nid),
"XGB_Model_2_27" AS
(SELECT "DT_Output_83"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_83"."Score" AS "Score_virginica"
FROM "DT_Output_83"),
"DT_node_lookup_84" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_84" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00944088772 AS "Score") AS "Values"),
"DT_Output_84" AS
(SELECT "DT_node_lookup_84"."KEY" AS "KEY", "DT_node_lookup_84".node_id_2 AS node_id_2, "DT_node_data_84".nid AS nid, "DT_node_data_84"."Score" AS "Score"
FROM "DT_node_lookup_84" LEFT OUTER JOIN "DT_node_data_84" ON "DT_node_lookup_84".node_id_2 = "DT_node_data_84".nid),
"XGB_Model_0_28" AS
(SELECT "DT_Output_84"."KEY" AS "KEY", "DT_Output_84"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_84"),
"DT_node_lookup_85" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.54999995) THEN 3 ELSE CASE WHEN ("ADS"."Feature_2" < 4.75) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_85" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0252227988 AS "Score" UNION ALL SELECT 3 AS nid, -0.0220762342 AS "Score" UNION ALL SELECT 5 AS nid, -0.00109118759 AS "Score" UNION ALL SELECT 6 AS nid, 0.0518871881 AS "Score") AS "Values"),
"DT_Output_85" AS
(SELECT "DT_node_lookup_85"."KEY" AS "KEY", "DT_node_lookup_85".node_id_2 AS node_id_2, "DT_node_data_85".nid AS nid, "DT_node_data_85"."Score" AS "Score"
FROM "DT_node_lookup_85" LEFT OUTER JOIN "DT_node_data_85" ON "DT_node_lookup_85".node_id_2 = "DT_node_data_85".nid),
"XGB_Model_1_28" AS
(SELECT "DT_Output_85"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_85"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_85"),
"DT_node_lookup_86" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_86" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0413380004 AS "Score" UNION ALL SELECT 3 AS nid, -0.0661251023 AS "Score" UNION ALL SELECT 4 AS nid, 0.0321115516 AS "Score") AS "Values"),
"DT_Output_86" AS
(SELECT "DT_node_lookup_86"."KEY" AS "KEY", "DT_node_lookup_86".node_id_2 AS node_id_2, "DT_node_data_86".nid AS nid, "DT_node_data_86"."Score" AS "Score"
FROM "DT_node_lookup_86" LEFT OUTER JOIN "DT_node_data_86" ON "DT_node_lookup_86".node_id_2 = "DT_node_data_86".nid),
"XGB_Model_2_28" AS
(SELECT "DT_Output_86"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_86"."Score" AS "Score_virginica"
FROM "DT_Output_86"),
"DT_node_lookup_87" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_87" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00913806539 AS "Score") AS "Values"),
"DT_Output_87" AS
(SELECT "DT_node_lookup_87"."KEY" AS "KEY", "DT_node_lookup_87".node_id_2 AS node_id_2, "DT_node_data_87".nid AS nid, "DT_node_data_87"."Score" AS "Score"
FROM "DT_node_lookup_87" LEFT OUTER JOIN "DT_node_data_87" ON "DT_node_lookup_87".node_id_2 = "DT_node_data_87".nid),
"XGB_Model_0_29" AS
(SELECT "DT_Output_87"."KEY" AS "KEY", "DT_Output_87"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_87"),
"DT_node_lookup_88" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_0" < 6.14999962) THEN 5 ELSE 6 END ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_88" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0292247944 AS "Score" UNION ALL SELECT 4 AS nid, -0.0188348461 AS "Score" UNION ALL SELECT 5 AS nid, -0.00687014451 AS "Score" UNION ALL SELECT 6 AS nid, 0.0701614246 AS "Score") AS "Values"),
"DT_Output_88" AS
(SELECT "DT_node_lookup_88"."KEY" AS "KEY", "DT_node_lookup_88".node_id_2 AS node_id_2, "DT_node_data_88".nid AS nid, "DT_node_data_88"."Score" AS "Score"
FROM "DT_node_lookup_88" LEFT OUTER JOIN "DT_node_data_88" ON "DT_node_lookup_88".node_id_2 = "DT_node_data_88".nid),
"XGB_Model_1_29" AS
(SELECT "DT_Output_88"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_88"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_88"),
"DT_node_lookup_89" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_89" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0398634486 AS "Score" UNION ALL SELECT 3 AS nid, 0.0345116779 AS "Score" UNION ALL SELECT 4 AS nid, -0.0714285746 AS "Score") AS "Values"),
"DT_Output_89" AS
(SELECT "DT_node_lookup_89"."KEY" AS "KEY", "DT_node_lookup_89".node_id_2 AS node_id_2, "DT_node_data_89".nid AS nid, "DT_node_data_89"."Score" AS "Score"
FROM "DT_node_lookup_89" LEFT OUTER JOIN "DT_node_data_89" ON "DT_node_lookup_89".node_id_2 = "DT_node_data_89".nid),
"XGB_Model_2_29" AS
(SELECT "DT_Output_89"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_89"."Score" AS "Score_virginica"
FROM "DT_Output_89")
SELECT "XGB_esu_8"."KEY", "XGB_esu_8"."Score_setosa", "XGB_esu_8"."Score_versicolor", "XGB_esu_8"."Score_virginica"
FROM (SELECT "XGB_Model_2_26"."KEY" AS "KEY", CAST("XGB_Model_2_26"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_26"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_26"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_26" UNION ALL SELECT "XGB_Model_0_27"."KEY" AS "KEY", CAST("XGB_Model_0_27"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_27"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_27"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_27" UNION ALL SELECT "XGB_Model_1_27"."KEY" AS "KEY", CAST("XGB_Model_1_27"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_27"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_27"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_27" UNION ALL SELECT "XGB_Model_2_27"."KEY" AS "KEY", CAST("XGB_Model_2_27"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_27"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_27"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_27" UNION ALL SELECT "XGB_Model_0_28"."KEY" AS "KEY", CAST("XGB_Model_0_28"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_28"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_28"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_28" UNION ALL SELECT "XGB_Model_1_28"."KEY" AS "KEY", CAST("XGB_Model_1_28"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_28"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_28"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_28" UNION ALL SELECT "XGB_Model_2_28"."KEY" AS "KEY", CAST("XGB_Model_2_28"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_28"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_28"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_28" UNION ALL SELECT "XGB_Model_0_29"."KEY" AS "KEY", CAST("XGB_Model_0_29"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_29"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_29"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_29" UNION ALL SELECT "XGB_Model_1_29"."KEY" AS "KEY", CAST("XGB_Model_1_29"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_29"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_29"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_29" UNION ALL SELECT "XGB_Model_2_29"."KEY" AS "KEY", CAST("XGB_Model_2_29"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_29"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_29"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_29") AS "XGB_esu_8"),
"XGB_9" AS
(WITH "DT_node_lookup_90" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_90" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00948853698 AS "Score") AS "Values"),
"DT_Output_90" AS
(SELECT "DT_node_lookup_90"."KEY" AS "KEY", "DT_node_lookup_90".node_id_2 AS node_id_2, "DT_node_data_90".nid AS nid, "DT_node_data_90"."Score" AS "Score"
FROM "DT_node_lookup_90" LEFT OUTER JOIN "DT_node_data_90" ON "DT_node_lookup_90".node_id_2 = "DT_node_data_90".nid),
"XGB_Model_0_30" AS
(SELECT "DT_Output_90"."KEY" AS "KEY", "DT_Output_90"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_90"),
"DT_node_lookup_91" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 5 ELSE 6 END END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_91" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0280799326 AS "Score" UNION ALL SELECT 3 AS nid, -0.0311154202 AS "Score" UNION ALL SELECT 5 AS nid, -0.00652475329 AS "Score" UNION ALL SELECT 6 AS nid, 0.0759590864 AS "Score") AS "Values"),
"DT_Output_91" AS
(SELECT "DT_node_lookup_91"."KEY" AS "KEY", "DT_node_lookup_91".node_id_2 AS node_id_2, "DT_node_data_91".nid AS nid, "DT_node_data_91"."Score" AS "Score"
FROM "DT_node_lookup_91" LEFT OUTER JOIN "DT_node_data_91" ON "DT_node_lookup_91".node_id_2 = "DT_node_data_91".nid),
"XGB_Model_1_30" AS
(SELECT "DT_Output_91"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_91"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_91"),
"DT_node_lookup_92" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_92" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0402488708 AS "Score" UNION ALL SELECT 3 AS nid, 0.0290185809 AS "Score" UNION ALL SELECT 4 AS nid, -0.0551775955 AS "Score") AS "Values"),
"DT_Output_92" AS
(SELECT "DT_node_lookup_92"."KEY" AS "KEY", "DT_node_lookup_92".node_id_2 AS node_id_2, "DT_node_data_92".nid AS nid, "DT_node_data_92"."Score" AS "Score"
FROM "DT_node_lookup_92" LEFT OUTER JOIN "DT_node_data_92" ON "DT_node_lookup_92".node_id_2 = "DT_node_data_92".nid),
"XGB_Model_2_30" AS
(SELECT "DT_Output_92"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_92"."Score" AS "Score_virginica"
FROM "DT_Output_92"),
"DT_node_lookup_93" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_93" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0098428065 AS "Score") AS "Values"),
"DT_Output_93" AS
(SELECT "DT_node_lookup_93"."KEY" AS "KEY", "DT_node_lookup_93".node_id_2 AS node_id_2, "DT_node_data_93".nid AS nid, "DT_node_data_93"."Score" AS "Score"
FROM "DT_node_lookup_93" LEFT OUTER JOIN "DT_node_data_93" ON "DT_node_lookup_93".node_id_2 = "DT_node_data_93".nid),
"XGB_Model_0_31" AS
(SELECT "DT_Output_93"."KEY" AS "KEY", "DT_Output_93"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_93"),
"DT_node_lookup_94" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE CASE WHEN ("ADS"."Feature_0" < 6.14999962) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_94" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0284582432 AS "Score" UNION ALL SELECT 3 AS nid, 0.0424418673 AS "Score" UNION ALL SELECT 5 AS nid, -0.00249441504 AS "Score" UNION ALL SELECT 6 AS nid, -0.0245191883 AS "Score") AS "Values"),
"DT_Output_94" AS
(SELECT "DT_node_lookup_94"."KEY" AS "KEY", "DT_node_lookup_94".node_id_2 AS node_id_2, "DT_node_data_94".nid AS nid, "DT_node_data_94"."Score" AS "Score"
FROM "DT_node_lookup_94" LEFT OUTER JOIN "DT_node_data_94" ON "DT_node_lookup_94".node_id_2 = "DT_node_data_94".nid),
"XGB_Model_1_31" AS
(SELECT "DT_Output_94"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_94"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_94"),
"DT_node_lookup_95" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_95" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0331075191 AS "Score" UNION ALL SELECT 3 AS nid, -0.0172562785 AS "Score" UNION ALL SELECT 4 AS nid, 0.0457667001 AS "Score") AS "Values"),
"DT_Output_95" AS
(SELECT "DT_node_lookup_95"."KEY" AS "KEY", "DT_node_lookup_95".node_id_2 AS node_id_2, "DT_node_data_95".nid AS nid, "DT_node_data_95"."Score" AS "Score"
FROM "DT_node_lookup_95" LEFT OUTER JOIN "DT_node_data_95" ON "DT_node_lookup_95".node_id_2 = "DT_node_data_95".nid),
"XGB_Model_2_31" AS
(SELECT "DT_Output_95"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_95"."Score" AS "Score_virginica"
FROM "DT_Output_95"),
"DT_node_lookup_96" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_96" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00883580931 AS "Score") AS "Values"),
"DT_Output_96" AS
(SELECT "DT_node_lookup_96"."KEY" AS "KEY", "DT_node_lookup_96".node_id_2 AS node_id_2, "DT_node_data_96".nid AS nid, "DT_node_data_96"."Score" AS "Score"
FROM "DT_node_lookup_96" LEFT OUTER JOIN "DT_node_data_96" ON "DT_node_lookup_96".node_id_2 = "DT_node_data_96".nid),
"XGB_Model_0_32" AS
(SELECT "DT_Output_96"."KEY" AS "KEY", "DT_Output_96"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_96"),
"DT_node_lookup_97" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_97" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0245527439 AS "Score" UNION ALL SELECT 3 AS nid, -0.0187102128 AS "Score" UNION ALL SELECT 5 AS nid, 0.0691487789 AS "Score" UNION ALL SELECT 6 AS nid, -0.0142170228 AS "Score") AS "Values"),
"DT_Output_97" AS
(SELECT "DT_node_lookup_97"."KEY" AS "KEY", "DT_node_lookup_97".node_id_2 AS node_id_2, "DT_node_data_97".nid AS nid, "DT_node_data_97"."Score" AS "Score"
FROM "DT_node_lookup_97" LEFT OUTER JOIN "DT_node_data_97" ON "DT_node_lookup_97".node_id_2 = "DT_node_data_97".nid),
"XGB_Model_1_32" AS
(SELECT "DT_Output_97"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_97"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_97"),
"DT_node_lookup_98" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_98" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0364038199 AS "Score" UNION ALL SELECT 3 AS nid, 0.0275381152 AS "Score" UNION ALL SELECT 4 AS nid, -0.0496912189 AS "Score") AS "Values"),
"DT_Output_98" AS
(SELECT "DT_node_lookup_98"."KEY" AS "KEY", "DT_node_lookup_98".node_id_2 AS node_id_2, "DT_node_data_98".nid AS nid, "DT_node_data_98"."Score" AS "Score"
FROM "DT_node_lookup_98" LEFT OUTER JOIN "DT_node_data_98" ON "DT_node_lookup_98".node_id_2 = "DT_node_data_98".nid),
"XGB_Model_2_32" AS
(SELECT "DT_Output_98"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_98"."Score" AS "Score_virginica"
FROM "DT_Output_98"),
"DT_node_lookup_99" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_99" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00938414969 AS "Score") AS "Values"),
"DT_Output_99" AS
(SELECT "DT_node_lookup_99"."KEY" AS "KEY", "DT_node_lookup_99".node_id_2 AS node_id_2, "DT_node_data_99".nid AS nid, "DT_node_data_99"."Score" AS "Score"
FROM "DT_node_lookup_99" LEFT OUTER JOIN "DT_node_data_99" ON "DT_node_lookup_99".node_id_2 = "DT_node_data_99".nid),
"XGB_Model_0_33" AS
(SELECT "DT_Output_99"."KEY" AS "KEY", "DT_Output_99"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_99")
SELECT "XGB_esu_9"."KEY", "XGB_esu_9"."Score_setosa", "XGB_esu_9"."Score_versicolor", "XGB_esu_9"."Score_virginica"
FROM (SELECT "XGB_Model_0_30"."KEY" AS "KEY", CAST("XGB_Model_0_30"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_30"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_30"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_30" UNION ALL SELECT "XGB_Model_1_30"."KEY" AS "KEY", CAST("XGB_Model_1_30"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_30"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_30"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_30" UNION ALL SELECT "XGB_Model_2_30"."KEY" AS "KEY", CAST("XGB_Model_2_30"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_30"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_30"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_30" UNION ALL SELECT "XGB_Model_0_31"."KEY" AS "KEY", CAST("XGB_Model_0_31"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_31"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_31"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_31" UNION ALL SELECT "XGB_Model_1_31"."KEY" AS "KEY", CAST("XGB_Model_1_31"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_31"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_31"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_31" UNION ALL SELECT "XGB_Model_2_31"."KEY" AS "KEY", CAST("XGB_Model_2_31"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_31"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_31"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_31" UNION ALL SELECT "XGB_Model_0_32"."KEY" AS "KEY", CAST("XGB_Model_0_32"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_32"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_32"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_32" UNION ALL SELECT "XGB_Model_1_32"."KEY" AS "KEY", CAST("XGB_Model_1_32"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_32"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_32"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_32" UNION ALL SELECT "XGB_Model_2_32"."KEY" AS "KEY", CAST("XGB_Model_2_32"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_32"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_32"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_32" UNION ALL SELECT "XGB_Model_0_33"."KEY" AS "KEY", CAST("XGB_Model_0_33"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_33"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_33"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_33") AS "XGB_esu_9"),
"XGB_10" AS
(WITH "DT_node_lookup_100" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_0" < 6.14999962) THEN CASE WHEN ("ADS"."Feature_2" < 4.44999981) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_100" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0263008215 AS "Score" UNION ALL SELECT 4 AS nid, 0.061920464 AS "Score" UNION ALL SELECT 5 AS nid, 0.00821912475 AS "Score" UNION ALL SELECT 6 AS nid, -0.0278740041 AS "Score") AS "Values"),
"DT_Output_100" AS
(SELECT "DT_node_lookup_100"."KEY" AS "KEY", "DT_node_lookup_100".node_id_2 AS node_id_2, "DT_node_data_100".nid AS nid, "DT_node_data_100"."Score" AS "Score"
FROM "DT_node_lookup_100" LEFT OUTER JOIN "DT_node_data_100" ON "DT_node_lookup_100".node_id_2 = "DT_node_data_100".nid),
"XGB_Model_1_33" AS
(SELECT "DT_Output_100"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_100"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_100"),
"DT_node_lookup_101" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_101" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0337834693 AS "Score" UNION ALL SELECT 3 AS nid, -0.00599373318 AS "Score" UNION ALL SELECT 4 AS nid, -0.0404546335 AS "Score") AS "Values"),
"DT_Output_101" AS
(SELECT "DT_node_lookup_101"."KEY" AS "KEY", "DT_node_lookup_101".node_id_2 AS node_id_2, "DT_node_data_101".nid AS nid, "DT_node_data_101"."Score" AS "Score"
FROM "DT_node_lookup_101" LEFT OUTER JOIN "DT_node_data_101" ON "DT_node_lookup_101".node_id_2 = "DT_node_data_101".nid),
"XGB_Model_2_33" AS
(SELECT "DT_Output_101"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_101"."Score" AS "Score_virginica"
FROM "DT_Output_101"),
"DT_node_lookup_102" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_102" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00787646137 AS "Score") AS "Values"),
"DT_Output_102" AS
(SELECT "DT_node_lookup_102"."KEY" AS "KEY", "DT_node_lookup_102".node_id_2 AS node_id_2, "DT_node_data_102".nid AS nid, "DT_node_data_102"."Score" AS "Score"
FROM "DT_node_lookup_102" LEFT OUTER JOIN "DT_node_data_102" ON "DT_node_lookup_102".node_id_2 = "DT_node_data_102".nid),
"XGB_Model_0_34" AS
(SELECT "DT_Output_102"."KEY" AS "KEY", "DT_Output_102"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_102"),
"DT_node_lookup_103" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_103" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0227765646 AS "Score" UNION ALL SELECT 3 AS nid, -0.045505017 AS "Score" UNION ALL SELECT 4 AS nid, 0.0205142666 AS "Score") AS "Values"),
"DT_Output_103" AS
(SELECT "DT_node_lookup_103"."KEY" AS "KEY", "DT_node_lookup_103".node_id_2 AS node_id_2, "DT_node_data_103".nid AS nid, "DT_node_data_103"."Score" AS "Score"
FROM "DT_node_lookup_103" LEFT OUTER JOIN "DT_node_data_103" ON "DT_node_lookup_103".node_id_2 = "DT_node_data_103".nid),
"XGB_Model_1_34" AS
(SELECT "DT_Output_103"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_103"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_103"),
"DT_node_lookup_104" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_104" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0336465798 AS "Score" UNION ALL SELECT 3 AS nid, -0.0169479903 AS "Score" UNION ALL SELECT 4 AS nid, 0.0445929952 AS "Score") AS "Values"),
"DT_Output_104" AS
(SELECT "DT_node_lookup_104"."KEY" AS "KEY", "DT_node_lookup_104".node_id_2 AS node_id_2, "DT_node_data_104".nid AS nid, "DT_node_data_104"."Score" AS "Score"
FROM "DT_node_lookup_104" LEFT OUTER JOIN "DT_node_data_104" ON "DT_node_lookup_104".node_id_2 = "DT_node_data_104".nid),
"XGB_Model_2_34" AS
(SELECT "DT_Output_104"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_104"."Score" AS "Score_virginica"
FROM "DT_Output_104"),
"DT_node_lookup_105" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_105" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00864409655 AS "Score") AS "Values"),
"DT_Output_105" AS
(SELECT "DT_node_lookup_105"."KEY" AS "KEY", "DT_node_lookup_105".node_id_2 AS node_id_2, "DT_node_data_105".nid AS nid, "DT_node_data_105"."Score" AS "Score"
FROM "DT_node_lookup_105" LEFT OUTER JOIN "DT_node_data_105" ON "DT_node_lookup_105".node_id_2 = "DT_node_data_105".nid),
"XGB_Model_0_35" AS
(SELECT "DT_Output_105"."KEY" AS "KEY", "DT_Output_105"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_105"),
"DT_node_lookup_106" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_106" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0239390749 AS "Score" UNION ALL SELECT 3 AS nid, 0.0417544506 AS "Score" UNION ALL SELECT 4 AS nid, -0.0106078153 AS "Score") AS "Values"),
"DT_Output_106" AS
(SELECT "DT_node_lookup_106"."KEY" AS "KEY", "DT_node_lookup_106".node_id_2 AS node_id_2, "DT_node_data_106".nid AS nid, "DT_node_data_106"."Score" AS "Score"
FROM "DT_node_lookup_106" LEFT OUTER JOIN "DT_node_data_106" ON "DT_node_lookup_106".node_id_2 = "DT_node_data_106".nid),
"XGB_Model_1_35" AS
(SELECT "DT_Output_106"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_106"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_106"),
"DT_node_lookup_107" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_107" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0258831065 AS "Score" UNION ALL SELECT 2 AS nid, 0.0288419928 AS "Score") AS "Values"),
"DT_Output_107" AS
(SELECT "DT_node_lookup_107"."KEY" AS "KEY", "DT_node_lookup_107".node_id_2 AS node_id_2, "DT_node_data_107".nid AS nid, "DT_node_data_107"."Score" AS "Score"
FROM "DT_node_lookup_107" LEFT OUTER JOIN "DT_node_data_107" ON "DT_node_lookup_107".node_id_2 = "DT_node_data_107".nid),
"XGB_Model_2_35" AS
(SELECT "DT_Output_107"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_107"."Score" AS "Score_virginica"
FROM "DT_Output_107"),
"DT_node_lookup_108" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_108" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00862789806 AS "Score") AS "Values"),
"DT_Output_108" AS
(SELECT "DT_node_lookup_108"."KEY" AS "KEY", "DT_node_lookup_108".node_id_2 AS node_id_2, "DT_node_data_108".nid AS nid, "DT_node_data_108"."Score" AS "Score"
FROM "DT_node_lookup_108" LEFT OUTER JOIN "DT_node_data_108" ON "DT_node_lookup_108".node_id_2 = "DT_node_data_108".nid),
"XGB_Model_0_36" AS
(SELECT "DT_Output_108"."KEY" AS "KEY", "DT_Output_108"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_108"),
"DT_node_lookup_109" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_109" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0230634958 AS "Score" UNION ALL SELECT 3 AS nid, -0.0171925146 AS "Score" UNION ALL SELECT 5 AS nid, 0.0669530407 AS "Score" UNION ALL SELECT 6 AS nid, -0.0152255576 AS "Score") AS "Values"),
"DT_Output_109" AS
(SELECT "DT_node_lookup_109"."KEY" AS "KEY", "DT_node_lookup_109".node_id_2 AS node_id_2, "DT_node_data_109".nid AS nid, "DT_node_data_109"."Score" AS "Score"
FROM "DT_node_lookup_109" LEFT OUTER JOIN "DT_node_data_109" ON "DT_node_lookup_109".node_id_2 = "DT_node_data_109".nid),
"XGB_Model_1_36" AS
(SELECT "DT_Output_109"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_109"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_109")
SELECT "XGB_esu_10"."KEY", "XGB_esu_10"."Score_setosa", "XGB_esu_10"."Score_versicolor", "XGB_esu_10"."Score_virginica"
FROM (SELECT "XGB_Model_1_33"."KEY" AS "KEY", CAST("XGB_Model_1_33"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_33"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_33"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_33" UNION ALL SELECT "XGB_Model_2_33"."KEY" AS "KEY", CAST("XGB_Model_2_33"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_33"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_33"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_33" UNION ALL SELECT "XGB_Model_0_34"."KEY" AS "KEY", CAST("XGB_Model_0_34"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_34"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_34"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_34" UNION ALL SELECT "XGB_Model_1_34"."KEY" AS "KEY", CAST("XGB_Model_1_34"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_34"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_34"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_34" UNION ALL SELECT "XGB_Model_2_34"."KEY" AS "KEY", CAST("XGB_Model_2_34"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_34"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_34"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_34" UNION ALL SELECT "XGB_Model_0_35"."KEY" AS "KEY", CAST("XGB_Model_0_35"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_35"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_35"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_35" UNION ALL SELECT "XGB_Model_1_35"."KEY" AS "KEY", CAST("XGB_Model_1_35"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_35"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_35"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_35" UNION ALL SELECT "XGB_Model_2_35"."KEY" AS "KEY", CAST("XGB_Model_2_35"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_35"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_35"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_35" UNION ALL SELECT "XGB_Model_0_36"."KEY" AS "KEY", CAST("XGB_Model_0_36"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_36"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_36"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_36" UNION ALL SELECT "XGB_Model_1_36"."KEY" AS "KEY", CAST("XGB_Model_1_36"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_36"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_36"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_36") AS "XGB_esu_10"),
"XGB_11" AS
(WITH "DT_node_lookup_110" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_110" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0356331468 AS "Score" UNION ALL SELECT 3 AS nid, 0.0271626655 AS "Score" UNION ALL SELECT 4 AS nid, -0.0498888977 AS "Score") AS "Values"),
"DT_Output_110" AS
(SELECT "DT_node_lookup_110"."KEY" AS "KEY", "DT_node_lookup_110".node_id_2 AS node_id_2, "DT_node_data_110".nid AS nid, "DT_node_data_110"."Score" AS "Score"
FROM "DT_node_lookup_110" LEFT OUTER JOIN "DT_node_data_110" ON "DT_node_lookup_110".node_id_2 = "DT_node_data_110".nid),
"XGB_Model_2_36" AS
(SELECT "DT_Output_110"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_110"."Score" AS "Score_virginica"
FROM "DT_Output_110"),
"DT_node_lookup_111" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_111" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00915788487 AS "Score") AS "Values"),
"DT_Output_111" AS
(SELECT "DT_node_lookup_111"."KEY" AS "KEY", "DT_node_lookup_111".node_id_2 AS node_id_2, "DT_node_data_111".nid AS nid, "DT_node_data_111"."Score" AS "Score"
FROM "DT_node_lookup_111" LEFT OUTER JOIN "DT_node_data_111" ON "DT_node_lookup_111".node_id_2 = "DT_node_data_111".nid),
"XGB_Model_0_37" AS
(SELECT "DT_Output_111"."KEY" AS "KEY", "DT_Output_111"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_111"),
"DT_node_lookup_112" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_112" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0205180235 AS "Score" UNION ALL SELECT 3 AS nid, 0.00784827676 AS "Score" UNION ALL SELECT 4 AS nid, -0.0257642232 AS "Score") AS "Values"),
"DT_Output_112" AS
(SELECT "DT_node_lookup_112"."KEY" AS "KEY", "DT_node_lookup_112".node_id_2 AS node_id_2, "DT_node_data_112".nid AS nid, "DT_node_data_112"."Score" AS "Score"
FROM "DT_node_lookup_112" LEFT OUTER JOIN "DT_node_data_112" ON "DT_node_lookup_112".node_id_2 = "DT_node_data_112".nid),
"XGB_Model_1_37" AS
(SELECT "DT_Output_112"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_112"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_112"),
"DT_node_lookup_113" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_113" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0315663256 AS "Score" UNION ALL SELECT 3 AS nid, -0.0142405257 AS "Score" UNION ALL SELECT 4 AS nid, 0.0430724174 AS "Score") AS "Values"),
"DT_Output_113" AS
(SELECT "DT_node_lookup_113"."KEY" AS "KEY", "DT_node_lookup_113".node_id_2 AS node_id_2, "DT_node_data_113".nid AS nid, "DT_node_data_113"."Score" AS "Score"
FROM "DT_node_lookup_113" LEFT OUTER JOIN "DT_node_data_113" ON "DT_node_lookup_113".node_id_2 = "DT_node_data_113".nid),
"XGB_Model_2_37" AS
(SELECT "DT_Output_113"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_113"."Score" AS "Score_virginica"
FROM "DT_Output_113"),
"DT_node_lookup_114" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_114" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00785845518 AS "Score") AS "Values"),
"DT_Output_114" AS
(SELECT "DT_node_lookup_114"."KEY" AS "KEY", "DT_node_lookup_114".node_id_2 AS node_id_2, "DT_node_data_114".nid AS nid, "DT_node_data_114"."Score" AS "Score"
FROM "DT_node_lookup_114" LEFT OUTER JOIN "DT_node_data_114" ON "DT_node_lookup_114".node_id_2 = "DT_node_data_114".nid),
"XGB_Model_0_38" AS
(SELECT "DT_Output_114"."KEY" AS "KEY", "DT_Output_114"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_114"),
"DT_node_lookup_115" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_115" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0237563048 AS "Score" UNION ALL SELECT 3 AS nid, 0.0406773686 AS "Score" UNION ALL SELECT 4 AS nid, -0.0100995535 AS "Score") AS "Values"),
"DT_Output_115" AS
(SELECT "DT_node_lookup_115"."KEY" AS "KEY", "DT_node_lookup_115".node_id_2 AS node_id_2, "DT_node_data_115".nid AS nid, "DT_node_data_115"."Score" AS "Score"
FROM "DT_node_lookup_115" LEFT OUTER JOIN "DT_node_data_115" ON "DT_node_lookup_115".node_id_2 = "DT_node_data_115".nid),
"XGB_Model_1_38" AS
(SELECT "DT_Output_115"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_115"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_115"),
"DT_node_lookup_116" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_116" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0279130358 AS "Score" UNION ALL SELECT 3 AS nid, -0.0115731582 AS "Score" UNION ALL SELECT 4 AS nid, 0.0372236706 AS "Score") AS "Values"),
"DT_Output_116" AS
(SELECT "DT_node_lookup_116"."KEY" AS "KEY", "DT_node_lookup_116".node_id_2 AS node_id_2, "DT_node_data_116".nid AS nid, "DT_node_data_116"."Score" AS "Score"
FROM "DT_node_lookup_116" LEFT OUTER JOIN "DT_node_data_116" ON "DT_node_lookup_116".node_id_2 = "DT_node_data_116".nid),
"XGB_Model_2_38" AS
(SELECT "DT_Output_116"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_116"."Score" AS "Score_virginica"
FROM "DT_Output_116"),
"DT_node_lookup_117" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_117" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00798171479 AS "Score") AS "Values"),
"DT_Output_117" AS
(SELECT "DT_node_lookup_117"."KEY" AS "KEY", "DT_node_lookup_117".node_id_2 AS node_id_2, "DT_node_data_117".nid AS nid, "DT_node_data_117"."Score" AS "Score"
FROM "DT_node_lookup_117" LEFT OUTER JOIN "DT_node_data_117" ON "DT_node_lookup_117".node_id_2 = "DT_node_data_117".nid),
"XGB_Model_0_39" AS
(SELECT "DT_Output_117"."KEY" AS "KEY", "DT_Output_117"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_117"),
"DT_node_lookup_118" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_118" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0232347306 AS "Score" UNION ALL SELECT 3 AS nid, 0.037921533 AS "Score" UNION ALL SELECT 4 AS nid, -0.00783847086 AS "Score") AS "Values"),
"DT_Output_118" AS
(SELECT "DT_node_lookup_118"."KEY" AS "KEY", "DT_node_lookup_118".node_id_2 AS node_id_2, "DT_node_data_118".nid AS nid, "DT_node_data_118"."Score" AS "Score"
FROM "DT_node_lookup_118" LEFT OUTER JOIN "DT_node_data_118" ON "DT_node_lookup_118".node_id_2 = "DT_node_data_118".nid),
"XGB_Model_1_39" AS
(SELECT "DT_Output_118"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_118"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_118"),
"DT_node_lookup_119" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_119" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0324716717 AS "Score" UNION ALL SELECT 3 AS nid, 0.0279072262 AS "Score" UNION ALL SELECT 4 AS nid, -0.0479740165 AS "Score") AS "Values"),
"DT_Output_119" AS
(SELECT "DT_node_lookup_119"."KEY" AS "KEY", "DT_node_lookup_119".node_id_2 AS node_id_2, "DT_node_data_119".nid AS nid, "DT_node_data_119"."Score" AS "Score"
FROM "DT_node_lookup_119" LEFT OUTER JOIN "DT_node_data_119" ON "DT_node_lookup_119".node_id_2 = "DT_node_data_119".nid),
"XGB_Model_2_39" AS
(SELECT "DT_Output_119"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_119"."Score" AS "Score_virginica"
FROM "DT_Output_119")
SELECT "XGB_esu_11"."KEY", "XGB_esu_11"."Score_setosa", "XGB_esu_11"."Score_versicolor", "XGB_esu_11"."Score_virginica"
FROM (SELECT "XGB_Model_2_36"."KEY" AS "KEY", CAST("XGB_Model_2_36"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_36"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_36"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_36" UNION ALL SELECT "XGB_Model_0_37"."KEY" AS "KEY", CAST("XGB_Model_0_37"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_37"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_37"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_37" UNION ALL SELECT "XGB_Model_1_37"."KEY" AS "KEY", CAST("XGB_Model_1_37"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_37"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_37"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_37" UNION ALL SELECT "XGB_Model_2_37"."KEY" AS "KEY", CAST("XGB_Model_2_37"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_37"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_37"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_37" UNION ALL SELECT "XGB_Model_0_38"."KEY" AS "KEY", CAST("XGB_Model_0_38"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_38"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_38"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_38" UNION ALL SELECT "XGB_Model_1_38"."KEY" AS "KEY", CAST("XGB_Model_1_38"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_38"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_38"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_38" UNION ALL SELECT "XGB_Model_2_38"."KEY" AS "KEY", CAST("XGB_Model_2_38"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_38"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_38"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_38" UNION ALL SELECT "XGB_Model_0_39"."KEY" AS "KEY", CAST("XGB_Model_0_39"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_39"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_39"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_39" UNION ALL SELECT "XGB_Model_1_39"."KEY" AS "KEY", CAST("XGB_Model_1_39"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_39"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_39"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_39" UNION ALL SELECT "XGB_Model_2_39"."KEY" AS "KEY", CAST("XGB_Model_2_39"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_39"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_39"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_39") AS "XGB_esu_11"),
"XGB_12" AS
(WITH "DT_node_lookup_120" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_120" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00835893024 AS "Score") AS "Values"),
"DT_Output_120" AS
(SELECT "DT_node_lookup_120"."KEY" AS "KEY", "DT_node_lookup_120".node_id_2 AS node_id_2, "DT_node_data_120".nid AS nid, "DT_node_data_120"."Score" AS "Score"
FROM "DT_node_lookup_120" LEFT OUTER JOIN "DT_node_data_120" ON "DT_node_lookup_120".node_id_2 = "DT_node_data_120".nid),
"XGB_Model_0_40" AS
(SELECT "DT_Output_120"."KEY" AS "KEY", "DT_Output_120"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_120"),
"DT_node_lookup_121" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 5 ELSE 6 END ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_121" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.021529587 AS "Score" UNION ALL SELECT 4 AS nid, 0.0313792825 AS "Score" UNION ALL SELECT 5 AS nid, 0.019653881 AS "Score" UNION ALL SELECT 6 AS nid, -0.0285808612 AS "Score") AS "Values"),
"DT_Output_121" AS
(SELECT "DT_node_lookup_121"."KEY" AS "KEY", "DT_node_lookup_121".node_id_2 AS node_id_2, "DT_node_data_121".nid AS nid, "DT_node_data_121"."Score" AS "Score"
FROM "DT_node_lookup_121" LEFT OUTER JOIN "DT_node_data_121" ON "DT_node_lookup_121".node_id_2 = "DT_node_data_121".nid),
"XGB_Model_1_40" AS
(SELECT "DT_Output_121"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_121"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_121"),
"DT_node_lookup_122" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_122" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0279384684 AS "Score" UNION ALL SELECT 3 AS nid, -0.010515756 AS "Score" UNION ALL SELECT 4 AS nid, 0.0371361896 AS "Score") AS "Values"),
"DT_Output_122" AS
(SELECT "DT_node_lookup_122"."KEY" AS "KEY", "DT_node_lookup_122".node_id_2 AS node_id_2, "DT_node_data_122".nid AS nid, "DT_node_data_122"."Score" AS "Score"
FROM "DT_node_lookup_122" LEFT OUTER JOIN "DT_node_data_122" ON "DT_node_lookup_122".node_id_2 = "DT_node_data_122".nid),
"XGB_Model_2_40" AS
(SELECT "DT_Output_122"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_122"."Score" AS "Score_virginica"
FROM "DT_Output_122"),
"DT_node_lookup_123" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_123" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00840136502 AS "Score") AS "Values"),
"DT_Output_123" AS
(SELECT "DT_node_lookup_123"."KEY" AS "KEY", "DT_node_lookup_123".node_id_2 AS node_id_2, "DT_node_data_123".nid AS nid, "DT_node_data_123"."Score" AS "Score"
FROM "DT_node_lookup_123" LEFT OUTER JOIN "DT_node_data_123" ON "DT_node_lookup_123".node_id_2 = "DT_node_data_123".nid),
"XGB_Model_0_41" AS
(SELECT "DT_Output_123"."KEY" AS "KEY", "DT_Output_123"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_123"),
"DT_node_lookup_124" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.54999995) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_124" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0203047842 AS "Score" UNION ALL SELECT 3 AS nid, -0.0143855372 AS "Score" UNION ALL SELECT 4 AS nid, 0.0256494451 AS "Score") AS "Values"),
"DT_Output_124" AS
(SELECT "DT_node_lookup_124"."KEY" AS "KEY", "DT_node_lookup_124".node_id_2 AS node_id_2, "DT_node_data_124".nid AS nid, "DT_node_data_124"."Score" AS "Score"
FROM "DT_node_lookup_124" LEFT OUTER JOIN "DT_node_data_124" ON "DT_node_lookup_124".node_id_2 = "DT_node_data_124".nid),
"XGB_Model_1_41" AS
(SELECT "DT_Output_124"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_124"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_124"),
"DT_node_lookup_125" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_125" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0319788605 AS "Score" UNION ALL SELECT 3 AS nid, -0.0502205491 AS "Score" UNION ALL SELECT 4 AS nid, 0.0249311812 AS "Score") AS "Values"),
"DT_Output_125" AS
(SELECT "DT_node_lookup_125"."KEY" AS "KEY", "DT_node_lookup_125".node_id_2 AS node_id_2, "DT_node_data_125".nid AS nid, "DT_node_data_125"."Score" AS "Score"
FROM "DT_node_lookup_125" LEFT OUTER JOIN "DT_node_data_125" ON "DT_node_lookup_125".node_id_2 = "DT_node_data_125".nid),
"XGB_Model_2_41" AS
(SELECT "DT_Output_125"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_125"."Score" AS "Score_virginica"
FROM "DT_Output_125"),
"DT_node_lookup_126" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_126" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00687223906 AS "Score") AS "Values"),
"DT_Output_126" AS
(SELECT "DT_node_lookup_126"."KEY" AS "KEY", "DT_node_lookup_126".node_id_2 AS node_id_2, "DT_node_data_126".nid AS nid, "DT_node_data_126"."Score" AS "Score"
FROM "DT_node_lookup_126" LEFT OUTER JOIN "DT_node_data_126" ON "DT_node_lookup_126".node_id_2 = "DT_node_data_126".nid),
"XGB_Model_0_42" AS
(SELECT "DT_Output_126"."KEY" AS "KEY", "DT_Output_126"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_126"),
"DT_node_lookup_127" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_127" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0226905383 AS "Score" UNION ALL SELECT 3 AS nid, -0.0149216969 AS "Score" UNION ALL SELECT 5 AS nid, 0.059653528 AS "Score" UNION ALL SELECT 6 AS nid, -0.0128443902 AS "Score") AS "Values"),
"DT_Output_127" AS
(SELECT "DT_node_lookup_127"."KEY" AS "KEY", "DT_node_lookup_127".node_id_2 AS node_id_2, "DT_node_data_127".nid AS nid, "DT_node_data_127"."Score" AS "Score"
FROM "DT_node_lookup_127" LEFT OUTER JOIN "DT_node_data_127" ON "DT_node_lookup_127".node_id_2 = "DT_node_data_127".nid),
"XGB_Model_1_42" AS
(SELECT "DT_Output_127"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_127"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_127"),
"DT_node_lookup_128" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_128" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0326789171 AS "Score" UNION ALL SELECT 3 AS nid, -0.0362138897 AS "Score" UNION ALL SELECT 4 AS nid, 0.00485318247 AS "Score") AS "Values"),
"DT_Output_128" AS
(SELECT "DT_node_lookup_128"."KEY" AS "KEY", "DT_node_lookup_128".node_id_2 AS node_id_2, "DT_node_data_128".nid AS nid, "DT_node_data_128"."Score" AS "Score"
FROM "DT_node_lookup_128" LEFT OUTER JOIN "DT_node_data_128" ON "DT_node_lookup_128".node_id_2 = "DT_node_data_128".nid),
"XGB_Model_2_42" AS
(SELECT "DT_Output_128"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_128"."Score" AS "Score_virginica"
FROM "DT_Output_128"),
"DT_node_lookup_129" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_129" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00738066668 AS "Score") AS "Values"),
"DT_Output_129" AS
(SELECT "DT_node_lookup_129"."KEY" AS "KEY", "DT_node_lookup_129".node_id_2 AS node_id_2, "DT_node_data_129".nid AS nid, "DT_node_data_129"."Score" AS "Score"
FROM "DT_node_lookup_129" LEFT OUTER JOIN "DT_node_data_129" ON "DT_node_lookup_129".node_id_2 = "DT_node_data_129".nid),
"XGB_Model_0_43" AS
(SELECT "DT_Output_129"."KEY" AS "KEY", "DT_Output_129"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_129")
SELECT "XGB_esu_12"."KEY", "XGB_esu_12"."Score_setosa", "XGB_esu_12"."Score_versicolor", "XGB_esu_12"."Score_virginica"
FROM (SELECT "XGB_Model_0_40"."KEY" AS "KEY", CAST("XGB_Model_0_40"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_40"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_40"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_40" UNION ALL SELECT "XGB_Model_1_40"."KEY" AS "KEY", CAST("XGB_Model_1_40"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_40"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_40"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_40" UNION ALL SELECT "XGB_Model_2_40"."KEY" AS "KEY", CAST("XGB_Model_2_40"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_40"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_40"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_40" UNION ALL SELECT "XGB_Model_0_41"."KEY" AS "KEY", CAST("XGB_Model_0_41"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_41"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_41"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_41" UNION ALL SELECT "XGB_Model_1_41"."KEY" AS "KEY", CAST("XGB_Model_1_41"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_41"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_41"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_41" UNION ALL SELECT "XGB_Model_2_41"."KEY" AS "KEY", CAST("XGB_Model_2_41"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_41"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_41"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_41" UNION ALL SELECT "XGB_Model_0_42"."KEY" AS "KEY", CAST("XGB_Model_0_42"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_42"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_42"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_42" UNION ALL SELECT "XGB_Model_1_42"."KEY" AS "KEY", CAST("XGB_Model_1_42"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_42"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_42"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_42" UNION ALL SELECT "XGB_Model_2_42"."KEY" AS "KEY", CAST("XGB_Model_2_42"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_42"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_42"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_42" UNION ALL SELECT "XGB_Model_0_43"."KEY" AS "KEY", CAST("XGB_Model_0_43"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_43"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_43"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_43") AS "XGB_esu_12"),
"XGB_13" AS
(WITH "DT_node_lookup_130" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 5 ELSE 6 END END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_130" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0220483802 AS "Score" UNION ALL SELECT 3 AS nid, -0.0146755474 AS "Score" UNION ALL SELECT 5 AS nid, 0.0558080487 AS "Score" UNION ALL SELECT 6 AS nid, -0.0095294062 AS "Score") AS "Values"),
"DT_Output_130" AS
(SELECT "DT_node_lookup_130"."KEY" AS "KEY", "DT_node_lookup_130".node_id_2 AS node_id_2, "DT_node_data_130".nid AS nid, "DT_node_data_130"."Score" AS "Score"
FROM "DT_node_lookup_130" LEFT OUTER JOIN "DT_node_data_130" ON "DT_node_lookup_130".node_id_2 = "DT_node_data_130".nid),
"XGB_Model_1_43" AS
(SELECT "DT_Output_130"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_130"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_130"),
"DT_node_lookup_131" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_131" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0336857103 AS "Score" UNION ALL SELECT 3 AS nid, 0.0291546807 AS "Score" UNION ALL SELECT 4 AS nid, -0.059274856 AS "Score") AS "Values"),
"DT_Output_131" AS
(SELECT "DT_node_lookup_131"."KEY" AS "KEY", "DT_node_lookup_131".node_id_2 AS node_id_2, "DT_node_data_131".nid AS nid, "DT_node_data_131"."Score" AS "Score"
FROM "DT_node_lookup_131" LEFT OUTER JOIN "DT_node_data_131" ON "DT_node_lookup_131".node_id_2 = "DT_node_data_131".nid),
"XGB_Model_2_43" AS
(SELECT "DT_Output_131"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_131"."Score" AS "Score_virginica"
FROM "DT_Output_131"),
"DT_node_lookup_132" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_132" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00788490009 AS "Score") AS "Values"),
"DT_Output_132" AS
(SELECT "DT_node_lookup_132"."KEY" AS "KEY", "DT_node_lookup_132".node_id_2 AS node_id_2, "DT_node_data_132".nid AS nid, "DT_node_data_132"."Score" AS "Score"
FROM "DT_node_lookup_132" LEFT OUTER JOIN "DT_node_data_132" ON "DT_node_lookup_132".node_id_2 = "DT_node_data_132".nid),
"XGB_Model_0_44" AS
(SELECT "DT_Output_132"."KEY" AS "KEY", "DT_Output_132"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_132"),
"DT_node_lookup_133" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.44999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_133" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.02385813 AS "Score" UNION ALL SELECT 3 AS nid, 0.0286018085 AS "Score" UNION ALL SELECT 4 AS nid, -0.0204650033 AS "Score") AS "Values"),
"DT_Output_133" AS
(SELECT "DT_node_lookup_133"."KEY" AS "KEY", "DT_node_lookup_133".node_id_2 AS node_id_2, "DT_node_data_133".nid AS nid, "DT_node_data_133"."Score" AS "Score"
FROM "DT_node_lookup_133" LEFT OUTER JOIN "DT_node_data_133" ON "DT_node_lookup_133".node_id_2 = "DT_node_data_133".nid),
"XGB_Model_1_44" AS
(SELECT "DT_Output_133"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_133"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_133"),
"DT_node_lookup_134" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_134" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0224035457 AS "Score" UNION ALL SELECT 2 AS nid, 0.0269406084 AS "Score") AS "Values"),
"DT_Output_134" AS
(SELECT "DT_node_lookup_134"."KEY" AS "KEY", "DT_node_lookup_134".node_id_2 AS node_id_2, "DT_node_data_134".nid AS nid, "DT_node_data_134"."Score" AS "Score"
FROM "DT_node_lookup_134" LEFT OUTER JOIN "DT_node_data_134" ON "DT_node_lookup_134".node_id_2 = "DT_node_data_134".nid),
"XGB_Model_2_44" AS
(SELECT "DT_Output_134"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_134"."Score" AS "Score_virginica"
FROM "DT_Output_134"),
"DT_node_lookup_135" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_135" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00724415947 AS "Score") AS "Values"),
"DT_Output_135" AS
(SELECT "DT_node_lookup_135"."KEY" AS "KEY", "DT_node_lookup_135".node_id_2 AS node_id_2, "DT_node_data_135".nid AS nid, "DT_node_data_135"."Score" AS "Score"
FROM "DT_node_lookup_135" LEFT OUTER JOIN "DT_node_data_135" ON "DT_node_lookup_135".node_id_2 = "DT_node_data_135".nid),
"XGB_Model_0_45" AS
(SELECT "DT_Output_135"."KEY" AS "KEY", "DT_Output_135"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_135"),
"DT_node_lookup_136" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_136" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0195767097 AS "Score" UNION ALL SELECT 3 AS nid, 0.0351652093 AS "Score" UNION ALL SELECT 4 AS nid, -0.0108796507 AS "Score") AS "Values"),
"DT_Output_136" AS
(SELECT "DT_node_lookup_136"."KEY" AS "KEY", "DT_node_lookup_136".node_id_2 AS node_id_2, "DT_node_data_136".nid AS nid, "DT_node_data_136"."Score" AS "Score"
FROM "DT_node_lookup_136" LEFT OUTER JOIN "DT_node_data_136" ON "DT_node_lookup_136".node_id_2 = "DT_node_data_136".nid),
"XGB_Model_1_45" AS
(SELECT "DT_Output_136"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_136"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_136"),
"DT_node_lookup_137" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_137" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0211219937 AS "Score" UNION ALL SELECT 2 AS nid, 0.0262675695 AS "Score") AS "Values"),
"DT_Output_137" AS
(SELECT "DT_node_lookup_137"."KEY" AS "KEY", "DT_node_lookup_137".node_id_2 AS node_id_2, "DT_node_data_137".nid AS nid, "DT_node_data_137"."Score" AS "Score"
FROM "DT_node_lookup_137" LEFT OUTER JOIN "DT_node_data_137" ON "DT_node_lookup_137".node_id_2 = "DT_node_data_137".nid),
"XGB_Model_2_45" AS
(SELECT "DT_Output_137"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_137"."Score" AS "Score_virginica"
FROM "DT_Output_137"),
"DT_node_lookup_138" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_138" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00729489699 AS "Score") AS "Values"),
"DT_Output_138" AS
(SELECT "DT_node_lookup_138"."KEY" AS "KEY", "DT_node_lookup_138".node_id_2 AS node_id_2, "DT_node_data_138".nid AS nid, "DT_node_data_138"."Score" AS "Score"
FROM "DT_node_lookup_138" LEFT OUTER JOIN "DT_node_data_138" ON "DT_node_lookup_138".node_id_2 = "DT_node_data_138".nid),
"XGB_Model_0_46" AS
(SELECT "DT_Output_138"."KEY" AS "KEY", "DT_Output_138"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_138"),
"DT_node_lookup_139" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_0" < 5.55000019) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_139" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0224038325 AS "Score" UNION ALL SELECT 3 AS nid, -0.0187364873 AS "Score" UNION ALL SELECT 4 AS nid, 0.0281176493 AS "Score") AS "Values"),
"DT_Output_139" AS
(SELECT "DT_node_lookup_139"."KEY" AS "KEY", "DT_node_lookup_139".node_id_2 AS node_id_2, "DT_node_data_139".nid AS nid, "DT_node_data_139"."Score" AS "Score"
FROM "DT_node_lookup_139" LEFT OUTER JOIN "DT_node_data_139" ON "DT_node_lookup_139".node_id_2 = "DT_node_data_139".nid),
"XGB_Model_1_46" AS
(SELECT "DT_Output_139"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_139"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_139")
SELECT "XGB_esu_13"."KEY", "XGB_esu_13"."Score_setosa", "XGB_esu_13"."Score_versicolor", "XGB_esu_13"."Score_virginica"
FROM (SELECT "XGB_Model_1_43"."KEY" AS "KEY", CAST("XGB_Model_1_43"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_43"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_43"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_43" UNION ALL SELECT "XGB_Model_2_43"."KEY" AS "KEY", CAST("XGB_Model_2_43"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_43"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_43"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_43" UNION ALL SELECT "XGB_Model_0_44"."KEY" AS "KEY", CAST("XGB_Model_0_44"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_44"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_44"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_44" UNION ALL SELECT "XGB_Model_1_44"."KEY" AS "KEY", CAST("XGB_Model_1_44"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_44"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_44"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_44" UNION ALL SELECT "XGB_Model_2_44"."KEY" AS "KEY", CAST("XGB_Model_2_44"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_44"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_44"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_44" UNION ALL SELECT "XGB_Model_0_45"."KEY" AS "KEY", CAST("XGB_Model_0_45"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_45"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_45"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_45" UNION ALL SELECT "XGB_Model_1_45"."KEY" AS "KEY", CAST("XGB_Model_1_45"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_45"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_45"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_45" UNION ALL SELECT "XGB_Model_2_45"."KEY" AS "KEY", CAST("XGB_Model_2_45"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_45"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_45"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_45" UNION ALL SELECT "XGB_Model_0_46"."KEY" AS "KEY", CAST("XGB_Model_0_46"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_46"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_46"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_46" UNION ALL SELECT "XGB_Model_1_46"."KEY" AS "KEY", CAST("XGB_Model_1_46"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_46"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_46"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_46") AS "XGB_esu_13"),
"XGB_14" AS
(WITH "DT_node_lookup_140" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_140" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0314749628 AS "Score" UNION ALL SELECT 3 AS nid, 0.0227480922 AS "Score" UNION ALL SELECT 4 AS nid, -0.0422191471 AS "Score") AS "Values"),
"DT_Output_140" AS
(SELECT "DT_node_lookup_140"."KEY" AS "KEY", "DT_node_lookup_140".node_id_2 AS node_id_2, "DT_node_data_140".nid AS nid, "DT_node_data_140"."Score" AS "Score"
FROM "DT_node_lookup_140" LEFT OUTER JOIN "DT_node_data_140" ON "DT_node_lookup_140".node_id_2 = "DT_node_data_140".nid),
"XGB_Model_2_46" AS
(SELECT "DT_Output_140"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_140"."Score" AS "Score_virginica"
FROM "DT_Output_140"),
"DT_node_lookup_141" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_141" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0070170383 AS "Score") AS "Values"),
"DT_Output_141" AS
(SELECT "DT_node_lookup_141"."KEY" AS "KEY", "DT_node_lookup_141".node_id_2 AS node_id_2, "DT_node_data_141".nid AS nid, "DT_node_data_141"."Score" AS "Score"
FROM "DT_node_lookup_141" LEFT OUTER JOIN "DT_node_data_141" ON "DT_node_lookup_141".node_id_2 = "DT_node_data_141".nid),
"XGB_Model_0_47" AS
(SELECT "DT_Output_141"."KEY" AS "KEY", "DT_Output_141"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_141"),
"DT_node_lookup_142" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 5.05000019) THEN CASE WHEN ("ADS"."Feature_0" < 5.75) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_142" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.020607587 AS "Score" UNION ALL SELECT 3 AS nid, -0.0156736728 AS "Score" UNION ALL SELECT 4 AS nid, 0.0264031086 AS "Score") AS "Values"),
"DT_Output_142" AS
(SELECT "DT_node_lookup_142"."KEY" AS "KEY", "DT_node_lookup_142".node_id_2 AS node_id_2, "DT_node_data_142".nid AS nid, "DT_node_data_142"."Score" AS "Score"
FROM "DT_node_lookup_142" LEFT OUTER JOIN "DT_node_data_142" ON "DT_node_lookup_142".node_id_2 = "DT_node_data_142".nid),
"XGB_Model_1_47" AS
(SELECT "DT_Output_142"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_142"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_142"),
"DT_node_lookup_143" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_143" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0285282731 AS "Score" UNION ALL SELECT 2 AS nid, 0.0226429328 AS "Score") AS "Values"),
"DT_Output_143" AS
(SELECT "DT_node_lookup_143"."KEY" AS "KEY", "DT_node_lookup_143".node_id_2 AS node_id_2, "DT_node_data_143".nid AS nid, "DT_node_data_143"."Score" AS "Score"
FROM "DT_node_lookup_143" LEFT OUTER JOIN "DT_node_data_143" ON "DT_node_lookup_143".node_id_2 = "DT_node_data_143".nid),
"XGB_Model_2_47" AS
(SELECT "DT_Output_143"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_143"."Score" AS "Score_virginica"
FROM "DT_Output_143"),
"DT_node_lookup_144" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_144" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00695907138 AS "Score") AS "Values"),
"DT_Output_144" AS
(SELECT "DT_node_lookup_144"."KEY" AS "KEY", "DT_node_lookup_144".node_id_2 AS node_id_2, "DT_node_data_144".nid AS nid, "DT_node_data_144"."Score" AS "Score"
FROM "DT_node_lookup_144" LEFT OUTER JOIN "DT_node_data_144" ON "DT_node_lookup_144".node_id_2 = "DT_node_data_144".nid),
"XGB_Model_0_48" AS
(SELECT "DT_Output_144"."KEY" AS "KEY", "DT_Output_144"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_144"),
"DT_node_lookup_145" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.54999995) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_145" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0178022943 AS "Score" UNION ALL SELECT 3 AS nid, -0.0167261865 AS "Score" UNION ALL SELECT 4 AS nid, 0.0268989503 AS "Score") AS "Values"),
"DT_Output_145" AS
(SELECT "DT_node_lookup_145"."KEY" AS "KEY", "DT_node_lookup_145".node_id_2 AS node_id_2, "DT_node_data_145".nid AS nid, "DT_node_data_145"."Score" AS "Score"
FROM "DT_node_lookup_145" LEFT OUTER JOIN "DT_node_data_145" ON "DT_node_lookup_145".node_id_2 = "DT_node_data_145".nid),
"XGB_Model_1_48" AS
(SELECT "DT_Output_145"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_145"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_145"),
"DT_node_lookup_146" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_146" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0264765136 AS "Score" UNION ALL SELECT 3 AS nid, -0.0104803573 AS "Score" UNION ALL SELECT 4 AS nid, 0.0378091261 AS "Score") AS "Values"),
"DT_Output_146" AS
(SELECT "DT_node_lookup_146"."KEY" AS "KEY", "DT_node_lookup_146".node_id_2 AS node_id_2, "DT_node_data_146".nid AS nid, "DT_node_data_146"."Score" AS "Score"
FROM "DT_node_lookup_146" LEFT OUTER JOIN "DT_node_data_146" ON "DT_node_lookup_146".node_id_2 = "DT_node_data_146".nid),
"XGB_Model_2_48" AS
(SELECT "DT_Output_146"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_146"."Score" AS "Score_virginica"
FROM "DT_Output_146"),
"DT_node_lookup_147" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_147" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00728025055 AS "Score") AS "Values"),
"DT_Output_147" AS
(SELECT "DT_node_lookup_147"."KEY" AS "KEY", "DT_node_lookup_147".node_id_2 AS node_id_2, "DT_node_data_147".nid AS nid, "DT_node_data_147"."Score" AS "Score"
FROM "DT_node_lookup_147" LEFT OUTER JOIN "DT_node_data_147" ON "DT_node_lookup_147".node_id_2 = "DT_node_data_147".nid),
"XGB_Model_0_49" AS
(SELECT "DT_Output_147"."KEY" AS "KEY", "DT_Output_147"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_147"),
"DT_node_lookup_148" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_0" < 5.55000019) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_148" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0213688463 AS "Score" UNION ALL SELECT 3 AS nid, -0.0168650467 AS "Score" UNION ALL SELECT 4 AS nid, 0.025422236 AS "Score") AS "Values"),
"DT_Output_148" AS
(SELECT "DT_node_lookup_148"."KEY" AS "KEY", "DT_node_lookup_148".node_id_2 AS node_id_2, "DT_node_data_148".nid AS nid, "DT_node_data_148"."Score" AS "Score"
FROM "DT_node_lookup_148" LEFT OUTER JOIN "DT_node_data_148" ON "DT_node_lookup_148".node_id_2 = "DT_node_data_148".nid),
"XGB_Model_1_49" AS
(SELECT "DT_Output_148"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_148"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_148"),
"DT_node_lookup_149" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_149" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0246098172 AS "Score" UNION ALL SELECT 3 AS nid, 0.0256184656 AS "Score" UNION ALL SELECT 4 AS nid, 0.00602770271 AS "Score") AS "Values"),
"DT_Output_149" AS
(SELECT "DT_node_lookup_149"."KEY" AS "KEY", "DT_node_lookup_149".node_id_2 AS node_id_2, "DT_node_data_149".nid AS nid, "DT_node_data_149"."Score" AS "Score"
FROM "DT_node_lookup_149" LEFT OUTER JOIN "DT_node_data_149" ON "DT_node_lookup_149".node_id_2 = "DT_node_data_149".nid),
"XGB_Model_2_49" AS
(SELECT "DT_Output_149"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_149"."Score" AS "Score_virginica"
FROM "DT_Output_149")
SELECT "XGB_esu_14"."KEY", "XGB_esu_14"."Score_setosa", "XGB_esu_14"."Score_versicolor", "XGB_esu_14"."Score_virginica"
FROM (SELECT "XGB_Model_2_46"."KEY" AS "KEY", CAST("XGB_Model_2_46"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_46"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_46"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_46" UNION ALL SELECT "XGB_Model_0_47"."KEY" AS "KEY", CAST("XGB_Model_0_47"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_47"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_47"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_47" UNION ALL SELECT "XGB_Model_1_47"."KEY" AS "KEY", CAST("XGB_Model_1_47"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_47"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_47"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_47" UNION ALL SELECT "XGB_Model_2_47"."KEY" AS "KEY", CAST("XGB_Model_2_47"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_47"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_47"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_47" UNION ALL SELECT "XGB_Model_0_48"."KEY" AS "KEY", CAST("XGB_Model_0_48"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_48"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_48"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_48" UNION ALL SELECT "XGB_Model_1_48"."KEY" AS "KEY", CAST("XGB_Model_1_48"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_48"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_48"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_48" UNION ALL SELECT "XGB_Model_2_48"."KEY" AS "KEY", CAST("XGB_Model_2_48"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_48"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_48"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_48" UNION ALL SELECT "XGB_Model_0_49"."KEY" AS "KEY", CAST("XGB_Model_0_49"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_49"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_49"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_49" UNION ALL SELECT "XGB_Model_1_49"."KEY" AS "KEY", CAST("XGB_Model_1_49"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_49"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_49"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_49" UNION ALL SELECT "XGB_Model_2_49"."KEY" AS "KEY", CAST("XGB_Model_2_49"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_49"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_49"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_49") AS "XGB_esu_14"),
"XGB_15" AS
(WITH "DT_node_lookup_150" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_150" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00701172417 AS "Score") AS "Values"),
"DT_Output_150" AS
(SELECT "DT_node_lookup_150"."KEY" AS "KEY", "DT_node_lookup_150".node_id_2 AS node_id_2, "DT_node_data_150".nid AS nid, "DT_node_data_150"."Score" AS "Score"
FROM "DT_node_lookup_150" LEFT OUTER JOIN "DT_node_data_150" ON "DT_node_lookup_150".node_id_2 = "DT_node_data_150".nid),
"XGB_Model_0_50" AS
(SELECT "DT_Output_150"."KEY" AS "KEY", "DT_Output_150"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_150"),
"DT_node_lookup_151" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_151" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0178235248 AS "Score" UNION ALL SELECT 3 AS nid, -0.0427788384 AS "Score" UNION ALL SELECT 4 AS nid, 0.0189994946 AS "Score") AS "Values"),
"DT_Output_151" AS
(SELECT "DT_node_lookup_151"."KEY" AS "KEY", "DT_node_lookup_151".node_id_2 AS node_id_2, "DT_node_data_151".nid AS nid, "DT_node_data_151"."Score" AS "Score"
FROM "DT_node_lookup_151" LEFT OUTER JOIN "DT_node_data_151" ON "DT_node_lookup_151".node_id_2 = "DT_node_data_151".nid),
"XGB_Model_1_50" AS
(SELECT "DT_Output_151"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_151"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_151"),
"DT_node_lookup_152" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_152" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0184207112 AS "Score" UNION ALL SELECT 2 AS nid, 0.024525864 AS "Score") AS "Values"),
"DT_Output_152" AS
(SELECT "DT_node_lookup_152"."KEY" AS "KEY", "DT_node_lookup_152".node_id_2 AS node_id_2, "DT_node_data_152".nid AS nid, "DT_node_data_152"."Score" AS "Score"
FROM "DT_node_lookup_152" LEFT OUTER JOIN "DT_node_data_152" ON "DT_node_lookup_152".node_id_2 = "DT_node_data_152".nid),
"XGB_Model_2_50" AS
(SELECT "DT_Output_152"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_152"."Score" AS "Score_virginica"
FROM "DT_Output_152"),
"DT_node_lookup_153" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_153" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00778238941 AS "Score") AS "Values"),
"DT_Output_153" AS
(SELECT "DT_node_lookup_153"."KEY" AS "KEY", "DT_node_lookup_153".node_id_2 AS node_id_2, "DT_node_data_153".nid AS nid, "DT_node_data_153"."Score" AS "Score"
FROM "DT_node_lookup_153" LEFT OUTER JOIN "DT_node_data_153" ON "DT_node_lookup_153".node_id_2 = "DT_node_data_153".nid),
"XGB_Model_0_51" AS
(SELECT "DT_Output_153"."KEY" AS "KEY", "DT_Output_153"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_153"),
"DT_node_lookup_154" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_154" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.0221280977 AS "Score" UNION ALL SELECT 3 AS nid, 0.0244880971 AS "Score" UNION ALL SELECT 4 AS nid, -0.00869838428 AS "Score") AS "Values"),
"DT_Output_154" AS
(SELECT "DT_node_lookup_154"."KEY" AS "KEY", "DT_node_lookup_154".node_id_2 AS node_id_2, "DT_node_data_154".nid AS nid, "DT_node_data_154"."Score" AS "Score"
FROM "DT_node_lookup_154" LEFT OUTER JOIN "DT_node_data_154" ON "DT_node_lookup_154".node_id_2 = "DT_node_data_154".nid),
"XGB_Model_1_51" AS
(SELECT "DT_Output_154"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_154"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_154"),
"DT_node_lookup_155" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_155" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0305990018 AS "Score" UNION ALL SELECT 3 AS nid, 0.0245389305 AS "Score" UNION ALL SELECT 4 AS nid, -0.0408771113 AS "Score") AS "Values"),
"DT_Output_155" AS
(SELECT "DT_node_lookup_155"."KEY" AS "KEY", "DT_node_lookup_155".node_id_2 AS node_id_2, "DT_node_data_155".nid AS nid, "DT_node_data_155"."Score" AS "Score"
FROM "DT_node_lookup_155" LEFT OUTER JOIN "DT_node_data_155" ON "DT_node_lookup_155".node_id_2 = "DT_node_data_155".nid),
"XGB_Model_2_51" AS
(SELECT "DT_Output_155"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_155"."Score" AS "Score_virginica"
FROM "DT_Output_155"),
"DT_node_lookup_156" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_156" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00720711052 AS "Score") AS "Values"),
"DT_Output_156" AS
(SELECT "DT_node_lookup_156"."KEY" AS "KEY", "DT_node_lookup_156".node_id_2 AS node_id_2, "DT_node_data_156".nid AS nid, "DT_node_data_156"."Score" AS "Score"
FROM "DT_node_lookup_156" LEFT OUTER JOIN "DT_node_data_156" ON "DT_node_lookup_156".node_id_2 = "DT_node_data_156".nid),
"XGB_Model_0_52" AS
(SELECT "DT_Output_156"."KEY" AS "KEY", "DT_Output_156"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_156"),
"DT_node_lookup_157" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_157" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.015086486 AS "Score" UNION ALL SELECT 3 AS nid, -0.0408039689 AS "Score" UNION ALL SELECT 4 AS nid, 0.019849278 AS "Score") AS "Values"),
"DT_Output_157" AS
(SELECT "DT_node_lookup_157"."KEY" AS "KEY", "DT_node_lookup_157".node_id_2 AS node_id_2, "DT_node_data_157".nid AS nid, "DT_node_data_157"."Score" AS "Score"
FROM "DT_node_lookup_157" LEFT OUTER JOIN "DT_node_data_157" ON "DT_node_lookup_157".node_id_2 = "DT_node_data_157".nid),
"XGB_Model_1_52" AS
(SELECT "DT_Output_157"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_157"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_157"),
"DT_node_lookup_158" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_158" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0232871938 AS "Score" UNION ALL SELECT 3 AS nid, 0.0254928246 AS "Score" UNION ALL SELECT 4 AS nid, 0.00624388736 AS "Score") AS "Values"),
"DT_Output_158" AS
(SELECT "DT_node_lookup_158"."KEY" AS "KEY", "DT_node_lookup_158".node_id_2 AS node_id_2, "DT_node_data_158".nid AS nid, "DT_node_data_158"."Score" AS "Score"
FROM "DT_node_lookup_158" LEFT OUTER JOIN "DT_node_data_158" ON "DT_node_lookup_158".node_id_2 = "DT_node_data_158".nid),
"XGB_Model_2_52" AS
(SELECT "DT_Output_158"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_158"."Score" AS "Score_virginica"
FROM "DT_Output_158"),
"DT_node_lookup_159" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_159" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.008138909 AS "Score") AS "Values"),
"DT_Output_159" AS
(SELECT "DT_node_lookup_159"."KEY" AS "KEY", "DT_node_lookup_159".node_id_2 AS node_id_2, "DT_node_data_159".nid AS nid, "DT_node_data_159"."Score" AS "Score"
FROM "DT_node_lookup_159" LEFT OUTER JOIN "DT_node_data_159" ON "DT_node_lookup_159".node_id_2 = "DT_node_data_159".nid),
"XGB_Model_0_53" AS
(SELECT "DT_Output_159"."KEY" AS "KEY", "DT_Output_159"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_159")
SELECT "XGB_esu_15"."KEY", "XGB_esu_15"."Score_setosa", "XGB_esu_15"."Score_versicolor", "XGB_esu_15"."Score_virginica"
FROM (SELECT "XGB_Model_0_50"."KEY" AS "KEY", CAST("XGB_Model_0_50"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_50"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_50"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_50" UNION ALL SELECT "XGB_Model_1_50"."KEY" AS "KEY", CAST("XGB_Model_1_50"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_50"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_50"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_50" UNION ALL SELECT "XGB_Model_2_50"."KEY" AS "KEY", CAST("XGB_Model_2_50"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_50"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_50"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_50" UNION ALL SELECT "XGB_Model_0_51"."KEY" AS "KEY", CAST("XGB_Model_0_51"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_51"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_51"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_51" UNION ALL SELECT "XGB_Model_1_51"."KEY" AS "KEY", CAST("XGB_Model_1_51"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_51"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_51"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_51" UNION ALL SELECT "XGB_Model_2_51"."KEY" AS "KEY", CAST("XGB_Model_2_51"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_51"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_51"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_51" UNION ALL SELECT "XGB_Model_0_52"."KEY" AS "KEY", CAST("XGB_Model_0_52"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_52"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_52"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_52" UNION ALL SELECT "XGB_Model_1_52"."KEY" AS "KEY", CAST("XGB_Model_1_52"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_52"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_52"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_52" UNION ALL SELECT "XGB_Model_2_52"."KEY" AS "KEY", CAST("XGB_Model_2_52"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_52"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_52"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_52" UNION ALL SELECT "XGB_Model_0_53"."KEY" AS "KEY", CAST("XGB_Model_0_53"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_53"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_53"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_53") AS "XGB_esu_15"),
"XGB_16" AS
(WITH "DT_node_lookup_160" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_160" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, -0.019908132 AS "Score" UNION ALL SELECT 3 AS nid, 0.0217706487 AS "Score" UNION ALL SELECT 4 AS nid, -0.00792013295 AS "Score") AS "Values"),
"DT_Output_160" AS
(SELECT "DT_node_lookup_160"."KEY" AS "KEY", "DT_node_lookup_160".node_id_2 AS node_id_2, "DT_node_data_160".nid AS nid, "DT_node_data_160"."Score" AS "Score"
FROM "DT_node_lookup_160" LEFT OUTER JOIN "DT_node_data_160" ON "DT_node_lookup_160".node_id_2 = "DT_node_data_160".nid),
"XGB_Model_1_53" AS
(SELECT "DT_Output_160"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_160"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_160"),
"DT_node_lookup_161" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_161" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0159637313 AS "Score" UNION ALL SELECT 2 AS nid, 0.0235869922 AS "Score") AS "Values"),
"DT_Output_161" AS
(SELECT "DT_node_lookup_161"."KEY" AS "KEY", "DT_node_lookup_161".node_id_2 AS node_id_2, "DT_node_data_161".nid AS nid, "DT_node_data_161"."Score" AS "Score"
FROM "DT_node_lookup_161" LEFT OUTER JOIN "DT_node_data_161" ON "DT_node_lookup_161".node_id_2 = "DT_node_data_161".nid),
"XGB_Model_2_53" AS
(SELECT "DT_Output_161"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_161"."Score" AS "Score_virginica"
FROM "DT_Output_161"),
"DT_node_lookup_162" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_162" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00737558072 AS "Score") AS "Values"),
"DT_Output_162" AS
(SELECT "DT_node_lookup_162"."KEY" AS "KEY", "DT_node_lookup_162".node_id_2 AS node_id_2, "DT_node_data_162".nid AS nid, "DT_node_data_162"."Score" AS "Score"
FROM "DT_node_lookup_162" LEFT OUTER JOIN "DT_node_data_162" ON "DT_node_lookup_162".node_id_2 = "DT_node_data_162".nid),
"XGB_Model_0_54" AS
(SELECT "DT_Output_162"."KEY" AS "KEY", "DT_Output_162"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_162"),
"DT_node_lookup_163" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_163" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0216767676 AS "Score" UNION ALL SELECT 3 AS nid, 0.0317873806 AS "Score" UNION ALL SELECT 4 AS nid, -0.00788642094 AS "Score") AS "Values"),
"DT_Output_163" AS
(SELECT "DT_node_lookup_163"."KEY" AS "KEY", "DT_node_lookup_163".node_id_2 AS node_id_2, "DT_node_data_163".nid AS nid, "DT_node_data_163"."Score" AS "Score"
FROM "DT_node_lookup_163" LEFT OUTER JOIN "DT_node_data_163" ON "DT_node_lookup_163".node_id_2 = "DT_node_data_163".nid),
"XGB_Model_1_54" AS
(SELECT "DT_Output_163"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_163"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_163"),
"DT_node_lookup_164" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_164" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0273506958 AS "Score" UNION ALL SELECT 3 AS nid, -0.0275091585 AS "Score" UNION ALL SELECT 4 AS nid, 0.00618699752 AS "Score") AS "Values"),
"DT_Output_164" AS
(SELECT "DT_node_lookup_164"."KEY" AS "KEY", "DT_node_lookup_164".node_id_2 AS node_id_2, "DT_node_data_164".nid AS nid, "DT_node_data_164"."Score" AS "Score"
FROM "DT_node_lookup_164" LEFT OUTER JOIN "DT_node_data_164" ON "DT_node_lookup_164".node_id_2 = "DT_node_data_164".nid),
"XGB_Model_2_54" AS
(SELECT "DT_Output_164"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_164"."Score" AS "Score_virginica"
FROM "DT_Output_164"),
"DT_node_lookup_165" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_165" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00762268668 AS "Score") AS "Values"),
"DT_Output_165" AS
(SELECT "DT_node_lookup_165"."KEY" AS "KEY", "DT_node_lookup_165".node_id_2 AS node_id_2, "DT_node_data_165".nid AS nid, "DT_node_data_165"."Score" AS "Score"
FROM "DT_node_lookup_165" LEFT OUTER JOIN "DT_node_data_165" ON "DT_node_lookup_165".node_id_2 = "DT_node_data_165".nid),
"XGB_Model_0_55" AS
(SELECT "DT_Output_165"."KEY" AS "KEY", "DT_Output_165"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_165"),
"DT_node_lookup_166" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_166" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0209462252 AS "Score" UNION ALL SELECT 3 AS nid, 0.0302112438 AS "Score" UNION ALL SELECT 4 AS nid, -0.00647577085 AS "Score") AS "Values"),
"DT_Output_166" AS
(SELECT "DT_node_lookup_166"."KEY" AS "KEY", "DT_node_lookup_166".node_id_2 AS node_id_2, "DT_node_data_166".nid AS nid, "DT_node_data_166"."Score" AS "Score"
FROM "DT_node_lookup_166" LEFT OUTER JOIN "DT_node_data_166" ON "DT_node_lookup_166".node_id_2 = "DT_node_data_166".nid),
"XGB_Model_1_55" AS
(SELECT "DT_Output_166"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_166"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_166"),
"DT_node_lookup_167" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_167" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0310197864 AS "Score" UNION ALL SELECT 3 AS nid, -0.0412492417 AS "Score" UNION ALL SELECT 4 AS nid, 0.0241360273 AS "Score") AS "Values"),
"DT_Output_167" AS
(SELECT "DT_node_lookup_167"."KEY" AS "KEY", "DT_node_lookup_167".node_id_2 AS node_id_2, "DT_node_data_167".nid AS nid, "DT_node_data_167"."Score" AS "Score"
FROM "DT_node_lookup_167" LEFT OUTER JOIN "DT_node_data_167" ON "DT_node_lookup_167".node_id_2 = "DT_node_data_167".nid),
"XGB_Model_2_55" AS
(SELECT "DT_Output_167"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_167"."Score" AS "Score_virginica"
FROM "DT_Output_167"),
"DT_node_lookup_168" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_168" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00799198728 AS "Score") AS "Values"),
"DT_Output_168" AS
(SELECT "DT_node_lookup_168"."KEY" AS "KEY", "DT_node_lookup_168".node_id_2 AS node_id_2, "DT_node_data_168".nid AS nid, "DT_node_data_168"."Score" AS "Score"
FROM "DT_node_lookup_168" LEFT OUTER JOIN "DT_node_data_168" ON "DT_node_lookup_168".node_id_2 = "DT_node_data_168".nid),
"XGB_Model_0_56" AS
(SELECT "DT_Output_168"."KEY" AS "KEY", "DT_Output_168"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_168"),
"DT_node_lookup_169" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_169" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0184749812 AS "Score" UNION ALL SELECT 3 AS nid, -0.00318725151 AS "Score" UNION ALL SELECT 4 AS nid, 0.0210597869 AS "Score") AS "Values"),
"DT_Output_169" AS
(SELECT "DT_node_lookup_169"."KEY" AS "KEY", "DT_node_lookup_169".node_id_2 AS node_id_2, "DT_node_data_169".nid AS nid, "DT_node_data_169"."Score" AS "Score"
FROM "DT_node_lookup_169" LEFT OUTER JOIN "DT_node_data_169" ON "DT_node_lookup_169".node_id_2 = "DT_node_data_169".nid),
"XGB_Model_1_56" AS
(SELECT "DT_Output_169"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_169"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_169")
SELECT "XGB_esu_16"."KEY", "XGB_esu_16"."Score_setosa", "XGB_esu_16"."Score_versicolor", "XGB_esu_16"."Score_virginica"
FROM (SELECT "XGB_Model_1_53"."KEY" AS "KEY", CAST("XGB_Model_1_53"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_53"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_53"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_53" UNION ALL SELECT "XGB_Model_2_53"."KEY" AS "KEY", CAST("XGB_Model_2_53"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_53"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_53"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_53" UNION ALL SELECT "XGB_Model_0_54"."KEY" AS "KEY", CAST("XGB_Model_0_54"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_54"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_54"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_54" UNION ALL SELECT "XGB_Model_1_54"."KEY" AS "KEY", CAST("XGB_Model_1_54"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_54"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_54"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_54" UNION ALL SELECT "XGB_Model_2_54"."KEY" AS "KEY", CAST("XGB_Model_2_54"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_54"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_54"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_54" UNION ALL SELECT "XGB_Model_0_55"."KEY" AS "KEY", CAST("XGB_Model_0_55"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_55"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_55"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_55" UNION ALL SELECT "XGB_Model_1_55"."KEY" AS "KEY", CAST("XGB_Model_1_55"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_55"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_55"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_55" UNION ALL SELECT "XGB_Model_2_55"."KEY" AS "KEY", CAST("XGB_Model_2_55"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_55"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_55"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_55" UNION ALL SELECT "XGB_Model_0_56"."KEY" AS "KEY", CAST("XGB_Model_0_56"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_56"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_56"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_56" UNION ALL SELECT "XGB_Model_1_56"."KEY" AS "KEY", CAST("XGB_Model_1_56"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_56"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_56"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_56") AS "XGB_esu_16"),
"XGB_17" AS
(WITH "DT_node_lookup_170" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_170" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0216553435 AS "Score" UNION ALL SELECT 3 AS nid, 0.024661608 AS "Score" UNION ALL SELECT 4 AS nid, 0.00523735536 AS "Score") AS "Values"),
"DT_Output_170" AS
(SELECT "DT_node_lookup_170"."KEY" AS "KEY", "DT_node_lookup_170".node_id_2 AS node_id_2, "DT_node_data_170".nid AS nid, "DT_node_data_170"."Score" AS "Score"
FROM "DT_node_lookup_170" LEFT OUTER JOIN "DT_node_data_170" ON "DT_node_lookup_170".node_id_2 = "DT_node_data_170".nid),
"XGB_Model_2_56" AS
(SELECT "DT_Output_170"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_170"."Score" AS "Score_virginica"
FROM "DT_Output_170"),
"DT_node_lookup_171" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_171" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00829256326 AS "Score") AS "Values"),
"DT_Output_171" AS
(SELECT "DT_node_lookup_171"."KEY" AS "KEY", "DT_node_lookup_171".node_id_2 AS node_id_2, "DT_node_data_171".nid AS nid, "DT_node_data_171"."Score" AS "Score"
FROM "DT_node_lookup_171" LEFT OUTER JOIN "DT_node_data_171" ON "DT_node_lookup_171".node_id_2 = "DT_node_data_171".nid),
"XGB_Model_0_57" AS
(SELECT "DT_Output_171"."KEY" AS "KEY", "DT_Output_171"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_171"),
"DT_node_lookup_172" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_172" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0181566477 AS "Score" UNION ALL SELECT 3 AS nid, -0.00149284943 AS "Score" UNION ALL SELECT 4 AS nid, 0.0194284972 AS "Score") AS "Values"),
"DT_Output_172" AS
(SELECT "DT_node_lookup_172"."KEY" AS "KEY", "DT_node_lookup_172".node_id_2 AS node_id_2, "DT_node_data_172".nid AS nid, "DT_node_data_172"."Score" AS "Score"
FROM "DT_node_lookup_172" LEFT OUTER JOIN "DT_node_data_172" ON "DT_node_lookup_172".node_id_2 = "DT_node_data_172".nid),
"XGB_Model_1_57" AS
(SELECT "DT_Output_172"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_172"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_172"),
"DT_node_lookup_173" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_173" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0203742925 AS "Score" UNION ALL SELECT 3 AS nid, 0.0229623113 AS "Score" UNION ALL SELECT 4 AS nid, 0.00561472354 AS "Score") AS "Values"),
"DT_Output_173" AS
(SELECT "DT_node_lookup_173"."KEY" AS "KEY", "DT_node_lookup_173".node_id_2 AS node_id_2, "DT_node_data_173".nid AS nid, "DT_node_data_173"."Score" AS "Score"
FROM "DT_node_lookup_173" LEFT OUTER JOIN "DT_node_data_173" ON "DT_node_lookup_173".node_id_2 = "DT_node_data_173".nid),
"XGB_Model_2_57" AS
(SELECT "DT_Output_173"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_173"."Score" AS "Score_virginica"
FROM "DT_Output_173"),
"DT_node_lookup_174" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_174" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00855001993 AS "Score") AS "Values"),
"DT_Output_174" AS
(SELECT "DT_node_lookup_174"."KEY" AS "KEY", "DT_node_lookup_174".node_id_2 AS node_id_2, "DT_node_data_174".nid AS nid, "DT_node_data_174"."Score" AS "Score"
FROM "DT_node_lookup_174" LEFT OUTER JOIN "DT_node_data_174" ON "DT_node_lookup_174".node_id_2 = "DT_node_data_174".nid),
"XGB_Model_0_58" AS
(SELECT "DT_Output_174"."KEY" AS "KEY", "DT_Output_174"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_174"),
"DT_node_lookup_175" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_175" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0165577717 AS "Score" UNION ALL SELECT 3 AS nid, -0.0392327756 AS "Score" UNION ALL SELECT 4 AS nid, 0.017352879 AS "Score") AS "Values"),
"DT_Output_175" AS
(SELECT "DT_node_lookup_175"."KEY" AS "KEY", "DT_node_lookup_175".node_id_2 AS node_id_2, "DT_node_data_175".nid AS nid, "DT_node_data_175"."Score" AS "Score"
FROM "DT_node_lookup_175" LEFT OUTER JOIN "DT_node_data_175" ON "DT_node_lookup_175".node_id_2 = "DT_node_data_175".nid),
"XGB_Model_1_58" AS
(SELECT "DT_Output_175"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_175"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_175"),
"DT_node_lookup_176" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_176" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0120158792 AS "Score" UNION ALL SELECT 2 AS nid, 0.0281843096 AS "Score") AS "Values"),
"DT_Output_176" AS
(SELECT "DT_node_lookup_176"."KEY" AS "KEY", "DT_node_lookup_176".node_id_2 AS node_id_2, "DT_node_data_176".nid AS nid, "DT_node_data_176"."Score" AS "Score"
FROM "DT_node_lookup_176" LEFT OUTER JOIN "DT_node_data_176" ON "DT_node_lookup_176".node_id_2 = "DT_node_data_176".nid),
"XGB_Model_2_58" AS
(SELECT "DT_Output_176"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_176"."Score" AS "Score_virginica"
FROM "DT_Output_176"),
"DT_node_lookup_177" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_177" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00908363704 AS "Score") AS "Values"),
"DT_Output_177" AS
(SELECT "DT_node_lookup_177"."KEY" AS "KEY", "DT_node_lookup_177".node_id_2 AS node_id_2, "DT_node_data_177".nid AS nid, "DT_node_data_177"."Score" AS "Score"
FROM "DT_node_lookup_177" LEFT OUTER JOIN "DT_node_data_177" ON "DT_node_lookup_177".node_id_2 = "DT_node_data_177".nid),
"XGB_Model_0_59" AS
(SELECT "DT_Output_177"."KEY" AS "KEY", "DT_Output_177"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_177"),
"DT_node_lookup_178" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_178" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0182268359 AS "Score" UNION ALL SELECT 3 AS nid, 0.0285343435 AS "Score" UNION ALL SELECT 4 AS nid, -0.00770017179 AS "Score") AS "Values"),
"DT_Output_178" AS
(SELECT "DT_node_lookup_178"."KEY" AS "KEY", "DT_node_lookup_178".node_id_2 AS node_id_2, "DT_node_data_178".nid AS nid, "DT_node_data_178"."Score" AS "Score"
FROM "DT_node_lookup_178" LEFT OUTER JOIN "DT_node_data_178" ON "DT_node_lookup_178".node_id_2 = "DT_node_data_178".nid),
"XGB_Model_1_59" AS
(SELECT "DT_Output_178"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_178"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_178"),
"DT_node_lookup_179" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_179" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0105852969 AS "Score" UNION ALL SELECT 2 AS nid, 0.0272446219 AS "Score") AS "Values"),
"DT_Output_179" AS
(SELECT "DT_node_lookup_179"."KEY" AS "KEY", "DT_node_lookup_179".node_id_2 AS node_id_2, "DT_node_data_179".nid AS nid, "DT_node_data_179"."Score" AS "Score"
FROM "DT_node_lookup_179" LEFT OUTER JOIN "DT_node_data_179" ON "DT_node_lookup_179".node_id_2 = "DT_node_data_179".nid),
"XGB_Model_2_59" AS
(SELECT "DT_Output_179"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_179"."Score" AS "Score_virginica"
FROM "DT_Output_179")
SELECT "XGB_esu_17"."KEY", "XGB_esu_17"."Score_setosa", "XGB_esu_17"."Score_versicolor", "XGB_esu_17"."Score_virginica"
FROM (SELECT "XGB_Model_2_56"."KEY" AS "KEY", CAST("XGB_Model_2_56"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_56"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_56"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_56" UNION ALL SELECT "XGB_Model_0_57"."KEY" AS "KEY", CAST("XGB_Model_0_57"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_57"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_57"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_57" UNION ALL SELECT "XGB_Model_1_57"."KEY" AS "KEY", CAST("XGB_Model_1_57"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_57"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_57"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_57" UNION ALL SELECT "XGB_Model_2_57"."KEY" AS "KEY", CAST("XGB_Model_2_57"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_57"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_57"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_57" UNION ALL SELECT "XGB_Model_0_58"."KEY" AS "KEY", CAST("XGB_Model_0_58"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_58"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_58"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_58" UNION ALL SELECT "XGB_Model_1_58"."KEY" AS "KEY", CAST("XGB_Model_1_58"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_58"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_58"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_58" UNION ALL SELECT "XGB_Model_2_58"."KEY" AS "KEY", CAST("XGB_Model_2_58"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_58"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_58"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_58" UNION ALL SELECT "XGB_Model_0_59"."KEY" AS "KEY", CAST("XGB_Model_0_59"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_59"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_59"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_59" UNION ALL SELECT "XGB_Model_1_59"."KEY" AS "KEY", CAST("XGB_Model_1_59"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_59"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_59"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_59" UNION ALL SELECT "XGB_Model_2_59"."KEY" AS "KEY", CAST("XGB_Model_2_59"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_59"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_59"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_59") AS "XGB_esu_17"),
"XGB_18" AS
(WITH "DT_node_lookup_180" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_180" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00902668573 AS "Score") AS "Values"),
"DT_Output_180" AS
(SELECT "DT_node_lookup_180"."KEY" AS "KEY", "DT_node_lookup_180".node_id_2 AS node_id_2, "DT_node_data_180".nid AS nid, "DT_node_data_180"."Score" AS "Score"
FROM "DT_node_lookup_180" LEFT OUTER JOIN "DT_node_data_180" ON "DT_node_lookup_180".node_id_2 = "DT_node_data_180".nid),
"XGB_Model_0_60" AS
(SELECT "DT_Output_180"."KEY" AS "KEY", "DT_Output_180"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_180"),
"DT_node_lookup_181" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_181" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.01726643 AS "Score" UNION ALL SELECT 3 AS nid, 0.0275275242 AS "Score" UNION ALL SELECT 4 AS nid, -0.00768692419 AS "Score") AS "Values"),
"DT_Output_181" AS
(SELECT "DT_node_lookup_181"."KEY" AS "KEY", "DT_node_lookup_181".node_id_2 AS node_id_2, "DT_node_data_181".nid AS nid, "DT_node_data_181"."Score" AS "Score"
FROM "DT_node_lookup_181" LEFT OUTER JOIN "DT_node_data_181" ON "DT_node_lookup_181".node_id_2 = "DT_node_data_181".nid),
"XGB_Model_1_60" AS
(SELECT "DT_Output_181"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_181"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_181"),
"DT_node_lookup_182" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_182" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0291242655 AS "Score" UNION ALL SELECT 3 AS nid, -0.0418296531 AS "Score" UNION ALL SELECT 4 AS nid, 0.0232532918 AS "Score") AS "Values"),
"DT_Output_182" AS
(SELECT "DT_node_lookup_182"."KEY" AS "KEY", "DT_node_lookup_182".node_id_2 AS node_id_2, "DT_node_data_182".nid AS nid, "DT_node_data_182"."Score" AS "Score"
FROM "DT_node_lookup_182" LEFT OUTER JOIN "DT_node_data_182" ON "DT_node_lookup_182".node_id_2 = "DT_node_data_182".nid),
"XGB_Model_2_60" AS
(SELECT "DT_Output_182"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_182"."Score" AS "Score_virginica"
FROM "DT_Output_182"),
"DT_node_lookup_183" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_183" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00910138525 AS "Score") AS "Values"),
"DT_Output_183" AS
(SELECT "DT_node_lookup_183"."KEY" AS "KEY", "DT_node_lookup_183".node_id_2 AS node_id_2, "DT_node_data_183".nid AS nid, "DT_node_data_183"."Score" AS "Score"
FROM "DT_node_lookup_183" LEFT OUTER JOIN "DT_node_data_183" ON "DT_node_lookup_183".node_id_2 = "DT_node_data_183".nid),
"XGB_Model_0_61" AS
(SELECT "DT_Output_183"."KEY" AS "KEY", "DT_Output_183"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_183"),
"DT_node_lookup_184" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_184" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0195286535 AS "Score" UNION ALL SELECT 3 AS nid, -0.0163339786 AS "Score" UNION ALL SELECT 4 AS nid, 0.00240249839 AS "Score") AS "Values"),
"DT_Output_184" AS
(SELECT "DT_node_lookup_184"."KEY" AS "KEY", "DT_node_lookup_184".node_id_2 AS node_id_2, "DT_node_data_184".nid AS nid, "DT_node_data_184"."Score" AS "Score"
FROM "DT_node_lookup_184" LEFT OUTER JOIN "DT_node_data_184" ON "DT_node_lookup_184".node_id_2 = "DT_node_data_184".nid),
"XGB_Model_1_61" AS
(SELECT "DT_Output_184"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_184"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_184"),
"DT_node_lookup_185" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_185" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0292649101 AS "Score" UNION ALL SELECT 3 AS nid, 0.0241086036 AS "Score" UNION ALL SELECT 4 AS nid, -0.0386042893 AS "Score") AS "Values"),
"DT_Output_185" AS
(SELECT "DT_node_lookup_185"."KEY" AS "KEY", "DT_node_lookup_185".node_id_2 AS node_id_2, "DT_node_data_185".nid AS nid, "DT_node_data_185"."Score" AS "Score"
FROM "DT_node_lookup_185" LEFT OUTER JOIN "DT_node_data_185" ON "DT_node_lookup_185".node_id_2 = "DT_node_data_185".nid),
"XGB_Model_2_61" AS
(SELECT "DT_Output_185"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_185"."Score" AS "Score_virginica"
FROM "DT_Output_185"),
"DT_node_lookup_186" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_186" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00756628485 AS "Score") AS "Values"),
"DT_Output_186" AS
(SELECT "DT_node_lookup_186"."KEY" AS "KEY", "DT_node_lookup_186".node_id_2 AS node_id_2, "DT_node_data_186".nid AS nid, "DT_node_data_186"."Score" AS "Score"
FROM "DT_node_lookup_186" LEFT OUTER JOIN "DT_node_data_186" ON "DT_node_lookup_186".node_id_2 = "DT_node_data_186".nid),
"XGB_Model_0_62" AS
(SELECT "DT_Output_186"."KEY" AS "KEY", "DT_Output_186"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_186"),
"DT_node_lookup_187" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_187" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0167666133 AS "Score" UNION ALL SELECT 3 AS nid, -0.0396707952 AS "Score" UNION ALL SELECT 4 AS nid, 0.0175357908 AS "Score") AS "Values"),
"DT_Output_187" AS
(SELECT "DT_node_lookup_187"."KEY" AS "KEY", "DT_node_lookup_187".node_id_2 AS node_id_2, "DT_node_data_187".nid AS nid, "DT_node_data_187"."Score" AS "Score"
FROM "DT_node_lookup_187" LEFT OUTER JOIN "DT_node_data_187" ON "DT_node_lookup_187".node_id_2 = "DT_node_data_187".nid),
"XGB_Model_1_62" AS
(SELECT "DT_Output_187"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_187"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_187"),
"DT_node_lookup_188" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_188" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0213880688 AS "Score" UNION ALL SELECT 3 AS nid, 0.022851076 AS "Score" UNION ALL SELECT 4 AS nid, 0.00601967238 AS "Score") AS "Values"),
"DT_Output_188" AS
(SELECT "DT_node_lookup_188"."KEY" AS "KEY", "DT_node_lookup_188".node_id_2 AS node_id_2, "DT_node_data_188".nid AS nid, "DT_node_data_188"."Score" AS "Score"
FROM "DT_node_lookup_188" LEFT OUTER JOIN "DT_node_data_188" ON "DT_node_lookup_188".node_id_2 = "DT_node_data_188".nid),
"XGB_Model_2_62" AS
(SELECT "DT_Output_188"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_188"."Score" AS "Score_virginica"
FROM "DT_Output_188"),
"DT_node_lookup_189" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_189" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00836150907 AS "Score") AS "Values"),
"DT_Output_189" AS
(SELECT "DT_node_lookup_189"."KEY" AS "KEY", "DT_node_lookup_189".node_id_2 AS node_id_2, "DT_node_data_189".nid AS nid, "DT_node_data_189"."Score" AS "Score"
FROM "DT_node_lookup_189" LEFT OUTER JOIN "DT_node_data_189" ON "DT_node_lookup_189".node_id_2 = "DT_node_data_189".nid),
"XGB_Model_0_63" AS
(SELECT "DT_Output_189"."KEY" AS "KEY", "DT_Output_189"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_189")
SELECT "XGB_esu_18"."KEY", "XGB_esu_18"."Score_setosa", "XGB_esu_18"."Score_versicolor", "XGB_esu_18"."Score_virginica"
FROM (SELECT "XGB_Model_0_60"."KEY" AS "KEY", CAST("XGB_Model_0_60"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_60"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_60"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_60" UNION ALL SELECT "XGB_Model_1_60"."KEY" AS "KEY", CAST("XGB_Model_1_60"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_60"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_60"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_60" UNION ALL SELECT "XGB_Model_2_60"."KEY" AS "KEY", CAST("XGB_Model_2_60"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_60"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_60"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_60" UNION ALL SELECT "XGB_Model_0_61"."KEY" AS "KEY", CAST("XGB_Model_0_61"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_61"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_61"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_61" UNION ALL SELECT "XGB_Model_1_61"."KEY" AS "KEY", CAST("XGB_Model_1_61"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_61"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_61"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_61" UNION ALL SELECT "XGB_Model_2_61"."KEY" AS "KEY", CAST("XGB_Model_2_61"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_61"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_61"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_61" UNION ALL SELECT "XGB_Model_0_62"."KEY" AS "KEY", CAST("XGB_Model_0_62"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_62"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_62"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_62" UNION ALL SELECT "XGB_Model_1_62"."KEY" AS "KEY", CAST("XGB_Model_1_62"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_62"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_62"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_62" UNION ALL SELECT "XGB_Model_2_62"."KEY" AS "KEY", CAST("XGB_Model_2_62"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_62"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_62"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_62" UNION ALL SELECT "XGB_Model_0_63"."KEY" AS "KEY", CAST("XGB_Model_0_63"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_63"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_63"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_63") AS "XGB_esu_18"),
"XGB_19" AS
(WITH "DT_node_lookup_190" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_190" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0153474398 AS "Score" UNION ALL SELECT 3 AS nid, -0.0368831307 AS "Score" UNION ALL SELECT 4 AS nid, 0.0162832811 AS "Score") AS "Values"),
"DT_Output_190" AS
(SELECT "DT_node_lookup_190"."KEY" AS "KEY", "DT_node_lookup_190".node_id_2 AS node_id_2, "DT_node_data_190".nid AS nid, "DT_node_data_190"."Score" AS "Score"
FROM "DT_node_lookup_190" LEFT OUTER JOIN "DT_node_data_190" ON "DT_node_lookup_190".node_id_2 = "DT_node_data_190".nid),
"XGB_Model_1_63" AS
(SELECT "DT_Output_190"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_190"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_190"),
"DT_node_lookup_191" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_191" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0258017089 AS "Score" UNION ALL SELECT 3 AS nid, -0.038576819 AS "Score" UNION ALL SELECT 4 AS nid, 0.021958705 AS "Score") AS "Values"),
"DT_Output_191" AS
(SELECT "DT_node_lookup_191"."KEY" AS "KEY", "DT_node_lookup_191".node_id_2 AS node_id_2, "DT_node_data_191".nid AS nid, "DT_node_data_191"."Score" AS "Score"
FROM "DT_node_lookup_191" LEFT OUTER JOIN "DT_node_data_191" ON "DT_node_lookup_191".node_id_2 = "DT_node_data_191".nid),
"XGB_Model_2_63" AS
(SELECT "DT_Output_191"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_191"."Score" AS "Score_virginica"
FROM "DT_Output_191"),
"DT_node_lookup_192" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_192" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00898650009 AS "Score") AS "Values"),
"DT_Output_192" AS
(SELECT "DT_node_lookup_192"."KEY" AS "KEY", "DT_node_lookup_192".node_id_2 AS node_id_2, "DT_node_data_192".nid AS nid, "DT_node_data_192"."Score" AS "Score"
FROM "DT_node_lookup_192" LEFT OUTER JOIN "DT_node_data_192" ON "DT_node_lookup_192".node_id_2 = "DT_node_data_192".nid),
"XGB_Model_0_64" AS
(SELECT "DT_Output_192"."KEY" AS "KEY", "DT_Output_192"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_192"),
"DT_node_lookup_193" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_193" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0193761103 AS "Score" UNION ALL SELECT 3 AS nid, -0.0302262809 AS "Score" UNION ALL SELECT 4 AS nid, 0.0146953883 AS "Score") AS "Values"),
"DT_Output_193" AS
(SELECT "DT_node_lookup_193"."KEY" AS "KEY", "DT_node_lookup_193".node_id_2 AS node_id_2, "DT_node_data_193".nid AS nid, "DT_node_data_193"."Score" AS "Score"
FROM "DT_node_lookup_193" LEFT OUTER JOIN "DT_node_data_193" ON "DT_node_lookup_193".node_id_2 = "DT_node_data_193".nid),
"XGB_Model_1_64" AS
(SELECT "DT_Output_193"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_193"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_193"),
"DT_node_lookup_194" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_194" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0299099125 AS "Score" UNION ALL SELECT 3 AS nid, 0.022680629 AS "Score" UNION ALL SELECT 4 AS nid, -0.038362205 AS "Score") AS "Values"),
"DT_Output_194" AS
(SELECT "DT_node_lookup_194"."KEY" AS "KEY", "DT_node_lookup_194".node_id_2 AS node_id_2, "DT_node_data_194".nid AS nid, "DT_node_data_194"."Score" AS "Score"
FROM "DT_node_lookup_194" LEFT OUTER JOIN "DT_node_data_194" ON "DT_node_lookup_194".node_id_2 = "DT_node_data_194".nid),
"XGB_Model_2_64" AS
(SELECT "DT_Output_194"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_194"."Score" AS "Score_virginica"
FROM "DT_Output_194"),
"DT_node_lookup_195" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_195" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0074134036 AS "Score") AS "Values"),
"DT_Output_195" AS
(SELECT "DT_node_lookup_195"."KEY" AS "KEY", "DT_node_lookup_195".node_id_2 AS node_id_2, "DT_node_data_195".nid AS nid, "DT_node_data_195"."Score" AS "Score"
FROM "DT_node_lookup_195" LEFT OUTER JOIN "DT_node_data_195" ON "DT_node_lookup_195".node_id_2 = "DT_node_data_195".nid),
"XGB_Model_0_65" AS
(SELECT "DT_Output_195"."KEY" AS "KEY", "DT_Output_195"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_195"),
"DT_node_lookup_196" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_196" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0166527331 AS "Score" UNION ALL SELECT 3 AS nid, 0.0262411404 AS "Score" UNION ALL SELECT 4 AS nid, -0.00703831017 AS "Score") AS "Values"),
"DT_Output_196" AS
(SELECT "DT_node_lookup_196"."KEY" AS "KEY", "DT_node_lookup_196".node_id_2 AS node_id_2, "DT_node_data_196".nid AS nid, "DT_node_data_196"."Score" AS "Score"
FROM "DT_node_lookup_196" LEFT OUTER JOIN "DT_node_data_196" ON "DT_node_lookup_196".node_id_2 = "DT_node_data_196".nid),
"XGB_Model_1_65" AS
(SELECT "DT_Output_196"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_196"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_196"),
"DT_node_lookup_197" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_197" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0280269329 AS "Score" UNION ALL SELECT 3 AS nid, 0.0198736545 AS "Score" UNION ALL SELECT 4 AS nid, -0.035678152 AS "Score") AS "Values"),
"DT_Output_197" AS
(SELECT "DT_node_lookup_197"."KEY" AS "KEY", "DT_node_lookup_197".node_id_2 AS node_id_2, "DT_node_data_197".nid AS nid, "DT_node_data_197"."Score" AS "Score"
FROM "DT_node_lookup_197" LEFT OUTER JOIN "DT_node_data_197" ON "DT_node_lookup_197".node_id_2 = "DT_node_data_197".nid),
"XGB_Model_2_65" AS
(SELECT "DT_Output_197"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_197"."Score" AS "Score_virginica"
FROM "DT_Output_197"),
"DT_node_lookup_198" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_198" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00760188745 AS "Score") AS "Values"),
"DT_Output_198" AS
(SELECT "DT_node_lookup_198"."KEY" AS "KEY", "DT_node_lookup_198".node_id_2 AS node_id_2, "DT_node_data_198".nid AS nid, "DT_node_data_198"."Score" AS "Score"
FROM "DT_node_lookup_198" LEFT OUTER JOIN "DT_node_data_198" ON "DT_node_lookup_198".node_id_2 = "DT_node_data_198".nid),
"XGB_Model_0_66" AS
(SELECT "DT_Output_198"."KEY" AS "KEY", "DT_Output_198"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_198"),
"DT_node_lookup_199" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_199" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0181690697 AS "Score" UNION ALL SELECT 3 AS nid, -0.0143655492 AS "Score" UNION ALL SELECT 4 AS nid, 0.00088880281 AS "Score") AS "Values"),
"DT_Output_199" AS
(SELECT "DT_node_lookup_199"."KEY" AS "KEY", "DT_node_lookup_199".node_id_2 AS node_id_2, "DT_node_data_199".nid AS nid, "DT_node_data_199"."Score" AS "Score"
FROM "DT_node_lookup_199" LEFT OUTER JOIN "DT_node_data_199" ON "DT_node_lookup_199".node_id_2 = "DT_node_data_199".nid),
"XGB_Model_1_66" AS
(SELECT "DT_Output_199"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_199"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_199")
SELECT "XGB_esu_19"."KEY", "XGB_esu_19"."Score_setosa", "XGB_esu_19"."Score_versicolor", "XGB_esu_19"."Score_virginica"
FROM (SELECT "XGB_Model_1_63"."KEY" AS "KEY", CAST("XGB_Model_1_63"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_63"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_63"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_63" UNION ALL SELECT "XGB_Model_2_63"."KEY" AS "KEY", CAST("XGB_Model_2_63"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_63"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_63"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_63" UNION ALL SELECT "XGB_Model_0_64"."KEY" AS "KEY", CAST("XGB_Model_0_64"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_64"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_64"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_64" UNION ALL SELECT "XGB_Model_1_64"."KEY" AS "KEY", CAST("XGB_Model_1_64"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_64"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_64"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_64" UNION ALL SELECT "XGB_Model_2_64"."KEY" AS "KEY", CAST("XGB_Model_2_64"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_64"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_64"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_64" UNION ALL SELECT "XGB_Model_0_65"."KEY" AS "KEY", CAST("XGB_Model_0_65"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_65"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_65"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_65" UNION ALL SELECT "XGB_Model_1_65"."KEY" AS "KEY", CAST("XGB_Model_1_65"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_65"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_65"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_65" UNION ALL SELECT "XGB_Model_2_65"."KEY" AS "KEY", CAST("XGB_Model_2_65"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_65"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_65"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_65" UNION ALL SELECT "XGB_Model_0_66"."KEY" AS "KEY", CAST("XGB_Model_0_66"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_66"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_66"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_66" UNION ALL SELECT "XGB_Model_1_66"."KEY" AS "KEY", CAST("XGB_Model_1_66"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_66"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_66"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_66") AS "XGB_esu_19"),
"XGB_20" AS
(WITH "DT_node_lookup_200" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_200" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0207519326 AS "Score" UNION ALL SELECT 3 AS nid, 0.0228950195 AS "Score" UNION ALL SELECT 4 AS nid, 0.00547512481 AS "Score") AS "Values"),
"DT_Output_200" AS
(SELECT "DT_node_lookup_200"."KEY" AS "KEY", "DT_node_lookup_200".node_id_2 AS node_id_2, "DT_node_data_200".nid AS nid, "DT_node_data_200"."Score" AS "Score"
FROM "DT_node_lookup_200" LEFT OUTER JOIN "DT_node_data_200" ON "DT_node_lookup_200".node_id_2 = "DT_node_data_200".nid),
"XGB_Model_2_66" AS
(SELECT "DT_Output_200"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_200"."Score" AS "Score_virginica"
FROM "DT_Output_200"),
"DT_node_lookup_201" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_201" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.0062598777 AS "Score") AS "Values"),
"DT_Output_201" AS
(SELECT "DT_node_lookup_201"."KEY" AS "KEY", "DT_node_lookup_201".node_id_2 AS node_id_2, "DT_node_data_201".nid AS nid, "DT_node_data_201"."Score" AS "Score"
FROM "DT_node_lookup_201" LEFT OUTER JOIN "DT_node_data_201" ON "DT_node_lookup_201".node_id_2 = "DT_node_data_201".nid),
"XGB_Model_0_67" AS
(SELECT "DT_Output_201"."KEY" AS "KEY", "DT_Output_201"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_201"),
"DT_node_lookup_202" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_202" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0183907878 AS "Score" UNION ALL SELECT 3 AS nid, -0.0385146812 AS "Score" UNION ALL SELECT 4 AS nid, 0.0153024029 AS "Score") AS "Values"),
"DT_Output_202" AS
(SELECT "DT_node_lookup_202"."KEY" AS "KEY", "DT_node_lookup_202".node_id_2 AS node_id_2, "DT_node_data_202".nid AS nid, "DT_node_data_202"."Score" AS "Score"
FROM "DT_node_lookup_202" LEFT OUTER JOIN "DT_node_data_202" ON "DT_node_lookup_202".node_id_2 = "DT_node_data_202".nid),
"XGB_Model_1_67" AS
(SELECT "DT_Output_202"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_202"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_202"),
"DT_node_lookup_203" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_203" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0264876541 AS "Score" UNION ALL SELECT 3 AS nid, 0.0180352386 AS "Score" UNION ALL SELECT 4 AS nid, -0.0340283439 AS "Score") AS "Values"),
"DT_Output_203" AS
(SELECT "DT_node_lookup_203"."KEY" AS "KEY", "DT_node_lookup_203".node_id_2 AS node_id_2, "DT_node_data_203".nid AS nid, "DT_node_data_203"."Score" AS "Score"
FROM "DT_node_lookup_203" LEFT OUTER JOIN "DT_node_data_203" ON "DT_node_lookup_203".node_id_2 = "DT_node_data_203".nid),
"XGB_Model_2_67" AS
(SELECT "DT_Output_203"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_203"."Score" AS "Score_virginica"
FROM "DT_Output_203"),
"DT_node_lookup_204" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_204" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00711685419 AS "Score") AS "Values"),
"DT_Output_204" AS
(SELECT "DT_node_lookup_204"."KEY" AS "KEY", "DT_node_lookup_204".node_id_2 AS node_id_2, "DT_node_data_204".nid AS nid, "DT_node_data_204"."Score" AS "Score"
FROM "DT_node_lookup_204" LEFT OUTER JOIN "DT_node_data_204" ON "DT_node_lookup_204".node_id_2 = "DT_node_data_204".nid),
"XGB_Model_0_68" AS
(SELECT "DT_Output_204"."KEY" AS "KEY", "DT_Output_204"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_204"),
"DT_node_lookup_205" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_205" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0172085017 AS "Score" UNION ALL SELECT 3 AS nid, -0.0372298881 AS "Score" UNION ALL SELECT 4 AS nid, 0.0150043573 AS "Score") AS "Values"),
"DT_Output_205" AS
(SELECT "DT_node_lookup_205"."KEY" AS "KEY", "DT_node_lookup_205".node_id_2 AS node_id_2, "DT_node_data_205".nid AS nid, "DT_node_data_205"."Score" AS "Score"
FROM "DT_node_lookup_205" LEFT OUTER JOIN "DT_node_data_205" ON "DT_node_lookup_205".node_id_2 = "DT_node_data_205".nid),
"XGB_Model_1_68" AS
(SELECT "DT_Output_205"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_205"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_205"),
"DT_node_lookup_206" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_206" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.019924745 AS "Score" UNION ALL SELECT 3 AS nid, 0.0212878548 AS "Score" UNION ALL SELECT 4 AS nid, 0.00590447336 AS "Score") AS "Values"),
"DT_Output_206" AS
(SELECT "DT_node_lookup_206"."KEY" AS "KEY", "DT_node_lookup_206".node_id_2 AS node_id_2, "DT_node_data_206".nid AS nid, "DT_node_data_206"."Score" AS "Score"
FROM "DT_node_lookup_206" LEFT OUTER JOIN "DT_node_data_206" ON "DT_node_lookup_206".node_id_2 = "DT_node_data_206".nid),
"XGB_Model_2_68" AS
(SELECT "DT_Output_206"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_206"."Score" AS "Score_virginica"
FROM "DT_Output_206"),
"DT_node_lookup_207" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_207" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00786889344 AS "Score") AS "Values"),
"DT_Output_207" AS
(SELECT "DT_node_lookup_207"."KEY" AS "KEY", "DT_node_lookup_207".node_id_2 AS node_id_2, "DT_node_data_207".nid AS nid, "DT_node_data_207"."Score" AS "Score"
FROM "DT_node_lookup_207" LEFT OUTER JOIN "DT_node_data_207" ON "DT_node_lookup_207".node_id_2 = "DT_node_data_207".nid),
"XGB_Model_0_69" AS
(SELECT "DT_Output_207"."KEY" AS "KEY", "DT_Output_207"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_207"),
"DT_node_lookup_208" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_208" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0157563854 AS "Score" UNION ALL SELECT 3 AS nid, -0.0346141718 AS "Score" UNION ALL SELECT 4 AS nid, 0.0139622958 AS "Score") AS "Values"),
"DT_Output_208" AS
(SELECT "DT_node_lookup_208"."KEY" AS "KEY", "DT_node_lookup_208".node_id_2 AS node_id_2, "DT_node_data_208".nid AS nid, "DT_node_data_208"."Score" AS "Score"
FROM "DT_node_lookup_208" LEFT OUTER JOIN "DT_node_data_208" ON "DT_node_lookup_208".node_id_2 = "DT_node_data_208".nid),
"XGB_Model_1_69" AS
(SELECT "DT_Output_208"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_208"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_208"),
"DT_node_lookup_209" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.75) THEN CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 3 ELSE 4 END ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_209" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 2 AS nid, 0.0267178565 AS "Score" UNION ALL SELECT 3 AS nid, 0.019447377 AS "Score" UNION ALL SELECT 4 AS nid, -0.0337383226 AS "Score") AS "Values"),
"DT_Output_209" AS
(SELECT "DT_node_lookup_209"."KEY" AS "KEY", "DT_node_lookup_209".node_id_2 AS node_id_2, "DT_node_data_209".nid AS nid, "DT_node_data_209"."Score" AS "Score"
FROM "DT_node_lookup_209" LEFT OUTER JOIN "DT_node_data_209" ON "DT_node_lookup_209".node_id_2 = "DT_node_data_209".nid),
"XGB_Model_2_69" AS
(SELECT "DT_Output_209"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_209"."Score" AS "Score_virginica"
FROM "DT_Output_209")
SELECT "XGB_esu_20"."KEY", "XGB_esu_20"."Score_setosa", "XGB_esu_20"."Score_versicolor", "XGB_esu_20"."Score_virginica"
FROM (SELECT "XGB_Model_2_66"."KEY" AS "KEY", CAST("XGB_Model_2_66"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_66"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_66"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_66" UNION ALL SELECT "XGB_Model_0_67"."KEY" AS "KEY", CAST("XGB_Model_0_67"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_67"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_67"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_67" UNION ALL SELECT "XGB_Model_1_67"."KEY" AS "KEY", CAST("XGB_Model_1_67"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_67"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_67"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_67" UNION ALL SELECT "XGB_Model_2_67"."KEY" AS "KEY", CAST("XGB_Model_2_67"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_67"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_67"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_67" UNION ALL SELECT "XGB_Model_0_68"."KEY" AS "KEY", CAST("XGB_Model_0_68"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_68"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_68"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_68" UNION ALL SELECT "XGB_Model_1_68"."KEY" AS "KEY", CAST("XGB_Model_1_68"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_68"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_68"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_68" UNION ALL SELECT "XGB_Model_2_68"."KEY" AS "KEY", CAST("XGB_Model_2_68"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_68"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_68"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_68" UNION ALL SELECT "XGB_Model_0_69"."KEY" AS "KEY", CAST("XGB_Model_0_69"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_69"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_69"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_69" UNION ALL SELECT "XGB_Model_1_69"."KEY" AS "KEY", CAST("XGB_Model_1_69"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_69"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_69"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_69" UNION ALL SELECT "XGB_Model_2_69"."KEY" AS "KEY", CAST("XGB_Model_2_69"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_69"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_69"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_69") AS "XGB_esu_20"),
"XGB_21" AS
(WITH "DT_node_lookup_210" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_210" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00849767867 AS "Score") AS "Values"),
"DT_Output_210" AS
(SELECT "DT_node_lookup_210"."KEY" AS "KEY", "DT_node_lookup_210".node_id_2 AS node_id_2, "DT_node_data_210".nid AS nid, "DT_node_data_210"."Score" AS "Score"
FROM "DT_node_lookup_210" LEFT OUTER JOIN "DT_node_data_210" ON "DT_node_lookup_210".node_id_2 = "DT_node_data_210".nid),
"XGB_Model_0_70" AS
(SELECT "DT_Output_210"."KEY" AS "KEY", "DT_Output_210"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_210"),
"DT_node_lookup_211" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_211" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0148977535 AS "Score" UNION ALL SELECT 2 AS nid, -0.0126869176 AS "Score") AS "Values"),
"DT_Output_211" AS
(SELECT "DT_node_lookup_211"."KEY" AS "KEY", "DT_node_lookup_211".node_id_2 AS node_id_2, "DT_node_data_211".nid AS nid, "DT_node_data_211"."Score" AS "Score"
FROM "DT_node_lookup_211" LEFT OUTER JOIN "DT_node_data_211" ON "DT_node_lookup_211".node_id_2 = "DT_node_data_211".nid),
"XGB_Model_1_70" AS
(SELECT "DT_Output_211"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_211"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_211"),
"DT_node_lookup_212" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_212" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0134517336 AS "Score" UNION ALL SELECT 2 AS nid, 0.0215002112 AS "Score") AS "Values"),
"DT_Output_212" AS
(SELECT "DT_node_lookup_212"."KEY" AS "KEY", "DT_node_lookup_212".node_id_2 AS node_id_2, "DT_node_data_212".nid AS nid, "DT_node_data_212"."Score" AS "Score"
FROM "DT_node_lookup_212" LEFT OUTER JOIN "DT_node_data_212" ON "DT_node_lookup_212".node_id_2 = "DT_node_data_212".nid),
"XGB_Model_2_70" AS
(SELECT "DT_Output_212"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_212"."Score" AS "Score_virginica"
FROM "DT_Output_212"),
"DT_node_lookup_213" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_213" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, -0.00808534119 AS "Score") AS "Values"),
"DT_Output_213" AS
(SELECT "DT_node_lookup_213"."KEY" AS "KEY", "DT_node_lookup_213".node_id_2 AS node_id_2, "DT_node_data_213".nid AS nid, "DT_node_data_213"."Score" AS "Score"
FROM "DT_node_lookup_213" LEFT OUTER JOIN "DT_node_data_213" ON "DT_node_lookup_213".node_id_2 = "DT_node_data_213".nid),
"XGB_Model_0_71" AS
(SELECT "DT_Output_213"."KEY" AS "KEY", "DT_Output_213"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_213"),
"DT_node_lookup_214" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_214" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0178301204 AS "Score" UNION ALL SELECT 3 AS nid, 0.0274695475 AS "Score" UNION ALL SELECT 4 AS nid, -0.0073943981 AS "Score") AS "Values"),
"DT_Output_214" AS
(SELECT "DT_node_lookup_214"."KEY" AS "KEY", "DT_node_lookup_214".node_id_2 AS node_id_2, "DT_node_data_214".nid AS nid, "DT_node_data_214"."Score" AS "Score"
FROM "DT_node_lookup_214" LEFT OUTER JOIN "DT_node_data_214" ON "DT_node_lookup_214".node_id_2 = "DT_node_data_214".nid),
"XGB_Model_1_71" AS
(SELECT "DT_Output_214"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_214"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_214"),
"DT_node_lookup_215" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_215" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0129795521 AS "Score" UNION ALL SELECT 2 AS nid, 0.0200168211 AS "Score") AS "Values"),
"DT_Output_215" AS
(SELECT "DT_node_lookup_215"."KEY" AS "KEY", "DT_node_lookup_215".node_id_2 AS node_id_2, "DT_node_data_215".nid AS nid, "DT_node_data_215"."Score" AS "Score"
FROM "DT_node_lookup_215" LEFT OUTER JOIN "DT_node_data_215" ON "DT_node_lookup_215".node_id_2 = "DT_node_data_215".nid),
"XGB_Model_2_71" AS
(SELECT "DT_Output_215"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_215"."Score" AS "Score_virginica"
FROM "DT_Output_215"),
"DT_node_lookup_216" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_216" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_216" AS
(SELECT "DT_node_lookup_216"."KEY" AS "KEY", "DT_node_lookup_216".node_id_2 AS node_id_2, "DT_node_data_216".nid AS nid, "DT_node_data_216"."Score" AS "Score"
FROM "DT_node_lookup_216" LEFT OUTER JOIN "DT_node_data_216" ON "DT_node_lookup_216".node_id_2 = "DT_node_data_216".nid),
"XGB_Model_0_72" AS
(SELECT "DT_Output_216"."KEY" AS "KEY", "DT_Output_216"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_216"),
"DT_node_lookup_217" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_217" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0164881125 AS "Score" UNION ALL SELECT 3 AS nid, 0.026134599 AS "Score" UNION ALL SELECT 4 AS nid, -0.00704912236 AS "Score") AS "Values"),
"DT_Output_217" AS
(SELECT "DT_node_lookup_217"."KEY" AS "KEY", "DT_node_lookup_217".node_id_2 AS node_id_2, "DT_node_data_217".nid AS nid, "DT_node_data_217"."Score" AS "Score"
FROM "DT_node_lookup_217" LEFT OUTER JOIN "DT_node_data_217" ON "DT_node_lookup_217".node_id_2 = "DT_node_data_217".nid),
"XGB_Model_1_72" AS
(SELECT "DT_Output_217"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_217"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_217"),
"DT_node_lookup_218" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_218" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0181663483 AS "Score" UNION ALL SELECT 3 AS nid, 0.0219247285 AS "Score" UNION ALL SELECT 4 AS nid, 0.00409880746 AS "Score") AS "Values"),
"DT_Output_218" AS
(SELECT "DT_node_lookup_218"."KEY" AS "KEY", "DT_node_lookup_218".node_id_2 AS node_id_2, "DT_node_data_218".nid AS nid, "DT_node_data_218"."Score" AS "Score"
FROM "DT_node_lookup_218" LEFT OUTER JOIN "DT_node_data_218" ON "DT_node_lookup_218".node_id_2 = "DT_node_data_218".nid),
"XGB_Model_2_72" AS
(SELECT "DT_Output_218"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_218"."Score" AS "Score_virginica"
FROM "DT_Output_218"),
"DT_node_lookup_219" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_219" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_219" AS
(SELECT "DT_node_lookup_219"."KEY" AS "KEY", "DT_node_lookup_219".node_id_2 AS node_id_2, "DT_node_data_219".nid AS nid, "DT_node_data_219"."Score" AS "Score"
FROM "DT_node_lookup_219" LEFT OUTER JOIN "DT_node_data_219" ON "DT_node_lookup_219".node_id_2 = "DT_node_data_219".nid),
"XGB_Model_0_73" AS
(SELECT "DT_Output_219"."KEY" AS "KEY", "DT_Output_219"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_219")
SELECT "XGB_esu_21"."KEY", "XGB_esu_21"."Score_setosa", "XGB_esu_21"."Score_versicolor", "XGB_esu_21"."Score_virginica"
FROM (SELECT "XGB_Model_0_70"."KEY" AS "KEY", CAST("XGB_Model_0_70"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_70"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_70"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_70" UNION ALL SELECT "XGB_Model_1_70"."KEY" AS "KEY", CAST("XGB_Model_1_70"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_70"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_70"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_70" UNION ALL SELECT "XGB_Model_2_70"."KEY" AS "KEY", CAST("XGB_Model_2_70"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_70"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_70"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_70" UNION ALL SELECT "XGB_Model_0_71"."KEY" AS "KEY", CAST("XGB_Model_0_71"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_71"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_71"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_71" UNION ALL SELECT "XGB_Model_1_71"."KEY" AS "KEY", CAST("XGB_Model_1_71"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_71"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_71"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_71" UNION ALL SELECT "XGB_Model_2_71"."KEY" AS "KEY", CAST("XGB_Model_2_71"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_71"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_71"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_71" UNION ALL SELECT "XGB_Model_0_72"."KEY" AS "KEY", CAST("XGB_Model_0_72"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_72"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_72"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_72" UNION ALL SELECT "XGB_Model_1_72"."KEY" AS "KEY", CAST("XGB_Model_1_72"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_72"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_72"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_72" UNION ALL SELECT "XGB_Model_2_72"."KEY" AS "KEY", CAST("XGB_Model_2_72"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_72"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_72"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_72" UNION ALL SELECT "XGB_Model_0_73"."KEY" AS "KEY", CAST("XGB_Model_0_73"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_73"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_73"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_73") AS "XGB_esu_21"),
"XGB_22" AS
(WITH "DT_node_lookup_220" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.45000005) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_220" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0182825532 AS "Score" UNION ALL SELECT 3 AS nid, -0.0274697933 AS "Score" UNION ALL SELECT 4 AS nid, 0.0138388472 AS "Score") AS "Values"),
"DT_Output_220" AS
(SELECT "DT_node_lookup_220"."KEY" AS "KEY", "DT_node_lookup_220".node_id_2 AS node_id_2, "DT_node_data_220".nid AS nid, "DT_node_data_220"."Score" AS "Score"
FROM "DT_node_lookup_220" LEFT OUTER JOIN "DT_node_data_220" ON "DT_node_lookup_220".node_id_2 = "DT_node_data_220".nid),
"XGB_Model_1_73" AS
(SELECT "DT_Output_220"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_220"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_220"),
"DT_node_lookup_221" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_221" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0129728597 AS "Score" UNION ALL SELECT 2 AS nid, 0.0195845254 AS "Score") AS "Values"),
"DT_Output_221" AS
(SELECT "DT_node_lookup_221"."KEY" AS "KEY", "DT_node_lookup_221".node_id_2 AS node_id_2, "DT_node_data_221".nid AS nid, "DT_node_data_221"."Score" AS "Score"
FROM "DT_node_lookup_221" LEFT OUTER JOIN "DT_node_data_221" ON "DT_node_lookup_221".node_id_2 = "DT_node_data_221".nid),
"XGB_Model_2_73" AS
(SELECT "DT_Output_221"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_221"."Score" AS "Score_virginica"
FROM "DT_Output_221"),
"DT_node_lookup_222" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_222" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_222" AS
(SELECT "DT_node_lookup_222"."KEY" AS "KEY", "DT_node_lookup_222".node_id_2 AS node_id_2, "DT_node_data_222".nid AS nid, "DT_node_data_222"."Score" AS "Score"
FROM "DT_node_lookup_222" LEFT OUTER JOIN "DT_node_data_222" ON "DT_node_lookup_222".node_id_2 = "DT_node_data_222".nid),
"XGB_Model_0_74" AS
(SELECT "DT_Output_222"."KEY" AS "KEY", "DT_Output_222"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_222"),
"DT_node_lookup_223" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_223" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0159554221 AS "Score" UNION ALL SELECT 3 AS nid, -0.0341338627 AS "Score" UNION ALL SELECT 4 AS nid, 0.0152606135 AS "Score") AS "Values"),
"DT_Output_223" AS
(SELECT "DT_node_lookup_223"."KEY" AS "KEY", "DT_node_lookup_223".node_id_2 AS node_id_2, "DT_node_data_223".nid AS nid, "DT_node_data_223"."Score" AS "Score"
FROM "DT_node_lookup_223" LEFT OUTER JOIN "DT_node_data_223" ON "DT_node_lookup_223".node_id_2 = "DT_node_data_223".nid),
"XGB_Model_1_74" AS
(SELECT "DT_Output_223"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_223"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_223"),
"DT_node_lookup_224" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_224" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.012581978 AS "Score" UNION ALL SELECT 2 AS nid, 0.017930869 AS "Score") AS "Values"),
"DT_Output_224" AS
(SELECT "DT_node_lookup_224"."KEY" AS "KEY", "DT_node_lookup_224".node_id_2 AS node_id_2, "DT_node_data_224".nid AS nid, "DT_node_data_224"."Score" AS "Score"
FROM "DT_node_lookup_224" LEFT OUTER JOIN "DT_node_data_224" ON "DT_node_lookup_224".node_id_2 = "DT_node_data_224".nid),
"XGB_Model_2_74" AS
(SELECT "DT_Output_224"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_224"."Score" AS "Score_virginica"
FROM "DT_Output_224"),
"DT_node_lookup_225" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_225" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_225" AS
(SELECT "DT_node_lookup_225"."KEY" AS "KEY", "DT_node_lookup_225".node_id_2 AS node_id_2, "DT_node_data_225".nid AS nid, "DT_node_data_225"."Score" AS "Score"
FROM "DT_node_lookup_225" LEFT OUTER JOIN "DT_node_data_225" ON "DT_node_lookup_225".node_id_2 = "DT_node_data_225".nid),
"XGB_Model_0_75" AS
(SELECT "DT_Output_225"."KEY" AS "KEY", "DT_Output_225"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_225"),
"DT_node_lookup_226" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_226" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0158761628 AS "Score" UNION ALL SELECT 3 AS nid, 0.0252838079 AS "Score" UNION ALL SELECT 4 AS nid, -0.00555571355 AS "Score") AS "Values"),
"DT_Output_226" AS
(SELECT "DT_node_lookup_226"."KEY" AS "KEY", "DT_node_lookup_226".node_id_2 AS node_id_2, "DT_node_data_226".nid AS nid, "DT_node_data_226"."Score" AS "Score"
FROM "DT_node_lookup_226" LEFT OUTER JOIN "DT_node_data_226" ON "DT_node_lookup_226".node_id_2 = "DT_node_data_226".nid),
"XGB_Model_1_75" AS
(SELECT "DT_Output_226"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_226"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_226"),
"DT_node_lookup_227" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_227" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0176896695 AS "Score" UNION ALL SELECT 3 AS nid, 0.0211887695 AS "Score" UNION ALL SELECT 4 AS nid, 0.00328804087 AS "Score") AS "Values"),
"DT_Output_227" AS
(SELECT "DT_node_lookup_227"."KEY" AS "KEY", "DT_node_lookup_227".node_id_2 AS node_id_2, "DT_node_data_227".nid AS nid, "DT_node_data_227"."Score" AS "Score"
FROM "DT_node_lookup_227" LEFT OUTER JOIN "DT_node_data_227" ON "DT_node_lookup_227".node_id_2 = "DT_node_data_227".nid),
"XGB_Model_2_75" AS
(SELECT "DT_Output_227"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_227"."Score" AS "Score_virginica"
FROM "DT_Output_227"),
"DT_node_lookup_228" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_228" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_228" AS
(SELECT "DT_node_lookup_228"."KEY" AS "KEY", "DT_node_lookup_228".node_id_2 AS node_id_2, "DT_node_data_228".nid AS nid, "DT_node_data_228"."Score" AS "Score"
FROM "DT_node_lookup_228" LEFT OUTER JOIN "DT_node_data_228" ON "DT_node_lookup_228".node_id_2 = "DT_node_data_228".nid),
"XGB_Model_0_76" AS
(SELECT "DT_Output_228"."KEY" AS "KEY", "DT_Output_228"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_228"),
"DT_node_lookup_229" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_229" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0153005477 AS "Score" UNION ALL SELECT 3 AS nid, 0.0238013696 AS "Score" UNION ALL SELECT 4 AS nid, -0.00415517855 AS "Score") AS "Values"),
"DT_Output_229" AS
(SELECT "DT_node_lookup_229"."KEY" AS "KEY", "DT_node_lookup_229".node_id_2 AS node_id_2, "DT_node_data_229".nid AS nid, "DT_node_data_229"."Score" AS "Score"
FROM "DT_node_lookup_229" LEFT OUTER JOIN "DT_node_data_229" ON "DT_node_lookup_229".node_id_2 = "DT_node_data_229".nid),
"XGB_Model_1_76" AS
(SELECT "DT_Output_229"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_229"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_229")
SELECT "XGB_esu_22"."KEY", "XGB_esu_22"."Score_setosa", "XGB_esu_22"."Score_versicolor", "XGB_esu_22"."Score_virginica"
FROM (SELECT "XGB_Model_1_73"."KEY" AS "KEY", CAST("XGB_Model_1_73"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_73"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_73"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_73" UNION ALL SELECT "XGB_Model_2_73"."KEY" AS "KEY", CAST("XGB_Model_2_73"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_73"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_73"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_73" UNION ALL SELECT "XGB_Model_0_74"."KEY" AS "KEY", CAST("XGB_Model_0_74"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_74"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_74"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_74" UNION ALL SELECT "XGB_Model_1_74"."KEY" AS "KEY", CAST("XGB_Model_1_74"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_74"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_74"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_74" UNION ALL SELECT "XGB_Model_2_74"."KEY" AS "KEY", CAST("XGB_Model_2_74"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_74"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_74"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_74" UNION ALL SELECT "XGB_Model_0_75"."KEY" AS "KEY", CAST("XGB_Model_0_75"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_75"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_75"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_75" UNION ALL SELECT "XGB_Model_1_75"."KEY" AS "KEY", CAST("XGB_Model_1_75"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_75"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_75"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_75" UNION ALL SELECT "XGB_Model_2_75"."KEY" AS "KEY", CAST("XGB_Model_2_75"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_75"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_75"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_75" UNION ALL SELECT "XGB_Model_0_76"."KEY" AS "KEY", CAST("XGB_Model_0_76"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_76"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_76"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_76" UNION ALL SELECT "XGB_Model_1_76"."KEY" AS "KEY", CAST("XGB_Model_1_76"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_76"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_76"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_76") AS "XGB_esu_22"),
"XGB_23" AS
(WITH "DT_node_lookup_230" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_230" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0165796597 AS "Score" UNION ALL SELECT 3 AS nid, 0.020102378 AS "Score" UNION ALL SELECT 4 AS nid, 0.00320444698 AS "Score") AS "Values"),
"DT_Output_230" AS
(SELECT "DT_node_lookup_230"."KEY" AS "KEY", "DT_node_lookup_230".node_id_2 AS node_id_2, "DT_node_data_230".nid AS nid, "DT_node_data_230"."Score" AS "Score"
FROM "DT_node_lookup_230" LEFT OUTER JOIN "DT_node_data_230" ON "DT_node_lookup_230".node_id_2 = "DT_node_data_230".nid),
"XGB_Model_2_76" AS
(SELECT "DT_Output_230"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_230"."Score" AS "Score_virginica"
FROM "DT_Output_230"),
"DT_node_lookup_231" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_231" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_231" AS
(SELECT "DT_node_lookup_231"."KEY" AS "KEY", "DT_node_lookup_231".node_id_2 AS node_id_2, "DT_node_data_231".nid AS nid, "DT_node_data_231"."Score" AS "Score"
FROM "DT_node_lookup_231" LEFT OUTER JOIN "DT_node_data_231" ON "DT_node_lookup_231".node_id_2 = "DT_node_data_231".nid),
"XGB_Model_0_77" AS
(SELECT "DT_Output_231"."KEY" AS "KEY", "DT_Output_231"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_231"),
"DT_node_lookup_232" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_232" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0158085469 AS "Score" UNION ALL SELECT 3 AS nid, -0.013112396 AS "Score" UNION ALL SELECT 4 AS nid, -0.00353556452 AS "Score") AS "Values"),
"DT_Output_232" AS
(SELECT "DT_node_lookup_232"."KEY" AS "KEY", "DT_node_lookup_232".node_id_2 AS node_id_2, "DT_node_data_232".nid AS nid, "DT_node_data_232"."Score" AS "Score"
FROM "DT_node_lookup_232" LEFT OUTER JOIN "DT_node_data_232" ON "DT_node_lookup_232".node_id_2 = "DT_node_data_232".nid),
"XGB_Model_1_77" AS
(SELECT "DT_Output_232"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_232"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_232"),
"DT_node_lookup_233" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_233" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0130598247 AS "Score" UNION ALL SELECT 2 AS nid, 0.0192057602 AS "Score") AS "Values"),
"DT_Output_233" AS
(SELECT "DT_node_lookup_233"."KEY" AS "KEY", "DT_node_lookup_233".node_id_2 AS node_id_2, "DT_node_data_233".nid AS nid, "DT_node_data_233"."Score" AS "Score"
FROM "DT_node_lookup_233" LEFT OUTER JOIN "DT_node_data_233" ON "DT_node_lookup_233".node_id_2 = "DT_node_data_233".nid),
"XGB_Model_2_77" AS
(SELECT "DT_Output_233"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_233"."Score" AS "Score_virginica"
FROM "DT_Output_233"),
"DT_node_lookup_234" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_234" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_234" AS
(SELECT "DT_node_lookup_234"."KEY" AS "KEY", "DT_node_lookup_234".node_id_2 AS node_id_2, "DT_node_data_234".nid AS nid, "DT_node_data_234"."Score" AS "Score"
FROM "DT_node_lookup_234" LEFT OUTER JOIN "DT_node_data_234" ON "DT_node_lookup_234".node_id_2 = "DT_node_data_234".nid),
"XGB_Model_0_78" AS
(SELECT "DT_Output_234"."KEY" AS "KEY", "DT_Output_234"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_234"),
"DT_node_lookup_235" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_235" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0149064912 AS "Score" UNION ALL SELECT 3 AS nid, 0.0235418435 AS "Score" UNION ALL SELECT 4 AS nid, -0.00366221531 AS "Score") AS "Values"),
"DT_Output_235" AS
(SELECT "DT_node_lookup_235"."KEY" AS "KEY", "DT_node_lookup_235".node_id_2 AS node_id_2, "DT_node_data_235".nid AS nid, "DT_node_data_235"."Score" AS "Score"
FROM "DT_node_lookup_235" LEFT OUTER JOIN "DT_node_data_235" ON "DT_node_lookup_235".node_id_2 = "DT_node_data_235".nid),
"XGB_Model_1_78" AS
(SELECT "DT_Output_235"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_235"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_235"),
"DT_node_lookup_236" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_236" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0160766263 AS "Score" UNION ALL SELECT 3 AS nid, 0.0198315345 AS "Score" UNION ALL SELECT 4 AS nid, 0.0031137364 AS "Score") AS "Values"),
"DT_Output_236" AS
(SELECT "DT_node_lookup_236"."KEY" AS "KEY", "DT_node_lookup_236".node_id_2 AS node_id_2, "DT_node_data_236".nid AS nid, "DT_node_data_236"."Score" AS "Score"
FROM "DT_node_lookup_236" LEFT OUTER JOIN "DT_node_data_236" ON "DT_node_lookup_236".node_id_2 = "DT_node_data_236".nid),
"XGB_Model_2_78" AS
(SELECT "DT_Output_236"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_236"."Score" AS "Score_virginica"
FROM "DT_Output_236"),
"DT_node_lookup_237" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_237" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_237" AS
(SELECT "DT_node_lookup_237"."KEY" AS "KEY", "DT_node_lookup_237".node_id_2 AS node_id_2, "DT_node_data_237".nid AS nid, "DT_node_data_237"."Score" AS "Score"
FROM "DT_node_lookup_237" LEFT OUTER JOIN "DT_node_data_237" ON "DT_node_lookup_237".node_id_2 = "DT_node_data_237".nid),
"XGB_Model_0_79" AS
(SELECT "DT_Output_237"."KEY" AS "KEY", "DT_Output_237"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_237"),
"DT_node_lookup_238" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_238" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0143449558 AS "Score" UNION ALL SELECT 3 AS nid, 0.0221900661 AS "Score" UNION ALL SELECT 4 AS nid, -0.00248152902 AS "Score") AS "Values"),
"DT_Output_238" AS
(SELECT "DT_node_lookup_238"."KEY" AS "KEY", "DT_node_lookup_238".node_id_2 AS node_id_2, "DT_node_data_238".nid AS nid, "DT_node_data_238"."Score" AS "Score"
FROM "DT_node_lookup_238" LEFT OUTER JOIN "DT_node_data_238" ON "DT_node_lookup_238".node_id_2 = "DT_node_data_238".nid),
"XGB_Model_1_79" AS
(SELECT "DT_Output_238"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_238"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_238"),
"DT_node_lookup_239" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_239" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0118112294 AS "Score" UNION ALL SELECT 2 AS nid, 0.0180203635 AS "Score") AS "Values"),
"DT_Output_239" AS
(SELECT "DT_node_lookup_239"."KEY" AS "KEY", "DT_node_lookup_239".node_id_2 AS node_id_2, "DT_node_data_239".nid AS nid, "DT_node_data_239"."Score" AS "Score"
FROM "DT_node_lookup_239" LEFT OUTER JOIN "DT_node_data_239" ON "DT_node_lookup_239".node_id_2 = "DT_node_data_239".nid),
"XGB_Model_2_79" AS
(SELECT "DT_Output_239"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_239"."Score" AS "Score_virginica"
FROM "DT_Output_239")
SELECT "XGB_esu_23"."KEY", "XGB_esu_23"."Score_setosa", "XGB_esu_23"."Score_versicolor", "XGB_esu_23"."Score_virginica"
FROM (SELECT "XGB_Model_2_76"."KEY" AS "KEY", CAST("XGB_Model_2_76"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_76"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_76"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_76" UNION ALL SELECT "XGB_Model_0_77"."KEY" AS "KEY", CAST("XGB_Model_0_77"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_77"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_77"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_77" UNION ALL SELECT "XGB_Model_1_77"."KEY" AS "KEY", CAST("XGB_Model_1_77"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_77"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_77"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_77" UNION ALL SELECT "XGB_Model_2_77"."KEY" AS "KEY", CAST("XGB_Model_2_77"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_77"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_77"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_77" UNION ALL SELECT "XGB_Model_0_78"."KEY" AS "KEY", CAST("XGB_Model_0_78"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_78"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_78"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_78" UNION ALL SELECT "XGB_Model_1_78"."KEY" AS "KEY", CAST("XGB_Model_1_78"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_78"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_78"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_78" UNION ALL SELECT "XGB_Model_2_78"."KEY" AS "KEY", CAST("XGB_Model_2_78"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_78"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_78"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_78" UNION ALL SELECT "XGB_Model_0_79"."KEY" AS "KEY", CAST("XGB_Model_0_79"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_79"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_79"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_79" UNION ALL SELECT "XGB_Model_1_79"."KEY" AS "KEY", CAST("XGB_Model_1_79"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_79"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_79"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_79" UNION ALL SELECT "XGB_Model_2_79"."KEY" AS "KEY", CAST("XGB_Model_2_79"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_79"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_79"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_79") AS "XGB_esu_23"),
"XGB_24" AS
(WITH "DT_node_lookup_240" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_240" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_240" AS
(SELECT "DT_node_lookup_240"."KEY" AS "KEY", "DT_node_lookup_240".node_id_2 AS node_id_2, "DT_node_data_240".nid AS nid, "DT_node_data_240"."Score" AS "Score"
FROM "DT_node_lookup_240" LEFT OUTER JOIN "DT_node_data_240" ON "DT_node_lookup_240".node_id_2 = "DT_node_data_240".nid),
"XGB_Model_0_80" AS
(SELECT "DT_Output_240"."KEY" AS "KEY", "DT_Output_240"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_240"),
"DT_node_lookup_241" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_241" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0150218429 AS "Score" UNION ALL SELECT 3 AS nid, -0.0115613099 AS "Score" UNION ALL SELECT 4 AS nid, -0.0030388392 AS "Score") AS "Values"),
"DT_Output_241" AS
(SELECT "DT_node_lookup_241"."KEY" AS "KEY", "DT_node_lookup_241".node_id_2 AS node_id_2, "DT_node_data_241".nid AS nid, "DT_node_data_241"."Score" AS "Score"
FROM "DT_node_lookup_241" LEFT OUTER JOIN "DT_node_data_241" ON "DT_node_lookup_241".node_id_2 = "DT_node_data_241".nid),
"XGB_Model_1_80" AS
(SELECT "DT_Output_241"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_241"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_241"),
"DT_node_lookup_242" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_242" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0159966201 AS "Score" UNION ALL SELECT 3 AS nid, 0.0195903014 AS "Score" UNION ALL SELECT 4 AS nid, 0.00325024151 AS "Score") AS "Values"),
"DT_Output_242" AS
(SELECT "DT_node_lookup_242"."KEY" AS "KEY", "DT_node_lookup_242".node_id_2 AS node_id_2, "DT_node_data_242".nid AS nid, "DT_node_data_242"."Score" AS "Score"
FROM "DT_node_lookup_242" LEFT OUTER JOIN "DT_node_data_242" ON "DT_node_lookup_242".node_id_2 = "DT_node_data_242".nid),
"XGB_Model_2_80" AS
(SELECT "DT_Output_242"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_242"."Score" AS "Score_virginica"
FROM "DT_Output_242"),
"DT_node_lookup_243" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_243" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_243" AS
(SELECT "DT_node_lookup_243"."KEY" AS "KEY", "DT_node_lookup_243".node_id_2 AS node_id_2, "DT_node_data_243".nid AS nid, "DT_node_data_243"."Score" AS "Score"
FROM "DT_node_lookup_243" LEFT OUTER JOIN "DT_node_data_243" ON "DT_node_lookup_243".node_id_2 = "DT_node_data_243".nid),
"XGB_Model_0_81" AS
(SELECT "DT_Output_243"."KEY" AS "KEY", "DT_Output_243"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_243"),
"DT_node_lookup_244" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_244" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0140148643 AS "Score" UNION ALL SELECT 3 AS nid, 0.0219578631 AS "Score" UNION ALL SELECT 4 AS nid, -0.00209473958 AS "Score") AS "Values"),
"DT_Output_244" AS
(SELECT "DT_node_lookup_244"."KEY" AS "KEY", "DT_node_lookup_244".node_id_2 AS node_id_2, "DT_node_data_244".nid AS nid, "DT_node_data_244"."Score" AS "Score"
FROM "DT_node_lookup_244" LEFT OUTER JOIN "DT_node_data_244" ON "DT_node_lookup_244".node_id_2 = "DT_node_data_244".nid),
"XGB_Model_1_81" AS
(SELECT "DT_Output_244"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_244"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_244"),
"DT_node_lookup_245" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.94999981) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.25) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_245" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0145695154 AS "Score" UNION ALL SELECT 3 AS nid, 0.0186263826 AS "Score" UNION ALL SELECT 4 AS nid, 0.00302351057 AS "Score") AS "Values"),
"DT_Output_245" AS
(SELECT "DT_node_lookup_245"."KEY" AS "KEY", "DT_node_lookup_245".node_id_2 AS node_id_2, "DT_node_data_245".nid AS nid, "DT_node_data_245"."Score" AS "Score"
FROM "DT_node_lookup_245" LEFT OUTER JOIN "DT_node_data_245" ON "DT_node_lookup_245".node_id_2 = "DT_node_data_245".nid),
"XGB_Model_2_81" AS
(SELECT "DT_Output_245"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_245"."Score" AS "Score_virginica"
FROM "DT_Output_245"),
"DT_node_lookup_246" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_246" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_246" AS
(SELECT "DT_node_lookup_246"."KEY" AS "KEY", "DT_node_lookup_246".node_id_2 AS node_id_2, "DT_node_data_246".nid AS nid, "DT_node_data_246"."Score" AS "Score"
FROM "DT_node_lookup_246" LEFT OUTER JOIN "DT_node_data_246" ON "DT_node_lookup_246".node_id_2 = "DT_node_data_246".nid),
"XGB_Model_0_82" AS
(SELECT "DT_Output_246"."KEY" AS "KEY", "DT_Output_246"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_246"),
"DT_node_lookup_247" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_247" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0134652341 AS "Score" UNION ALL SELECT 3 AS nid, 0.0207261611 AS "Score" UNION ALL SELECT 4 AS nid, -0.00109715736 AS "Score") AS "Values"),
"DT_Output_247" AS
(SELECT "DT_node_lookup_247"."KEY" AS "KEY", "DT_node_lookup_247".node_id_2 AS node_id_2, "DT_node_data_247".nid AS nid, "DT_node_data_247"."Score" AS "Score"
FROM "DT_node_lookup_247" LEFT OUTER JOIN "DT_node_data_247" ON "DT_node_lookup_247".node_id_2 = "DT_node_data_247".nid),
"XGB_Model_1_82" AS
(SELECT "DT_Output_247"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_247"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_247"),
"DT_node_lookup_248" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_248" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0112573318 AS "Score" UNION ALL SELECT 2 AS nid, 0.0176630728 AS "Score") AS "Values"),
"DT_Output_248" AS
(SELECT "DT_node_lookup_248"."KEY" AS "KEY", "DT_node_lookup_248".node_id_2 AS node_id_2, "DT_node_data_248".nid AS nid, "DT_node_data_248"."Score" AS "Score"
FROM "DT_node_lookup_248" LEFT OUTER JOIN "DT_node_data_248" ON "DT_node_lookup_248".node_id_2 = "DT_node_data_248".nid),
"XGB_Model_2_82" AS
(SELECT "DT_Output_248"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_248"."Score" AS "Score_virginica"
FROM "DT_Output_248"),
"DT_node_lookup_249" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_249" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_249" AS
(SELECT "DT_node_lookup_249"."KEY" AS "KEY", "DT_node_lookup_249".node_id_2 AS node_id_2, "DT_node_data_249".nid AS nid, "DT_node_data_249"."Score" AS "Score"
FROM "DT_node_lookup_249" LEFT OUTER JOIN "DT_node_data_249" ON "DT_node_lookup_249".node_id_2 = "DT_node_data_249".nid),
"XGB_Model_0_83" AS
(SELECT "DT_Output_249"."KEY" AS "KEY", "DT_Output_249"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_249")
SELECT "XGB_esu_24"."KEY", "XGB_esu_24"."Score_setosa", "XGB_esu_24"."Score_versicolor", "XGB_esu_24"."Score_virginica"
FROM (SELECT "XGB_Model_0_80"."KEY" AS "KEY", CAST("XGB_Model_0_80"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_80"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_80"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_80" UNION ALL SELECT "XGB_Model_1_80"."KEY" AS "KEY", CAST("XGB_Model_1_80"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_80"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_80"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_80" UNION ALL SELECT "XGB_Model_2_80"."KEY" AS "KEY", CAST("XGB_Model_2_80"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_80"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_80"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_80" UNION ALL SELECT "XGB_Model_0_81"."KEY" AS "KEY", CAST("XGB_Model_0_81"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_81"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_81"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_81" UNION ALL SELECT "XGB_Model_1_81"."KEY" AS "KEY", CAST("XGB_Model_1_81"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_81"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_81"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_81" UNION ALL SELECT "XGB_Model_2_81"."KEY" AS "KEY", CAST("XGB_Model_2_81"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_81"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_81"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_81" UNION ALL SELECT "XGB_Model_0_82"."KEY" AS "KEY", CAST("XGB_Model_0_82"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_82"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_82"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_82" UNION ALL SELECT "XGB_Model_1_82"."KEY" AS "KEY", CAST("XGB_Model_1_82"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_82"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_82"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_82" UNION ALL SELECT "XGB_Model_2_82"."KEY" AS "KEY", CAST("XGB_Model_2_82"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_82"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_82"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_82" UNION ALL SELECT "XGB_Model_0_83"."KEY" AS "KEY", CAST("XGB_Model_0_83"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_83"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_83"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_83") AS "XGB_esu_24"),
"XGB_25" AS
(WITH "DT_node_lookup_250" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_250" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0147806043 AS "Score" UNION ALL SELECT 3 AS nid, -0.0109992456 AS "Score" UNION ALL SELECT 4 AS nid, -0.002473081 AS "Score") AS "Values"),
"DT_Output_250" AS
(SELECT "DT_node_lookup_250"."KEY" AS "KEY", "DT_node_lookup_250".node_id_2 AS node_id_2, "DT_node_data_250".nid AS nid, "DT_node_data_250"."Score" AS "Score"
FROM "DT_node_lookup_250" LEFT OUTER JOIN "DT_node_data_250" ON "DT_node_lookup_250".node_id_2 = "DT_node_data_250".nid),
"XGB_Model_1_83" AS
(SELECT "DT_Output_250"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_250"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_250"),
"DT_node_lookup_251" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_251" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0145885833 AS "Score" UNION ALL SELECT 2 AS nid, -0.0103134196 AS "Score") AS "Values"),
"DT_Output_251" AS
(SELECT "DT_node_lookup_251"."KEY" AS "KEY", "DT_node_lookup_251".node_id_2 AS node_id_2, "DT_node_data_251".nid AS nid, "DT_node_data_251"."Score" AS "Score"
FROM "DT_node_lookup_251" LEFT OUTER JOIN "DT_node_data_251" ON "DT_node_lookup_251".node_id_2 = "DT_node_data_251".nid),
"XGB_Model_2_83" AS
(SELECT "DT_Output_251"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_251"."Score" AS "Score_virginica"
FROM "DT_Output_251"),
"DT_node_lookup_252" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_252" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_252" AS
(SELECT "DT_node_lookup_252"."KEY" AS "KEY", "DT_node_lookup_252".node_id_2 AS node_id_2, "DT_node_data_252".nid AS nid, "DT_node_data_252"."Score" AS "Score"
FROM "DT_node_lookup_252" LEFT OUTER JOIN "DT_node_data_252" ON "DT_node_lookup_252".node_id_2 = "DT_node_data_252".nid),
"XGB_Model_0_84" AS
(SELECT "DT_Output_252"."KEY" AS "KEY", "DT_Output_252"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_252"),
"DT_node_lookup_253" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_253" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0145784589 AS "Score" UNION ALL SELECT 2 AS nid, -0.00868436228 AS "Score") AS "Values"),
"DT_Output_253" AS
(SELECT "DT_node_lookup_253"."KEY" AS "KEY", "DT_node_lookup_253".node_id_2 AS node_id_2, "DT_node_data_253".nid AS nid, "DT_node_data_253"."Score" AS "Score"
FROM "DT_node_lookup_253" LEFT OUTER JOIN "DT_node_data_253" ON "DT_node_lookup_253".node_id_2 = "DT_node_data_253".nid),
"XGB_Model_1_84" AS
(SELECT "DT_Output_253"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_253"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_253"),
"DT_node_lookup_254" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_254" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0102490969 AS "Score" UNION ALL SELECT 2 AS nid, 0.0166857578 AS "Score") AS "Values"),
"DT_Output_254" AS
(SELECT "DT_node_lookup_254"."KEY" AS "KEY", "DT_node_lookup_254".node_id_2 AS node_id_2, "DT_node_data_254".nid AS nid, "DT_node_data_254"."Score" AS "Score"
FROM "DT_node_lookup_254" LEFT OUTER JOIN "DT_node_data_254" ON "DT_node_lookup_254".node_id_2 = "DT_node_data_254".nid),
"XGB_Model_2_84" AS
(SELECT "DT_Output_254"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_254"."Score" AS "Score_virginica"
FROM "DT_Output_254"),
"DT_node_lookup_255" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_255" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_255" AS
(SELECT "DT_node_lookup_255"."KEY" AS "KEY", "DT_node_lookup_255".node_id_2 AS node_id_2, "DT_node_data_255".nid AS nid, "DT_node_data_255"."Score" AS "Score"
FROM "DT_node_lookup_255" LEFT OUTER JOIN "DT_node_data_255" ON "DT_node_lookup_255".node_id_2 = "DT_node_data_255".nid),
"XGB_Model_0_85" AS
(SELECT "DT_Output_255"."KEY" AS "KEY", "DT_Output_255"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_255"),
"DT_node_lookup_256" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_256" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0122836186 AS "Score" UNION ALL SELECT 3 AS nid, 0.000649199297 AS "Score" UNION ALL SELECT 4 AS nid, 0.017443737 AS "Score") AS "Values"),
"DT_Output_256" AS
(SELECT "DT_node_lookup_256"."KEY" AS "KEY", "DT_node_lookup_256".node_id_2 AS node_id_2, "DT_node_data_256".nid AS nid, "DT_node_data_256"."Score" AS "Score"
FROM "DT_node_lookup_256" LEFT OUTER JOIN "DT_node_data_256" ON "DT_node_lookup_256".node_id_2 = "DT_node_data_256".nid),
"XGB_Model_1_85" AS
(SELECT "DT_Output_256"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_256"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_256"),
"DT_node_lookup_257" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_257" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0154060824 AS "Score" UNION ALL SELECT 2 AS nid, -0.0113768792 AS "Score") AS "Values"),
"DT_Output_257" AS
(SELECT "DT_node_lookup_257"."KEY" AS "KEY", "DT_node_lookup_257".node_id_2 AS node_id_2, "DT_node_data_257".nid AS nid, "DT_node_data_257"."Score" AS "Score"
FROM "DT_node_lookup_257" LEFT OUTER JOIN "DT_node_data_257" ON "DT_node_lookup_257".node_id_2 = "DT_node_data_257".nid),
"XGB_Model_2_85" AS
(SELECT "DT_Output_257"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_257"."Score" AS "Score_virginica"
FROM "DT_Output_257"),
"DT_node_lookup_258" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_258" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_258" AS
(SELECT "DT_node_lookup_258"."KEY" AS "KEY", "DT_node_lookup_258".node_id_2 AS node_id_2, "DT_node_data_258".nid AS nid, "DT_node_data_258"."Score" AS "Score"
FROM "DT_node_lookup_258" LEFT OUTER JOIN "DT_node_data_258" ON "DT_node_lookup_258".node_id_2 = "DT_node_data_258".nid),
"XGB_Model_0_86" AS
(SELECT "DT_Output_258"."KEY" AS "KEY", "DT_Output_258"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_258"),
"DT_node_lookup_259" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_259" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0146727683 AS "Score" UNION ALL SELECT 3 AS nid, -0.00990157854 AS "Score" UNION ALL SELECT 4 AS nid, -0.00254212506 AS "Score") AS "Values"),
"DT_Output_259" AS
(SELECT "DT_node_lookup_259"."KEY" AS "KEY", "DT_node_lookup_259".node_id_2 AS node_id_2, "DT_node_data_259".nid AS nid, "DT_node_data_259"."Score" AS "Score"
FROM "DT_node_lookup_259" LEFT OUTER JOIN "DT_node_data_259" ON "DT_node_lookup_259".node_id_2 = "DT_node_data_259".nid),
"XGB_Model_1_86" AS
(SELECT "DT_Output_259"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_259"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_259")
SELECT "XGB_esu_25"."KEY", "XGB_esu_25"."Score_setosa", "XGB_esu_25"."Score_versicolor", "XGB_esu_25"."Score_virginica"
FROM (SELECT "XGB_Model_1_83"."KEY" AS "KEY", CAST("XGB_Model_1_83"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_83"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_83"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_83" UNION ALL SELECT "XGB_Model_2_83"."KEY" AS "KEY", CAST("XGB_Model_2_83"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_83"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_83"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_83" UNION ALL SELECT "XGB_Model_0_84"."KEY" AS "KEY", CAST("XGB_Model_0_84"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_84"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_84"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_84" UNION ALL SELECT "XGB_Model_1_84"."KEY" AS "KEY", CAST("XGB_Model_1_84"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_84"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_84"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_84" UNION ALL SELECT "XGB_Model_2_84"."KEY" AS "KEY", CAST("XGB_Model_2_84"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_84"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_84"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_84" UNION ALL SELECT "XGB_Model_0_85"."KEY" AS "KEY", CAST("XGB_Model_0_85"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_85"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_85"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_85" UNION ALL SELECT "XGB_Model_1_85"."KEY" AS "KEY", CAST("XGB_Model_1_85"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_85"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_85"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_85" UNION ALL SELECT "XGB_Model_2_85"."KEY" AS "KEY", CAST("XGB_Model_2_85"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_85"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_85"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_85" UNION ALL SELECT "XGB_Model_0_86"."KEY" AS "KEY", CAST("XGB_Model_0_86"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_86"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_86"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_86" UNION ALL SELECT "XGB_Model_1_86"."KEY" AS "KEY", CAST("XGB_Model_1_86"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_86"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_86"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_86") AS "XGB_esu_25"),
"XGB_26" AS
(WITH "DT_node_lookup_260" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_260" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00970013067 AS "Score" UNION ALL SELECT 2 AS nid, 0.0161001552 AS "Score") AS "Values"),
"DT_Output_260" AS
(SELECT "DT_node_lookup_260"."KEY" AS "KEY", "DT_node_lookup_260".node_id_2 AS node_id_2, "DT_node_data_260".nid AS nid, "DT_node_data_260"."Score" AS "Score"
FROM "DT_node_lookup_260" LEFT OUTER JOIN "DT_node_data_260" ON "DT_node_lookup_260".node_id_2 = "DT_node_data_260".nid),
"XGB_Model_2_86" AS
(SELECT "DT_Output_260"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_260"."Score" AS "Score_virginica"
FROM "DT_Output_260"),
"DT_node_lookup_261" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_261" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_261" AS
(SELECT "DT_node_lookup_261"."KEY" AS "KEY", "DT_node_lookup_261".node_id_2 AS node_id_2, "DT_node_data_261".nid AS nid, "DT_node_data_261"."Score" AS "Score"
FROM "DT_node_lookup_261" LEFT OUTER JOIN "DT_node_data_261" ON "DT_node_lookup_261".node_id_2 = "DT_node_data_261".nid),
"XGB_Model_0_87" AS
(SELECT "DT_Output_261"."KEY" AS "KEY", "DT_Output_261"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_261"),
"DT_node_lookup_262" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.54999995) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_262" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.011505154 AS "Score" UNION ALL SELECT 3 AS nid, -0.000373342948 AS "Score" UNION ALL SELECT 4 AS nid, 0.0164338667 AS "Score") AS "Values"),
"DT_Output_262" AS
(SELECT "DT_node_lookup_262"."KEY" AS "KEY", "DT_node_lookup_262".node_id_2 AS node_id_2, "DT_node_data_262".nid AS nid, "DT_node_data_262"."Score" AS "Score"
FROM "DT_node_lookup_262" LEFT OUTER JOIN "DT_node_data_262" ON "DT_node_lookup_262".node_id_2 = "DT_node_data_262".nid),
"XGB_Model_1_87" AS
(SELECT "DT_Output_262"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_262"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_262"),
"DT_node_lookup_263" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_263" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0104516167 AS "Score" UNION ALL SELECT 2 AS nid, 0.0138775138 AS "Score") AS "Values"),
"DT_Output_263" AS
(SELECT "DT_node_lookup_263"."KEY" AS "KEY", "DT_node_lookup_263".node_id_2 AS node_id_2, "DT_node_data_263".nid AS nid, "DT_node_data_263"."Score" AS "Score"
FROM "DT_node_lookup_263" LEFT OUTER JOIN "DT_node_data_263" ON "DT_node_lookup_263".node_id_2 = "DT_node_data_263".nid),
"XGB_Model_2_87" AS
(SELECT "DT_Output_263"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_263"."Score" AS "Score_virginica"
FROM "DT_Output_263"),
"DT_node_lookup_264" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_264" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_264" AS
(SELECT "DT_node_lookup_264"."KEY" AS "KEY", "DT_node_lookup_264".node_id_2 AS node_id_2, "DT_node_data_264".nid AS nid, "DT_node_data_264"."Score" AS "Score"
FROM "DT_node_lookup_264" LEFT OUTER JOIN "DT_node_data_264" ON "DT_node_lookup_264".node_id_2 = "DT_node_data_264".nid),
"XGB_Model_0_88" AS
(SELECT "DT_Output_264"."KEY" AS "KEY", "DT_Output_264"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_264"),
"DT_node_lookup_265" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_265" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0138610108 AS "Score" UNION ALL SELECT 3 AS nid, -0.00888610352 AS "Score" UNION ALL SELECT 4 AS nid, -0.00229297997 AS "Score") AS "Values"),
"DT_Output_265" AS
(SELECT "DT_node_lookup_265"."KEY" AS "KEY", "DT_node_lookup_265".node_id_2 AS node_id_2, "DT_node_data_265".nid AS nid, "DT_node_data_265"."Score" AS "Score"
FROM "DT_node_lookup_265" LEFT OUTER JOIN "DT_node_data_265" ON "DT_node_lookup_265".node_id_2 = "DT_node_data_265".nid),
"XGB_Model_1_88" AS
(SELECT "DT_Output_265"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_265"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_265"),
"DT_node_lookup_266" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_266" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00849635247 AS "Score" UNION ALL SELECT 2 AS nid, 0.0147949206 AS "Score") AS "Values"),
"DT_Output_266" AS
(SELECT "DT_node_lookup_266"."KEY" AS "KEY", "DT_node_lookup_266".node_id_2 AS node_id_2, "DT_node_data_266".nid AS nid, "DT_node_data_266"."Score" AS "Score"
FROM "DT_node_lookup_266" LEFT OUTER JOIN "DT_node_data_266" ON "DT_node_lookup_266".node_id_2 = "DT_node_data_266".nid),
"XGB_Model_2_88" AS
(SELECT "DT_Output_266"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_266"."Score" AS "Score_virginica"
FROM "DT_Output_266"),
"DT_node_lookup_267" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_267" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_267" AS
(SELECT "DT_node_lookup_267"."KEY" AS "KEY", "DT_node_lookup_267".node_id_2 AS node_id_2, "DT_node_data_267".nid AS nid, "DT_node_data_267"."Score" AS "Score"
FROM "DT_node_lookup_267" LEFT OUTER JOIN "DT_node_data_267" ON "DT_node_lookup_267".node_id_2 = "DT_node_data_267".nid),
"XGB_Model_0_89" AS
(SELECT "DT_Output_267"."KEY" AS "KEY", "DT_Output_267"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_267"),
"DT_node_lookup_268" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_268" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0110185109 AS "Score" UNION ALL SELECT 3 AS nid, -0.000907897891 AS "Score" UNION ALL SELECT 4 AS nid, 0.0165526457 AS "Score") AS "Values"),
"DT_Output_268" AS
(SELECT "DT_node_lookup_268"."KEY" AS "KEY", "DT_node_lookup_268".node_id_2 AS node_id_2, "DT_node_data_268".nid AS nid, "DT_node_data_268"."Score" AS "Score"
FROM "DT_node_lookup_268" LEFT OUTER JOIN "DT_node_data_268" ON "DT_node_lookup_268".node_id_2 = "DT_node_data_268".nid),
"XGB_Model_1_89" AS
(SELECT "DT_Output_268"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_268"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_268"),
"DT_node_lookup_269" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_269" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0156648047 AS "Score" UNION ALL SELECT 2 AS nid, -0.011341841 AS "Score") AS "Values"),
"DT_Output_269" AS
(SELECT "DT_node_lookup_269"."KEY" AS "KEY", "DT_node_lookup_269".node_id_2 AS node_id_2, "DT_node_data_269".nid AS nid, "DT_node_data_269"."Score" AS "Score"
FROM "DT_node_lookup_269" LEFT OUTER JOIN "DT_node_data_269" ON "DT_node_lookup_269".node_id_2 = "DT_node_data_269".nid),
"XGB_Model_2_89" AS
(SELECT "DT_Output_269"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_269"."Score" AS "Score_virginica"
FROM "DT_Output_269")
SELECT "XGB_esu_26"."KEY", "XGB_esu_26"."Score_setosa", "XGB_esu_26"."Score_versicolor", "XGB_esu_26"."Score_virginica"
FROM (SELECT "XGB_Model_2_86"."KEY" AS "KEY", CAST("XGB_Model_2_86"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_86"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_86"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_86" UNION ALL SELECT "XGB_Model_0_87"."KEY" AS "KEY", CAST("XGB_Model_0_87"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_87"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_87"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_87" UNION ALL SELECT "XGB_Model_1_87"."KEY" AS "KEY", CAST("XGB_Model_1_87"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_87"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_87"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_87" UNION ALL SELECT "XGB_Model_2_87"."KEY" AS "KEY", CAST("XGB_Model_2_87"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_87"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_87"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_87" UNION ALL SELECT "XGB_Model_0_88"."KEY" AS "KEY", CAST("XGB_Model_0_88"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_88"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_88"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_88" UNION ALL SELECT "XGB_Model_1_88"."KEY" AS "KEY", CAST("XGB_Model_1_88"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_88"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_88"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_88" UNION ALL SELECT "XGB_Model_2_88"."KEY" AS "KEY", CAST("XGB_Model_2_88"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_88"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_88"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_88" UNION ALL SELECT "XGB_Model_0_89"."KEY" AS "KEY", CAST("XGB_Model_0_89"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_89"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_89"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_89" UNION ALL SELECT "XGB_Model_1_89"."KEY" AS "KEY", CAST("XGB_Model_1_89"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_89"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_89"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_89" UNION ALL SELECT "XGB_Model_2_89"."KEY" AS "KEY", CAST("XGB_Model_2_89"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_89"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_89"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_89") AS "XGB_esu_26"),
"XGB_27" AS
(WITH "DT_node_lookup_270" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_270" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_270" AS
(SELECT "DT_node_lookup_270"."KEY" AS "KEY", "DT_node_lookup_270".node_id_2 AS node_id_2, "DT_node_data_270".nid AS nid, "DT_node_data_270"."Score" AS "Score"
FROM "DT_node_lookup_270" LEFT OUTER JOIN "DT_node_data_270" ON "DT_node_lookup_270".node_id_2 = "DT_node_data_270".nid),
"XGB_Model_0_90" AS
(SELECT "DT_Output_270"."KEY" AS "KEY", "DT_Output_270"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_270"),
"DT_node_lookup_271" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_271" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0141708553 AS "Score" UNION ALL SELECT 2 AS nid, -0.00756217027 AS "Score") AS "Values"),
"DT_Output_271" AS
(SELECT "DT_node_lookup_271"."KEY" AS "KEY", "DT_node_lookup_271".node_id_2 AS node_id_2, "DT_node_data_271".nid AS nid, "DT_node_data_271"."Score" AS "Score"
FROM "DT_node_lookup_271" LEFT OUTER JOIN "DT_node_data_271" ON "DT_node_lookup_271".node_id_2 = "DT_node_data_271".nid),
"XGB_Model_1_90" AS
(SELECT "DT_Output_271"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_271"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_271"),
"DT_node_lookup_272" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_272" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0138260145 AS "Score" UNION ALL SELECT 2 AS nid, -0.00910801068 AS "Score") AS "Values"),
"DT_Output_272" AS
(SELECT "DT_node_lookup_272"."KEY" AS "KEY", "DT_node_lookup_272".node_id_2 AS node_id_2, "DT_node_data_272".nid AS nid, "DT_node_data_272"."Score" AS "Score"
FROM "DT_node_lookup_272" LEFT OUTER JOIN "DT_node_data_272" ON "DT_node_lookup_272".node_id_2 = "DT_node_data_272".nid),
"XGB_Model_2_90" AS
(SELECT "DT_Output_272"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_272"."Score" AS "Score_virginica"
FROM "DT_Output_272"),
"DT_node_lookup_273" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_273" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_273" AS
(SELECT "DT_node_lookup_273"."KEY" AS "KEY", "DT_node_lookup_273".node_id_2 AS node_id_2, "DT_node_data_273".nid AS nid, "DT_node_data_273"."Score" AS "Score"
FROM "DT_node_lookup_273" LEFT OUTER JOIN "DT_node_data_273" ON "DT_node_lookup_273".node_id_2 = "DT_node_data_273".nid),
"XGB_Model_0_91" AS
(SELECT "DT_Output_273"."KEY" AS "KEY", "DT_Output_273"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_273"),
"DT_node_lookup_274" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_274" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0139668686 AS "Score" UNION ALL SELECT 2 AS nid, -0.00715820445 AS "Score") AS "Values"),
"DT_Output_274" AS
(SELECT "DT_node_lookup_274"."KEY" AS "KEY", "DT_node_lookup_274".node_id_2 AS node_id_2, "DT_node_data_274".nid AS nid, "DT_node_data_274"."Score" AS "Score"
FROM "DT_node_lookup_274" LEFT OUTER JOIN "DT_node_data_274" ON "DT_node_lookup_274".node_id_2 = "DT_node_data_274".nid),
"XGB_Model_1_91" AS
(SELECT "DT_Output_274"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_274"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_274"),
"DT_node_lookup_275" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_275" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00799066201 AS "Score" UNION ALL SELECT 2 AS nid, 0.014356846 AS "Score") AS "Values"),
"DT_Output_275" AS
(SELECT "DT_node_lookup_275"."KEY" AS "KEY", "DT_node_lookup_275".node_id_2 AS node_id_2, "DT_node_data_275".nid AS nid, "DT_node_data_275"."Score" AS "Score"
FROM "DT_node_lookup_275" LEFT OUTER JOIN "DT_node_data_275" ON "DT_node_lookup_275".node_id_2 = "DT_node_data_275".nid),
"XGB_Model_2_91" AS
(SELECT "DT_Output_275"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_275"."Score" AS "Score_virginica"
FROM "DT_Output_275"),
"DT_node_lookup_276" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_276" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_276" AS
(SELECT "DT_node_lookup_276"."KEY" AS "KEY", "DT_node_lookup_276".node_id_2 AS node_id_2, "DT_node_data_276".nid AS nid, "DT_node_data_276"."Score" AS "Score"
FROM "DT_node_lookup_276" LEFT OUTER JOIN "DT_node_data_276" ON "DT_node_lookup_276".node_id_2 = "DT_node_data_276".nid),
"XGB_Model_0_92" AS
(SELECT "DT_Output_276"."KEY" AS "KEY", "DT_Output_276"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_276"),
"DT_node_lookup_277" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_277" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00980856642 AS "Score" UNION ALL SELECT 3 AS nid, -0.0012947229 AS "Score" UNION ALL SELECT 4 AS nid, 0.0181092639 AS "Score") AS "Values"),
"DT_Output_277" AS
(SELECT "DT_node_lookup_277"."KEY" AS "KEY", "DT_node_lookup_277".node_id_2 AS node_id_2, "DT_node_data_277".nid AS nid, "DT_node_data_277"."Score" AS "Score"
FROM "DT_node_lookup_277" LEFT OUTER JOIN "DT_node_data_277" ON "DT_node_lookup_277".node_id_2 = "DT_node_data_277".nid),
"XGB_Model_1_92" AS
(SELECT "DT_Output_277"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_277"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_277"),
"DT_node_lookup_278" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_278" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0109551791 AS "Score" UNION ALL SELECT 2 AS nid, 0.0140367756 AS "Score") AS "Values"),
"DT_Output_278" AS
(SELECT "DT_node_lookup_278"."KEY" AS "KEY", "DT_node_lookup_278".node_id_2 AS node_id_2, "DT_node_data_278".nid AS nid, "DT_node_data_278"."Score" AS "Score"
FROM "DT_node_lookup_278" LEFT OUTER JOIN "DT_node_data_278" ON "DT_node_lookup_278".node_id_2 = "DT_node_data_278".nid),
"XGB_Model_2_92" AS
(SELECT "DT_Output_278"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_278"."Score" AS "Score_virginica"
FROM "DT_Output_278"),
"DT_node_lookup_279" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_279" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_279" AS
(SELECT "DT_node_lookup_279"."KEY" AS "KEY", "DT_node_lookup_279".node_id_2 AS node_id_2, "DT_node_data_279".nid AS nid, "DT_node_data_279"."Score" AS "Score"
FROM "DT_node_lookup_279" LEFT OUTER JOIN "DT_node_data_279" ON "DT_node_lookup_279".node_id_2 = "DT_node_data_279".nid),
"XGB_Model_0_93" AS
(SELECT "DT_Output_279"."KEY" AS "KEY", "DT_Output_279"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_279")
SELECT "XGB_esu_27"."KEY", "XGB_esu_27"."Score_setosa", "XGB_esu_27"."Score_versicolor", "XGB_esu_27"."Score_virginica"
FROM (SELECT "XGB_Model_0_90"."KEY" AS "KEY", CAST("XGB_Model_0_90"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_90"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_90"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_90" UNION ALL SELECT "XGB_Model_1_90"."KEY" AS "KEY", CAST("XGB_Model_1_90"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_90"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_90"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_90" UNION ALL SELECT "XGB_Model_2_90"."KEY" AS "KEY", CAST("XGB_Model_2_90"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_90"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_90"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_90" UNION ALL SELECT "XGB_Model_0_91"."KEY" AS "KEY", CAST("XGB_Model_0_91"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_91"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_91"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_91" UNION ALL SELECT "XGB_Model_1_91"."KEY" AS "KEY", CAST("XGB_Model_1_91"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_91"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_91"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_91" UNION ALL SELECT "XGB_Model_2_91"."KEY" AS "KEY", CAST("XGB_Model_2_91"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_91"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_91"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_91" UNION ALL SELECT "XGB_Model_0_92"."KEY" AS "KEY", CAST("XGB_Model_0_92"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_92"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_92"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_92" UNION ALL SELECT "XGB_Model_1_92"."KEY" AS "KEY", CAST("XGB_Model_1_92"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_92"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_92"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_92" UNION ALL SELECT "XGB_Model_2_92"."KEY" AS "KEY", CAST("XGB_Model_2_92"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_92"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_92"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_92" UNION ALL SELECT "XGB_Model_0_93"."KEY" AS "KEY", CAST("XGB_Model_0_93"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_93"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_93"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_93") AS "XGB_esu_27"),
"XGB_28" AS
(WITH "DT_node_lookup_280" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_280" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00932478253 AS "Score" UNION ALL SELECT 3 AS nid, -0.000291876553 AS "Score" UNION ALL SELECT 4 AS nid, 0.0167653803 AS "Score") AS "Values"),
"DT_Output_280" AS
(SELECT "DT_node_lookup_280"."KEY" AS "KEY", "DT_node_lookup_280".node_id_2 AS node_id_2, "DT_node_data_280".nid AS nid, "DT_node_data_280"."Score" AS "Score"
FROM "DT_node_lookup_280" LEFT OUTER JOIN "DT_node_data_280" ON "DT_node_lookup_280".node_id_2 = "DT_node_data_280".nid),
"XGB_Model_1_93" AS
(SELECT "DT_Output_280"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_280"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_280"),
"DT_node_lookup_281" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_2" < 4.85000038) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_281" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00970525946 AS "Score" UNION ALL SELECT 2 AS nid, 0.0130950063 AS "Score") AS "Values"),
"DT_Output_281" AS
(SELECT "DT_node_lookup_281"."KEY" AS "KEY", "DT_node_lookup_281".node_id_2 AS node_id_2, "DT_node_data_281".nid AS nid, "DT_node_data_281"."Score" AS "Score"
FROM "DT_node_lookup_281" LEFT OUTER JOIN "DT_node_data_281" ON "DT_node_lookup_281".node_id_2 = "DT_node_data_281".nid),
"XGB_Model_2_93" AS
(SELECT "DT_Output_281"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_281"."Score" AS "Score_virginica"
FROM "DT_Output_281"),
"DT_node_lookup_282" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_282" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_282" AS
(SELECT "DT_node_lookup_282"."KEY" AS "KEY", "DT_node_lookup_282".node_id_2 AS node_id_2, "DT_node_data_282".nid AS nid, "DT_node_data_282"."Score" AS "Score"
FROM "DT_node_lookup_282" LEFT OUTER JOIN "DT_node_data_282" ON "DT_node_lookup_282".node_id_2 = "DT_node_data_282".nid),
"XGB_Model_0_94" AS
(SELECT "DT_Output_282"."KEY" AS "KEY", "DT_Output_282"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_282"),
"DT_node_lookup_283" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_283" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0140290335 AS "Score" UNION ALL SELECT 3 AS nid, -0.00844134856 AS "Score" UNION ALL SELECT 4 AS nid, -0.00172872248 AS "Score") AS "Values"),
"DT_Output_283" AS
(SELECT "DT_node_lookup_283"."KEY" AS "KEY", "DT_node_lookup_283".node_id_2 AS node_id_2, "DT_node_data_283".nid AS nid, "DT_node_data_283"."Score" AS "Score"
FROM "DT_node_lookup_283" LEFT OUTER JOIN "DT_node_data_283" ON "DT_node_lookup_283".node_id_2 = "DT_node_data_283".nid),
"XGB_Model_1_94" AS
(SELECT "DT_Output_283"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_283"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_283"),
"DT_node_lookup_284" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_284" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00842211023 AS "Score" UNION ALL SELECT 2 AS nid, 0.0147860935 AS "Score") AS "Values"),
"DT_Output_284" AS
(SELECT "DT_node_lookup_284"."KEY" AS "KEY", "DT_node_lookup_284".node_id_2 AS node_id_2, "DT_node_data_284".nid AS nid, "DT_node_data_284"."Score" AS "Score"
FROM "DT_node_lookup_284" LEFT OUTER JOIN "DT_node_data_284" ON "DT_node_lookup_284".node_id_2 = "DT_node_data_284".nid),
"XGB_Model_2_94" AS
(SELECT "DT_Output_284"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_284"."Score" AS "Score_virginica"
FROM "DT_Output_284"),
"DT_node_lookup_285" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_285" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_285" AS
(SELECT "DT_node_lookup_285"."KEY" AS "KEY", "DT_node_lookup_285".node_id_2 AS node_id_2, "DT_node_data_285".nid AS nid, "DT_node_data_285"."Score" AS "Score"
FROM "DT_node_lookup_285" LEFT OUTER JOIN "DT_node_data_285" ON "DT_node_lookup_285".node_id_2 = "DT_node_data_285".nid),
"XGB_Model_0_95" AS
(SELECT "DT_Output_285"."KEY" AS "KEY", "DT_Output_285"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_285"),
"DT_node_lookup_286" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_0" < 5.85000038) THEN 1 ELSE CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_286" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00908977073 AS "Score" UNION ALL SELECT 3 AS nid, -0.00071469188 AS "Score" UNION ALL SELECT 4 AS nid, 0.0169941112 AS "Score") AS "Values"),
"DT_Output_286" AS
(SELECT "DT_node_lookup_286"."KEY" AS "KEY", "DT_node_lookup_286".node_id_2 AS node_id_2, "DT_node_data_286".nid AS nid, "DT_node_data_286"."Score" AS "Score"
FROM "DT_node_lookup_286" LEFT OUTER JOIN "DT_node_data_286" ON "DT_node_lookup_286".node_id_2 = "DT_node_data_286".nid),
"XGB_Model_1_95" AS
(SELECT "DT_Output_286"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_286"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_286"),
"DT_node_lookup_287" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_287" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0144375581 AS "Score" UNION ALL SELECT 2 AS nid, -0.0097343158 AS "Score") AS "Values"),
"DT_Output_287" AS
(SELECT "DT_node_lookup_287"."KEY" AS "KEY", "DT_node_lookup_287".node_id_2 AS node_id_2, "DT_node_data_287".nid AS nid, "DT_node_data_287"."Score" AS "Score"
FROM "DT_node_lookup_287" LEFT OUTER JOIN "DT_node_data_287" ON "DT_node_lookup_287".node_id_2 = "DT_node_data_287".nid),
"XGB_Model_2_95" AS
(SELECT "DT_Output_287"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_287"."Score" AS "Score_virginica"
FROM "DT_Output_287"),
"DT_node_lookup_288" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_288" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_288" AS
(SELECT "DT_node_lookup_288"."KEY" AS "KEY", "DT_node_lookup_288".node_id_2 AS node_id_2, "DT_node_data_288".nid AS nid, "DT_node_data_288"."Score" AS "Score"
FROM "DT_node_lookup_288" LEFT OUTER JOIN "DT_node_data_288" ON "DT_node_lookup_288".node_id_2 = "DT_node_data_288".nid),
"XGB_Model_0_96" AS
(SELECT "DT_Output_288"."KEY" AS "KEY", "DT_Output_288"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_288"),
"DT_node_lookup_289" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_289" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0140721863 AS "Score" UNION ALL SELECT 2 AS nid, -0.00673125172 AS "Score") AS "Values"),
"DT_Output_289" AS
(SELECT "DT_node_lookup_289"."KEY" AS "KEY", "DT_node_lookup_289".node_id_2 AS node_id_2, "DT_node_data_289".nid AS nid, "DT_node_data_289"."Score" AS "Score"
FROM "DT_node_lookup_289" LEFT OUTER JOIN "DT_node_data_289" ON "DT_node_lookup_289".node_id_2 = "DT_node_data_289".nid),
"XGB_Model_1_96" AS
(SELECT "DT_Output_289"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_289"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_289")
SELECT "XGB_esu_28"."KEY", "XGB_esu_28"."Score_setosa", "XGB_esu_28"."Score_versicolor", "XGB_esu_28"."Score_virginica"
FROM (SELECT "XGB_Model_1_93"."KEY" AS "KEY", CAST("XGB_Model_1_93"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_93"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_93"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_93" UNION ALL SELECT "XGB_Model_2_93"."KEY" AS "KEY", CAST("XGB_Model_2_93"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_93"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_93"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_93" UNION ALL SELECT "XGB_Model_0_94"."KEY" AS "KEY", CAST("XGB_Model_0_94"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_94"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_94"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_94" UNION ALL SELECT "XGB_Model_1_94"."KEY" AS "KEY", CAST("XGB_Model_1_94"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_94"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_94"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_94" UNION ALL SELECT "XGB_Model_2_94"."KEY" AS "KEY", CAST("XGB_Model_2_94"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_94"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_94"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_94" UNION ALL SELECT "XGB_Model_0_95"."KEY" AS "KEY", CAST("XGB_Model_0_95"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_95"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_95"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_95" UNION ALL SELECT "XGB_Model_1_95"."KEY" AS "KEY", CAST("XGB_Model_1_95"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_95"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_95"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_95" UNION ALL SELECT "XGB_Model_2_95"."KEY" AS "KEY", CAST("XGB_Model_2_95"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_95"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_95"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_95" UNION ALL SELECT "XGB_Model_0_96"."KEY" AS "KEY", CAST("XGB_Model_0_96"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_96"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_96"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_96" UNION ALL SELECT "XGB_Model_1_96"."KEY" AS "KEY", CAST("XGB_Model_1_96"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_96"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_96"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_96") AS "XGB_esu_28"),
"XGB_29" AS
(WITH "DT_node_lookup_290" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_290" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00789969973 AS "Score" UNION ALL SELECT 2 AS nid, 0.014570768 AS "Score") AS "Values"),
"DT_Output_290" AS
(SELECT "DT_node_lookup_290"."KEY" AS "KEY", "DT_node_lookup_290".node_id_2 AS node_id_2, "DT_node_data_290".nid AS nid, "DT_node_data_290"."Score" AS "Score"
FROM "DT_node_lookup_290" LEFT OUTER JOIN "DT_node_data_290" ON "DT_node_lookup_290".node_id_2 = "DT_node_data_290".nid),
"XGB_Model_2_96" AS
(SELECT "DT_Output_290"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_290"."Score" AS "Score_virginica"
FROM "DT_Output_290"),
"DT_node_lookup_291" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_291" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_291" AS
(SELECT "DT_node_lookup_291"."KEY" AS "KEY", "DT_node_lookup_291".node_id_2 AS node_id_2, "DT_node_data_291".nid AS nid, "DT_node_data_291"."Score" AS "Score"
FROM "DT_node_lookup_291" LEFT OUTER JOIN "DT_node_data_291" ON "DT_node_lookup_291".node_id_2 = "DT_node_data_291".nid),
"XGB_Model_0_97" AS
(SELECT "DT_Output_291"."KEY" AS "KEY", "DT_Output_291"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_291"),
"DT_node_lookup_292" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_0" < 6.05000019) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_292" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0112350043 AS "Score" UNION ALL SELECT 3 AS nid, 0.0201802086 AS "Score" UNION ALL SELECT 4 AS nid, -0.00480729435 AS "Score") AS "Values"),
"DT_Output_292" AS
(SELECT "DT_node_lookup_292"."KEY" AS "KEY", "DT_node_lookup_292".node_id_2 AS node_id_2, "DT_node_data_292".nid AS nid, "DT_node_data_292"."Score" AS "Score"
FROM "DT_node_lookup_292" LEFT OUTER JOIN "DT_node_data_292" ON "DT_node_lookup_292".node_id_2 = "DT_node_data_292".nid),
"XGB_Model_1_97" AS
(SELECT "DT_Output_292"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_292"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_292"),
"DT_node_lookup_293" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_293" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0139949881 AS "Score" UNION ALL SELECT 2 AS nid, -0.00915294793 AS "Score") AS "Values"),
"DT_Output_293" AS
(SELECT "DT_node_lookup_293"."KEY" AS "KEY", "DT_node_lookup_293".node_id_2 AS node_id_2, "DT_node_data_293".nid AS nid, "DT_node_data_293"."Score" AS "Score"
FROM "DT_node_lookup_293" LEFT OUTER JOIN "DT_node_data_293" ON "DT_node_lookup_293".node_id_2 = "DT_node_data_293".nid),
"XGB_Model_2_97" AS
(SELECT "DT_Output_293"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_293"."Score" AS "Score_virginica"
FROM "DT_Output_293"),
"DT_node_lookup_294" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_294" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_294" AS
(SELECT "DT_node_lookup_294"."KEY" AS "KEY", "DT_node_lookup_294".node_id_2 AS node_id_2, "DT_node_data_294".nid AS nid, "DT_node_data_294"."Score" AS "Score"
FROM "DT_node_lookup_294" LEFT OUTER JOIN "DT_node_data_294" ON "DT_node_lookup_294".node_id_2 = "DT_node_data_294".nid),
"XGB_Model_0_98" AS
(SELECT "DT_Output_294"."KEY" AS "KEY", "DT_Output_294"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_294"),
"DT_node_lookup_295" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.75) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_295" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0140472855 AS "Score" UNION ALL SELECT 2 AS nid, -0.00661657797 AS "Score") AS "Values"),
"DT_Output_295" AS
(SELECT "DT_node_lookup_295"."KEY" AS "KEY", "DT_node_lookup_295".node_id_2 AS node_id_2, "DT_node_data_295".nid AS nid, "DT_node_data_295"."Score" AS "Score"
FROM "DT_node_lookup_295" LEFT OUTER JOIN "DT_node_data_295" ON "DT_node_lookup_295".node_id_2 = "DT_node_data_295".nid),
"XGB_Model_1_98" AS
(SELECT "DT_Output_295"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_295"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_295"),
"DT_node_lookup_296" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_3" < 1.6500001) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_296" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.00748997414 AS "Score" UNION ALL SELECT 2 AS nid, 0.0137251941 AS "Score") AS "Values"),
"DT_Output_296" AS
(SELECT "DT_node_lookup_296"."KEY" AS "KEY", "DT_node_lookup_296".node_id_2 AS node_id_2, "DT_node_data_296".nid AS nid, "DT_node_data_296"."Score" AS "Score"
FROM "DT_node_lookup_296" LEFT OUTER JOIN "DT_node_data_296" ON "DT_node_lookup_296".node_id_2 = "DT_node_data_296".nid),
"XGB_Model_2_98" AS
(SELECT "DT_Output_296"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_296"."Score" AS "Score_virginica"
FROM "DT_Output_296"),
"DT_node_lookup_297" AS
(SELECT "ADS"."KEY" AS "KEY", 0 AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_297" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 0 AS nid, 0 AS "Score") AS "Values"),
"DT_Output_297" AS
(SELECT "DT_node_lookup_297"."KEY" AS "KEY", "DT_node_lookup_297".node_id_2 AS node_id_2, "DT_node_data_297".nid AS nid, "DT_node_data_297"."Score" AS "Score"
FROM "DT_node_lookup_297" LEFT OUTER JOIN "DT_node_data_297" ON "DT_node_lookup_297".node_id_2 = "DT_node_data_297".nid),
"XGB_Model_0_99" AS
(SELECT "DT_Output_297"."KEY" AS "KEY", "DT_Output_297"."Score" AS "Score_setosa", 0.0 AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_297"),
"DT_node_lookup_298" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.6500001) THEN 1 ELSE CASE WHEN ("ADS"."Feature_2" < 4.94999981) THEN 3 ELSE 4 END END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_298" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, -0.0106648374 AS "Score" UNION ALL SELECT 3 AS nid, -0.00187783444 AS "Score" UNION ALL SELECT 4 AS nid, 0.0195167251 AS "Score") AS "Values"),
"DT_Output_298" AS
(SELECT "DT_node_lookup_298"."KEY" AS "KEY", "DT_node_lookup_298".node_id_2 AS node_id_2, "DT_node_data_298".nid AS nid, "DT_node_data_298"."Score" AS "Score"
FROM "DT_node_lookup_298" LEFT OUTER JOIN "DT_node_data_298" ON "DT_node_lookup_298".node_id_2 = "DT_node_data_298".nid),
"XGB_Model_1_99" AS
(SELECT "DT_Output_298"."KEY" AS "KEY", 0.0 AS "Score_setosa", "DT_Output_298"."Score" AS "Score_versicolor", 0.0 AS "Score_virginica"
FROM "DT_Output_298"),
"DT_node_lookup_299" AS
(SELECT "ADS"."KEY" AS "KEY", CASE WHEN ("ADS"."Feature_1" < 2.8499999) THEN 1 ELSE 2 END AS node_id_2
FROM "INPUT_DATA" AS "ADS"),
"DT_node_data_299" AS
(SELECT "Values".nid AS nid, "Values"."Score" AS "Score"
FROM (SELECT 1 AS nid, 0.0136155514 AS "Score" UNION ALL SELECT 2 AS nid, -0.00905219559 AS "Score") AS "Values"),
"DT_Output_299" AS
(SELECT "DT_node_lookup_299"."KEY" AS "KEY", "DT_node_lookup_299".node_id_2 AS node_id_2, "DT_node_data_299".nid AS nid, "DT_node_data_299"."Score" AS "Score"
FROM "DT_node_lookup_299" LEFT OUTER JOIN "DT_node_data_299" ON "DT_node_lookup_299".node_id_2 = "DT_node_data_299".nid),
"XGB_Model_2_99" AS
(SELECT "DT_Output_299"."KEY" AS "KEY", 0.0 AS "Score_setosa", 0.0 AS "Score_versicolor", "DT_Output_299"."Score" AS "Score_virginica"
FROM "DT_Output_299")
SELECT "XGB_esu_29"."KEY", "XGB_esu_29"."Score_setosa", "XGB_esu_29"."Score_versicolor", "XGB_esu_29"."Score_virginica"
FROM (SELECT "XGB_Model_2_96"."KEY" AS "KEY", CAST("XGB_Model_2_96"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_96"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_96"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_96" UNION ALL SELECT "XGB_Model_0_97"."KEY" AS "KEY", CAST("XGB_Model_0_97"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_97"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_97"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_97" UNION ALL SELECT "XGB_Model_1_97"."KEY" AS "KEY", CAST("XGB_Model_1_97"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_97"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_97"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_97" UNION ALL SELECT "XGB_Model_2_97"."KEY" AS "KEY", CAST("XGB_Model_2_97"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_97"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_97"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_97" UNION ALL SELECT "XGB_Model_0_98"."KEY" AS "KEY", CAST("XGB_Model_0_98"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_98"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_98"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_98" UNION ALL SELECT "XGB_Model_1_98"."KEY" AS "KEY", CAST("XGB_Model_1_98"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_98"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_98"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_98" UNION ALL SELECT "XGB_Model_2_98"."KEY" AS "KEY", CAST("XGB_Model_2_98"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_98"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_98"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_98" UNION ALL SELECT "XGB_Model_0_99"."KEY" AS "KEY", CAST("XGB_Model_0_99"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_0_99"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_0_99"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_0_99" UNION ALL SELECT "XGB_Model_1_99"."KEY" AS "KEY", CAST("XGB_Model_1_99"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_1_99"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_1_99"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_1_99" UNION ALL SELECT "XGB_Model_2_99"."KEY" AS "KEY", CAST("XGB_Model_2_99"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_Model_2_99"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_Model_2_99"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_Model_2_99") AS "XGB_esu_29"),
"XGB" AS
(SELECT "XGBbig_esu"."KEY" AS "KEY", "XGBbig_esu"."Score_setosa" AS "Score_setosa", "XGBbig_esu"."Score_versicolor" AS "Score_versicolor", "XGBbig_esu"."Score_virginica" AS "Score_virginica"
FROM (SELECT "XGB_0"."KEY" AS "KEY", CAST("XGB_0"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_0"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_0"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_0" UNION ALL SELECT "XGB_1"."KEY" AS "KEY", CAST("XGB_1"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_1"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_1"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_1" UNION ALL SELECT "XGB_2"."KEY" AS "KEY", CAST("XGB_2"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_2"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_2"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_2" UNION ALL SELECT "XGB_3"."KEY" AS "KEY", CAST("XGB_3"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_3"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_3"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_3" UNION ALL SELECT "XGB_4"."KEY" AS "KEY", CAST("XGB_4"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_4"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_4"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_4" UNION ALL SELECT "XGB_5"."KEY" AS "KEY", CAST("XGB_5"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_5"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_5"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_5" UNION ALL SELECT "XGB_6"."KEY" AS "KEY", CAST("XGB_6"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_6"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_6"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_6" UNION ALL SELECT "XGB_7"."KEY" AS "KEY", CAST("XGB_7"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_7"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_7"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_7" UNION ALL SELECT "XGB_8"."KEY" AS "KEY", CAST("XGB_8"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_8"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_8"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_8" UNION ALL SELECT "XGB_9"."KEY" AS "KEY", CAST("XGB_9"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_9"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_9"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_9" UNION ALL SELECT "XGB_10"."KEY" AS "KEY", CAST("XGB_10"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_10"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_10"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_10" UNION ALL SELECT "XGB_11"."KEY" AS "KEY", CAST("XGB_11"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_11"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_11"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_11" UNION ALL SELECT "XGB_12"."KEY" AS "KEY", CAST("XGB_12"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_12"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_12"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_12" UNION ALL SELECT "XGB_13"."KEY" AS "KEY", CAST("XGB_13"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_13"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_13"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_13" UNION ALL SELECT "XGB_14"."KEY" AS "KEY", CAST("XGB_14"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_14"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_14"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_14" UNION ALL SELECT "XGB_15"."KEY" AS "KEY", CAST("XGB_15"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_15"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_15"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_15" UNION ALL SELECT "XGB_16"."KEY" AS "KEY", CAST("XGB_16"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_16"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_16"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_16" UNION ALL SELECT "XGB_17"."KEY" AS "KEY", CAST("XGB_17"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_17"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_17"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_17" UNION ALL SELECT "XGB_18"."KEY" AS "KEY", CAST("XGB_18"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_18"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_18"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_18" UNION ALL SELECT "XGB_19"."KEY" AS "KEY", CAST("XGB_19"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_19"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_19"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_19" UNION ALL SELECT "XGB_20"."KEY" AS "KEY", CAST("XGB_20"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_20"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_20"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_20" UNION ALL SELECT "XGB_21"."KEY" AS "KEY", CAST("XGB_21"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_21"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_21"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_21" UNION ALL SELECT "XGB_22"."KEY" AS "KEY", CAST("XGB_22"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_22"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_22"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_22" UNION ALL SELECT "XGB_23"."KEY" AS "KEY", CAST("XGB_23"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_23"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_23"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_23" UNION ALL SELECT "XGB_24"."KEY" AS "KEY", CAST("XGB_24"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_24"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_24"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_24" UNION ALL SELECT "XGB_25"."KEY" AS "KEY", CAST("XGB_25"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_25"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_25"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_25" UNION ALL SELECT "XGB_26"."KEY" AS "KEY", CAST("XGB_26"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_26"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_26"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_26" UNION ALL SELECT "XGB_27"."KEY" AS "KEY", CAST("XGB_27"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_27"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_27"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_27" UNION ALL SELECT "XGB_28"."KEY" AS "KEY", CAST("XGB_28"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_28"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_28"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_28" UNION ALL SELECT "XGB_29"."KEY" AS "KEY", CAST("XGB_29"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("XGB_29"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("XGB_29"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM "XGB_29") AS "XGBbig_esu"),
"XGB_sum" AS
(SELECT "T"."KEY" AS "KEY", CAST("T"."Score_setosa" AS FLOAT) AS "Score_setosa", CAST("T"."Score_versicolor" AS FLOAT) AS "Score_versicolor", CAST("T"."Score_virginica" AS FLOAT) AS "Score_virginica"
FROM (SELECT "XGB"."KEY" AS "KEY", sum("XGB"."Score_setosa") AS "Score_setosa", sum("XGB"."Score_versicolor") AS "Score_versicolor", sum("XGB"."Score_virginica") AS "Score_virginica"
FROM "XGB" GROUP BY "XGB"."KEY") AS "T"),
orig_cte AS
(SELECT "XGB_sum"."KEY" AS "KEY", "XGB_sum"."Score_setosa" AS "Score_setosa", "XGB_sum"."Score_versicolor" AS "Score_versicolor", "XGB_sum"."Score_virginica" AS "Score_virginica", CAST(NULL AS FLOAT) AS "Proba_setosa", CAST(NULL AS FLOAT) AS "Proba_versicolor", CAST(NULL AS FLOAT) AS "Proba_virginica", CAST(NULL AS FLOAT) AS "LogProba_setosa", CAST(NULL AS FLOAT) AS "LogProba_versicolor", CAST(NULL AS FLOAT) AS "LogProba_virginica", CAST(NULL AS BIGINT) AS "Decision", CAST(NULL AS FLOAT) AS "DecisionProba"
FROM "XGB_sum"),
score_class_union AS
(SELECT scu."KEY_u" AS "KEY_u", scu.class AS class, scu."LogProba" AS "LogProba", scu."Proba" AS "Proba", scu."Score" AS "Score"
FROM (SELECT orig_cte."KEY" AS "KEY_u", 'setosa' AS class, orig_cte."LogProba_setosa" AS "LogProba", orig_cte."Proba_setosa" AS "Proba", orig_cte."Score_setosa" AS "Score"
FROM orig_cte UNION ALL SELECT orig_cte."KEY" AS "KEY_u", 'versicolor' AS class, orig_cte."LogProba_versicolor" AS "LogProba", orig_cte."Proba_versicolor" AS "Proba", orig_cte."Score_versicolor" AS "Score"
FROM orig_cte UNION ALL SELECT orig_cte."KEY" AS "KEY_u", 'virginica' AS class, orig_cte."LogProba_virginica" AS "LogProba", orig_cte."Proba_virginica" AS "Proba", orig_cte."Score_virginica" AS "Score"
FROM orig_cte) AS scu),
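-- score_max: attaches each KEY's maximum class score (max_Score), used below to stabilise the softmax.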
score_max AS
(SELECT orig_cte."KEY" AS "KEY", orig_cte."Score_setosa" AS "Score_setosa", orig_cte."Score_versicolor" AS "Score_versicolor", orig_cte."Score_virginica" AS "Score_virginica", orig_cte."Proba_setosa" AS "Proba_setosa", orig_cte."Proba_versicolor" AS "Proba_versicolor", orig_cte."Proba_virginica" AS "Proba_virginica", orig_cte."LogProba_setosa" AS "LogProba_setosa", orig_cte."LogProba_versicolor" AS "LogProba_versicolor", orig_cte."LogProba_virginica" AS "LogProba_virginica", orig_cte."Decision" AS "Decision", orig_cte."DecisionProba" AS "DecisionProba", max_select."KEY_m" AS "KEY_m", max_select."max_Score" AS "max_Score"
FROM orig_cte LEFT OUTER JOIN (SELECT score_class_union."KEY_u" AS "KEY_m", max(score_class_union."Score") AS "max_Score"
FROM score_class_union GROUP BY score_class_union."KEY_u") AS max_select ON orig_cte."KEY" = max_select."KEY_m"),
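-- score_soft_max_deltas: exp(Score_class - max_Score) per class, with the exponent floored at -100 via greatest() to avoid underflow.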
score_soft_max_deltas AS
(SELECT score_max."KEY" AS "KEY", score_max."Score_setosa" AS "Score_setosa", score_max."Score_versicolor" AS "Score_versicolor", score_max."Score_virginica" AS "Score_virginica", score_max."Proba_setosa" AS "Proba_setosa", score_max."Proba_versicolor" AS "Proba_versicolor", score_max."Proba_virginica" AS "Proba_virginica", score_max."LogProba_setosa" AS "LogProba_setosa", score_max."LogProba_versicolor" AS "LogProba_versicolor", score_max."LogProba_virginica" AS "LogProba_virginica", score_max."Decision" AS "Decision", score_max."DecisionProba" AS "DecisionProba", score_max."KEY_m" AS "KEY_m", score_max."max_Score" AS "max_Score", exp(greatest(-100.0, score_max."Score_setosa" - score_max."max_Score")) AS "exp_delta_Score_setosa", exp(greatest(-100.0, score_max."Score_versicolor" - score_max."max_Score")) AS "exp_delta_Score_versicolor", exp(greatest(-100.0, score_max."Score_virginica" - score_max."max_Score")) AS "exp_delta_Score_virginica"
FROM score_max),
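-- score_class_union_soft: unpivots the three exp-delta columns into rows so their per-KEY sum can be taken.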
score_class_union_soft AS
(SELECT soft_scu."KEY" AS "KEY", soft_scu.class AS class, soft_scu."exp_delta_Score" AS "exp_delta_Score"
FROM (SELECT score_soft_max_deltas."KEY" AS "KEY", 'setosa' AS class, score_soft_max_deltas."exp_delta_Score_setosa" AS "exp_delta_Score"
FROM score_soft_max_deltas UNION ALL SELECT score_soft_max_deltas."KEY" AS "KEY", 'versicolor' AS class, score_soft_max_deltas."exp_delta_Score_versicolor" AS "exp_delta_Score"
FROM score_soft_max_deltas UNION ALL SELECT score_soft_max_deltas."KEY" AS "KEY", 'virginica' AS class, score_soft_max_deltas."exp_delta_Score_virginica" AS "exp_delta_Score"
FROM score_soft_max_deltas) AS soft_scu),
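-- score_soft_max: joins the per-KEY sum of exp-deltas (sum_ExpDeltaScore, the softmax denominator) back onto the wide row.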
score_soft_max AS
(SELECT score_soft_max_deltas."KEY" AS "KEY", score_soft_max_deltas."Score_setosa" AS "Score_setosa", score_soft_max_deltas."Score_versicolor" AS "Score_versicolor", score_soft_max_deltas."Score_virginica" AS "Score_virginica", score_soft_max_deltas."Proba_setosa" AS "Proba_setosa", score_soft_max_deltas."Proba_versicolor" AS "Proba_versicolor", score_soft_max_deltas."Proba_virginica" AS "Proba_virginica", score_soft_max_deltas."LogProba_setosa" AS "LogProba_setosa", score_soft_max_deltas."LogProba_versicolor" AS "LogProba_versicolor", score_soft_max_deltas."LogProba_virginica" AS "LogProba_virginica", score_soft_max_deltas."Decision" AS "Decision", score_soft_max_deltas."DecisionProba" AS "DecisionProba", score_soft_max_deltas."KEY_m" AS "KEY_m", score_soft_max_deltas."max_Score" AS "max_Score", score_soft_max_deltas."exp_delta_Score_setosa" AS "exp_delta_Score_setosa", score_soft_max_deltas."exp_delta_Score_versicolor" AS "exp_delta_Score_versicolor", score_soft_max_deltas."exp_delta_Score_virginica" AS "exp_delta_Score_virginica", sum_exp_t."KEY_sum" AS "KEY_sum", sum_exp_t."sum_ExpDeltaScore" AS "sum_ExpDeltaScore"
FROM score_soft_max_deltas LEFT OUTER JOIN (SELECT score_class_union_soft."KEY" AS "KEY_sum", sum(score_class_union_soft."exp_delta_Score") AS "sum_ExpDeltaScore"
FROM score_class_union_soft GROUP BY score_class_union_soft."KEY") AS sum_exp_t ON score_soft_max_deltas."KEY" = sum_exp_t."KEY_sum"),
union_with_max AS
(SELECT score_class_union."KEY_u" AS "KEY_u", score_class_union.class AS class, score_class_union."LogProba" AS "LogProba", score_class_union."Proba" AS "Proba", score_class_union."Score" AS "Score", score_soft_max."KEY" AS "KEY", score_soft_max."Score_setosa" AS "Score_setosa", score_soft_max."Score_versicolor" AS "Score_versicolor", score_soft_max."Score_virginica" AS "Score_virginica", score_soft_max."Proba_setosa" AS "Proba_setosa", score_soft_max."Proba_versicolor" AS "Proba_versicolor", score_soft_max."Proba_virginica" AS "Proba_virginica", score_soft_max."LogProba_setosa" AS "LogProba_setosa", score_soft_max."LogProba_versicolor" AS "LogProba_versicolor", score_soft_max."LogProba_virginica" AS "LogProba_virginica", score_soft_max."Decision" AS "Decision", score_soft_max."DecisionProba" AS "DecisionProba", score_soft_max."KEY_m" AS "KEY_m", score_soft_max."max_Score" AS "max_Score", score_soft_max."exp_delta_Score_setosa" AS "exp_delta_Score_setosa", score_soft_max."exp_delta_Score_versicolor" AS "exp_delta_Score_versicolor", score_soft_max."exp_delta_Score_virginica" AS "exp_delta_Score_virginica", score_soft_max."KEY_sum" AS "KEY_sum", score_soft_max."sum_ExpDeltaScore" AS "sum_ExpDeltaScore"
FROM score_class_union LEFT OUTER JOIN score_soft_max ON score_class_union."KEY_u" = score_soft_max."KEY"),
arg_max_cte AS
(SELECT score_soft_max."KEY" AS "KEY", score_soft_max."Score_setosa" AS "Score_setosa", score_soft_max."Score_versicolor" AS "Score_versicolor", score_soft_max."Score_virginica" AS "Score_virginica", score_soft_max."Proba_setosa" AS "Proba_setosa", score_soft_max."Proba_versicolor" AS "Proba_versicolor", score_soft_max."Proba_virginica" AS "Proba_virginica", score_soft_max."LogProba_setosa" AS "LogProba_setosa", score_soft_max."LogProba_versicolor" AS "LogProba_versicolor", score_soft_max."LogProba_virginica" AS "LogProba_virginica", score_soft_max."Decision" AS "Decision", score_soft_max."DecisionProba" AS "DecisionProba", score_soft_max."KEY_m" AS "KEY_m", score_soft_max."max_Score" AS "max_Score", score_soft_max."exp_delta_Score_setosa" AS "exp_delta_Score_setosa", score_soft_max."exp_delta_Score_versicolor" AS "exp_delta_Score_versicolor", score_soft_max."exp_delta_Score_virginica" AS "exp_delta_Score_virginica", score_soft_max."KEY_sum" AS "KEY_sum", score_soft_max."sum_ExpDeltaScore" AS "sum_ExpDeltaScore", "arg_max_t_Score"."KEY_Score" AS "KEY_Score", "arg_max_t_Score"."arg_max_Score" AS "arg_max_Score", soft_max_comp."KEY_softmax" AS "KEY_softmax", soft_max_comp."SoftProba_setosa" AS "SoftProba_setosa", soft_max_comp."SoftProba_versicolor" AS "SoftProba_versicolor", soft_max_comp."SoftProba_virginica" AS "SoftProba_virginica"
FROM score_soft_max LEFT OUTER JOIN (SELECT union_with_max."KEY" AS "KEY_Score", min(union_with_max.class) AS "arg_max_Score"
FROM union_with_max
WHERE union_with_max."max_Score" <= union_with_max."Score" GROUP BY union_with_max."KEY") AS "arg_max_t_Score" ON score_soft_max."KEY" = "arg_max_t_Score"."KEY_Score" LEFT OUTER JOIN (SELECT score_soft_max."KEY" AS "KEY_softmax", score_soft_max."exp_delta_Score_setosa" / score_soft_max."sum_ExpDeltaScore" AS "SoftProba_setosa", score_soft_max."exp_delta_Score_versicolor" / score_soft_max."sum_ExpDeltaScore" AS "SoftProba_versicolor", score_soft_max."exp_delta_Score_virginica" / score_soft_max."sum_ExpDeltaScore" AS "SoftProba_virginica"
FROM score_soft_max) AS soft_max_comp ON soft_max_comp."KEY_softmax" = "arg_max_t_Score"."KEY_Score")
SELECT arg_max_cte."KEY" AS "KEY", CAST(NULL AS FLOAT) AS "Score_setosa", CAST(NULL AS FLOAT) AS "Score_versicolor", CAST(NULL AS FLOAT) AS "Score_virginica", arg_max_cte."SoftProba_setosa" AS "Proba_setosa", arg_max_cte."SoftProba_versicolor" AS "Proba_versicolor", arg_max_cte."SoftProba_virginica" AS "Proba_virginica", CASE WHEN (arg_max_cte."SoftProba_setosa" IS NULL OR arg_max_cte."SoftProba_setosa" > 0.0) THEN ln(arg_max_cte."SoftProba_setosa") ELSE -1.79769313486231e+308 END AS "LogProba_setosa", CASE WHEN (arg_max_cte."SoftProba_versicolor" IS NULL OR arg_max_cte."SoftProba_versicolor" > 0.0) THEN ln(arg_max_cte."SoftProba_versicolor") ELSE -1.79769313486231e+308 END AS "LogProba_versicolor", CASE WHEN (arg_max_cte."SoftProba_virginica" IS NULL OR arg_max_cte."SoftProba_virginica" > 0.0) THEN ln(arg_max_cte."SoftProba_virginica") ELSE -1.79769313486231e+308 END AS "LogProba_virginica", arg_max_cte."arg_max_Score" AS "Decision", greatest(arg_max_cte."SoftProba_setosa", arg_max_cte."SoftProba_versicolor", arg_max_cte."SoftProba_virginica") AS "DecisionProba"
FROM arg_max_cte
###Markdown
Execute the SQL Code
###Code
library(RODBC)
conn = odbcConnect("pgsql", uid="db", pwd="db", case="nochange")
odbcSetAutoCommit(conn , autoCommit = TRUE)
df_sql = as.data.frame(iris[,-5])
names(df_sql) = sprintf("Feature_%d",0:(ncol(df_sql)-1))
df_sql$KEY = seq.int(nrow(iris))
sqlDrop(conn , "INPUT_DATA" , errors = FALSE)
sqlSave(conn, df_sql, tablename = "INPUT_DATA", verbose = FALSE)
head(df_sql)
# colnames(df_sql)
# odbcGetInfo(conn)
# sqlTables(conn)
df_sql_out = sqlQuery(conn, lModelSQL)
head(df_sql_out[order(df_sql_out$KEY),])
# colnames(df1)
###Output
_____no_output_____
###Markdown
R XGBoost Output
###Code
pred_proba = predict(model, as.matrix(iris[,-5]), type = "prob")
df_r_out = data.frame(pred_proba)
names(df_r_out) = sprintf("Proba_%s",model$levels)
df_r_out$KEY = seq.int(nrow(iris))
df_r_out$Score_setosa = NA
df_r_out$Score_versicolor = NA
df_r_out$Score_virginica = NA
df_r_out$LogProba_setosa = log(df_r_out$Proba_setosa)
df_r_out$LogProba_versicolor = log(df_r_out$Proba_versicolor)
df_r_out$LogProba_virginica = log(df_r_out$Proba_virginica)
df_r_out$Decision = predict(model, as.matrix(iris[,-5]), type = "raw")
df_r_out$DecisionProba = apply(pred_proba, 1, function(x) max(x))
head(df_r_out)
###Output
_____no_output_____
###Markdown
Compare R and SQL output
###Code
df_merge = merge(x = df_r_out, y = df_sql_out, by = "KEY", all = TRUE, suffixes = c("_1","_2"))
head(df_merge)
diffs_df = df_merge[df_merge$Decision_1 != df_merge$Decision_2,]
head(diffs_df)
stopifnot(nrow(diffs_df) == 0)
summary(df_sql_out)
summary(df_r_out)
###Output
_____no_output_____ |
.ipynb_checkpoints/Absenteeism_at_work_DataSet-checkpoint.ipynb | ###Markdown
Absenteeism at work Data Set
Data Set Information: The data set allows for several new combinations of attributes and attribute exclusions, or the modification of the attribute type (categorical, integer, or real) depending on the purpose of the research. The data set (Absenteeism at work - Part I) was used in academic research at the Universidade Nove de Julho - Postgraduate Program in Informatics and Knowledge Management.
Attribute Information:
1. Individual identification (ID)
2. Reason for absence (ICD). Absences attested by the International Code of Diseases (ICD) stratified into 21 categories (I to XXI) as follows: I Certain infectious and parasitic diseases; II Neoplasms; III Diseases of the blood and blood-forming organs and certain disorders involving the immune mechanism; IV Endocrine, nutritional and metabolic diseases; V Mental and behavioural disorders; VI Diseases of the nervous system; VII Diseases of the eye and adnexa; VIII Diseases of the ear and mastoid process; IX Diseases of the circulatory system; X Diseases of the respiratory system; XI Diseases of the digestive system; XII Diseases of the skin and subcutaneous tissue; XIII Diseases of the musculoskeletal system and connective tissue; XIV Diseases of the genitourinary system; XV Pregnancy, childbirth and the puerperium; XVI Certain conditions originating in the perinatal period; XVII Congenital malformations, deformations and chromosomal abnormalities; XVIII Symptoms, signs and abnormal clinical and laboratory findings, not elsewhere classified; XIX Injury, poisoning and certain other consequences of external causes; XX External causes of morbidity and mortality; XXI Factors influencing health status and contact with health services. And 7 categories without ICD: patient follow-up (22), medical consultation (23), blood donation (24), laboratory examination (25), unjustified absence (26), physiotherapy (27), dental consultation (28).
3. Month of absence
4. Day of the week (Monday (2), Tuesday (3), Wednesday (4), Thursday (5), Friday (6))
5. Seasons (summer (1), autumn (2), winter (3), spring (4))
6. Transportation expense
7. Distance from Residence to Work (kilometers)
8. Service time
9. Age
10. Work load Average/day
11. Hit target
12. Disciplinary failure (yes=1; no=0)
13. Education (high school (1), graduate (2), postgraduate (3), master and doctor (4))
14. Son (number of children)
15. Social drinker (yes=1; no=0)
16. Social smoker (yes=1; no=0)
17. Pet (number of pets)
18. Weight
19. Height
20. Body mass index
21. Absenteeism time in hours (target)
Reading the data
###Code
import pandas as pd
df = pd.read_csv('Absenteeism_at_work.csv', sep = ';')
df.tail()
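# A quick exploratory sketch: decode a few of the integer-coded attributes
# described above into labels. The column names "Day of the week", "Seasons"
# and "Absenteeism time in hours" are assumed to match the header of the UCI CSV.
day_map = {2: "Monday", 3: "Tuesday", 4: "Wednesday", 5: "Thursday", 6: "Friday"}
season_map = {1: "summer", 2: "autumn", 3: "winter", 4: "spring"}
decoded = df.copy()
decoded["Day_name"] = decoded["Day of the week"].map(day_map)
decoded["Season_name"] = decoded["Seasons"].map(season_map)
decoded[["Day of the week", "Day_name", "Seasons", "Season_name", "Absenteeism time in hours"]].head()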
###Output
_____no_output_____ |
docs/source/example_notebooks/dowhy_confounder_example.ipynb | ###Markdown
Confounding Example: Estimating causal effects from observational data. Suppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
###Output
_____no_output_____
###Markdown
The dataset. Let's create a mystery dataset for which we need to determine whether there is a causal effect. It is generated from one of two models: * **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause. In other words, treatment and outcome are correlated under both models, but the source of that correlation differs.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0 # whether there is a causal effect; it is either 0 or 1
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar, sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
rvar
# data_dict.keys()
# data_dict['gml_graph'], data_dict['ate'], data_dict['common_causes_names'], data_dict['time_val']
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
WARNING:matplotlib.legend:No handles with labels found to put in legend.
###Markdown
Does Treatment cause Outcome? Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* For these observational data, the steps below accurately estimate the causal effect in the simulated data. STEP 1: Model the problem as a causal graph. Initializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"],
proceed_when_unidentifiable=True)
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
INFO:dowhy.causal_graph:If this is observed data (not from a randomized experiment), there might always be missing confounders. Adding a node named "Unobserved Confounders" to reflect this.
INFO:dowhy.causal_model:Model to find the causal effect of treatment ['Treatment'] on outcome ['Outcome']
###Markdown
Showing the causal graph stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graph. Identify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect()
print(identified_estimand)
###Output
INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['w0', 'U']
WARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.
INFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.
INFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]
###Markdown
STEP 3: Estimate the causal effect. Once we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use linear regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
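# A minimal cross-check sketch of what "backdoor.linear_regression" amounts to
# here: regress Outcome on Treatment while adjusting for the common cause w0,
# then read off the Treatment coefficient.
X = np.column_stack([np.ones(len(df)), df["Treatment"], df["w0"]])
coefs, _, _, _ = np.linalg.lstsq(X, df["Outcome"], rcond=None)
print("Manual backdoor-adjusted estimate:", coefs[1])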
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is 0.9939024153048353
Actual true causal effect was 1
###Markdown
Step 4: Refuting the estimate. We can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis). 1) Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0+w_random
###Markdown
2) Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~placebo+w0
###Markdown
3) Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Confounding Example: Finding causal effects from observed dataSuppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
import logging
logging.getLogger("dowhy").setLevel(logging.WARNING)
###Output
_____no_output_____
###Markdown
Let's create a mystery dataset for which we need to determine whether there is a causal effect.Creating the dataset. It is generated from either one of two models:* **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar,
num_common_causes=1,
sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
_____no_output_____
###Markdown
Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* STEP 1: Model the problem as a causal graphInitializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"])
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
###Markdown
Showing the causal model stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graphIdentify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
###Output
WARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.
###Markdown
STEP 3: Estimate the causal effectOnce we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use Linear Regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
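# A small comparison sketch (assumes statsmodels is installed): the naive,
# unadjusted slope versus the w0-adjusted slope shows how much the common cause
# biases the raw correlation.
import statsmodels.api as sm
naive = sm.OLS(df["Outcome"], sm.add_constant(df[["Treatment"]])).fit()
adjusted = sm.OLS(df["Outcome"], sm.add_constant(df[["Treatment", "w0"]])).fit()
print("Naive slope: ", naive.params["Treatment"])
print("Adjusted slope:", adjusted.params["Treatment"])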
###Output
Causal Estimate is -0.019836578259104343
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is -0.019836578259104343
Actual true causal effect was 0
###Markdown
Step 4: Refuting the estimateWe can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids). Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
Refute: Add a Random Common Cause
Estimated effect:-0.019836578259104343
New effect:-0.019826671323794898
###Markdown
Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
Refute: Use a Placebo Treatment
Estimated effect:-0.019836578259104343
New effect:3.341207189269113e-06
p value:0.47
###Markdown
Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
Refute: Use a subset of data
Estimated effect:-0.019836578259104343
New effect:-0.019587864980808264
p value:0.47
###Markdown
Confounding Example: Finding causal effects from observed dataSuppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
import logging
logging.getLogger("dowhy").setLevel(logging.WARNING)
###Output
_____no_output_____
###Markdown
Let's create a mystery dataset for which we need to determine whether there is a causal effect.Creating the dataset. It is generated from either one of two models:* **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar,
num_common_causes=1,
sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
_____no_output_____
###Markdown
Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* STEP 1: Model the problem as a causal graphInitializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"])
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
###Markdown
Showing the causal model stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graphIdentify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect()
print(identified_estimand)
###Output
WARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.
###Markdown
STEP 3: Estimate the causal effectOnce we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use Linear Regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
###Output
Causal Estimate is 0.9905610344994829
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is 0.9905610344994829
Actual true causal effect was 1
###Markdown
Step 4: Refuting the estimateWe can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids). Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
Refute: Add a Random Common Cause
Estimated effect:0.9905610344994829
New effect:0.9904200614048078
###Markdown
Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
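# A do-it-yourself sketch of the placebo idea: permute the treatment column and
# re-run the same adjusted regression; the coefficient on the shuffled treatment
# should be close to zero.
shuffled = df["Treatment"].sample(frac=1.0, random_state=0).to_numpy()
X_placebo = np.column_stack([np.ones(len(df)), shuffled, df["w0"]])
coefs_placebo, _, _, _ = np.linalg.lstsq(X_placebo, df["Outcome"], rcond=None)
print("Placebo-treatment coefficient:", coefs_placebo[1])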
###Output
Refute: Use a Placebo Treatment
Estimated effect:0.9905610344994829
New effect:3.0487607018869765e-06
p value:0.45
###Markdown
Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
Refute: Use a subset of data
Estimated effect:0.9905610344994829
New effect:0.990524809173428
p value:0.48
###Markdown
Correlation or causal effect? Suppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
###Output
_____no_output_____
###Markdown
The dataset. Let's create a mystery dataset for which we need to determine whether there is a causal effect. It is generated from one of two models: * **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause. In other words, treatment and outcome are correlated under both models, but the source of that correlation differs.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0 # whether there is a causal effect; it is either 0 or 1
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar, sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
rvar
# data_dict.keys()
# data_dict['gml_graph'], data_dict['ate'], data_dict['common_causes_names'], data_dict['time_val']
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
WARNING:matplotlib.legend:No handles with labels found to put in legend.
###Markdown
Does Treatment cause Outcome? Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* For these observational data, the steps below accurately estimate the causal effect in the simulated data. STEP 1: Model the problem as a causal graph. Initializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"],
proceed_when_unidentifiable=True)
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
INFO:dowhy.causal_graph:If this is observed data (not from a randomized experiment), there might always be missing confounders. Adding a node named "Unobserved Confounders" to reflect this.
INFO:dowhy.causal_model:Model to find the causal effect of treatment ['Treatment'] on outcome ['Outcome']
###Markdown
Showing the causal graph stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graph. Identify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect()
print(identified_estimand)
###Output
INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['w0', 'U']
WARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.
INFO:dowhy.causal_identifier:Continuing by ignoring these unobserved confounders because proceed_when_unidentifiable flag is True.
INFO:dowhy.causal_identifier:Instrumental variables for treatment and outcome:[]
###Markdown
STEP 3: Estimate the causal effect. Once we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use linear regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is 0.9939024153048353
Actual true causal effect was 1
###Markdown
Step 4: Refuting the estimate. We can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis). 1) Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0+w_random
###Markdown
2) Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~placebo+w0
###Markdown
3) Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
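# A sketch of what the data-subset refuter checks: re-estimate the adjusted
# regression on random 90% subsets and confirm the coefficient stays close to
# the full-data estimate.
for seed in range(3):
    sub = df.sample(frac=0.9, random_state=seed)
    X_sub = np.column_stack([np.ones(len(sub)), sub["Treatment"], sub["w0"]])
    coefs_sub, _, _, _ = np.linalg.lstsq(X_sub, sub["Outcome"], rcond=None)
    print("Subset estimate (seed {0}): {1:.4f}".format(seed, coefs_sub[1]))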
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Confounding Example: Finding causal effects from observed dataSuppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
###Output
_____no_output_____
###Markdown
Let's create a mystery dataset for which we need to determine whether there is a causal effect.Creating the dataset. It is generated from either one of two models:* **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar, sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
No handles with labels found to put in legend.
###Markdown
Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* STEP 1: Model the problem as a causal graphInitializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"])
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
INFO:dowhy.causal_graph:If this is observed data (not from a randomized experiment), there might always be missing confounders. Adding a node named "Unobserved Confounders" to reflect this.
INFO:dowhy.causal_model:Model to find the causal effect of treatment ['Treatment'] on outcome ['Outcome']
###Markdown
Showing the causal model stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graphIdentify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect()
print(identified_estimand)
###Output
INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['w0', 'U']
WARNING:dowhy.causal_identifier:If this is observed data (not from a randomized experiment), there might always be missing confounders. Causal effect cannot be identified perfectly.
###Markdown
STEP 3: Estimate the causal effectOnce we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use Linear Regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is 1.0154712956668286
Actual true causal effect was 1
###Markdown
Step 4: Refuting the estimateWe can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids). Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
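# A sketch of the random-common-cause idea: append a covariate that is pure
# noise and re-fit; a sound estimate should barely move.
w_noise = np.random.normal(size=len(df))
X_noise = np.column_stack([np.ones(len(df)), df["Treatment"], df["w0"], w_noise])
coefs_noise, _, _, _ = np.linalg.lstsq(X_noise, df["Outcome"], rcond=None)
print("Estimate with extra noise covariate:", coefs_noise[1])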
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0+w_random
###Markdown
Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~placebo+w0
###Markdown
Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Confounding Example: Finding causal effects from observed dataSuppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import os, sys
sys.path.append(os.path.abspath("../../"))
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
###Output
_____no_output_____
###Markdown
Let's create a mystery dataset for which we need to determine whether there is a causal effect.Creating the dataset. It is generated from either one of two models:* **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar, sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
_____no_output_____
###Markdown
Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* STEP 1: Model the problem as a causal graphInitializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"])
model.view_model(layout="dot")
###Output
WARNING:dowhy.causal_model:Causal Graph not provided. DoWhy will construct a graph based on data inputs.
INFO:dowhy.causal_model:Model to find the causal effect of treatment ['Treatment'] on outcome ['Outcome']
###Markdown
Showing the causal model stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graphIdentify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect()
print(identified_estimand)
###Output
INFO:dowhy.causal_identifier:Common causes of treatment and outcome:['w0', 'U']
WARNING:dowhy.causal_identifier:There are unobserved common causes. Causal effect cannot be identified.
###Markdown
STEP 3: Estimate the causal effectOnce we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use Linear Regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
DoWhy estimate is 1.0099765763913107
Actual true causal effect was 1
###Markdown
Step 4: Refuting the estimateWe can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids). Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0+w_random
###Markdown
Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~placebo+w0
###Markdown
Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
INFO:dowhy.causal_estimator:INFO: Using Linear Regression Estimator
INFO:dowhy.causal_estimator:b: Outcome~Treatment+w0
###Markdown
Confounding Example: Finding causal effects from observed dataSuppose you are given some data with treatment and outcome. Can you determine whether the treatment causes the outcome, or the correlation is purely due to another common cause?
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import dowhy
from dowhy import CausalModel
import dowhy.datasets, dowhy.plotter
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'': {
'level': 'INFO',
},
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
###Output
_____no_output_____
###Markdown
Let's create a mystery dataset for which we need to determine whether there is a causal effect.Creating the dataset. It is generated from either one of two models:* **Model 1**: Treatment does cause outcome. * **Model 2**: Treatment does not cause outcome. All observed correlation is due to a common cause.
###Code
rvar = 1 if np.random.uniform() >0.5 else 0
data_dict = dowhy.datasets.xy_dataset(10000, effect=rvar,
num_common_causes=1,
sd_error=0.2)
df = data_dict['df']
print(df[["Treatment", "Outcome", "w0"]].head())
dowhy.plotter.plot_treatment_outcome(df[data_dict["treatment_name"]], df[data_dict["outcome_name"]],
df[data_dict["time_val"]])
###Output
_____no_output_____
###Markdown
Using DoWhy to resolve the mystery: *Does Treatment cause Outcome?* STEP 1: Model the problem as a causal graphInitializing the causal model.
###Code
model= CausalModel(
data=df,
treatment=data_dict["treatment_name"],
outcome=data_dict["outcome_name"],
common_causes=data_dict["common_causes_names"],
instruments=data_dict["instrument_names"])
model.view_model(layout="dot")
###Output
_____no_output_____
###Markdown
Showing the causal model stored in the local file "causal_model.png"
###Code
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
###Output
_____no_output_____
###Markdown
STEP 2: Identify causal effect using properties of the formal causal graphIdentify the causal effect using properties of the causal graph.
###Code
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
###Output
_____no_output_____
###Markdown
STEP 3: Estimate the causal effectOnce we have identified the estimand, we can use any statistical method to estimate the causal effect. Let's use Linear Regression for simplicity.
###Code
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.linear_regression")
print("Causal Estimate is " + str(estimate.value))
# Plot slope of line between treatment and outcome = causal effect
dowhy.plotter.plot_causal_effect(estimate, df[data_dict["treatment_name"]], df[data_dict["outcome_name"]])
###Output
_____no_output_____
###Markdown
Checking if the estimate is correct
###Code
print("DoWhy estimate is " + str(estimate.value))
print ("Actual true causal effect was {0}".format(rvar))
###Output
_____no_output_____
###Markdown
Step 4: Refuting the estimateWe can also refute the estimate to check its robustness to assumptions (*aka* sensitivity analysis, but on steroids). Adding a random common cause variable
###Code
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
###Output
_____no_output_____
###Markdown
Replacing treatment with a random (placebo) variable
###Code
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
###Output
_____no_output_____
###Markdown
Removing a random subset of the data
###Code
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
###Output
_____no_output_____ |
TF_lecture.ipynb | ###Markdown
CS294-112 Fall 2018 Tensorflow Tutorial This tutorial will provide a brief overview of the core concepts and functionality of Tensorflow. This tutorial will cover the following:0. What is Tensorflow1. How to input data2. How to perform computations3. How to create variables4. How to train a neural network for a simple regression problem5. Tips and tricks
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as mpatches
def tf_reset():
try:
sess.close()
except:
pass
tf.reset_default_graph()
return tf.Session()
###Output
_____no_output_____
###Markdown
0. What is TensorflowTensorflow is a framework to define a series of computations. You define inputs, what operations should be performed, and then Tensorflow will compute the outputs for you.Below is a simple high-level example:
###Code
# create the session you'll work in
# you can think of this as a "blank piece of paper" that you'll be writing math on
sess = tf_reset()
# define your inputs
a = tf.constant(1.0)
b = tf.constant(2.0)
# do some operations
c = a + b
# get the result
c_run = sess.run(c)
print('c = {0}'.format(c_run))
###Output
c = 3.0
###Markdown
1. How to input dataTensorflow has multiple ways for you to input data. One way is to have the inputs be constants:
###Code
sess = tf_reset()
# define your inputs
a = tf.constant(1.0)
b = tf.constant(2.0)
# do some operations
c = a + b
# get the result
c_run = sess.run(c)
print('c = {0}'.format(c_run))
###Output
c = 3.0
###Markdown
However, having our inputs be constants is inflexible. We want to be able to change what data we input at runtime. We can do this using placeholders:
###Code
sess = tf_reset()
# define your inputs
a = tf.placeholder(dtype=tf.float32, shape=[1], name='a_placeholder')
b = tf.placeholder(dtype=tf.float32, shape=[1], name='b_placeholder')
# do some operations
c = a + b
# get the result
c0_run = sess.run(c, feed_dict={a: [1.0], b: [2.0]})
c1_run = sess.run(c, feed_dict={a: [2.0], b: [4.0]})
print('c0 = {0}'.format(c0_run))
print('c1 = {0}'.format(c1_run))
###Output
c0 = [3.]
c1 = [6.]
###Markdown
But what if we don't know the size of our input beforehand? One dimension of a tensor is allowed to be 'None', which means it can be variable sized:
###Code
sess = tf_reset()
# inputs
a = tf.placeholder(dtype=tf.float32, shape=[None], name='a_placeholder')
b = tf.placeholder(dtype=tf.float32, shape=[None], name='b_placeholder')
# do some operations
c = a + b
# get outputs
c0_run = sess.run(c, feed_dict={a: [1.0], b: [2.0]})
c1_run = sess.run(c, feed_dict={a: [1.0, 2.0], b: [2.0, 4.0]})
print(a)
print('a shape: {0}'.format(a.get_shape()))
print(b)
print('b shape: {0}'.format(b.get_shape()))
print('c0 = {0}'.format(c0_run))
print('c1 = {0}'.format(c1_run))
###Output
Tensor("a_placeholder:0", shape=(?,), dtype=float32)
a shape: (?,)
Tensor("b_placeholder:0", shape=(?,), dtype=float32)
b shape: (?,)
c0 = [3.]
c1 = [3. 6.]
###Markdown
2. How to perform computationsNow that we can input data, we want to perform useful computations on the data. First, let's create some data to work with:
###Code
sess = tf_reset()
# inputs
a = tf.constant([[-1.], [-2.], [-3.]], dtype=tf.float32)
b = tf.constant([[1., 2., 3.]], dtype=tf.float32)
a_run, b_run = sess.run([a, b])
print('a:\n{0}'.format(a_run))
print('b:\n{0}'.format(b_run))
###Output
a:
[[-1.]
[-2.]
[-3.]]
b:
[[1. 2. 3.]]
###Markdown
We can do simple operations, such as addition:
###Code
c = b + b
c_run = sess.run(c)
print('b:\n{0}'.format(b_run))
print('c:\n{0}'.format(c_run))
###Output
b:
[[1. 2. 3.]]
c:
[[2. 4. 6.]]
###Markdown
Be careful about the dimensions of the tensors; some operations may work even when you think they shouldn't...
###Code
c = a + b
c_run = sess.run(c)
print('a:\n{0}'.format(a_run))
print('b:\n{0}'.format(b_run))
print('c:\n{0}'.format(c_run))
###Output
a:
[[-1.]
[-2.]
[-3.]]
b:
[[1. 2. 3.]]
c:
[[ 0. 1. 2.]
[-1. 0. 1.]
[-2. -1. 0.]]
###Markdown
Also, some operations may be different than what you expect:
###Code
c_elementwise = a * b
c_matmul = tf.matmul(b, a)
c_elementwise_run, c_matmul_run = sess.run([c_elementwise, c_matmul])
print('a:\n{0}'.format(a_run))
print('b:\n{0}'.format(b_run))
print('c_elementwise:\n{0}'.format(c_elementwise_run))
print('c_matmul: \n{0}'.format(c_matmul_run))
###Output
a:
[[-1.]
[-2.]
[-3.]]
b:
[[1. 2. 3.]]
c_elementwise:
[[-1. -2. -3.]
[-2. -4. -6.]
[-3. -6. -9.]]
c_matmul:
[[-14.]]
###Markdown
Operations can be chained together:
###Code
# operations can be chained together
c0 = b + b
c1 = c0 + 1
c0_run, c1_run = sess.run([c0, c1])
print('b:\n{0}'.format(b_run))
print('c0:\n{0}'.format(c0_run))
print('c1:\n{0}'.format(c1_run))
###Output
b:
[[1. 2. 3.]]
c0:
[[2. 4. 6.]]
c1:
[[3. 5. 7.]]
###Markdown
Finally, Tensorflow has many useful built-in operations:
###Code
c = tf.reduce_mean(b)
c_run = sess.run(c)
print('b:\n{0}'.format(b_run))
print('c:\n{0}'.format(c_run))
###Output
b:
[[1. 2. 3.]]
c:
2.0
###Markdown
3. How to create variablesNow that we can input data and perform computations, we want some of these operations to involve variables that are free parameters, and can be trained using an optimizer (e.g., gradient descent). First, let's create some data to work with:
###Code
sess = tf_reset()
# inputs
b = tf.constant([[1., 2., 3.]], dtype=tf.float32)
b_run = sess.run(b)
print('b:\n{0}'.format(b_run))
###Output
b:
[[1. 2. 3.]]
###Markdown
We'll now create a variable
###Code
var_init_value = [[2.0, 4.0, 6.0]]
var = tf.get_variable(name='myvar',
shape=[1, 3],
dtype=tf.float32,
initializer=tf.constant_initializer(var_init_value))
print(var)
print(tf.global_variables())
###Output
[<tf.Variable 'myvar:0' shape=(1, 3) dtype=float32_ref>]
###Markdown
We can do operations with the variable just like any other tensor:
###Code
# can do operations
c = b + var
print(b)
print(var)
print(c)
###Output
Tensor("Const:0", shape=(1, 3), dtype=float32)
<tf.Variable 'myvar:0' shape=(1, 3) dtype=float32_ref>
Tensor("add:0", shape=(1, 3), dtype=float32)
###Markdown
Before we can run any of these operations, we must first initialize the variables
###Code
init_op = tf.global_variables_initializer()
sess.run(init_op)
###Output
_____no_output_____
###Markdown
and then we can run the operations just as we normally would.
###Code
c_run = sess.run(c)
print('b:\n{0}'.format(b_run))
print('var:\n{0}'.format(var_init_value))
print('c:\n{0}'.format(c_run))
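# Variables are mutable: a minimal sketch of updating one in place with
# tf.assign, which is essentially what an optimizer does when applying a
# gradient update.
assign_op = tf.assign(var, [[1.0, 1.0, 1.0]])
sess.run(assign_op)
print('var after assign:\n{0}'.format(sess.run(var)))
print('c after assign:\n{0}'.format(sess.run(c)))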
###Output
b:
[[1. 2. 3.]]
var:
[[2.0, 4.0, 6.0]]
c:
[[3. 6. 9.]]
###Markdown
So far we haven't yet said how to optimize these variables; we'll cover that next in the context of an example. 4. How to train a neural network for a simple regression problemWe've discussed how to input data, perform operations, and create variables. We'll now show how to combine all of these---with some minor additions---to train a neural network on a simple regression problem. First, we'll create data for a 1-dimensional regression problem:
###Code
# generate the data
inputs = np.linspace(-2*np.pi, 2*np.pi, 10000)[:, None]
outputs = np.sin(inputs) + 0.05 * np.random.normal(size=[len(inputs),1])
plt.scatter(inputs[:, 0], outputs[:, 0], s=0.1, color='k', marker='o')
###Output
_____no_output_____
###Markdown
The below code creates the inputs, variables, neural network operations, mean-squared-error loss, gradient descent optimizer, and runs the optimizer using minibatches of the data.
###Code
sess = tf_reset()
def create_model():
# create inputs
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1])
output_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# create variables
W0 = tf.get_variable(name='W0', shape=[1, 20], initializer=tf.contrib.layers.xavier_initializer())
W1 = tf.get_variable(name='W1', shape=[20, 20], initializer=tf.contrib.layers.xavier_initializer())
W2 = tf.get_variable(name='W2', shape=[20, 1], initializer=tf.contrib.layers.xavier_initializer())
b0 = tf.get_variable(name='b0', shape=[20], initializer=tf.constant_initializer(0.))
b1 = tf.get_variable(name='b1', shape=[20], initializer=tf.constant_initializer(0.))
b2 = tf.get_variable(name='b2', shape=[1], initializer=tf.constant_initializer(0.))
weights = [W0, W1, W2]
biases = [b0, b1, b2]
activations = [tf.nn.relu, tf.nn.relu, None]
# create computation graph
layer = input_ph
for W, b, activation in zip(weights, biases, activations):
layer = tf.matmul(layer, W) + b
if activation is not None:
layer = activation(layer)
output_pred = layer
return input_ph, output_ph, output_pred
input_ph, output_ph, output_pred = create_model()
# create loss
mse = tf.reduce_mean(0.5 * tf.square(output_pred - output_ph))
# create optimizer
opt = tf.train.AdamOptimizer().minimize(mse)
# initialize variables
sess.run(tf.global_variables_initializer())
# create saver to save model variables
saver = tf.train.Saver()
# run training
batch_size = 32
for training_step in range(10000):
# get a random subset of the training data
indices = np.random.randint(low=0, high=len(inputs), size=batch_size)
input_batch = inputs[indices]
output_batch = outputs[indices]
# run the optimizer and get the mse
_, mse_run = sess.run([opt, mse], feed_dict={input_ph: input_batch, output_ph: output_batch})
# print the mse every so often
if training_step % 1000 == 0:
print('{0:04d} mse: {1:.3f}'.format(training_step, mse_run))
saver.save(sess, '/tmp/model.ckpt')
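# A quick sanity-check sketch: evaluate the trained network's MSE on the full
# dataset rather than only the last minibatch.
full_mse = sess.run(mse, feed_dict={input_ph: inputs, output_ph: outputs})
print('final full-data mse: {0:.3f}'.format(full_mse))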
###Output
0000 mse: 0.253
1000 mse: 0.080
2000 mse: 0.018
3000 mse: 0.016
4000 mse: 0.010
5000 mse: 0.001
6000 mse: 0.002
7000 mse: 0.002
8000 mse: 0.002
9000 mse: 0.002
###Markdown
Now that the neural network is trained, we can use it to make predictions:
###Code
sess = tf_reset()
# create the model
input_ph, output_ph, output_pred = create_model()
# restore the saved model
saver = tf.train.Saver()
saver.restore(sess, "/tmp/model.ckpt")
output_pred_run = sess.run(output_pred, feed_dict={input_ph: inputs})
plt.scatter(inputs[:, 0], outputs[:, 0], c='k', marker='o', s=0.1)
plt.scatter(inputs[:, 0], output_pred_run[:, 0], c='r', marker='o', s=0.1)
plt.show()
###Output
INFO:tensorflow:Restoring parameters from /tmp/model.ckpt
###Markdown
Not so hard after all! There is much more functionality to Tensorflow besides what we've covered, but you now know the basics. 5. Tips and tricks (a) Check your dimensions
###Code
# example of "surprising" resulting dimensions due to broadcasting
a = tf.constant(np.random.random((4, 1)))
b = tf.constant(np.random.random((1, 4)))
c = a * b
assert c.get_shape() == (4, 4)
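# One way to catch this early (a sketch): collapse the extra dimension
# explicitly, or assert the shape you expect, before combining the tensors.
a_flat = tf.reshape(a, [4])
b_flat = tf.reshape(b, [4])
c_flat = a_flat * b_flat
assert c_flat.get_shape() == (4,)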
###Output
_____no_output_____
###Markdown
(b) Check what variables have been created
###Code
sess = tf_reset()
a = tf.get_variable('I_am_a_variable', shape=[4, 6])
b = tf.get_variable('I_am_a_variable_too', shape=[2, 7])
for var in tf.global_variables():
print(var.name)
###Output
I_am_a_variable:0
I_am_a_variable_too:0
###Markdown
(c) Look at the [tensorflow API](https://www.tensorflow.org/api_docs/python/), or open up a python terminal and investigate!
###Code
help(tf.reduce_mean)
###Output
Help on function reduce_mean in module tensorflow.python.ops.math_ops:
reduce_mean(input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None)
Computes the mean of elements across dimensions of a tensor. (deprecated arguments)
SOME ARGUMENTS ARE DEPRECATED. They will be removed in a future version.
Instructions for updating:
keep_dims is deprecated, use keepdims instead
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
```python
x = tf.constant([1, 0, 1, 0])
tf.reduce_mean(x) # 0
y = tf.constant([1., 0., 1., 0.])
tf.reduce_mean(y) # 0.5
```
@end_compatibility
###Markdown
(d) Tensorflow has some built-in layers to simplify your code.
###Code
help(tf.contrib.layers.fully_connected)
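# For comparison, a sketch of the earlier 3-layer network written with the
# built-in layer instead of hand-made weight and bias variables (same sizes and
# activations as create_model above).
def create_model_with_layers(input_ph):
    hidden = tf.contrib.layers.fully_connected(input_ph, 20, activation_fn=tf.nn.relu)
    hidden = tf.contrib.layers.fully_connected(hidden, 20, activation_fn=tf.nn.relu)
    output_pred = tf.contrib.layers.fully_connected(hidden, 1, activation_fn=None)
    return output_pred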
###Output
Help on function fully_connected in module tensorflow.contrib.layers.python.layers.layers:
fully_connected(inputs, num_outputs, activation_fn=<function relu at 0x000001E70AA69510>, normalizer_fn=None, normalizer_params=None, weights_initializer=<function variance_scaling_initializer.<locals>._initializer at 0x000001E70E2CE6A8>, weights_regularizer=None, biases_initializer=<tensorflow.python.ops.init_ops.Zeros object at 0x000001E70E2C9F60>, biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, scope=None)
Adds a fully connected layer.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
created and added the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
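###Markdown
As a quick illustration (a sketch added here for clarity, assuming TF 1.x where `tf.contrib` is available), `fully_connected` lets you stack a small MLP without creating the weight and bias variables by hand:
###Code
# hypothetical sketch: a 1 -> 20 -> 20 -> 1 network built from tf.contrib.layers calls
input_ph = tf.placeholder(dtype=tf.float32, shape=[None, 1])
hidden = tf.contrib.layers.fully_connected(input_ph, 20, activation_fn=tf.nn.relu)
hidden = tf.contrib.layers.fully_connected(hidden, 20, activation_fn=tf.nn.relu)
output_pred = tf.contrib.layers.fully_connected(hidden, 1, activation_fn=None)
###Output
_____no_output_____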
###Markdown
(e) Use [variable scope](https://www.tensorflow.org/guide/variables#sharing_variables) to keep your variables organized.
###Code
sess = tf_reset()
# create variables
with tf.variable_scope('layer_0'):
W0 = tf.get_variable(name='W0', shape=[1, 20], initializer=tf.contrib.layers.xavier_initializer())
b0 = tf.get_variable(name='b0', shape=[20], initializer=tf.constant_initializer(0.))
with tf.variable_scope('layer_1'):
W1 = tf.get_variable(name='W1', shape=[20, 20], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.get_variable(name='b1', shape=[20], initializer=tf.constant_initializer(0.))
with tf.variable_scope('layer_2'):
W2 = tf.get_variable(name='W2', shape=[20, 1], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.get_variable(name='b2', shape=[1], initializer=tf.constant_initializer(0.))
# print the variables
var_names = sorted([v.name for v in tf.global_variables()])
print('\n'.join(var_names))
###Output
layer_0/W0:0
layer_0/b0:0
layer_1/W1:0
layer_1/b1:0
layer_2/W2:0
layer_2/b2:0
###Markdown
(f) You can specify which GPU you want to use and how much memory you want to use
###Code
gpu_device = 0
gpu_frac = 0.5
# make only one of the GPUs visible
import os
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_device)
# only use part of the GPU memory
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)
config = tf.ConfigProto(gpu_options=gpu_options)
# create the session
tf_sess = tf.Session(graph=tf.Graph(), config=config)
###Output
_____no_output_____ |
tuple.ipynb | ###Markdown
Tuples
###Code
# create a tuple
t = ('Peanut butter', 'Jelly')
print(t)
print(type(t))
# convert from array
a = ['Peanut butter', 'Jelly']
t = tuple(a)
print(t)
print(type(t))
# create tuple from dictionary (direct)
# only keys are added to the tuple
d = {
"i_1": "Peanut butter",
"i_2": "Jelly"
}
t = tuple(d)
print(t)
print(type(t))
# create tuple from dictionary (using items)
# creates a tuple of tuples.
d = {
"i_1": "Peanut butter",
"i_2": "Jelly"
}
t = tuple(d.items())
print(t)
print(type(t))
# unpack a tuple
t = ('Peanut butter', 'Jelly')
a, b = t
print(a, "-", b)
###Output
Peanut butter - Jelly
###Markdown
TUPLES
###Code
my_tuple = (1, 2, 3, 4, 5, 6)
print(my_tuple)
print(5 in my_tuple)
new_tuple = my_tuple[1:4]
print(new_tuple)
new_tuple = my_tuple[1:2]
print(new_tuple)
x,y,z, *other = (1,2,3,4,5)
print(x)
print(y)
print(z)
print(other)
my_tuple = (1,2,3,4,5,5,6)
print(my_tuple.count(4))
print(my_tuple.count(5))
print(my_tuple.index(4))
print(my_tuple.index(5))
print(len(my_tuple))
###Output
7
###Markdown
**Create a Tuple**
###Code
thistuple = ("apple", "banana", "cherry")
print(thistuple)
###Output
_____no_output_____
###Markdown
**Tuples allow duplicate values**
###Code
thistuple = ("apple", "banana", "cherry", "apple", "cherry")
print(thistuple)
###Output
_____no_output_____
###Markdown
**Print the number of items in the tuple**
###Code
thistuple = ("apple", "banana", "cherry")
print(len(thistuple))
###Output
_____no_output_____
###Markdown
**Tuple items can be of any data type**
###Code
tuple1 = ("apple", "banana", "cherry")
tuple2 = (1, 5, 7, 9, 3)
tuple3 = (True, False, False)
tuple4 = ("abc", 34, True, 40, "male")
###Output
_____no_output_____
###Markdown
**What is the data type of a tuple?**
###Code
mytuple = ("apple", "banana", "cherry")
print(type(mytuple))
###Output
_____no_output_____
###Markdown
**Using the tuple() method to make a tuple**
###Code
thistuple = tuple(("apple", "banana", "cherry")) # note the double round-brackets
print(thistuple)
###Output
_____no_output_____
###Markdown
**Print the second item in the tuple**
###Code
thistuple = ("apple", "banana", "cherry")
print(thistuple[1])
###Output
_____no_output_____
###Markdown
**Print the last item of the tuple**
###Code
thistuple = ("apple", "banana", "cherry")
print(thistuple[-1])
###Output
_____no_output_____
###Markdown
**Return the third, fourth, and fifth item**
###Code
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[2:5])
###Output
_____no_output_____
###Markdown
**Returns the items from the beginning up to, but not including, "kiwi"**
###Code
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[:4])
###Output
_____no_output_____
###Markdown
**This example returns the items from "cherry" to the end**
###Code
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[2:])
###Output
_____no_output_____
###Markdown
**returns the items from index -4 (included) to index -1 (excluded)**
###Code
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[-4:-1])
###Output
_____no_output_____
###Markdown
**Check if "apple" is present in the tuple**
###Code
thistuple = ("apple", "banana", "cherry")
if "melon" in thistuple:
print("Yes, 'apple' is in the fruits tuple")
###Output
_____no_output_____
###Markdown
**Convert the tuple into a list to be able to change it**
###Code
x = ("apple", "banana", "cherry")
y = list(x)
y[1] = "kiwi"
x = tuple(y)
print(x)
###Output
('apple', 'kiwi', 'cherry')
###Markdown
**Convert the tuple into a list, add "orange", and convert it back into a tuple**
###Code
thistuple = ("apple", "banana", "cherry")
y = list(thistuple)
y.append("orange")
thistuple = tuple(y)
print(thistuple)
###Output
_____no_output_____
###Markdown
**Create a new tuple with the value "orange", and add that tuple**
###Code
thistuple = ("apple", "banana", "cherry")
y = ("orange",)
thistuple += y
print(thistuple)
###Output
_____no_output_____
###Markdown
**Convert the tuple into a list, remove "apple", and convert it back into a tuple**
###Code
thistuple = ("apple", "banana", "cherry")
y = list(thistuple)
y.remove("apple")
thistuple = tuple(y)
print(thistuple)
###Output
_____no_output_____
###Markdown
**The del keyword can delete the tuple completely**
###Code
thistuple = ("apple", "banana", "cherry")
del thistuple
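# NOTE: the print below raises a NameError, because the name no longer exists after del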
print(thistuple)
###Output
_____no_output_____
###Markdown
**Unpacking a tuple**
###Code
fruits = ("apple", "banana", "cherry")
(green, yellow, red) = fruits
print(green)
print(yellow)
print(red)
###Output
_____no_output_____
###Markdown
**Assign the rest of the values as a list called "red"**
###Code
fruits = ("apple", "banana", "cherry", "strawberry", "raspberry")
(green, yellow, *red) = fruits
print(green)
print(yellow)
print(red)
###Output
_____no_output_____
###Markdown
**Add a list of values to the "tropic" variable**
###Code
fruits = ("apple", "mango", "papaya", "pineapple", "cherry")
(green, *tropic, red) = fruits
print(green)
print(tropic)
print(red)
###Output
_____no_output_____
###Markdown
**Iterate through the items and print the values**
###Code
thistuple = ("apple", "banana", "cherry")
for x in thistuple:
print(x)
###Output
_____no_output_____
###Markdown
**Print all items by referring to their index number**
###Code
thistuple = ("apple", "banana", "cherry")
for i in range(len(thistuple)):
print(thistuple[i])
###Output
_____no_output_____
###Markdown
**Print all items, using a while loop to go through all the index numbers**
###Code
thistuple = ("apple", "banana", "cherry")
i = 0
while i < len(thistuple):
print(thistuple[i])
i = i + 1
###Output
_____no_output_____
###Markdown
**Join two tuples**
###Code
tuple1 = ("a", "b" , "c")
tuple2 = (1, 2, 3)
tuple3 = tuple1 + tuple2
print(tuple3)
###Output
_____no_output_____
###Markdown
**Multiply the fruits tuple by 2**
###Code
fruits = ("apple", "banana", "cherry")
mytuple = fruits * 2
print(mytuple)
###Output
_____no_output_____ |
deeppixel/Face_Detection/Face_Detect.ipynb | ###Markdown
**Importing all required libraries**
###Code
import numpy as np
import cv2
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
**Displaying the test image**
###Code
image = cv2.imread('Sample_Images/emma.jpg')
plt.imshow(image , cmap=None)
###Output
_____no_output_____
###Markdown
**Converting to grayscale**
###Code
# Converting to grayscale as opencv detector takes in input gray scale images
test_image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Displaying grayscale image
plt.imshow(test_image_gray, cmap='gray')
###Output
_____no_output_____
###Markdown
**Function to convert gray images to RGB**
###Code
#Function to convert gray images to RGB
def convertToRGB(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
###Output
_____no_output_____
###Markdown
**OpenCV provides pre-trained models for detecting various objects (cars, etc.). Here we are using a Haar cascade file for face detection only; one can also train custom models.** **Loading the Haar cascade classifier for frontal faces**
###Code
haar_cascade_face = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
###Output
_____no_output_____
###Markdown
**Detecting faces**
###Code
#Detects the faces in the input image and this function will return the co-ordinates (x and y position plus the height and width) of the detected faces as Rect(x,y,w,h).
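# scaleFactor controls how much the image is shrunk at each image-pyramid step (1.1 = 10% per step);
# minNeighbors is how many overlapping candidate detections a region needs before it is kept as a face.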
faces_rects = haar_cascade_face.detectMultiScale(test_image_gray, scaleFactor = 1.1, minNeighbors = 4);
# Let us print the no. of faces found
print('Faces found: ', len(faces_rects))
# loop over all the co-ordinates it returned and draw rectangles around them using OpenCV. We will draw a yellow rectangle with a thickness of 15
for (x,y,w,h) in faces_rects:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 255), 15)
###Output
_____no_output_____
###Markdown
**Display rectangle over detected face**
###Code
plt.imshow(convertToRGB(image))
###Output
_____no_output_____
###Markdown
**General function for face detection**
###Code
def detect_faces(cascade, test_img, scaleFactor = 1.1):
# create a copy of the image to prevent any changes to the original one.
img_copy = test_img.copy()
#convert the test image to gray scale as opencv face detector expects gray images
gray_image = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
# Applying the haar classifier to detect faces
faces_rect = cascade.detectMultiScale(gray_image, scaleFactor=scaleFactor, minNeighbors=5)
for (x, y, w, h) in faces_rect:
cv2.rectangle(img_copy, (x, y), (x+w, y+h), (0, 0, 255), 5)
return img_copy
#loading image testing_2
test_image2 = cv2.imread('Sample_Images/group.jpeg')
#call the function to detect faces
faces = detect_faces(haar_cascade_face, test_image2)
#convert to RGB and display image
plt.imshow(convertToRGB(faces))
#loading image testing_2
test_image3 = cv2.imread('Sample_Images/run2.jpeg')
#call the function to detect faces
faces = detect_faces(haar_cascade_face, test_image3)
#convert to RGB and display image
plt.imshow(convertToRGB(faces))
###Output
_____no_output_____
###Markdown
**Saving output images**
###Code
cv2.imwrite('image1.png',faces)
###Output
_____no_output_____
###Markdown
**Importing all required libraries**
###Code
import numpy as np
import cv2
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
**Displaying the test image**
###Code
image = cv2.imread('Sample_Images/emma.jpg')
plt.imshow(image , cmap=None)
###Output
_____no_output_____
###Markdown
**Converting to grayscale**
###Code
# Converting to grayscale as opencv detector takes in input gray scale images
test_image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Displaying grayscale image
plt.imshow(test_image_gray, cmap='gray')
###Output
_____no_output_____
###Markdown
**Function to convert gray images to RGB**
###Code
#Function to convert gray images to RGB
def convertToRGB(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
###Output
_____no_output_____
###Markdown
**OpenCV provides pre-trained models for detecting various objects (cars, etc.). Here we are using a Haar cascade file for face detection only; one can also train custom models.** **Loading the Haar cascade classifier for frontal faces**
###Code
haar_cascade_face = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
###Output
_____no_output_____
###Markdown
**Detecting faces**
###Code
#Detects the faces in the input image and this function will return the co-ordinates (x and y position plus the height and width) of the detected faces as Rect(x,y,w,h).
faces_rects = haar_cascade_face.detectMultiScale(test_image_gray, scaleFactor = 1.1, minNeighbors = 4);
# Let us print the no. of faces found
print('Faces found: ', len(faces_rects))
# loop over all the co-ordinates it returned and draw rectangles around them using OpenCV. We will draw a yellow rectangle with a thickness of 15
for (x,y,w,h) in faces_rects:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 255), 15)
###Output
_____no_output_____
###Markdown
**Display rectangle over detected face**
###Code
plt.imshow(convertToRGB(image))
###Output
_____no_output_____
###Markdown
**General function for face detection**
###Code
def detect_faces(cascade, test_img, scaleFactor = 1.1):
# create a copy of the image to prevent any changes to the original one.
img_copy = test_img.copy()
#convert the test image to gray scale as opencv face detector expects gray images
gray_image = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
# Applying the haar classifier to detect faces
faces_rect = cascade.detectMultiScale(gray_image, scaleFactor=scaleFactor, minNeighbors=5)
for (x, y, w, h) in faces_rect:
cv2.rectangle(img_copy, (x, y), (x+w, y+h), (0, 0, 255), 5)
return img_copy
#loading image testing_2
test_image2 = cv2.imread('Sample_Images/group.jpeg')
#call the function to detect faces
faces = detect_faces(haar_cascade_face, test_image2)
#convert to RGB and display image
plt.imshow(convertToRGB(faces))
#loading image testing_2
test_image3 = cv2.imread('Sample_Images/run2.jpeg')
#call the function to detect faces
faces = detect_faces(haar_cascade_face, test_image3)
#convert to RGB and display image
plt.imshow(convertToRGB(faces))
###Output
_____no_output_____
###Markdown
**Saving output images**
###Code
cv2.imwrite('image1.png',faces)
###Output
_____no_output_____ |
1t_DataAnalysisMLPython/1j_ML/DS_ML_Py_SBO/DataScience/3_Distributions/Distributions.ipynb | ###Markdown
Examples of Data Distributions Uniform Distribution
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
values = np.random.uniform(-10.0, 10.0, 100000)
plt.hist(values, 50)
plt.show()
###Output
_____no_output_____
###Markdown
Normal / Gaussian Visualize the probability density function:
###Code
from scipy.stats import norm
import matplotlib.pyplot as plt
x = np.arange(-3, 3, 0.001)
plt.plot(x, norm.pdf(x))
###Output
_____no_output_____
###Markdown
Generate some random numbers with a normal distribution. "mu" is the desired mean, "sigma" is the standard deviation:
###Code
import numpy as np
import matplotlib.pyplot as plt
mu = 5.0
sigma = 2.0
values = np.random.normal(mu, sigma, 10000)
plt.hist(values, 50)
plt.show()
###Output
_____no_output_____
###Markdown
Exponential PDF / "Power Law"
###Code
from scipy.stats import expon
import matplotlib.pyplot as plt
x = np.arange(0, 10, 0.001)
plt.plot(x, expon.pdf(x))
###Output
_____no_output_____
###Markdown
Binomial Probability Mass Function
###Code
from scipy.stats import binom
import matplotlib.pyplot as plt
# n -> number of events, i.e. flipping a coin 10 times
# p -> probability of the event occurring: 50% chance of getting heads
n, p = 10, 0.5
x = np.arange(0, 10, 0.001)
plt.plot(x, binom.pmf(x, n, p))
###Output
_____no_output_____
###Markdown
Poisson Probability Mass Function Example: My website gets on average 500 visits per day. What are the odds of getting exactly 550?
###Code
from scipy.stats import poisson
import matplotlib.pyplot as plt
mu = 500
x = np.arange(400, 600, 0.5)
plt.plot(x, poisson.pmf(x, mu))
###Output
_____no_output_____
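###Markdown
To actually answer the question above (a small sketch added for clarity, reusing the same `poisson` object), the probability of seeing exactly 550 visits is the PMF evaluated at 550:
###Code
# probability of exactly 550 visits on a day when the average is 500
poisson.pmf(550, mu)
###Output
_____no_output_____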
###Markdown
Pop Quiz! What's the equivalent of a probability distribution function when using discrete instead of continuous data?
###Code
# probability mass function
###Output
_____no_output_____ |
course1/week4/w4_task_clt_1.ipynb | ###Markdown
Uniform distribution (from 2 to 4)
###Code
a=2
b=4
uniform_rv=sts.uniform(a,b-a)
uniform_rv.rvs(10)
###Output
_____no_output_____
###Markdown
Probability density function
###Code
x = np.linspace(0,6,100)
pdf = uniform_rv.pdf(x)
plt.plot(x, pdf)
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
###Output
_____no_output_____
###Markdown
Cumulative distribution function
###Code
x = np.linspace(0,6,100)
cdf = uniform_rv.cdf(x)
plt.plot(x, cdf)
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
sample = uniform_rv.rvs(1000)
plt.hist(sample, bins=30)
plt.ylabel('fraction of samples')
plt.xlabel('$x$')
x = np.linspace(0,6,100)
pdf = uniform_rv.pdf(x)
plt.plot(x, pdf, label='theoretical PDF')
plt.hist(sample, bins=30,normed=True, label='sample histogram')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Distribution of the sample mean, n=5
###Code
n=5
sr=[]
for i in range(1000):
sample = uniform_rv.rvs(n)
sr.append(mean(sample))
plt.hist(sr, bins=30,normed=True, label='the distribution of the average n=5')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Estimating the mean and variance of the distribution of the sample mean, n=5
###Code
# mean
m=uniform_rv.mean()
# variance of the sample mean
d=uniform_rv.var()/n
std=d**0.5
print(m,std)
# sample mean and standard deviation of the simulated means
print(np.array(sr).mean())
print(np.array(sr).std())
# normal distribution
norm_rv = sts.norm(m,std)
plt.hist(sr, bins=30,normed=True, label='the distribution of the average n=5')
x = np.linspace(0,6,1000)
cdf = norm_rv.pdf(x)
plt.plot(x, cdf, label='norm')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
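###Markdown
Recall what the central limit theorem predicts here: for the uniform distribution on $[a,b]=[2,4]$ the variance is $\sigma^2=(b-a)^2/12=1/3$, so the mean of $n$ observations is approximately $N(\mu,\, \sigma^2/n)$ with $\mu=3$ and standard deviation $\sigma/\sqrt{n}=\sqrt{1/(3n)}$, which is the theoretical value estimated in the cell above.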
###Markdown
Distribution of the sample mean, n=10
###Code
n=10
sr1=[]
for i in range(1000):
sample = uniform_rv.rvs(n)
sr1.append(mean(sample))
plt.hist(sr1, bins=30,normed=True, label='the distribution of the average n=10')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Estimating the mean and variance of the distribution of the sample mean, n=10
###Code
# mean
m=uniform_rv.mean()
# variance of the sample mean
d=uniform_rv.var()/n
std=d**0.5
print(m,std)
# sample mean and standard deviation of the simulated means
print(np.array(sr1).mean())
print(np.array(sr1).std())
# normal distribution
norm_rv = sts.norm(m,std)
plt.hist(sr1, bins=30,normed=True, label='the distribution of the average n=10')
x = np.linspace(0,6,1000)
cdf = norm_rv.pdf(x)
plt.plot(x, cdf, label='norm')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Distribution of the sample mean, n=15
###Code
n=15
sr2=[]
for i in range(1000):
sample = uniform_rv.rvs(n)
sr2.append(mean(sample))
plt.hist(sr2, bins=30,normed=True, label='the distribution of the average n=15')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Estimating the mean and variance of the distribution of the sample mean, n=15
###Code
# mean
m=uniform_rv.mean()
# variance of the sample mean
d=uniform_rv.var()/n
std=d**0.5
print(m,std)
# sample mean and standard deviation of the simulated means
print(np.array(sr2).mean())
print(np.array(sr2).std())
# normal distribution
norm_rv = sts.norm(m,std)
plt.hist(sr2, bins=30,normed=True, label='the distribution of the average n=15')
x = np.linspace(0,6,1000)
cdf = norm_rv.pdf(x)
plt.plot(x, cdf, label='norm')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____
###Markdown
Histograms of the sample mean distributions
###Code
plt.hist(sr, bins=30,normed=True, label='the distribution of the average n=5')
plt.hist(sr1, bins=30,normed=True, label='the distribution of the average n=10')
plt.hist(sr2, bins=30,normed=True, label='the distribution of the average n=15')
plt.ylabel('$f(x)$')
plt.xlabel('$x$')
plt.legend(loc='upper left')
###Output
_____no_output_____ |
UnRel_Preprocessing.ipynb | ###Markdown
Flickr30k dataset exploration* This notebook will help us understand what is needed in order to produce captions on the UnRel dataset by investigating how captions are generated on the Flickr30k dataset. Imports
###Code
import json
import os
CWD = os.getcwd()
CWD
dataset_filename = os.path.join(CWD, "NBT", "data", "flickr30k", "dataset_flickr30k.json")
with open(dataset_filename, "r") as f:
dataset = json.load(f, encoding="utf-8")
splits = {}
for img in dataset["images"]:
if splits.get(img["split"]):
splits[img["split"]] += 1
else:
splits[img["split"]] = 1
splits
###Output
_____no_output_____
###Markdown
**Comment:*** My remark about how only the "val" split was used in NBT might be invalid. At least there is a "test" split in the original splits by Karpathy. dataset_unrel.json* My goal now is to apply some preprocessing to the UnRel captions that we produced in order to generate a file that looks like the original "dataset_flickr30k.json"
###Code
import csv
root = {"dataset": "UnRel",
"images": list()}
unrel_dataset_filename = os.path.join(CWD, "data", "unrelcropped.csv")
with open(unrel_dataset_filename, "r") as f:
reader = csv.reader(f, delimiter=",", )
for i, row in enumerate(reader):
if i == 0: # Header information
continue
filename = row[0]
sent1 = row[1]
sent1id = int(row[2])
sent2 = row[3]
sent2id = int(row[4])
sent3 = row[5]
sent3id = int(row[6])
imgid = int(filename.split(".")[0])
#sent1_tokens = {}
#for i, word in enumerate(sent1.split(" ")):
# sent1_tokens[i] = word.strip(".")
sentences = list()
for tup in [(sent1, sent1id), (sent2, sent2id), (sent3, sent3id)]:
sent, sentid = tup
tokens = {i: word.strip(".") for i, word in enumerate(sent.split(" "))}
sentences.append({"tokens": tokens,
"raw": sent,
"imgid": imgid,
"sentid": sentid})
# TODO: Build sub dictionary in images list
tmp = {"sentids": [sent1id, sent2id, sent3id],
"imgid": imgid,
"split": "test",
"filename": filename,
"sentences": sentences}
# TODO: Append dictionary in images list
root["images"].append(tmp)
# Dump the json
with open(os.path.join(CWD, "data", "dataset_unrel.json"), "w") as f:
json.dump(root, f)
###Output
_____no_output_____
###Markdown
cap_unrel.json* We do it the **COCO** way. The dictionary ``root`` is a list of a list of *captions* that are in the form of a sequence of *tokens*.
###Code
dataset_filename = os.path.join(CWD, "data", "dataset_unrel.json")
caption_filename = os.path.join(CWD, "data", "cap_unrel.json")
with open(dataset_filename, "r") as f:
dataset = json.load(f, encoding="utf-8")
n_images = len(dataset["images"])
n_captions = 3
print("Annotated images in UnRel: ", n_images)
print("Number of captions per image: ", n_captions)
root = list()
for i, image in enumerate(dataset["images"]):
sentences = list()
for j, sentence in enumerate(image["sentences"]):
sentences.append(sentence["tokens"])
root.append(sentences)
with open(caption_filename, "w") as f:
json.dump(root, f)
###Output
_____no_output_____
###Markdown
dic_unrel.json* We do it the **COCO** way but using **Flickr30k** vocabulary since there is a greater overlap (see Categories_Exploration notebook).* We only wish to modify the ``images`` field in ``dic_flickr30k.json`` to populate it with the **UnRel** images instead. We save the result as ``dic_unrel.json``.
###Code
dataset_filename = os.path.join(CWD, "data", "dataset_unrel.json")
flickr30k_dic_filename = os.path.join(CWD, "NBT", "data", "flickr30k", "dic_flickr30k.json")
unrel_dic_filename = os.path.join(CWD, "data", "dic_unrel.json")
with open(dataset_filename, "r") as f:
dataset = json.load(f, encoding="utf-8")
with open(flickr30k_dic_filename, "r") as f:
flickr30k_dic = json.load(f, encoding="utf-8")
dataset["images"][0].keys()
flickr30k_dic["images"][0].keys()
# Copy dictionary
unrel_dic = dict(flickr30k_dic)
# Build a list of images to replace the current flickr30k_dic field
images = list()
for i, image in enumerate(dataset["images"]):
images.append({"file_path": image["filename"],
"id": image["imgid"],
"split": image["split"]})
unrel_dic["images"] = images
# Save UnRel dictionary
with open(unrel_dic_filename, "w") as f:
json.dump(unrel_dic, f)
###Output
_____no_output_____
###Markdown
Now is time to modify the source code to introduce the UnRel dataset* Here we keep track of the changes we have made so far: * ``demo.py`` => ``demo_unrel.py`` * Remove ``bboxs`` and ``masks`` in demo code (not used anyway) * ``main.py`` => ``main_unrel.py`` * ``dataloader_flickr30k.py`` => ``dataloader_unrel.py`` * Add ``utils.RandomCrop`` * Rewrite the proposals h5 file loading part * Get rid of useless ``gt_seq``, ``gt_bboxes``, ``mask``, ``input_seq``... * ``utils.RandomCropWithBbox`` => ``utils.RandomCrop`` * Remove the ``bboxs`` component in the random crop * Next step, try ``demo_unrel.py`` **Current progress:*** The proposals I am using only contain the Language evaluation .json file* This section aims to create a .json file similar to ``caption_flickr30k.json`` found in ``tools/coco-caption/pycocotools/annotations`` so that the language evaluation can be conducted.* The expected file format is as follow: ``root = {"images": [{"file_name": ..., "id": ...}, ... ] (duplicated images), "info": None, "licenses": None, "type": "captions", "annotations": [{"image_id", "id", "caption" (raw)}, ...]}``
###Code
import json
import os
CWD = os.getcwd()
with open(os.path.join(CWD, "data", "dataset_unrel.json"), "r") as f:
dataset = json.load(f)
dataset.keys()
root = {"images": list(),
"info": None,
"licenses": None,
"type": "captions",
"annotations": list()}
sent_id = 0
img_id = 0
for i, image in enumerate(dataset["images"]):
for j, sent in enumerate(image["sentences"]):
root["images"].append({"file_name": image["filename"],
"id": image["imgid"]})
root["annotations"].append({"image_id": image["imgid"],
"id": sent["sentid"],
"caption": sent["raw"]})
# Dump the json
with open(os.path.join(CWD, "NBT", "tools", "coco-caption", "annotations", "caption_unrel.json"), "w") as f:
json.dump(root, f)
###Output
_____no_output_____ |
2017/ferran/day07/day07.ipynb | ###Markdown
Recursive Circus Part 1
###Code
import csv
import copy
def parse_list():
weights = {}
parents = {}
children = {}
with open('input.txt', 'rt') as f_input:
csv_reader = csv.reader(f_input, delimiter=' ')
for line in csv_reader:
prog = line[0]
weights[prog] = int(line[1][1: -1])
if '->' in line:
ind = line.index('->')
offspring = [a.rstrip(',') for a in line[ind + 1:]]
children[prog] = offspring
for child in offspring:
parents[child] = prog
else:
children[prog] = []
return weights, parents, children
weights, parents, children = parse_list()
def has_bottom(prog):
for k, v in children.items():
if prog in children[k]:
return True
return False
def bottomest():
for prog in children:
if not has_bottom(prog):
return prog
bottomest()
###Output
_____no_output_____
###Markdown
Part 2
###Code
weights, parents, children = parse_list()
# we shall not assume all the children are leaves
def pick_cherry(leaves):
while leaves:
leaf = leaves.pop()
parent = parents[leaf]
offspring = children[parent]
try:
for child in offspring:
assert(children[child] == [])
return parent
except AssertionError:
pass
import copy
def scan_tower():
weights_to_prune = copy.copy(weights)
leaves = [prog for prog in children if children[prog] == []]
while leaves:
parent = pick_cherry(leaves)
offspring = children[parent]
offspring_weights = [weights_to_prune[child] for child in offspring]
if offspring_weights[1:] == offspring_weights[:-1]:
# update weight of parent
weights_to_prune[parent] += sum(offspring_weights)
# prune balanced
for child in offspring:
del parents[child]
del weights_to_prune[child]
if child in leaves:
leaves.remove(child)
children[parent] = []
leaves.append(parent)
else:
print('weights of the cherry: ', [weights[child] for child in offspring])
print('total weights supported by the cherry: ', offspring_weights)
print('parent of the cherry: ', parent)
break
scan_tower()
1283 - (1823 - 1815)
###Output
_____no_output_____ |
Yudi TANG/Application/Exploratory data analysis/notebooks/2-Aquastat-EDA/1-Aquastat-Introduction.ipynb | ###Markdown
The dataset We will be using the [Food and Agriculture Organization](http://www.fao.org) (FAO) of the United Nations' AQUASTAT dataset. From FAO: FAO's three main goals are: 1. the eradication of hunger, food insecurity and malnutrition; 2. the elimination of poverty and the driving forward of economic and social progress; 3. the sustainable management and utilization of natural resources, including land, water, air, climate and genetic resources, for the benefit of present and future generations. To support these goals, Article 1 of FAO's Constitution requires it to "collect, analyse, interpret and disseminate information relating to nutrition, food and agriculture". Hence AQUASTAT began, with the aim of contributing to FAO's goals through the collection, analysis and dissemination of information related to water resources, water uses and agricultural water management, with an emphasis on countries in Africa, Asia, Latin America and the Caribbean. FAO provides data, metadata, reports, country profiles, river basin profiles, regional analyses, maps, tables, spatial data, guidelines and other online tools on: * Water resources: internal, transboundary, total * Water uses: by sector, by source, wastewater * Irrigation: location, area, typology, technology, crops * Dams: location, height, capacity, surface area * Water-related institutions, policies and legislation http://www.fao.org/nr/water/aquastat/data/query/index.html Question: *Are water availability and water use related to GDP per capita?* Our plan Exploratory data analysis consists of the following major tasks, which we present linearly here because each task doesn't make much sense to do without the ones prior to it. However, in reality, you are going to constantly jump around from step to step. You may want to do all the steps for a subset of the variables first. Or often, an observation will bring up a question you want to investigate and you'll branch off and explore to answer that question before returning down the main path of exhaustive EDA. 1. **Form hypotheses/develop investigation themes to explore** 2. **Wrangle data** 3. Assess quality of data 4. Profile data 5. Explore each individual variable in the dataset 6. Assess the relationship between each variable and the target 7. Assess interactions between variables 8. Explore data across many dimensions Throughout the entire analysis you want to: * Capture a list of hypotheses and questions that come up for further exploration. * Record things to watch out for/ be aware of in future analyses. * Show intermediate results to colleagues to get a fresh perspective, feedback, domain knowledge. Don't do EDA in a bubble! Get feedback throughout especially from people removed from the problem and/or with relevant domain knowledge. * Position visuals and results together. EDA relies on your natural pattern recognition abilities so maximize what you'll find by putting visualizations and results in close proximity. Things to consider doing **Make your data [tidy](https://tomaugspurger.github.io/modern-5-tidy.html)** 1. Each variable forms a column 2. Each observation forms a row 3. Each type of observational unit forms a table **Transform data** Sometimes you will need to transform your data to be able to extract information from it. This step will usually occur after some of the other steps of EDA unless domain knowledge can inform these choices beforehand. Transforms include: * Log: when data is highly skewed (versus normally distributed like a bell curve), sometimes it has a log-normal distribution and taking the log of each data point will normalize it. * Binning of continuous variables: Binning continuous variables and then analyzing the groups of observations created can allow for easier pattern identification. Especially with non-linear relationships. * Simplifying of categories: you really don't want more than 8-10 categories within a single data field. Try to aggregate to higher-level categories when it makes sense. Load the data
###Code
data = pd.read_csv('aquastat.csv.gzip', compression='gzip')
data.head()
data.shape
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 143280 entries, 0 to 143279
Data columns (total 7 columns):
country 143280 non-null object
region 143280 non-null object
variable 143280 non-null object
variable_full 143280 non-null object
time_period 143280 non-null object
year_measured 96411 non-null float64
value 96411 non-null float64
dtypes: float64(2), object(5)
memory usage: 7.7+ MB
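###Markdown
The "Transform data" notes above mention log transforms and binning; here is a minimal illustrative sketch (added for clarity, not part of the original analysis) applied to the long-format `value` column:
###Code
# illustrative only: log-transform the skewed values and bin them into 5 groups
import numpy as np
log_values = np.log(data.value.dropna() + 1)
binned_values = pd.cut(data.value.dropna(), bins=5)
###Output
_____no_output_____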
###Markdown
Research the variables
###Code
data[['variable','variable_full']].drop_duplicates()
#total_area: total area of the country (1000 ha)
#arable_land: arable land area
#permanent_crop_area: area under permanent crops
#cultivated_area: cultivated area
#percent_cultivated: share of the total area that is cultivated
#total_pop: total population
#rural_pop: rural population
#urban_pop: urban population
#gdp: gross domestic product
#gdp_per_capita: GDP per capita
#agg_to_gdp: agriculture, value added to GDP
#human_dev_index: Human Development Index
#gender_inequal_index: Gender Inequality Index
#percent_undernourished: prevalence of undernourishment
#avg_annual_rain_depth: long-term average annual precipitation depth
#national_rainfall_index: national rainfall index
###Output
_____no_output_____
###Markdown
Describe the panel 199 unique countries involved
###Code
data.country.nunique()
countries = data.country.unique()
###Output
_____no_output_____
###Markdown
For 12 time periods
###Code
data.time_period.nunique()
###Output
_____no_output_____
###Markdown
Each 5 years in length since 1958
###Code
time_periods = data.time_period.unique()
print(time_periods)
mid_periods = range(1960,2017,5)
###Output
_____no_output_____
###Markdown
The dataset is unbalanced because not every country has data in every time period (more on missing data in the next notebook).
###Code
data[data.variable=='total_area'].value.isnull().sum()
###Output
_____no_output_____
###Markdown
Ways to look at this data We can look at this data set in a number of ways: * Cross-section: all countries during a single time period * Time series: a single country over time * Panel data: all countries over time (as the data is given) * Geospatial: all countries linked geographically Slicing For a given time slice
###Code
def time_slice(df, time_period):
# Only take data for time period of interest
df = df[df.time_period==time_period]
# Pivot table
df = df.pivot(index='country', columns='variable', values='value')
df.columns.name = time_period
return df
time_slice(data, time_periods[0]).head()
###Output
_____no_output_____
###Markdown
For a given country
###Code
def country_slice(df, country):
# Only take data for country of interest
df = df[df.country==country]
# Pivot table
df = df.pivot(index='variable', columns='time_period', values='value')
df.index.name = country
return df
country_slice(data, countries[40]).head()
###Output
_____no_output_____
###Markdown
By variable
###Code
def variable_slice(df, variable):
# Only data for that variable
df = df[df.variable==variable]
# Get variable for each country over the time periods
df = df.pivot(index='country', columns='time_period', values='value')
return df
variable_slice(data, 'total_pop').head()
###Output
_____no_output_____
###Markdown
Time series for given country and variable
###Code
def time_series(df, country, variable):
# Only take data for country/variable combo
series = df[(df.country==country) & (df.variable==variable)]
# Drop years with no data
series = series.dropna()[['year_measured', 'value']]
# Change years to int and set as index
series.year_measured = series.year_measured.astype(int)
series.set_index('year_measured', inplace=True)
series.columns = [variable]
return series
time_series(data, 'Belarus', 'total_pop')
###Output
_____no_output_____
###Markdown
By region We may want to look at certain subsets of the data for some assessments. Region is an intuitive way to split up the data.
###Code
data.region.unique()
###Output
_____no_output_____
###Markdown
Reducing the number of regions makes pattern assessment easier. Create a dictionary to look up new, simpler regions (Asia, North America, South America, Africa, Europe, Oceania).
###Code
simple_regions ={
'World | Asia':'Asia',
'Americas | Central America and Caribbean | Central America': 'North America',
'Americas | Central America and Caribbean | Greater Antilles': 'North America',
'Americas | Central America and Caribbean | Lesser Antilles and Bahamas': 'North America',
'Americas | Northern America | Northern America': 'North America',
'Americas | Northern America | Mexico': 'North America',
'Americas | Southern America | Guyana':'South America',
'Americas | Southern America | Andean':'South America',
'Americas | Southern America | Brazil':'South America',
'Americas | Southern America | Southern America':'South America',
'World | Africa':'Africa',
'World | Europe':'Europe',
'World | Oceania':'Oceania'
}
data.region = data.region.apply(lambda x: simple_regions[x])
print(data.region.unique())
###Output
['Asia' 'North America' 'South America' 'Africa' 'Europe' 'Oceania']
###Markdown
A function to extract a single region
###Code
def subregion(data, region):
return data[data.region==region]
###Output
_____no_output_____ |
practicals/08.FeatureTransformations.ipynb | ###Markdown
Learning Objectives This should be another short one, but we need to go ahead and do this. I have shown you how to transform qualitative features into quantitative ones, and this is quite useful for computing summary statistics, doing the bootstrap and making visualizations, but now we are starting to be interested in prediction. For this we will need to do a slightly different feature transformation for qualitative variables and we will need to do some cleaning of quantitative variables too. These steps are outlined below: 1. Dummy variable drop one 2. Impute or drop NaNs 3. Standardization Again, before I go on, the above steps are not a complete list of what you could do and are not always needed, which is why I'll try to give some intuition into why we do them and what else can be done: Dummy Variable drop If you remember from a couple of lessons ago we talked about transforming qualitative variables into quantitative ones by creating an equal number of columns as there were qualitative values. This is a good approach for visualization or trying to come up with confidence intervals for specific features under different conditions, but it is not effective for machine learning. In most machine learning algorithms you will need to remove correlated features as they can prove detrimental to the results. You can determine which features might be highly correlated by using a correlation matrix; let's use one below:
###Code
import pandas as pd
import numpy as np
df = pd.read_csv('../data/billionaires.csv')
del df['was founder']
del df['inherited']
del df['from emerging']
df.age.replace(-1, np.NaN, inplace=True)
df.founded.replace(0, np.NaN, inplace=True)
df.gdp.replace(0, np.NaN, inplace=True)
df[['gdp', 'worth in billions']].corr()
###Output
_____no_output_____
###Markdown
So notice that the above are not correlated features, so you will generally not have to worry about them. That being said, the correlation matrix above will only measure linear correlation between two entities, so you will need to use your head to figure out what is correlated sometimes. And this is all too true when talking about dummy variables. Let's say you have a qualitative feature that is red, green and blue. If you make three columns, these three columns will be completely dependent on each other. Why? Because if I know that the red column is 0 and the green column is 0, then I am completely assured that the blue column is 1. In fact, if I know what the green and the red columns are, I will always know what the blue column is. Thus the columns all together are correlated and redundant. We can solve this issue easily by dropping one of the dummy variable columns. I'll show you a quick way to do this below:
###Code
pd.get_dummies(df, columns=['wealth.type'], drop_first=True).head()
###Output
_____no_output_____
###Markdown
Impute or Drop NaNs This is an important step that is often missed. Machine learning algorithms don't know what to do with missing data on their own. The best way to deal with missing data is task dependent, but there are some common strategies that work. One of these strategies is to impute the data points: this is where we try to infer what the missing values were. Some simple imputation strategies would be to take the median or the mean of the data; I'll show you how to do this below:
###Code
from sklearn.preprocessing import Imputer
imp = Imputer(strategy='median')
df[['age']].info()
imp.fit_transform(df[['age']]).shape
###Output
_____no_output_____
###Markdown
Here we filled all the missing ages with the median age. The second thing you can do is drop missing values. Where you have plenty of data points and the missingness is not correlated with other variables, this can work particularly well. I'll show you how to do this below:
###Code
df[['age']].dropna().count()
###Output
_____no_output_____
###Markdown
Standardization This is the final common technique that is used. In this technique we normalize the mean and variance of our features. This can be good or bad. Some ML algorithms will work much better with standardized features (especially those with complex learning algorithms). It can be good because certain features have a much bigger scale than others (say, the gdp above), but this can also be bad at times: sometimes you will want to retain the original feature values for interpretability. One of the small things that you will want to do before you do this is remove features of very low variance. I'll show you how to do this below:
###Code
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
df[['age']].dropna().head()
std.fit_transform(df[['age']].dropna())[:5]
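# The text above also promises an example of removing near-constant features before scaling.
# A minimal sketch (assumes sklearn's VarianceThreshold; the cutoff of 0.01 is purely illustrative):
from sklearn.feature_selection import VarianceThreshold
vt = VarianceThreshold(threshold=0.01)  # keeps only features whose variance exceeds 0.01
# vt.fit_transform(df[['age']].dropna())  # returns the columns that pass the threshold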
###Output
_____no_output_____ |
4.ComorbidityAnalysisByMedicationAuthomatic.ipynb | ###Markdown
Comorbidity analysis by medications ACT ontology mapping- To calculate the prevalence of various comorbidities in the ASD cohort, we first had to develop a mapping table using the ACT Ontology v2.0.1, which contained ICD-9 and ICD-10 codes.- 108,024 distinct ICD-9 and ICD-10 codes were mapped to ACT terms aggregated in levels using Microsoft Excel. The LEN function, which is built into Excel, was used to construct the following formula: **=LEN(A3)-LEN(SUBSTITUTE(A3,"\",""))**- When applied to the input table, this formula transformed the ICD-9 and ICD-10 codes into a table organized from Level 0 (the most general category) to Level 9 (the deepest category). ICD-10 codes were organized up to category n, while ICD-9 codes were organized up to category n-1.- The transformed table was then uploaded into the SQL server and named ACT_ICD10_ICD9_3. - To retrieve comorbidities associated with each individual in the ASD cohort and join the ASD cohort with the ACT Ontology mapping table, the ASDMembers, FactICD, and ACT_codes all had to be joined. Since this mapping process was time-intensive, it was performed separately for each level of the mapping table. Comorbidity analysis- In order to estimate the prevalence of comorbidities in the ASD cohort based on the primary medication being taken, the previously mapped ACT table had to also be joined with a table of pharmacy claims.- First, pharmacy claims for the ASD cohort from 2014-2019 was retrieved.- The ASD cohort was then divided into subsets based on the medications of interest in this study (e.g., methylphenidate, guanfacine). First, a subset of the cohort was created for individuals taking each drug. Medication Input listFirst we read the file that contains all the medications that we are analyzing.
###Code
library("devtools")
library("SqlServerJtds")
library("SqlTools")
library("FactToCube")
library("ggplot2")
library("plotly")
library("ggalluvial")
medInputList <- read.delim("./medInputList", header = TRUE, sep = "\t", colClasses = "character")
groups <- as.character( unique( medInputList$Group))
###Output
_____no_output_____
###Markdown
Query to identify patients taking only one specific drug
###Code
for( i in 1:length( groups )){
print(i)
meds <- paste( tolower(medInputList[ medInputList$Group == groups[i], "medicationName"]), collapse="%' OR LOWER(NdcDescription) like '%")
queryStart <- paste0( "SELECT DISTINCT MemberId, MIN(YEAR(DispenseDate)) AS DispenseYear,
sum( case when NdcDescription like '", meds, "%' then 1 else 0 end) as n_", groups[i],",")
otherMeds <- medInputList[ ! medInputList$Group %in% groups[i], ]
otherGroups <- unique(otherMeds$Group)
for( j in 1:length(otherGroups)){
print(j)
otherMedsList <- paste( tolower(otherMeds[ otherMeds$Group == otherGroups[j], "medicationName"]), collapse="%' OR LOWER(NdcDescription) like '%")
if( j == 1){
queryContinue <- paste0(queryStart, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) as n_", otherGroups[j],",")
}else if(j < length(otherGroups)){
queryContinue <- paste0(queryContinue, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) as n_", otherGroups[j],",")
}else{
queryEnds <- paste0(queryContinue, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) as n_", otherGroups[j],
" INTO ", groups[i], "_only from PharmacySubsetTest2014
GROUP BY MemberId having
sum( case when NdcDescription like '", meds, "%' then 1 else 0 end) > 0 and ")
}
}
for( w in 1:length(otherGroups)){
print(w)
otherMedsList <- paste( tolower(otherMeds[ otherMeds$Group == otherGroups[w], "medicationName"]), collapse="%' OR LOWER(NdcDescription) like '%")
if( w == 1){
finalQuery <- paste0(queryEnds, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) = 0 and ")
}else if(w < length(otherGroups)){
finalQuery <- paste0(finalQuery, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) = 0 and ")
}else{
finalQuery <- paste0(finalQuery, "sum( case when NdcDescription like '", otherMedsList, "%' then 1 else 0 end) = 0")
}
}
dbSendUpdate( cn, paste0("DROP TABLE IF EXISTS ", groups[i], "_only"))
dbSendUpdate( cn, finalQuery)
}
###Output
_____no_output_____
###Markdown
Create the ACTMap3 and ACTlevel3_CM_3Times tables
###Code
dbSendUpdate( cn, "drop table if exists ACTMap3")
dbSendUpdate( cn, "SELECT ASD.MemberId, F.DateServiceStarted, ACT.Level3
INTO ACTMap3
FROM
ASDMembers ASD
INNER JOIN FactIcd F ON
F.MemberId = ASD.MemberId
INNER JOIN ACT_codes ACT ON
ACT.IcdCode = F.Icd
WHERE YEAR(F.DateServiceStarted) >= 2012
GROUP BY ASD.MemberId, DateServiceStarted, Level3")
#only include individuals with comorbidities diagnosed >= 3 times
dbSendUpdate( cn, "drop table if exists ACTLevel3_CM_3Times")
dbSendUpdate( cn, "SELECT MemberId, Level3, COUNT(*) as Level3_counts
INTO ACTLevel3_CM_3Times
FROM ACTMap3
GROUP BY MemberID, Level3
HAVING COUNT(Level3) >= 3")
###Output
_____no_output_____
###Markdown
Subset patients taking only one drugSubset those that are only taking one of the drugs and extract the total number of patients in each group, that will be used later to estimate the prevalence of each comorbidity.
###Code
for( i in 1:length(groups)){
print( groups[i] )
# individuals in cohort taking ONLY one drug
print( dbGetQuery( cn, paste0( "SELECT COUNT(DISTINCT MemberId) FROM ", groups[i],"_only")))
print( "####")
#extract all the comorbidities from those patients
allComorb <- dbGetQuery( cn, paste0("SELECT A.MemberId, A.Level3, YEAR(A.minDate) as minDatee FROM ACTLevel3_CM_3Times A
INNER JOIN ",
groups[i],"_only G ON
A.MemberId = G.MemberId"))
#extract all comorbidities before the first time drug was prescribed
allComorbBefore <- dbGetQuery( cn, paste0("SELECT A.MemberId, A.Level3, YEAR(A.minDate) as minDatee FROM ACTLevel3_CM_3Times A
INNER JOIN ",
groups[i],"_only G ON
A.MemberId = G.MemberId
WHERE YEAR(A.minDate) < G.DispenseYear"))
#estimate the prevalence of each comorbidity and save it into a table
prevalenceAll <- as.data.frame( table( allComorb$Level3))
colnames(prevalenceAll) <- c("Level3","Level3_prevalence")
dbSendUpdate( cn, paste0( "DROP TABLE IF EXISTS ", groups[i],"_CMs_all"))
dbWriteTable( cn, paste0(groups[i],"_CMs_all"), prevalenceAll, row.names = FALSE)
#estimate the prevalence of each comorbidity and save it into a table
prevalenceBefore <- as.data.frame( table( allComorbBefore$Level3))
colnames(prevalenceBefore) <- c("Level3","Level3_prevalence")
dbSendUpdate( cn, paste0( "DROP TABLE IF EXISTS ", groups[i],"_CMs_before"))
dbWriteTable( cn, paste0(groups[i],"_CMs_before"), prevalenceBefore, row.names = FALSE)
}
###Output
_____no_output_____
###Markdown
Heatmap representation: all comorbidities
###Code
inputData <- as.data.frame( matrix(ncol=3, nrow=length(groups)))
colnames(inputData) <- c("Drug", "n", "tableName")
for( i in 1:length(groups)){
inputData$Drug[i] <- groups[i]
inputData$n[i] <- dbGetQuery( cn, paste0( "SELECT COUNT(DISTINCT MemberId) FROM ", groups[i],"_only"))
inputData$tableName[i] <- paste0( groups[i],"_CMs_all")
}
for( i in 1:nrow( inputData)){
queryCounts <- paste0( "SELECT * FROM ", inputData$tableName[i],
" ORDER BY Level3_prevalence DESC")
print( i )
if( i == 1){
outputAll <- dbGetQuery( cn, queryCounts )
outputAll$drug <- inputData$Drug[i]
outputAll$totalPatients <- inputData$n[i]
}else{
intermediateOutputAll <- dbGetQuery( cn, queryCounts )
intermediateOutputAll$drug <- inputData$Drug[i]
intermediateOutputAll$totalPatients <- inputData$n[i]
outputAll <- rbind( outputAll, intermediateOutputAll )
}
}
###Output
_____no_output_____
###Markdown
Heatmap representation: all comorbidities before first time drug was prescribed
###Code
inputData <- as.data.frame( matrix(ncol=3, nrow=length(groups)))
colnames(inputData) <- c("Drug", "n", "tableName")
for( i in 1:length(groups)){
inputData$Drug[i] <- groups[i]
inputData$n[i] <- dbGetQuery( cn, paste0( "SELECT COUNT(DISTINCT MemberId) FROM ", groups[i],"_only"))
inputData$tableName[i] <- paste0( groups[i],"_CMs_before")
}
for( i in 1:nrow( inputData)){
queryCounts <- paste0( "SELECT * FROM ", inputData$tableName[i],
" ORDER BY Level3_prevalence DESC")
print( i )
if( i == 1){
outputBefore <- dbGetQuery( cn, queryCounts )
outputBefore$drug <- inputData$Drug[i]
outputBefore$totalPatients <- inputData$n[i]
}else{
intermediateOutputBefore <- dbGetQuery( cn, queryCounts )
intermediateOutputBefore$drug <- inputData$Drug[i]
intermediateOutputBefore$totalPatients <- inputData$n[i]
outputBefore <- rbind( outputBefore, intermediateOutputBefore )
}
}
###Output
_____no_output_____
###Markdown
Percentage of patients with each comorbidityWe estimate the percentage of patients with each comorbidities and we do a first subset selecting only those comorbidities that are in at least 10% of the patients. We mapp to the ACT levels, to later aggregate by a higher category if need it, and we remove some comorbidities that are not considered as clinically relevant for this study.
###Code
output <- outputAll
#output <- outputBefore
output$totalPatients <- as.numeric( output$totalPatients )
output$percentage <- round( 100*(output$Level3_prevalence / output$totalPatients), 3)
#select only those comorbidities in at least 1% of the patients
outputSubset <- output[ output$percentage >= 1, ]
#map to act
actMapping <- dbGetQuery( cn, "SELECT Level1, Level3 FROM ACT_ICD10_ICD9_3")
actMapping <- actMapping[!duplicated( actMapping), ]
actMapping <- actMapping[!duplicated( actMapping$Level3 ), ]
#mapped the level3 to level1
outputMapped <- merge( outputSubset, actMapping)
#exclude the comorbidities that are not clinically relevant
excludedGroups <- c('Autistic disorder',
'Encounter for newborn, infant and child health examinations',
'motorized bicycle',
'Other unknown and unspecified cause of morbidity or mortality',
'Need for prophylactic vaccination and inoculation, Influenza',
'Bus occupant injured in transport accident (v70-v79)',
'Encounter for other specified aftercare',
'Other long term (current) drug therapy',
'Body mass index (bmi) pediatric',
'Pharyngitis (acute) nos',
'Acute upper respiratory infection, unspecified',
'Acne vulgaris',
'Hyperlipidemia, unspecified',
'Encounter for adult periodic examination (annual) (physical) and any associated laboratory and radiologic examinations'
)
outputMapped <- outputMapped[! outputMapped$Level3 %in% excludedGroups, ]
save(outputMapped, file = "outputMapped.RData")
###Output
_____no_output_____
###Markdown
Plot the heatmap
###Code
#plot the heatmap
htmpComorbBefore <- ggplot(outputMapped, aes(drug, Level3, fill= percentage)) +
geom_tile()+
ggplot2::theme_bw() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1),
panel.grid = element_blank(),
text = ggplot2::element_text(size = 6),
axis.title = ggplot2::element_text(size = 6))
save(htmpComorbBefore, file = "./htmpComorbBefore.RData")
### plot in R studio (out from o2)
load( "./outputMapped.RData")
#remove Autistic disorder (sanity check, all patients should have ASD)
toplot <- outputMapped[! outputMapped$Level3 %in% c("Autistic disorder", "Asperger's syndrome"), ]
#select 10% to filter
drugs <- unique(toplot$drug)
for( i in 1:length(drugs)){
selection <- toplot[ toplot$drug == drugs[i] &
toplot$percentage > 10, ]
if(i == 1){
phenoList <- selection$Level3
}else{
subSet <- selection$Level3
phenoList <- unique( c( phenoList, subSet))
}
}
toplot <- toplot[ toplot$Level3 %in% phenoList, ]
toplot <- toplot[ order(toplot$Level1), ]
# sort/display the groups based on groups
groups <- as.character( unique( medInputList$Group))
toplot$drug <- factor(toplot$drug, levels=groups)
# they should be same!
stopifnot(sort(unique(outputMapped$drug))==sort(groups))
# unify the names of the comorbidities removing the ICD original code
toplot$Level3[ toplot$Level3 == "(K59.0) Constipation"] <- "Constipation"
toplot$Level3[ toplot$Level3 == "(G47.0) Insomnia"] <- "Insomnia"
# cut the name of the comorbidity to the first 60 characters
toplot$Level3 <- stringr::str_wrap(toplot$Level3, 60)
# sort/display the comorbidities based on Level 1 ACT ontology
act_level <- as.character( unique( toplot$Level3))
toplot$Level3 <- factor(toplot$Level3, levels=act_level)
#create the heatmap
htmpOutput<- ggplot(toplot, aes(drug, Level3, fill= percentage)) + # 60
geom_tile()+
scale_fill_gradient(low="white", high="blue") +
#scale_fill_distiller(palette = "YlOrRd")+
#scale_fill_continuous(low="#F7FBFF", high="#2171B5", name="Events")+
ggplot2::theme_bw() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 45, hjust = 1,face="bold"),
panel.grid = element_blank(),
axis.text.y = ggplot2::element_text(size=rel(0.9)),
axis.title = ggplot2:: element_text(size=rel(1.05)))+
labs(title = NULL, x = "", y = "",fill="Percentage")
htmpOutput
write.table( toplot, file="heatmap_values.txt",
col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
ggsave(filename="htmpOutput_L3.png", plot=htmpOutput, device="png",
height=11, width=14, units="in", dpi=500)
htmpOutput2 <- htmpOutput + scale_y_discrete(position = "right") +
facet_grid( vars( stringr::str_wrap(Level1, 40)), scales = "free", space = "free",switch = "y") +
theme(strip.text.y.left = element_text(angle = 0,size=rel(1.0)))
htmpOutput2
ggsave(filename="htmpOutputL3_L1.png", plot=htmpOutput2, device="png",
height=12.5, width=15, units="in", dpi=500)
###Output
_____no_output_____ |
code/17.networkx.ipynb | ###Markdown
Network Science Theory ****** Network Science: Analyzing Complex Networks with NetworkX ****** Wang Chengjun (王成军) [email protected] Computational Communication (计算传播网) http://computational-communication.com http://networkx.readthedocs.org/en/networkx-1.11/tutorial/
###Code
%matplotlib inline
import networkx as nx
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph() # G = nx.DiGraph() # directed network
# add an (isolated) node
G.add_node("spam")
# add nodes and an edge
G.add_edge(1,2)
print(G.nodes())
print(G.edges())
# draw the network
nx.draw(G, with_labels = True)
###Output
_____no_output_____
###Markdown
WWW Data download http://www3.nd.edu/~networks/resources.htm https://pan.baidu.com/s/1o86ZaTc World-Wide-Web: [README] [DATA] Réka Albert, Hawoong Jeong and Albert-László Barabási: Diameter of the World Wide Web, Nature 401, 130 (1999) [ PDF ] Assignment: - download the WWW data - build a networkx graph object g (hint: it is a directed network) - add the WWW data to g - count the number of nodes and edges in the network
###Code
G = nx.Graph()
n = 0
with open ('/Users/datalab/bigdata/cjc/www.dat.gz.txt') as f:
for line in f:
n += 1
#if n % 10**4 == 0:
#flushPrint(n)
x, y = line.rstrip().split(' ')
G.add_edge(x,y)
nx.info(G)
###Output
_____no_output_____
###Markdown
描述网络 nx.karate_club_graph 我们从karate_club_graph开始,探索网络的基本性质。
###Code
G = nx.karate_club_graph()
clubs = [G.node[i]['club'] for i in G.nodes()]
colors = []
for j in clubs:
if j == 'Mr. Hi':
colors.append('r')
else:
colors.append('g')
nx.draw(G, with_labels = True, node_color = colors)
G.node[1], G.node[9] # attributes of nodes 1 and 9
G.edges # EdgeView of all edges (the first three are listed a few lines below)
#dir(G)
nx.info(G)
G.nodes()
list(G.edges())[:3]
print(*G.neighbors(1))
nx.average_shortest_path_length(G)
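# Added illustration (uses the karate-club graph G from above): count the members of each club.
from collections import Counter
Counter(nx.get_node_attributes(G, 'club').values())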
###Output
_____no_output_____
###Markdown
网络直径
###Code
nx.diameter(G)#返回图G的直径(最长最短路径的长度)
###Output
_____no_output_____
###Markdown
密度
###Code
nx.density(G)
nodeNum = len(G.nodes())
edgeNum = len(G.edges())
2.0*edgeNum/(nodeNum * (nodeNum - 1))
###Output
_____no_output_____
###Markdown
作业:- 计算www网络的网络密度 聚集系数
###Code
cc = nx.clustering(G)
cc.items()
plt.hist(list(cc.values()), bins = 15)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
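# Added sketch: two standard one-number summaries of clustering for the same graph G.
print("average clustering:", nx.average_clustering(G))
print("transitivity      :", nx.transitivity(G))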
###Output
_____no_output_____
###Markdown
Spacing in Math ModeIn a math environment, LaTeX ignores the spaces you type and puts in the spacing that it thinks is best. LaTeX formats mathematics the way it's done in mathematics texts. If you want different spacing, LaTeX provides the following four commands for use in math mode:\; - a thick space\: - a medium space\, - a thin space\\! - a negative thin space 匹配系数
###Code
# M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003
nx.degree_assortativity_coefficient(G) #计算一个图的度匹配性。
Ge=nx.Graph()
Ge.add_nodes_from([0,1],size=2)
Ge.add_nodes_from([2,3],size=3)
Ge.add_edges_from([(0,1),(2,3)])
node_size = [list(Ge.node[i].values())[0]*1000 for i in Ge.nodes()]
nx.draw(Ge, with_labels = True, node_size = node_size)
print(nx.numeric_assortativity_coefficient(Ge,'size'))
# plot degree correlation
from collections import defaultdict
import numpy as np
l=defaultdict(list)
g = nx.karate_club_graph()
for i in g.nodes():
k = []
for j in g.neighbors(i):
k.append(g.degree(j))
l[g.degree(i)].append(np.mean(k))
#l.append([g.degree(i),np.mean(k)])
x = list(l.keys())
y = [np.mean(i) for i in l.values()]
#x, y = np.array(l).T
plt.plot(x, y, 'ro', label = '$Karate\;Club$')
plt.legend(loc=1,fontsize=10, numpoints=1)
plt.xscale('log'); plt.yscale('log')
plt.ylabel(r'$<k_{nn}(k)>$', fontsize = 20)
plt.xlabel('$k$', fontsize = 20)
plt.show()
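# Added sketch (assumes the graph g from above): networkx provides the average
# nearest-neighbour degree per degree class directly, mirroring the hand-made plot.
knn = nx.average_degree_connectivity(g)
sorted(knn.items())[:5]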
###Output
_____no_output_____
###Markdown
Degree centrality measures.(度中心性)* degree_centrality(G) Compute the degree centrality for nodes.* in_degree_centrality(G) Compute the in-degree centrality for nodes.* out_degree_centrality(G) Compute the out-degree centrality for nodes.* closeness_centrality(G[, v, weighted_edges]) Compute closeness centrality for nodes.* betweenness_centrality(G[, normalized, ...]) Betweenness centrality measures.(介数中心性)
###Code
dc = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness= nx.betweenness_centrality(G)
fig = plt.figure(figsize=(15, 4),facecolor='white')
ax = plt.subplot(1, 3, 1)
plt.hist(list(dc.values()), bins = 20)
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
ax = plt.subplot(1, 3, 2)
plt.hist(list(closeness.values()), bins = 20)
plt.xlabel('$Closeness \, Centrality$', fontsize = 20)
ax = plt.subplot(1, 3, 3)
plt.hist(list(betweenness.values()), bins = 20)
plt.xlabel('$Betweenness \, Centrality$', fontsize = 20)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(15, 8),facecolor='white')
for k in betweenness:
plt.scatter(dc[k], closeness[k], s = betweenness[k]*10000)
plt.text(dc[k], closeness[k]+0.02, str(k))
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Closeness \, Centrality$', fontsize = 20)
plt.show()
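# Added sketch: rank the five most central nodes by betweenness, using the dict computed above.
sorted(betweenness, key=betweenness.get, reverse=True)[:5]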
###Output
_____no_output_____
###Markdown
度分布
###Code
from collections import defaultdict
import numpy as np
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in dict(G.degree()).values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P(K)$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
G = nx.karate_club_graph()
plotDegreeDistribution(G)
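# Alternative added sketch: networkx tabulates the degree histogram directly;
# hist[k] is the number of nodes with degree k.
hist = nx.degree_histogram(G)
[(k, c) for k, c in enumerate(hist) if c > 0][:5]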
###Output
_____no_output_____
###Markdown
网络科学理论简介****** 网络科学:分析网络结构******王成军 [email protected]计算传播网 http://computational-communication.com 规则网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
RG = nx.random_graphs.random_regular_graph(3,200)
#生成包含200个节点、 每个节点有3个邻居的规则图RG
pos = nx.spectral_layout(RG)
#定义一个布局,此处采用了spectral布局方式,后变还会介绍其它布局方式,注意图形上的区别
nx.draw(RG,pos,with_labels=False,node_size = range(1, 201))
#绘制规则图的图形,with_labels决定节点是非带标签(编号),node_size是节点的直径
plt.show() #显示图形
plotDegreeDistribution(RG)
###Output
/Users/datalab/Applications/anaconda/lib/python3.5/site-packages/matplotlib/axes/_base.py:3443: UserWarning: Attempting to set identical bottom==top results
in singular transformations; automatically expanding.
bottom=1.0, top=1.0
'bottom=%s, top=%s') % (bottom, top))
###Markdown
ER随机网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
ER = nx.random_graphs.erdos_renyi_graph(200,0.1)
#generate an ER random graph with 200 nodes and connection probability 0.1
pos = nx.spring_layout(ER)
#define a layout; the spring layout is used here
nx.draw(ER,pos,with_labels=False,node_size = 30)
plt.show()
#ER = nx.random_graphs.erdos_renyi_graph(2000,0.1)
plotDegreeDistribution(ER)
###Output
_____no_output_____
###Markdown
小世界网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
WS = nx.random_graphs.watts_strogatz_graph(200,4,0.3)
#生成包含200个节点、每个节点4个近邻、随机化重连概率为0.3的小世界网络
pos = nx.spring_layout(WS)
#define a layout; the spring layout is used here
nx.draw(WS,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
plotDegreeDistribution(WS)
nx.diameter(WS)
cc = nx.clustering(WS)
plt.hist(cc.values(), bins = 10)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
import numpy as np
np.mean(list(cc.values()))
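# Added sketch: the small-world signature is a short average path length combined with a
# high clustering coefficient. (Assumes the WS and ER graphs created above are still defined.)
for name, g_cmp in [("WS", WS), ("ER", ER)]:
    if nx.is_connected(g_cmp):
        print(name, "<d> =", round(nx.average_shortest_path_length(g_cmp), 2),
              " C =", round(nx.average_clustering(g_cmp), 3))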
###Output
_____no_output_____
###Markdown
BA网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(200,2)
#generate a BA scale-free network with n=200 nodes and m=2 edges per new node
pos = nx.spring_layout(BA)
#定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
plotDegreeDistribution(BA)
BA= nx.random_graphs.barabasi_albert_graph(20000,2)
#generate a BA scale-free network with n=20000 nodes and m=2 edges per new node
plotDegreeDistribution(BA)
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(500,1)
#generate a BA scale-free network with n=500 nodes and m=1 edge per new node
pos = nx.spring_layout(BA)
#定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
nx.degree_histogram(BA)[:3]
list(dict(BA.degree()).items())[:3]
plt.hist( list(dict(BA.degree()).values()) , bins = 100)
# plt.xscale('log')
# plt.yscale('log')
plt.show()
from collections import defaultdict
import numpy as np
def plotDegreeDistributionLongTail(G):
degs = defaultdict(int)
for i in list(dict(G.degree()).values()): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
BA= nx.random_graphs.barabasi_albert_graph(5000,2)
#generate a BA scale-free network with n=5000 nodes and m=2 edges per new node
plotDegreeDistributionLongTail(BA)
def plotDegreeDistribution(G):
    degs = defaultdict(int)
    for i in list(dict(G.degree()).values()): degs[i]+=1
    items = sorted ( degs.items () )
    x, y = np.array(items).T
    y_sum = np.sum(y)
    y = [float(i)/y_sum for i in y]  # normalise counts to probabilities, consistent with the P(K) label
    plt.plot(x, y, 'b-o')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(['Degree'])
    plt.xlabel('$K$', fontsize = 20)
    plt.ylabel('$P(K)$', fontsize = 20)
    plt.title('$Degree\,Distribution$', fontsize = 20)
    plt.show()
BA= nx.random_graphs.barabasi_albert_graph(50000,2)
#generate a BA scale-free network with n=50000 nodes and m=2 edges per new node
plotDegreeDistribution(BA)
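# Rough illustrative sketch (not a rigorous estimator): fit a straight line to the BA degree
# distribution in log-log space to approximate the power-law exponent gamma.
hist = np.array(nx.degree_histogram(BA), dtype=float)
k = np.nonzero(hist)[0]
k = k[k > 0]                      # avoid log(0)
pk = hist[k] / hist.sum()
slope, intercept = np.polyfit(np.log(k), np.log(pk), 1)
print("least-squares estimate of gamma ~", -slope)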
###Output
_____no_output_____
###Markdown
作业:- 阅读 Barabasi (1999) Diameter of the world wide web.Nature.401- 绘制www网络的出度分布、入度分布- 使用BA模型生成节点数为N、幂指数为$\gamma$的网络- 计算平均路径长度d与节点数量的关系
###Code
Ns = [i*10 for i in [1, 10, 100, 1000]]
ds = []
for N in Ns:
print(N)
BA= nx.random_graphs.barabasi_albert_graph(N,2)
d = nx.average_shortest_path_length(BA)
ds.append(d)
plt.plot(Ns, ds, 'r-o')
plt.xlabel('$N$', fontsize = 20)
plt.ylabel('$<d>$', fontsize = 20)
plt.xscale('log')
plt.show()
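# Hedged sketch for the remaining exercise items: in-/out-degree distributions of a directed
# WWW graph. The name g_www is an assumption for a DiGraph built from the downloaded edge list.
def plot_in_out_degree(g_www):
    in_deg = list(dict(g_www.in_degree()).values())
    out_deg = list(dict(g_www.out_degree()).values())
    plt.hist(in_deg, bins=100, alpha=0.5, label='in-degree')
    plt.hist(out_deg, bins=100, alpha=0.5, label='out-degree')
    plt.yscale('log')
    plt.legend()
    plt.show()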
###Output
_____no_output_____
###Markdown
网络科学理论****** 网络科学:使用NetworkX分析复杂网络******王成军 [email protected]计算传播网 http://computational-communication.com http://networkx.readthedocs.org/en/networkx-1.11/tutorial/
###Code
%matplotlib inline
import networkx as nx
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph() # G = nx.DiGraph() # 有向网络
# 添加(孤立)节点
G.add_node("spam")
# 添加节点和链接
G.add_edge(1,2)
print(G.nodes())
print(G.edges())
# 绘制网络
nx.draw(G, with_labels = True)
###Output
_____no_output_____
###Markdown
WWW Data download http://www3.nd.edu/~networks/resources.htmhttps://pan.baidu.com/s/1o86ZaTcWorld-Wide-Web: [README] [DATA]Réka Albert, Hawoong Jeong and Albert-László Barabási:Diameter of the World Wide Web Nature 401, 130 (1999) [ PDF ] 作业:- 下载www数据- 构建networkx的网络对象g(提示:有向网络)- 将www数据添加到g当中- 计算网络中的节点数量和链接数量
###Code
G = nx.Graph()
n = 0
with open ('/Users/chengjun/bigdata/www.dat.gz.txt') as f:
for line in f:
n += 1
#if n % 10**4 == 0:
#flushPrint(n)
x, y = line.rstrip().split(' ')
G.add_edge(x,y)
nx.info(G)
###Output
_____no_output_____
###Markdown
描述网络 nx.karate_club_graph 我们从karate_club_graph开始,探索网络的基本性质。
###Code
G = nx.karate_club_graph()
clubs = [G.node[i]['club'] for i in G.nodes()]
colors = []
for j in clubs:
if j == 'Mr. Hi':
colors.append('r')
else:
colors.append('g')
nx.draw(G, with_labels = True, node_color = colors)
G.node[1], G.node[9] # 节点1的属性 # 节点1的属性
G.edge.keys()[:3] # 前三条边的id
nx.info(G)
G.nodes()[:10]
G.edges()[:3]
G.neighbors(1)
nx.average_shortest_path_length(G)
###Output
_____no_output_____
###Markdown
网络直径
###Code
nx.diameter(G)#返回图G的直径(最长最短路径的长度)
###Output
_____no_output_____
###Markdown
密度
###Code
nx.density(G)
nodeNum = len(G.nodes())
edgeNum = len(G.edges())
2.0*edgeNum/(nodeNum * (nodeNum - 1))
###Output
_____no_output_____
###Markdown
作业:- 计算www网络的网络密度 聚集系数
###Code
cc = nx.clustering(G)
cc.items()[:5]
plt.hist(cc.values(), bins = 15)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
Spacing in Math ModeIn a math environment, LaTeX ignores the spaces you type and puts in the spacing that it thinks is best. LaTeX formats mathematics the way it's done in mathematics texts. If you want different spacing, LaTeX provides the following four commands for use in math mode:\; - a thick space\: - a medium space\, - a thin space\\! - a negative thin space 匹配系数
###Code
# M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003
nx.degree_assortativity_coefficient(G) #计算一个图的度匹配性。
Ge=nx.Graph()
Ge.add_nodes_from([0,1],size=2)
Ge.add_nodes_from([2,3],size=3)
Ge.add_edges_from([(0,1),(2,3)])
node_size = [Ge.node[i].values()[0]*1000 for i in Ge.nodes()]
nx.draw(Ge, with_labels = True, node_size = node_size)
print(nx.numeric_assortativity_coefficient(Ge,'size'))
# plot degree correlation
from collections import defaultdict
import numpy as np
l=defaultdict(list)
g = nx.karate_club_graph()
for i in g.nodes():
k = []
for j in g.neighbors(i):
k.append(g.degree(j))
l[g.degree(i)].append(np.mean(k))
#l.append([g.degree(i),np.mean(k)])
x = l.keys()
y = [np.mean(i) for i in l.values()]
#x, y = np.array(l).T
plt.plot(x, y, 'r-o', label = '$Karate\;Club$')
plt.legend(loc=1,fontsize=10, numpoints=1)
plt.xscale('log'); plt.yscale('log')
plt.ylabel(r'$<knn(k)$> ', fontsize = 20)
plt.xlabel('$k$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
Degree centrality measures.(度中心性)* degree_centrality(G) Compute the degree centrality for nodes.* in_degree_centrality(G) Compute the in-degree centrality for nodes.* out_degree_centrality(G) Compute the out-degree centrality for nodes.* closeness_centrality(G[, v, weighted_edges]) Compute closeness centrality for nodes.* betweenness_centrality(G[, normalized, ...]) Betweenness centrality measures.(介数中心性)
###Code
dc = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness= nx.betweenness_centrality(G)
fig = plt.figure(figsize=(15, 4),facecolor='white')
ax = plt.subplot(1, 3, 1)
plt.hist(dc.values(), bins = 20)
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
ax = plt.subplot(1, 3, 2)
plt.hist(closeness.values(), bins = 20)
plt.xlabel('$Closeness \, Centrality$', fontsize = 20)
ax = plt.subplot(1, 3, 3)
plt.hist(betweenness.values(), bins = 20)
plt.xlabel('$Betweenness \, Centrality$', fontsize = 20)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(15, 8),facecolor='white')
for k in betweenness:
plt.scatter(dc[k], closeness[k], s = betweenness[k]*1000)
plt.text(dc[k], closeness[k]+0.02, str(k))
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Closeness \, Centrality$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
度分布
###Code
from collections import defaultdict
import numpy as np
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P(K)$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
G = nx.karate_club_graph()
plotDegreeDistribution(G)
###Output
_____no_output_____
###Markdown
网络科学理论简介****** 网络科学:分析网络结构******王成军 [email protected]计算传播网 http://computational-communication.com 规则网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
RG = nx.random_graphs.random_regular_graph(3,200)
#生成包含200个节点、 每个节点有3个邻居的规则图RG
pos = nx.spectral_layout(RG)
#定义一个布局,此处采用了spectral布局方式,后变还会介绍其它布局方式,注意图形上的区别
nx.draw(RG,pos,with_labels=False,node_size = range(1, 201))
#绘制规则图的图形,with_labels决定节点是非带标签(编号),node_size是节点的直径
plt.show() #显示图形
plotDegreeDistribution(RG)
###Output
_____no_output_____
###Markdown
ER随机网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
ER = nx.random_graphs.erdos_renyi_graph(200,0.05)
#生成包含20个节点、以概率0.2连接的随机图
pos = nx.shell_layout(ER)
#定义一个布局,此处采用了shell布局方式
nx.draw(ER,pos,with_labels=False,node_size = 30)
plt.show()
plotDegreeDistribution(ER)
###Output
_____no_output_____
###Markdown
小世界网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
WS = nx.random_graphs.watts_strogatz_graph(200,4,0.3)
#生成包含200个节点、每个节点4个近邻、随机化重连概率为0.3的小世界网络
pos = nx.circular_layout(WS)
#定义一个布局,此处采用了circular布局方式
nx.draw(WS,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
plotDegreeDistribution(WS)
nx.diameter(WS)
cc = nx.clustering(WS)
plt.hist(cc.values(), bins = 10)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
import numpy as np
np.mean(cc.values())
###Output
_____no_output_____
###Markdown
BA网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(200,2)
#生成n=20、m=1的BA无标度网络
pos = nx.spring_layout(BA)
#定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
plotDegreeDistribution(BA)
BA= nx.random_graphs.barabasi_albert_graph(20000,2)
#生成n=20、m=1的BA无标度网络
plotDegreeDistribution(BA)
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(500,1)
#生成n=20、m=1的BA无标度网络
pos = nx.spring_layout(BA)
#定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30)
#绘制图形
plt.show()
nx.degree_histogram(BA)[:3]
BA.degree().items()[:3]
plt.hist(BA.degree().values())
plt.show()
from collections import defaultdict
import numpy as np
def plotDegreeDistributionLongTail(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
BA= nx.random_graphs.barabasi_albert_graph(5000,2)
#生成n=20、m=1的BA无标度网络
plotDegreeDistributionLongTail(BA)
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
    x, y = np.array(items).T
y_sum = np.sum(y)
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
BA= nx.random_graphs.barabasi_albert_graph(50000,2)
#生成n=20、m=1的BA无标度网络
plotDegreeDistribution(BA)
###Output
_____no_output_____
###Markdown
作业:- 阅读 Barabasi (1999) Internet Diameter of the world wide web.Nature.401- 绘制www网络的出度分布、入度分布- 使用BA模型生成节点数为N、幂指数为$\gamma$的网络- 计算平均路径长度d与节点数量的关系
###Code
Ns = [i*10 for i in [1, 10, 100, 1000]]
ds = []
for N in Ns:
    print(N)
BA= nx.random_graphs.barabasi_albert_graph(N,2)
d = nx.average_shortest_path_length(BA)
ds.append(d)
plt.plot(Ns, ds, 'r-o')
plt.xlabel('$N$', fontsize = 20)
plt.ylabel('$<d>$', fontsize = 20)
plt.xscale('log')
plt.show()
###Output
_____no_output_____
###Markdown
网络科学理论****** 网络科学:使用NetworkX分析复杂网络******王成军 [email protected]计算传播网 http://computational-communication.com http://networkx.readthedocs.org/en/networkx-1.11/tutorial/
###Code
%matplotlib inline
import networkx as nx
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph() # G = nx.DiGraph() # 有向网络
# 添加(孤立)节点
G.add_node("spam")
# 添加节点和链接
G.add_edge(1,2)
print(G.nodes())
print(G.edges())
# 绘制网络
nx.draw(G, with_labels = True)
###Output
_____no_output_____
###Markdown
WWW Data download http://www3.nd.edu/~networks/resources.htmWorld-Wide-Web: [README] [DATA]Réka Albert, Hawoong Jeong and Albert-László Barabási:Diameter of the World Wide Web Nature 401, 130 (1999) [ PDF ] 作业:- 下载www数据- 构建networkx的网络对象g(提示:有向网络)- 将www数据添加到g当中- 计算网络中的节点数量和链接数量
###Code
G = nx.Graph()
n = 0
with open ('/Users/chengjun/bigdata/www.dat.gz.txt') as f:
for line in f:
n += 1
#if n % 10**4 == 0:
#flushPrint(n)
x, y = line.rstrip().split(' ')
G.add_edge(x,y)
nx.info(G)
###Output
_____no_output_____
###Markdown
描述网络 nx.karate_club_graph 我们从karate_club_graph开始,探索网络的基本性质。
###Code
G = nx.karate_club_graph()
clubs = [G.node[i]['club'] for i in G.nodes()]
colors = []
for j in clubs:
if j == 'Mr. Hi':
colors.append('r')
else:
colors.append('g')
nx.draw(G, with_labels = True, node_color = colors)
G.node[1] # 节点1的属性
G.edge.keys()[:3] # 前三条边的id
nx.info(G)
G.nodes()[:10]
G.edges()[:3]
G.neighbors(1)
nx.average_shortest_path_length(G)
###Output
_____no_output_____
###Markdown
网络直径
###Code
nx.diameter(G)#返回图G的直径(最长最短路径的长度)
###Output
_____no_output_____
###Markdown
密度
###Code
nx.density(G)
nodeNum = len(G.nodes())
edgeNum = len(G.edges())
2.0*edgeNum/(nodeNum * (nodeNum - 1))
###Output
_____no_output_____
###Markdown
作业:- 计算www网络的网络密度 聚集系数
###Code
cc = nx.clustering(G)
cc.items()[:5]
plt.hist(cc.values(), bins = 15)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
Spacing in Math ModeIn a math environment, LaTeX ignores the spaces you type and puts in the spacing that it thinks is best. LaTeX formats mathematics the way it's done in mathematics texts. If you want different spacing, LaTeX provides the following four commands for use in math mode:\; - a thick space\: - a medium space\, - a thin space\\! - a negative thin space 匹配系数
###Code
# M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003
nx.degree_assortativity_coefficient(G) #计算一个图的度匹配性。
Ge=nx.Graph()
Ge.add_nodes_from([0,1],size=2)
Ge.add_nodes_from([2,3],size=3)
Ge.add_edges_from([(0,1),(2,3)])
print(nx.numeric_assortativity_coefficient(Ge,'size'))
# plot degree correlation
from collections import defaultdict
import numpy as np
l=defaultdict(list)
g = nx.karate_club_graph()
for i in g.nodes():
k = []
for j in g.neighbors(i):
k.append(g.degree(j))
l[g.degree(i)].append(np.mean(k))
#l.append([g.degree(i),np.mean(k)])
x = l.keys()
y = [np.mean(i) for i in l.values()]
#x, y = np.array(l).T
plt.plot(x, y, 'r-o', label = '$Karate\;Club$')
plt.legend(loc=1,fontsize=10, numpoints=1)
plt.xscale('log'); plt.yscale('log')
plt.ylabel(r'$<knn(k)$> ', fontsize = 20)
plt.xlabel('$k$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
Degree centrality measures.(度中心性)* degree_centrality(G) Compute the degree centrality for nodes.* in_degree_centrality(G) Compute the in-degree centrality for nodes.* out_degree_centrality(G) Compute the out-degree centrality for nodes.* closeness_centrality(G[, v, weighted_edges]) Compute closeness centrality for nodes.* betweenness_centrality(G[, normalized, ...]) Betweenness centrality measures.(介数中心性)
###Code
dc = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness= nx.betweenness_centrality(G)
fig = plt.figure(figsize=(15, 4),facecolor='white')
ax = plt.subplot(1, 3, 1)
plt.hist(dc.values(), bins = 20)
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
ax = plt.subplot(1, 3, 2)
plt.hist(closeness.values(), bins = 20)
plt.xlabel('$Closeness \, Centrality$', fontsize = 20)
ax = plt.subplot(1, 3, 3)
plt.hist(betweenness.values(), bins = 20)
plt.xlabel('$Betweenness \, Centrality$', fontsize = 20)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(15, 8),facecolor='white')
for k in betweenness:
plt.scatter(dc[k], closeness[k], s = betweenness[k]*1000)
plt.text(dc[k], closeness[k]+0.02, str(k))
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Closeness \, Centrality$', fontsize = 20)
plt.show()
###Output
_____no_output_____
###Markdown
度分布
###Code
from collections import defaultdict
import numpy as np
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
G = nx.karate_club_graph()
plotDegreeDistribution(G)
###Output
_____no_output_____
###Markdown
网络科学理论简介****** 网络科学:分析网络结构******王成军 [email protected]计算传播网 http://computational-communication.com 规则网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
RG = nx.random_graphs.random_regular_graph(3,200) #生成包含200个节点、每个节点有3个邻居的规则图RG
pos = nx.spectral_layout(RG) #定义一个布局,此处采用了spectral布局方式,后变还会介绍其它布局方式,注意图形上的区别
nx.draw(RG,pos,with_labels=False,node_size = 30) #绘制规则图的图形,with_labels决定节点是非带标签(编号),node_size是节点的直径
plt.show() #显示图形
plotDegreeDistribution(RG)
###Output
_____no_output_____
###Markdown
ER随机网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
ER = nx.random_graphs.erdos_renyi_graph(200,0.05) #生成包含20个节点、以概率0.2连接的随机图
pos = nx.shell_layout(ER) #定义一个布局,此处采用了shell布局方式
nx.draw(ER,pos,with_labels=False,node_size = 30)
plt.show()
plotDegreeDistribution(ER)
###Output
_____no_output_____
###Markdown
小世界网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
WS = nx.random_graphs.watts_strogatz_graph(200,4,0.3) #生成包含200个节点、每个节点4个近邻、随机化重连概率为0.3的小世界网络
pos = nx.circular_layout(WS) #定义一个布局,此处采用了circular布局方式
nx.draw(WS,pos,with_labels=False,node_size = 30) #绘制图形
plt.show()
plotDegreeDistribution(WS)
nx.diameter(WS)
cc = nx.clustering(WS)
plt.hist(cc.values(), bins = 10)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
import numpy as np
np.mean(cc.values())
###Output
_____no_output_____
###Markdown
BA网络
###Code
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(200,2) #生成n=20、m=1的BA无标度网络
pos = nx.spring_layout(BA) #定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30) #绘制图形
plt.show()
plotDegreeDistribution(BA)
BA= nx.random_graphs.barabasi_albert_graph(20000,2) #生成n=20、m=1的BA无标度网络
plotDegreeDistribution(BA)
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(500,1) #生成n=20、m=1的BA无标度网络
pos = nx.spring_layout(BA) #定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30) #绘制图形
plt.show()
nx.degree_histogram(BA)[:3]
BA.degree().items()[:3]
plt.hist(BA.degree().values())
plt.show()
from collections import defaultdict
import numpy as np
def plotDegreeDistributionLongTail(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
x, y = np.array(items).T
y_sum = np.sum(y)
y = [float(i)/y_sum for i in y]
plt.plot(x, y, 'b-o')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
BA= nx.random_graphs.barabasi_albert_graph(5000,2) #生成n=20、m=1的BA无标度网络
plotDegreeDistributionLongTail(BA)
def plotDegreeDistribution(G):
degs = defaultdict(int)
for i in G.degree().values(): degs[i]+=1
items = sorted ( degs.items () )
    x, y = np.array(items).T
y_sum = np.sum(y)
plt.plot(x, y, 'b-o')
plt.xscale('log')
plt.yscale('log')
plt.legend(['Degree'])
plt.xlabel('$K$', fontsize = 20)
plt.ylabel('$P_K$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.show()
BA= nx.random_graphs.barabasi_albert_graph(50000,2) #生成n=20、m=1的BA无标度网络
plotDegreeDistribution(BA)
###Output
_____no_output_____
###Markdown
作业:- 阅读 Barabasi (1999) Internet Diameter of the world wide web.Nature.401- 绘制www网络的出度分布、入度分布- 使用BA模型生成节点数为N、幂指数为$\gamma$的网络- 计算平均路径长度d与节点数量的关系
###Code
Ns = [i*10 for i in [1, 10, 100, 1000]]
ds = []
for N in Ns:
    print(N)
BA= nx.random_graphs.barabasi_albert_graph(N,2)
d = nx.average_shortest_path_length(BA)
ds.append(d)
plt.plot(Ns, ds, 'r-o')
plt.xlabel('$N$', fontsize = 20)
plt.ylabel('$<d>$', fontsize = 20)
plt.xscale('log')
plt.show()
###Output
_____no_output_____ |
02_tools-and-packages/02_numpy.ipynb | ###Markdown
[Table of contents](../toc.ipynb) NumPy* Numpy is probably the most widely used Python package; almost every scientific package builds on it.* Numpy provides multidimensional array objects and supports matrix computations.* In addition, numpy provides interfaces to C and C++.* Numpy's methods are implemented very efficiently.* Please find numpy's documentation here [https://numpy.org/](https://numpy.org/).* Numpy is one building block of Python's scientific ecosystem. If you want to know more about scientific Python, consult the scipy lecture notes at [https://scipy-lectures.org/](https://scipy-lectures.org/). Numpy importFirst, let us import numpy, create a vector, and compare it with a Python list, which ships with Python by default.
###Code
import numpy as np
a_list = range(0, 8000)
a_np_array = np.arange(0, 8000)
###Output
_____no_output_____
###Markdown
Now, let us compare how long it takes to add 3 to each element of the list and the numpy array with `%timeit` magic command.
###Code
%timeit [a_i + 3 for a_i in a_list]
%timeit a_np_array + 3
###Output
4.96 µs ± 15 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Numpy is much faster than the list and the code of numpy is easier to read! Create arrays manually* The "manual" syntax to create a numpy array is * `np.array([x_0, x_1, ..., x_n])` for one dimensional arrays * and `np.array([[x_0, x_1, ... x_n], [y_0, y_1, ..., y_n]])` for multidimensional arrays.* Add to this, various functions like `np.ones()`, `np.eye()`, `np.arange()`, `np.linspace()`, and many more create specific arrays which are often required in matrix computing.
###Code
# a one dimensional array
a = np.array([0, 1, 2, 3])
a
# here two dimesions
b = np.array([[0, 1], [2, 3]])
b
# now check their shapes
print(a.shape)
print(b.shape)
###Output
(4,)
(2, 2)
###Markdown
Basic attributesIn addition to `ndarray.shape`, numpy arrays contain the attributes:* `ndarray.ndim` which is the dimension of the array,* `ndarray.size` which is the number of elements in the array,* `ndarray.dtype` which is the type of the array (int16, int32, uint16, ..., the default is int64, or float64).
###Code
print('a.dtype = ', a.dtype)
print('b.dtype = ', b.dtype)
print('a.ndim = ', a.ndim)
print('b.ndim = ', b.ndim)
print('a.size = ', a.size)
print('b.size = ', b.size)
###Output
a.size = 4
b.size = 4
###Markdown
You can also specify the type.
###Code
np.array([1, 2, 3], dtype=np.uint16)
###Output
_____no_output_____
###Markdown
More array constructors np.arange* Create arrays with start, stop, and step size.
###Code
# linear growing array, zero based
np.arange(8)
# np.arange with start, end, step call
np.arange(0, 12, 2)
###Output
_____no_output_____
###Markdown
np.linspace* Create equally spaced arrays with start, stop, and number of points.
###Code
np.linspace(0, 10, 3)
np.linspace(0, 10, 13)
###Output
_____no_output_____
###Markdown
Special matrices
###Code
np.ones(3)
np.ones(shape=(3, 3))
np.eye(3)
np.zeros(3)
np.zeros(shape=(3, 3))
###Output
_____no_output_____
###Markdown
Random number arrays* Numpy's `np.random.xxx` module offers many random number generators.
###Code
np.random.rand(3, 3) # uniform distribution over [0, 1)
np.random.randn(3, 3) # standard normal distribution
###Output
_____no_output_____
###Markdown
Operators* All operators `+`, `-`, `*`, `>`, ..., work element wise per default.* A new array will be created unless you use the `+=`, `-=` and so forth operators.* Matrix multiplication can be done with `@` operator (requires Python 3.5) or `.dot` method.
###Code
a = np.ones(4)
a + 4
a * 2
a[2] = 5
a > 4
a = np.array([0, 1, 2, 3])
b = np.array([0, 1, 2, 3])
a * b
a @ b # matrix product
a.dot(b) # the "old" way to write a matrix product
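# Small added sketch: augmented assignment (+=) works in place, so no new array is created.
c = np.ones(3)
alias = c      # a second name for the same underlying buffer
c += 4         # modifies the existing array
alias          # also shows [5., 5., 5.] because both names share the data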
###Output
_____no_output_____
###Markdown
Universal functions* Numpy offers almost all mathematical functions you might need, such as * exp * max, min * sqrt * argmax * median, mean, stdev * ...
###Code
a = np.linspace(0, 8, 16)
np.exp(a)
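# A few more of the listed universal functions and reductions, applied to the same array a.
np.sqrt(a), a.max(), a.argmax(), a.mean(), a.std()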
###Output
_____no_output_____
###Markdown
Shape modification* There are many ways to change the shape of an array.* Most prominent methods are `reshape` and `transpose`.
###Code
a = np.arange(12).reshape(3, 4)
a
a.reshape(6, 2)
a.reshape(a.size) # Flatten an array, similar to a.reshape(-1), a.flatten() or a.ravel()
a = np.zeros((1, 4))
a
a.transpose() # transpose
a.T # transpose, short hand code
###Output
_____no_output_____
###Markdown
Indexing and iterating* Indexing and iteration work very much like they do for lists.* Indexing is done with square brackets `[]`.
###Code
a = np.random.rand(3, 3)
a
# modify one element
a[1, 1] = 96
a[0, 0]
a[0, :]
# iteration over first dimension
for row in a:
print(row)
###Output
[0.38735673 0.21925784 0.86007269]
[5.31764080e-02 9.60000000e+01 6.09385371e-01]
[0.30931129 0.99157167 0.48558514]
###Markdown
If you want to iterate over all elements instead, use the flat attribute.
###Code
for a_i in a.flat:
print(a_i)
###Output
0.3873567346457457
0.21925784169612772
0.8600726858855035
0.05317640799444645
96.0
0.6093853711851168
0.30931128928917273
0.9915716728447703
0.4855851363494864
###Markdown
Slicing arrays* Slicing is also done with braces. * There is a great "conversion" table for Matlab users here [https://numpy.org/devdocs/user/numpy-for-matlab-users.html](https://numpy.org/devdocs/user/numpy-for-matlab-users.html).* The basic slicing syntax is `[start:stop:step]`, see a full tutorial here [https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html).
###Code
a = np.arange(20)
a
###Output
_____no_output_____
###Markdown
All indices are zero based, the stop is not inclusive, and negative means to reverse counting (count from end to start).
###Code
a[-1] # last element
a[0] # first element
a[2:-1:2] # start from index 2 to last index and take every second value
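# Two more common slices of the same array a: every fifth element, and the reversed array.
a[::5], a[::-1]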
###Output
_____no_output_____
###Markdown
Stacking arrays
###Code
a = np.ones(3)
b = np.ones(3) + 2
np.vstack((a, b))
np.hstack((a, b))
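# Added note: np.concatenate generalises vstack/hstack; the axis argument picks the direction.
np.concatenate((a.reshape(1, -1), b.reshape(1, -1)), axis=0)   # same result as np.vstack((a, b))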
###Output
_____no_output_____
###Markdown
Comparing floats* The `numpy.allclose` is ideal to compare arrays element wise with relative or absolute tolerance.* The syntax is `np.allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)`.
###Code
np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
np.allclose([1e10,1e-7], [1.00001e10,1e-8], atol=1e-3) # with larger absolute tolerance, it returns True
###Output
_____no_output_____
###Markdown
Linear algebra* Numpy comes with a linear algebra module `numpy.linalg`.* Next comes an example to solve an overdetermined system of equations where: * $A \in \mathbb{R}^{n \times m}$ denotes input data, * $X \in \mathbb{R}^{n}$ is the parameter vector, * and $b \in \mathbb{R}^{m}$ is the output vector which follows $b = A^\top X$
###Code
n = 2; m = 20
A = np.random.rand(n, m) # input data, two dimensions, 20 samples
X = np.array([[3], [5]]) # two dimensional parameter vector
b = A.T @ X # model the output
# now solve the system of equations
np.linalg.solve(a=(A @ A.T), b=(A @ b))
np.linalg.inv(A @ A.T)
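# Added sketch: the same least-squares estimate via numpy's dedicated solver (uses A and b from above).
X_hat, *_ = np.linalg.lstsq(A.T, b, rcond=None)
X_hat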
###Output
_____no_output_____
###Markdown
Read csv data* Numpy's `genfromtxt` is very convenient to read csv files into numpy arrays.* The syntax is `dat = genfromtxt('my_file.csv', delimiter=',')`, and there are many additional import properties like skip_rows, missing_values, ... We will load this .csv file into a numpy array.```"Time", "Torque"0, 2001, 2202, 2253, 2304, 231```
###Code
# This if else is a fix to make the file available for Jupyter and Travis CI
import os
if os.path.isfile('my_file.csv'):
file = 'my_file.csv'
else:
file = '02_tools-and-packages/my_file.csv'
from numpy import genfromtxt
genfromtxt(file, delimiter=',', skip_header=1)
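# Optional variant (same file): usecols restricts the import to selected columns,
# here only the Torque column.
genfromtxt(file, delimiter=',', skip_header=1, usecols=(1,))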
###Output
_____no_output_____
###Markdown
You can also use the column names during the import with `names=True`.
###Code
dat = genfromtxt(file, delimiter=',', names=True)
dat
print(dat["Time"])
print(dat["Torque"])
###Output
[0. 1. 2. 3. 4.]
[200. 220. 225. 230. 231.]
###Markdown
Matrix computations become so much simpler with numpyRemember from last lesson that we used lists to plot for instance a parabola. This code was not that handy. ```x = [i for i in range(-20, 21)]y = [x_i**2 for x_i in x]```With numpy, such tasks become very simple.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
x = np.arange(-20, 20, 0.01)
plt.plot(x, x**2) # x**2 is easy to read compared with a list [x_i**2 for x_i in x]
plt.show()
###Output
_____no_output_____
###Markdown
Exercise: Numpy mini project (20 minutes)Now that you are familiar with matplotlib and numpy, you can solve the first more elaborate data task.This is what you should strive for:* Write a Python script which creates a linear multiplier model of type $AX \approx b$. The dimension of $A$ should be 1000 times 2, and the respective dimension of X becomes 2. * Select two values for $X$ as you like. These are the true model parameters.* Generate input data (random numbers or sine waves, as you like) and compute $b$.* The $\approx$ sign in the above equation is due to noise that you should add to $b$.* The noise should be Gaussian $\mathcal{N} \sim(0, 0.01)$.* Plot the noisy data $b$.* Use `np.linalg.solve` to compute the least squares solution $\hat{X}$ for the parameters.* Use the same input data and $\hat{X}$ to compute and to plot the fit of the least squares solution.* Print the true and estimated parameters.
###Code
#own solution
n=2
m=1000
#create input data: matrix A with two feature columns (sine and cosine), 1000 samples, stacked and transposed
time = np.linspace(0, 10, m)
A = np.vstack((np.sin(time), np.cos(time))).T
#true model parameters
X = np.array([[3], [-8]])
print('True parameters:', X.flatten())
#create noise
noise = np.random.randn(m) * 0.02
#compute b with additional noise
b = A @ X.flatten() + noise
#solve the normal equations for the least squares estimate of X
ata = A.T @ A
atb = A.T @ b
x_hat = np.linalg.solve(ata, atb)
print('Estimated parameters:', x_hat)
b_est = A @ x_hat.flatten()
#plot noisy data b and predicted data b_est
plt.plot(b, 'k.')
plt.title('Linear Regression')
plt.plot(b_est, 'y--')
plt.legend(["Noisy data", "Least squares estimate"])
plt.show()
###Output
True parameters: [ 3 -8]
Estimated parameters: [ 2.99961968 -7.99976659]
###Markdown
SolutionPlease find one possible solution in [`solution_numpy.py`](solution_numpy.py) file.
###Code
%run solution_numpy.py
###Output
True params [ 3. -8.]
Estimated params [ 2.99942396 -7.99943241]
###Markdown
Indexing and iterating* Is very similar to lists.* Indexing is done by braces `[]`.
###Code
a = np.random.rand(3, 3)
# modify one element
a[1, 1] = 96
a[0, 0]
a[0, :]
# iteration over first dimension
for row in a:
print(row)
###Output
[0.90156267 0.15860076 0.92586081]
[ 0.82601745 96. 0.33312838]
[0.60133987 0.53074791 0.81656546]
###Markdown
If you want to iterate over all elements instead, use the flat attribute.
###Code
for i in a.flat:
print(i)
###Output
0.9015626721609997
0.15860075987052058
0.9258608073857245
0.8260174527163244
96.0
0.3331283759775331
0.6013398695667754
0.5307479085804412
0.8165654631841754
###Markdown
Slicing arrays* Slicing is also done with braces. * There is a great "conversion" table for Matlab users here [https://numpy.org/devdocs/user/numpy-for-matlab-users.html](https://numpy.org/devdocs/user/numpy-for-matlab-users.html).* The basic slicing syntax is `[start:stop:step]`, see a full tutorial here [https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html).
###Code
a = np.arange(20)
a
###Output
_____no_output_____
###Markdown
All indices are zero based, the stop is not inclusive, and negative means to reverse counting (count from end to start).
###Code
a[-1] # last element
a[0] # first element
a[2:-1:2] # start from index 2 to last index and take every second value
###Output
_____no_output_____
###Markdown
Stacking arrays
###Code
a = np.ones(3)
b = np.ones(3) + 2
np.vstack((a, b))
np.hstack((a, b))
###Output
_____no_output_____
###Markdown
Comparing floats* The `numpy.allclose` is ideal to compare arrays element wise with relative or absolute tolerance.* The syntax is `np.allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)`.
###Code
np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
np.allclose([1e10,1e-7], [1.00001e10,1e-8], atol=1e-3) # with larger absolute tolerance, it returns True
###Output
_____no_output_____
###Markdown
Linear algebra* Numpy comes with a linear algebra module `numpy.linalg`.* Next comes an example to solve an overdetermined system of equations where: * $A \in \mathbb{R}^{n \times m}$ denotes input data, * $X \in \mathbb{R}^{n}$ is the parameter vector, * and $b \in \mathbb{R}^{m}$ is the output vector which follows $b = A^\top X$
###Code
n = 2; m = 20
A = np.random.rand(n, m) # input data, two dimensions, 20 samples
X = np.array([[3], [5]]) # two dimensional prameter vector
b = A.T @ X # model the output
# now solve the system of equations
np.linalg.solve(a=(A @ A.T), b=(A @ b))
np.linalg.inv(A @ A.T)
###Output
_____no_output_____
###Markdown
Read csv data* Numpy's `genfromtxt` is very convenient to read csv files into numpy arrays.* The syntax is `dat = genfromtxt('my_file.csv', delimiter=',')`, and there are many additional import properties like skip_rows, missing_values, ... We will load this .csv file into a numpy array.```"Time", "Torque"0, 2001, 2202, 2253, 2304, 231```
###Code
# This if else is a fix to make the file available for Jupyter and Travis CI
import os
if os.path.isfile('my_file.csv'):
file = 'my_file.csv'
else:
file = '02_tools-and-packages/my_file.csv'
from numpy import genfromtxt
genfromtxt(file, delimiter=',', skip_header=1)
###Output
_____no_output_____
###Markdown
You can also use the column names during the import with `names=True`.
###Code
dat = genfromtxt(file, delimiter=',', names=True)
dat
print(dat["Time"])
print(dat["Torque"])
###Output
[0. 1. 2. 3. 4.]
[200. 220. 225. 230. 231.]
###Markdown
Matrix computations become so much simpler with numpyRemember from last lesson that we used lists to plot for instance a parabola. This code was not that handy. ```x = [i for i in range(-20, 20)]y = [x[i]**2 for i in range(0,40)]```With numpy, such tasks become very simple.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
x = np.arange(-20, 20, 0.01)
plt.plot(x, x**2) # x**2 is easy to read compared with a list [x[i]**2 for i in range(0,40)]
plt.show()
###Output
_____no_output_____
###Markdown
Exercise: Numpy mini project (20 minutes)Now that you are familiar with matplotlib and numpy, you can solve the first more elaborate data task.This is what you should strive for:* Write a Python script which creates a linear multiplier model of type $AX \approx b$. The dimension of $A$ should be 1000 times 2, and the respective dimension of X becomes 2. * Select two values for $X$ as you like. These are the true model parameters.* Generate input data (random numbers or sine waves, as you like) and compute $b$.* The $\approx$ sign in the above equation is due to noise that you should add to $b$.* The noise should be Gaussian $\mathcal{N} \sim(0, 0.01)$.* Plot the noisy data $b$.* Use `np.linalg.solve` to compute the least squares solution $\hat{X}$ for the parameters.* Use the same input data and $\hat{X}$ to compute and to plot the fit of the least squares solution.* Print the true and estimated parameters. SolutionPlease find one possible solution in the [`solution_numpy.py`](solution_numpy.py) file.
###Code
%run solution_numpy.py
###Output
True params [ 3. -8.]
Estimated params [ 2.99942396 -7.99943241]
###Markdown
[Table of contents](../toc.ipynb) NumPy* Numpy is probably the most widely used Python package; almost every scientific package builds on it.* Numpy provides multidimensional array objects and supports matrix computations.* In addition, numpy provides interfaces to C and C++.* Numpy's methods are implemented very efficiently.* Please find numpy's documentation here [https://numpy.org/](https://numpy.org/).* Numpy is one building block of Python's scientific ecosystem. If you want to know more about scientific Python, consult the scipy lecture notes at [https://scipy-lectures.org/](https://scipy-lectures.org/). Numpy importFirst, let us import numpy, create a vector, and compare it with a Python list, which ships with Python by default.
###Code
import numpy as np
a_list = range(0, 8000)
a_np_array = np.arange(0, 8000)
###Output
_____no_output_____
###Markdown
Now, let us compare how long it takes to add 3 to each element of the list and the numpy array with `%timeit` magic command.
###Code
%timeit [a_i + 3 for a_i in a_list]
%timeit a_np_array + 3
###Output
1.89 µs ± 40.2 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
###Markdown
Numpy is much faster than the list and the code of numpy is easier to read! Create arrays manually* The "manual" syntax to create a numpy array is * `np.array([x_0, x_1, ..., x_n])` for one dimensional arrays * and `np.array([[x_0, x_1, ... x_n], [y_0, y_1, ..., y_n]])` for multidimensional arrays.* Add to this, various functions like `np.ones()`, `np.eye()`, `np.arange()`, `np.linspace()`, and many more create specific arrays which are often required in matrix computing.
###Code
# a one dimensional array
a = np.array([0, 1, 2, 3])
a
# here two dimesions
b = np.array([[0, 1], [2, 3]])
b
# now check their shapes
print(a.shape)
print(b.shape)
###Output
(4,)
(2, 2)
###Markdown
Basic attributesIn addition to `ndarray.shape`, numpy arrays contain the attributes:* `ndarray.ndim` which is the dimension of the array,* `ndarray.size` which is the number of elements in the array,* `ndarray.dtype` which is the type of the array (int16, int32, uint16, ..., the default is int64, or float64).
###Code
print('a.dtype = ', a.dtype)
print('b.dtype = ', b.dtype)
print('a.ndim = ', a.ndim)
print('b.ndim = ', b.ndim)
print('a.size = ', a.size)
print('b.size = ', b.size)
###Output
a.size = 4
b.size = 4
###Markdown
You can also specify the type.
###Code
np.array([1, 2, 3], dtype=np.uint16)
###Output
_____no_output_____
###Markdown
More array constructors np.arange* Create arrays with start, stop, and step size.
###Code
# linear growing array, zero based
np.arange(8)
# np.arange with start, end, step call
np.arange(0, 12, 2)
###Output
_____no_output_____
###Markdown
np.linspace* Create equidistant arrays with start, stop, and number of points.
###Code
np.linspace(0, 10, 3)
np.linspace(0, 10, 13)
###Output
_____no_output_____
###Markdown
Special matrices
###Code
np.ones(3)
np.ones(shape=(3, 3))
np.eye(3)
np.zeros(3)
np.zeros(shape=(3, 3))
###Output
_____no_output_____
###Markdown
Random number arrays* Numpy's `np.random.xxx` module offers many random number generators.
###Code
np.random.rand(3, 3) # uniform distribution over [0, 1)
np.random.randn(3, 3) # standard normal distribution
###Output
_____no_output_____
###Markdown
Operators* All operators `+`, `-`, `*`, `>`, ..., work element wise per default.* A new array will be created unless you use the `+=`, `-=` and so forth operators.* Matrix multiplication can be done with `@` operator (requires Python 3.5) or `.dot` method.
###Code
a = np.ones(4)
a + 4
a * 2
a[2] = 5
a > 4
a = np.array([0, 1, 2, 3])
b = np.array([0, 1, 2, 3])
a * b
a @ b # matrix product
a.dot(b) # the "old" way to write a matrix prodcuct
###Output
_____no_output_____
###Markdown
Universal functions* Numpy offers almost all mathematical functions you might need, such as * exp * max, min * sqrt * argmax * median, mean, stdev * ...
###Code
a = np.linspace(0, 8, 16)
np.exp(a)
###Output
_____no_output_____
###Markdown
Shape modification* There are many ways to change the shape of an array.* Most prominent methods are `reshape` and `transpose`.
###Code
a = np.arange(12).reshape(3, 4)
a
a.reshape(6, 2)
a.reshape(a.size) # Flatten an array, similar as a.shape(-1), a.flatten() or a.ravel()
a = np.zeros((1, 4))
a
a.transpose() # transpose
a.T # transpose, short hand code
###Output
_____no_output_____
###Markdown
Indexing and iterating* Is very similar to lists.* Indexing is done by braces `[]`.
###Code
a = np.random.rand(3, 3)
# modify one element
a[1, 1] = 96
a[0, 0]
a[0, :]
# iteration over first dimension
for row in a:
print(row)
###Output
[0.00338789 0.10839671 0.73954119]
[ 0.4885861 96. 0.72729338]
[0.55189617 0.30620525 0.76777526]
###Markdown
If you want to iterate over all elements instead, use the flat attribute.
###Code
for a_i in a.flat:
print(a_i)
###Output
0.0033878851391556664
0.10839670531990542
0.7395411911135307
0.48858610415647385
96.0
0.7272933816988456
0.5518961727767244
0.30620524819339745
0.7677752590953459
###Markdown
Slicing arrays* Slicing is also done with braces. * There is a great "conversion" table for Matlab users here [https://numpy.org/devdocs/user/numpy-for-matlab-users.html](https://numpy.org/devdocs/user/numpy-for-matlab-users.html).* The basic slicing syntax is `[start:stop:step]`, see a full tutorial here [https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html).
###Code
a = np.arange(20)
a
###Output
_____no_output_____
###Markdown
All indices are zero-based, the stop index is not inclusive, negative indices count from the end, and a negative step reverses the traversal direction.
###Code
a[-1] # last element
a[0] # first element
a[2:-1:2] # start from index 2 to last index and take every second value
###Output
_____no_output_____
###Markdown
Stacking arrays
###Code
a = np.ones(3)
b = np.ones(3) + 2
np.vstack((a, b))
np.hstack((a, b))
###Output
_____no_output_____
###Markdown
Comparing floats* The `numpy.allclose` function is ideal for comparing arrays element-wise with relative and absolute tolerances.* The syntax is `np.allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)`.
###Code
np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
np.allclose([1e10,1e-7], [1.00001e10,1e-8], atol=1e-3) # with larger absolute tolerance, it returns True
###Output
_____no_output_____
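For intuition (a small sketch, not part of the original lesson): `np.allclose` checks `|a - b| <= atol + rtol * |b|` element-wise, which explains the two results above.
```python
rtol, atol = 1e-05, 1e-08
print(abs(1e10 - 1.00001e10) <= atol + rtol * abs(1.00001e10))  # True  -> the 1e10 pair is within tolerance
print(abs(1e-7 - 1e-8) <= atol + rtol * abs(1e-8))              # False -> this pair makes the first allclose call fail
```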
###Markdown
Linear algebra* Numpy comes with a linear algebra module `numpy.linalg`.* Next comes an example to solve an overdetermined system of equations where: * $A \in \mathbb{R}^{n \times m}$ denotes input data, * $X \in \mathbb{R}^{n}$ is the parameter vector, * and $b \in \mathbb{R}^{m}$ is the output vector which follows $b = A^\top X$
###Code
n = 2; m = 20
A = np.random.rand(n, m) # input data, two dimensions, 20 samples
X = np.array([[3], [5]]) # two dimensional parameter vector
b = A.T @ X # model the output
# now solve the system of equations
np.linalg.solve(a=(A @ A.T), b=(A @ b))
np.linalg.inv(A @ A.T)
###Output
_____no_output_____
###Markdown
Read csv data* Numpy's `genfromtxt` is very convenient for reading csv files into numpy arrays.* The syntax is `dat = genfromtxt('my_file.csv', delimiter=',')`, and there are many additional import options like `skip_header`, `missing_values`, ... We will load this .csv file into a numpy array.```"Time", "Torque"0, 2001, 2202, 2253, 2304, 231```
###Code
# This if else is a fix to make the file available for Jupyter and Travis CI
import os
if os.path.isfile('my_file.csv'):
file = 'my_file.csv'
else:
file = '02_tools-and-packages/my_file.csv'
from numpy import genfromtxt
genfromtxt(file, delimiter=',', skip_header=1)
###Output
_____no_output_____
###Markdown
You can also use the column names during the import with `names=True`.
###Code
dat = genfromtxt(file, delimiter=',', names=True)
dat
print(dat["Time"])
print(dat["Torque"])
###Output
[0. 1. 2. 3. 4.]
[200. 220. 225. 230. 231.]
###Markdown
Matrix computations become so much simpler with numpyRemember from last lesson that we used lists to plot for instance a parabola. This code was not that handy. ```x = [i for i in range(-20, 21)]y = [x_i**2 for x_i in x]```With numpy, such tasks become very simple.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
x = np.arange(-20, 20, 0.01)
plt.plot(x, x**2) # x**2 is easy to read compared with a list [x_i**2 for x_i in x]
plt.show()
###Output
_____no_output_____
###Markdown
Exercise: Numpy mini project (20 minutes)Now that you are familiar with matplotlib and numpy, you can solve the first more elaborate data task.This is what you should strive for:* Write a Python script which creates a linear multiplier model of type $AX \approx b$. The dimension of $A$ should be 1000 times 2, and the respective dimension of X becomes 2. * Select two values for $X$ as you like. These are the true model parameters.* Generate input data (random numbers or sine waves, as you like) and compute $b$.* The $\approx$ sign in the above equation is due to noise that you should add to $b$.* The noise should be Gaussian $\mathcal{N} \sim(0, 0.01)$.* Plot the noisy data $b$.* Use `np.linalg.solve` to compute the least squares solution $\hat{X}$ for the parameters.* Use the same input data and $\hat{X}$ to compute and to plot the fit of the least squares solution.* Print the true and estimated parameters. SolutionPlease find one possible solution in [`solution_numpy.py`](solution_numpy.py) file.
###Code
%run solution_numpy.py
###Output
True params [ 3. -8.]
Estimated params [ 3.00599789 -7.9939015 ]
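For reference, a minimal sketch of one possible approach (hypothetical; the actual `solution_numpy.py` may differ in variable names, input data and plotting details):
```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
A = rng.random((1000, 2))                              # input data, 1000 samples, 2 features
X_true = np.array([[3.0], [-8.0]])                     # chosen "true" parameters
b = A @ X_true + rng.normal(0, 0.01, size=(1000, 1))   # noisy output

# least squares via the normal equations (A.T A) X = A.T b
X_hat = np.linalg.solve(A.T @ A, A.T @ b)

plt.plot(b, ".", label="noisy data b")
plt.plot(A @ X_hat, label="least squares fit")
plt.legend()
plt.show()

print("True params", X_true.ravel())
print("Estimated params", X_hat.ravel())
```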
###Markdown
[Table of contents](../toc.ipynb) NumPy* Numpy is probably the most widely used Python package. Almost any scientific package builds on numpy.* Numpy provides multidimensional array objects and supports matrix computations.* Add to this, numpy provides interfaces to C and C++.* Numpy's methods are implemented very efficiently.* Please find numpy's documentation here [https://numpy.org/](https://numpy.org/).* Numpy is one building block of Python's scientific ecosystem. If you want to know more about scientific Python, consult the scipy lecture notes at [https://scipy-lectures.org/](https://scipy-lectures.org/). Numpy importFirst, let us import numpy, create a vector, and compare this with a Python list, which ships with Python by default.
###Code
import numpy as np
a_list = range(0, 8000)
a_np_array = np.arange(0, 8000)
###Output
_____no_output_____
###Markdown
Now, let us compare how long it takes to add 3 to each element of the list and of the numpy array with the `%timeit` magic command.
###Code
%timeit [i + 3 for i in a_list]
%timeit a_np_array + 3
###Output
2.67 µs ± 40.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Numpy is much faster than the list, and the numpy code is easier to read! Create arrays manually* The "manual" syntax to create a numpy array is * `np.array([x, x])` for one dimensional arrays * and `np.array([[x, x], [x, x]])` for multidimensional arrays.* Add to this, various functions like `np.ones()`, `np.eye()`, `np.arange()`, `np.linspace()`, and many more create specific arrays which are often required in matrix computing.
###Code
# a one dimensional array
a = np.array([0, 1, 2, 3])
a
# here two dimensions
b = np.array([[0, 1], [2, 3]])
b
# now check their shapes
print(a.shape)
print(b.shape)
###Output
(4,)
(2, 2)
###Markdown
Basic attributesIn addition to `ndarray.shape`, numpy arrays contain the attributes:* `ndarray.ndim` which is the dimension of the array,* `ndarray.size` which is the number of elements in the array,* `ndarray.dtype` which is the type of the array (int16, int32, uint16, ..., the default is int64, or float64).
###Code
a.dtype
a.ndim
a.size
###Output
_____no_output_____
###Markdown
You can also specify the type.
###Code
np.array([1, 2, 3], dtype=np.uint16)
###Output
_____no_output_____
###Markdown
More array constructors np.arange* Create arrays with start, stop, and step size.
###Code
# linear growing array, zero based
np.arange(8)
# np.arange with start, end, step call
np.arange(0, 12, 2)
###Output
_____no_output_____
###Markdown
np.linspace* Create equidistant arrays with start, stop, and number of points.
###Code
np.linspace(0, 10, 3)
np.linspace(0, 10, 13)
###Output
_____no_output_____
###Markdown
Special matrices
###Code
np.ones(3)
np.ones(shape=(3, 3))
np.eye(3)
np.zeros(3)
np.zeros(shape=(3, 3))
###Output
_____no_output_____
###Markdown
Random number arrays* Numpy's `np.random.xxx` module offers many random number generators.
###Code
np.random.rand(3, 3) # uniform distribution over [0, 1)
np.random.randn(3, 3) # standard normal distribution
###Output
_____no_output_____
###Markdown
Operators* All operators `+`, `-`, `*`, `>`, ..., work element-wise by default.* A new array will be created unless you use the in-place operators `+=`, `-=`, and so forth.* Matrix multiplication can be done with the `@` operator (requires Python 3.5) or the `.dot` method.
###Code
a = np.ones(4)
a + 4
a * 2
a[2] = 5
a > 4
a = np.array([0, 1, 2, 3])
b = np.array([0, 1, 2, 3])
a * b
a @ b # matrix product
a.dot(b) # the "old" way to write a matrix product
###Output
_____no_output_____
###Markdown
Universal functions* Numpy offers almost all mathematical functions you might need, such as * exp * max, min * sqrt * argmax * median, mean, stdev * ...
###Code
a = np.linspace(0, 8, 16)
np.exp(a)
###Output
_____no_output_____
###Markdown
Shape modification* There are many ways to change the shape of an array.* Most prominent methods are `reshape` and `transpose`.
###Code
a = np.ones((3,4))
a
a.reshape(a.size)
a = np.zeros((1, 4))
a
a.transpose() # transpose
a.T # transpose, short hand code
a.ravel() # flattens the array
###Output
_____no_output_____
###Markdown
[Table of contents](../toc.ipynb) NumPy* Numpy is probably the most widely used Python package. Almost any scientific package builds on numpy.* Numpy provides multidimensional array objects and supports matrix computations.* Add to this, numpy provides interfaces to C and C++.* Numpy's methods are implemented very efficiently.* Please find numpy's documentation here [https://numpy.org/](https://numpy.org/).* Numpy is one building block of Python's scientific ecosystem. If you want to know more about scientific Python, consult the scipy lecture notes at [https://scipy-lectures.org/](https://scipy-lectures.org/). Numpy importFirst, let us import numpy, create a vector, and compare this with a Python list, which ships with Python by default.
###Code
import numpy as np
a_list = range(0, 8000)
a_np_array = np.arange(0, 8000)
###Output
_____no_output_____
###Markdown
Now, let us compare how long it takes to add 3 to each element of the list and the numpy array with `%timeit` magic command.
###Code
%timeit [a_i + 3 for a_i in a_list]
%timeit a_np_array + 3
###Output
2.67 µs ± 40.4 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
###Markdown
Numpy is much faster than the list and the code of numpy is easier to read! Create arrays manually* The "manual" syntax to create a numpy array is * `np.array([x_0, x_1, ..., x_n])` for one dimensional arrays * and `np.array([[x_0, x_1, ... x_n], [y_0, y_1, ..., y_n]])` for multidimensional arrays.* Add to this, various functions like `np.ones()`, `np.eye()`, `np.arange()`, `np.linspace()`, and many more create specific arrays which are often required in matrix computing.
###Code
# a one dimensional array
a = np.array([0, 1, 2, 3])
a
# here two dimensions
b = np.array([[0, 1], [2, 3]])
b
# now check their shapes
print(a.shape)
print(b.shape)
###Output
(4,)
(2, 2)
###Markdown
Basic attributesIn addition to `ndarray.shape`, numpy arrays contain the attributes:* `ndarray.ndim` which is the dimension of the array,* `ndarray.size` which is the number of elements in the array,* `ndarray.dtype` which is the type of the array (int16, int32, uint16, ..., the default is int64, or float64).
###Code
print('a.dtype = ', a.dtype)
print('b.dtype = ', b.dtype)
print('a.ndim = ', a.ndim)
print('b.ndim = ', b.ndim)
print('a.size = ', a.size)
print('b.size = ', b.size)
###Output
a.size = 4
b.size = 4
###Markdown
You can also specify the type.
###Code
np.array([1, 2, 3], dtype=np.uint16)
###Output
_____no_output_____
###Markdown
More array constructors np.arange* Create arrays with start, stop, and step size.
###Code
# linear growing array, zero based
np.arange(8)
# np.arange with start, end, step call
np.arange(0, 12, 2)
###Output
_____no_output_____
###Markdown
np.linspace* Create equidistant arrays with start, stop, and number of points.
###Code
np.linspace(0, 10, 3)
np.linspace(0, 10, 13)
###Output
_____no_output_____
###Markdown
Special matrices
###Code
np.ones(3)
np.ones(shape=(3, 3))
np.eye(3)
np.zeros(3)
np.zeros(shape=(3, 3))
###Output
_____no_output_____
###Markdown
Random number arrays* Numpy's `np.random.xxx` module offers many random number generators.
###Code
np.random.rand(3, 3) # uniform distribution over [0, 1)
np.random.randn(3, 3) # standard normal distribution
###Output
_____no_output_____
###Markdown
Operators* All operators `+`, `-`, `*`, `>`, ..., work element-wise by default.* A new array will be created unless you use the in-place operators `+=`, `-=`, and so forth.* Matrix multiplication can be done with the `@` operator (requires Python 3.5) or the `.dot` method.
###Code
a = np.ones(4)
a + 4
a * 2
a[2] = 5
a > 4
a = np.array([0, 1, 2, 3])
b = np.array([0, 1, 2, 3])
a * b
a @ b # matrix product
a.dot(b) # the "old" way to write a matrix product
###Output
_____no_output_____
###Markdown
Universal functions* Numpy offers almost all mathematical functions you might need, such as * exp * max, min * sqrt * argmax * median, mean, stdev * ...
###Code
a = np.linspace(0, 8, 16)
np.exp(a)
###Output
_____no_output_____
###Markdown
Shape modification* There are many ways to change the shape of an array.* Most prominent methods are `reshape` and `transpose`.
###Code
a = np.arange(12).reshape(3, 4)
a
a.reshape(6, 2)
a.reshape(a.size) # Flatten an array, similar to a.reshape(-1), a.flatten() or a.ravel()
a = np.zeros((1, 4))
a
a.transpose() # transpose
a.T # transpose, short hand code
###Output
_____no_output_____
###Markdown
Indexing and iterating* Indexing and iterating work very similarly to lists.* Indexing is done with square brackets `[]`.
###Code
a = np.random.rand(3, 3)
# modify one element
a[1, 1] = 96
a[0, 0]
a[0, :]
# iteration over first dimension
for row in a:
print(row)
###Output
[0.90156267 0.15860076 0.92586081]
[ 0.82601745 96. 0.33312838]
[0.60133987 0.53074791 0.81656546]
###Markdown
If you want to iterate over all elements instead, use the flat attribute.
###Code
for a_i in a.flat:
print(a_i)
###Output
0.9015626721609997
0.15860075987052058
0.9258608073857245
0.8260174527163244
96.0
0.3331283759775331
0.6013398695667754
0.5307479085804412
0.8165654631841754
###Markdown
Slicing arrays* Slicing is also done with square brackets. * There is a great "conversion" table for Matlab users here [https://numpy.org/devdocs/user/numpy-for-matlab-users.html](https://numpy.org/devdocs/user/numpy-for-matlab-users.html).* The basic slicing syntax is `[start:stop:step]`, see a full tutorial here [https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html](https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html).
###Code
a = np.arange(20)
a
###Output
_____no_output_____
###Markdown
All indices are zero-based, the stop index is not inclusive, negative indices count from the end, and a negative step reverses the traversal direction.
###Code
a[-1] # last element
a[0] # first element
a[2:-1:2] # start from index 2 to last index and take every second value
###Output
_____no_output_____
###Markdown
Stacking arrays
###Code
a = np.ones(3)
b = np.ones(3) + 2
np.vstack((a, b))
np.hstack((a, b))
###Output
_____no_output_____
###Markdown
Comparing floats* The `numpy.allclose` function is ideal for comparing arrays element-wise with relative and absolute tolerances.* The syntax is `np.allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False)`.
###Code
np.allclose([1e10, 1e-7], [1.00001e10, 1e-8])
np.allclose([1e10,1e-7], [1.00001e10,1e-8], atol=1e-3) # with larger absolute tolerance, it returns True
###Output
_____no_output_____
###Markdown
Linear algebra* Numpy comes with a linear algebra module `numpy.linalg`.* Next comes an example to solve an overdetermined system of equations where: * $A \in \mathbb{R}^{n \times m}$ denotes input data, * $X \in \mathbb{R}^{n}$ is the parameter vector, * and $b \in \mathbb{R}^{m}$ is the output vector which follows $b = A^\top X$
###Code
n = 2; m = 20
A = np.random.rand(n, m) # input data, two dimensions, 20 samples
X = np.array([[3], [5]]) # two dimensional parameter vector
b = A.T @ X # model the output
# now solve the system of equations
np.linalg.solve(a=(A @ A.T), b=(A @ b))
np.linalg.inv(A @ A.T)
###Output
_____no_output_____
###Markdown
Read csv data* Numpy's `genfromtxt` is very convenient for reading csv files into numpy arrays.* The syntax is `dat = genfromtxt('my_file.csv', delimiter=',')`, and there are many additional import options like `skip_header`, `missing_values`, ... We will load this .csv file into a numpy array.```"Time", "Torque"0, 2001, 2202, 2253, 2304, 231```
###Code
# This if else is a fix to make the file available for Jupyter and Travis CI
import os
if os.path.isfile('my_file.csv'):
file = 'my_file.csv'
else:
file = '02_tools-and-packages/my_file.csv'
from numpy import genfromtxt
genfromtxt(file, delimiter=',', skip_header=1)
###Output
_____no_output_____
###Markdown
You can also use the column names during the import with `names=True`.
###Code
dat = genfromtxt(file, delimiter=',', names=True)
dat
print(dat["Time"])
print(dat["Torque"])
###Output
[0. 1. 2. 3. 4.]
[200. 220. 225. 230. 231.]
###Markdown
Matrix computations become so much simpler with numpyRemember from last lesson that we used lists to plot for instance a parabola. This code was not that handy. ```x = [i for i in range(-20, 21)]y = [x_i**2 for x_i in x]```With numpy, such tasks become very simple.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
x = np.arange(-20, 20, 0.01)
plt.plot(x, x**2) # x**2 is easy to read compared with a list [x_i**2 for x_i in x]
plt.show()
###Output
_____no_output_____
###Markdown
Exercise: Numpy mini project (20 minutes)Now that you are familiar with matplotlib and numpy, you can solve the first more elaborate data task.This is what you should strive for:* Write a Python script which creates a linear multiplier model of type $AX \approx b$. The dimension of $A$ should be 1000 times 2, and the respective dimension of X becomes 2. * Select two values for $X$ as you like. These are the true model parameters.* Generate input data (random numbers or sine waves, as you like) and compute $b$.* The $\approx$ sign in the above equation is due to noise that you should add to $b$.* The noise should be Gaussian $\mathcal{N} \sim(0, 0.01)$.* Plot the noisy data $b$.* Use `np.linalg.solve` to compute the least squares solution $\hat{X}$ for the parameters.* Use the same input data and $\hat{X}$ to compute and to plot the fit of the least squares solution.* Print the true and estimated parameters. SolutionPlease find one possible solution in [`solution_numpy.py`](solution_numpy.py) file.
###Code
%run solution_numpy.py
###Output
True params [ 3. -8.]
Estimated params [ 2.99942396 -7.99943241]
|
SavingModels.ipynb | ###Markdown
Saving Models
###Code
!pip install -q h5py pyyaml
from __future__ import absolute_import, division, print_function
import os
import tensorflow as tf
from tensorflow import keras
tf.__version__
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28*28) / 255.0
test_images = test_images[:1000].reshape(-1, 28*28) / 255.0
def create_model():
model = keras.Sequential([
keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=keras.losses.sparse_categorical_crossentropy,
metrics=['accuracy'])
return model
model = create_model()
model.summary()
checkpoint_path = 'training_1/cp.ckpt'
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
model = create_model()
model.fit(train_images, train_labels, epochs=10,
validation_data=(test_images, test_labels),
callbacks = [cp_callback])
!dir {checkpoint_dir}
###Output
Volume in drive C is OSDisk
Volume Serial Number is 9A0F-9300
Directory of C:\Jupyternotebooks\training_1
09/21/2018 12:25 AM <DIR> .
09/21/2018 12:25 AM <DIR> ..
09/21/2018 12:25 AM 71 checkpoint
09/21/2018 12:25 AM 1,631,720 cp.ckpt.data-00000-of-00001
09/21/2018 12:25 AM 647 cp.ckpt.index
3 File(s) 1,632,438 bytes
2 Dir(s) 175,823,003,648 bytes free
###Markdown
Loading the weights into untrained model
###Code
model = create_model()
loss, acc = model.evaluate(test_images, test_labels)
print('Untrained model, accuracy: {:5.2f}%'.format(100*acc))
model.load_weights(checkpoint_path)
loss, acc = model.evaluate(test_images, test_labels)
print('Restored model, accuracy: {:5.2f}%'.format(100*acc))
###Output
1000/1000 [==============================] - 0s 93us/step
Restored model, accuracy: 85.90%
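A possible follow-up (a sketch, not part of the original run; the file name is illustrative): save the entire model, i.e. architecture, weights and optimizer state, to a single HDF5 file and restore it.
```python
model.save('my_model.h5')                            # writes architecture + weights + optimizer state
new_model = keras.models.load_model('my_model.h5')   # rebuilds the model from the file
loss, acc = new_model.evaluate(test_images, test_labels)
print('Restored full model, accuracy: {:5.2f}%'.format(100 * acc))
```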
|
cloud_function_trigger.ipynb | ###Markdown
References* https://github.com/GoogleCloudPlatform/mlops-with-vertex-ai/blob/main/05-continuous-training.ipynb Setting up
###Code
!pip install --upgrade -q google-cloud-pubsub
###Output
_____no_output_____
###Markdown
***Restart runtime.***
###Code
!gcloud init
from google.colab import auth
auth.authenticate_user()
GOOGLE_CLOUD_PROJECT = "fast-ai-exploration"
GOOGLE_CLOUD_REGION = "us-central1"
GCS_BUCKET_NAME = "vertex-tfx-mlops"
PIPELINE_NAME = "penguin-vertex-training"
PIPELINE_ROOT = "gs://{}/pipeline_root/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
PIPELINE_LOCATION = f"{PIPELINE_ROOT}/{PIPELINE_NAME}.json"
PUBSUB_TOPIC = f"trigger-{PIPELINE_NAME}"
DATA_ROOT = "gs://{}/data/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
MODULE_ROOT = "gs://{}/pipeline_module/{}".format(GCS_BUCKET_NAME, PIPELINE_NAME)
if not (GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_REGION and GCS_BUCKET_NAME):
from absl import logging
logging.error("Please set all required parameters.")
###Output
_____no_output_____
###Markdown
Create Pub/Sub Topic
###Code
!gcloud pubsub topics create {PUBSUB_TOPIC}
###Output
_____no_output_____
###Markdown
Deploy Cloud Function
###Code
ENV_VARS=f"""\
PROJECT={GOOGLE_CLOUD_PROJECT},\
REGION={GOOGLE_CLOUD_REGION},\
GCS_PIPELINE_FILE_LOCATION={PIPELINE_LOCATION}
"""
!echo {ENV_VARS}
!git clone -b dev https://github.com/sayakpaul/CI-CD-for-Model-Training --quiet
%cd CI-CD-for-Model-Training
BUCKET = f'gs://{GCS_BUCKET_NAME}'
CLOUD_FUNCTION_NAME = f'trigger-{PIPELINE_NAME}-fn'
!gcloud functions deploy {CLOUD_FUNCTION_NAME} \
--region={GOOGLE_CLOUD_REGION} \
--trigger-topic={PUBSUB_TOPIC} \
--runtime=python37 \
--source=cloud_function\
--entry-point=trigger_pipeline\
--stage-bucket={BUCKET}\
--update-env-vars={ENV_VARS}
# `trigger_pipeline` is the name of the function inside
# `cloud_function/main.py`
import IPython
cloud_fn_url = f"https://console.cloud.google.com/functions/details/{GOOGLE_CLOUD_REGION}/{CLOUD_FUNCTION_NAME}"
html = (
f'See the Cloud Function details <a href="{cloud_fn_url}" target="_blank">here</a>.'
)
IPython.display.display(IPython.display.HTML(html))
###Output
_____no_output_____
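Purely as an illustration (this is a hypothetical sketch, not the actual `cloud_function/main.py` from the cloned repo), a Pub/Sub-triggered entry point receives the message published below roughly like this:
```python
import base64
import json

def trigger_pipeline(event, context):
    # Pub/Sub delivers the payload base64-encoded in event["data"]
    payload = json.loads(base64.b64decode(event["data"]).decode("utf-8"))
    num_epochs = payload.get("num_epochs")
    learning_rate = payload.get("learning_rate")
    # the real function would submit the compiled Vertex AI pipeline with these parameter values
    print(f"Triggering pipeline with num_epochs={num_epochs}, learning_rate={learning_rate}")
```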
###Markdown
Publish Topic Message
###Code
from google.cloud import pubsub
import json
publish_client = pubsub.PublisherClient()
topic = f"projects/{GOOGLE_CLOUD_PROJECT}/topics/{PUBSUB_TOPIC}"
data = {"num_epochs": 3, "learning_rate": 1e-2}
message = json.dumps(data)
_ = publish_client.publish(topic, message.encode())
###Output
_____no_output_____ |
topscorers/challenge-4-kpe.ipynb | ###Markdown
Here I'll briefly describe the final solution, and if there is interest I could go into more detail on the different optimization steps I took in the course of the challenge. The main observations are as follows:1. Solving the smaller equivalent problem with C1=0, i.e. (0, C2-C1, C_max-sum(C1)) (see the small numeric sketch right after this list).2. Using a single QFT/IQFT transform and performing all additions in QFT space.3. Reducing/replacing the initial QFT to/with H.4. Using the most significant `carry` bit in the data register (2**c - C_max - 1 + cost) directly as a flag qubit, with c=data_bits-1.5. Using approximate QFT with approximation_degree 1 or 2.6. Using transpile(['cx','u3'], optimization_level=3)**N.B.** you can also check the notes and comments in my [submission](challenge_4_kpe.py).
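A small numeric sketch of observation 1 (made-up values): shifting the costs so that C1 becomes all zeros does not change which choice vectors are feasible.
```python
import numpy as np

C1, C2, C_max = np.array([2, 3, 1]), np.array([5, 4, 6]), 11
C1_s, C2_s, C_max_s = C1 - C1, C2 - C1, C_max - C1.sum()

x = np.array([1, 0, 1])                      # 0 -> pick option 1, 1 -> pick option 2
cost = np.where(x == 1, C2, C1).sum()        # cost in the original problem
cost_s = np.where(x == 1, C2_s, C1_s).sum()  # cost in the shifted problem
print(cost <= C_max, cost_s <= C_max_s)      # both feasibility checks always agree
```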
###Code
# lets do the imports
from typing import Union
import math
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit import Gate
###Output
_____no_output_____
###Markdown
Let's start by describing the general structure of the solution circuit (proposed in [arXiv:1908.02210](https://arxiv.org/abs/1908.02210)). We represent the $N$ choices needed to be made with $N$ index qubits (for all the knapsack problem instances in this IQC problem $N=\text{index_qubits}=11$). We then repetitively apply two operators - $U(\gamma)$ and $U(\beta)$, with different values for $\gamma$ and $\beta$ in each iteration, such that initially $\gamma$ is small and $\beta$ large, and with their relative strength gradually changing towards $\gamma$ being large and $\beta$ being small (intuitively this corresponds to gradually decreasing the system temperature, until the system anneals to its lowest energy state - but I'm not a quantum physicist, so this intuition might be incorrect). All in all we only have: * a quantum circuit with * 11 index qubits * 5 data qubits - for calculating the cost of a solution * a for loop to repetitively apply the $U(\gamma)$ and $U(\beta)$ operators: * $U(\gamma)$ application consists of two parts: * phase return part * cost penalty part - for this we need the data qubits * cost calculation using QRAM to encode the cost of each choice * constraint testing to flag all infeasible solutions (where the cost constraint is not satisfied) * penalty dephasing of the infeasible solutions * reinitialization of the data qubits using the inverse operation, so they can be used in the next iteration * $U(\beta)$ application (mixing operator)So the overall structure of the circuit would be like this: ```python problem definitionL1, L2 = [],[] list of value returns associated with each choiceC1, C2 = [],[] list of cost associated with each choiceC_max: int the maximal cost of a feasible solution circuit definitionindex_qubits = len(L1) number of choices to be madedata_qubits:int number of data qubits required to calculate a solution costqr_index = QuantumRegister(index_qubits, "index")qr_data = QuantumRegister(data_qubits, "data")cr_index = ClassicalRegister(index_qubits, "c_index")qc = QuantumCircuit(qr_index, qr_data, cr_index)qc.h(qr_index) put the index/solution qubits in a superpositionp, alpha = 5, 1for i in range(p): beta, gamma = 1 - (i + 1) / p, (i + 1) / p calculate beta and gamma return part qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index) penalty part step 1: cost calculation qc.append(cost_calculation(index_qubits, data_qubits, C1, C2), qr_index[:] + qr_data[:]) step 2: Constraint testing qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:]) step 3: penalty dephasing qc.append(penalty_dephasing(data_qubits, alpha, gamma), qr_data[:] + qr_f[:]) step 4: reinitialization qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max), qr_index[:] + qr_data[:] + qr_f[:]) mixing operator qc.append(mixing_operator(index_qubits, beta), qr_index) measure the index qc.measure(qr_index, cr_index[::-1])``` The phase return part is quite straightforward:
###Code
### Phase Operator ###
# return part
def phase_return(index_qubits: int, gamma: float, L1: list, L2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
for ndx in range(index_qubits):
qc.p(- gamma * (L2[ndx] - L1[ndx]), qr_index[ndx])
return qc.to_gate(label=" phase return ") if to_gate else qc
###Output
_____no_output_____
###Markdown
Implementing the QFT adder can be done by simply following the illustration figures in [arXiv:quant-ph/0008033](https://arxiv.org/pdf/quant-ph/0008033.pdf). Getting the QFT adder right is probably the most complex part of the solution, so we better unit test it well (I can provide my unit tests later; a small sanity check of the adder follows the code cell below). (We could have also used the [Qiskit QFTAdder](https://qiskit.org/documentation/stubs/qiskit.circuit.library.DraperQFTAdder.html) or the [Qiskit QFT](https://qiskit.org/documentation/stubs/qiskit.circuit.library.QFT.html) directly, but as I have found out, it might be somewhat confusing to get the qubit order right, so it was easier for me to just follow the illustrations in [arXiv:quant-ph/0008033](https://arxiv.org/pdf/quant-ph/0008033.pdf))
###Code
QFT_AD = 1 # default approximation degree of the QFT
#
# QFT implementation (with qiskit bit ordering).
#
def qft(data_qubits: int, approximation_degree: int = QFT_AD, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr)
for target in reversed(range(data_qubits)):
qc.h(target)
for k, control in enumerate(reversed(range(target)), start=2):
if k > (data_qubits - approximation_degree):
continue
qc.cp(2 * np.pi / (2 ** k), control, target)
return qc.to_gate(label=" qft ") if to_gate else qc
#
# Adds a constant to tha data register in QFT space.
# Should be used with the above QFT implementation.
#
def subroutine_add_const(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr_data)
# prepares the const bits in a list
cbits = list(map(int, reversed(f"{np.abs(const):02b}".zfill(data_qubits))))
for target in reversed(range(data_qubits)):
tsum = 0 # cumulative phase for target
for k, control in enumerate(reversed(range(target + 1)), start=1):
cbit = cbits[control]
if cbit:
tsum += 1 / 2 ** k
if tsum > 0:
qc.p(np.sign(const) * 2 * np.pi * tsum, qr_data[target])
return qc.to_gate(label=" [+" + str(const) + "] ") if to_gate else qc
#
# the const parameter in the above subroutine_add_const() could be negative,
# but I have never really used this, so feel free to remove the np.abs and np.sign above.
#
###Output
_____no_output_____
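A minimal sanity check of the adder (a sketch assuming `qiskit.quantum_info.Statevector` is available; these are not my original unit tests): encode an integer in a small register, add a constant through QFT space, and read the result back.
```python
from qiskit.quantum_info import Statevector

def check_add(n_bits: int, start: int, const: int) -> int:
    qr = QuantumRegister(n_bits)
    qc = QuantumCircuit(qr)
    for i in range(n_bits):              # encode `start` with qubit i holding bit i
        if (start >> i) & 1:
            qc.x(qr[i])
    qc.append(qft(n_bits, approximation_degree=0), qr[:])
    qc.append(subroutine_add_const(n_bits, const), qr[:])
    qc.append(qft(n_bits, approximation_degree=0).inverse(), qr[:])
    probs = Statevector.from_instruction(qc).probabilities()
    return int(np.argmax(probs))         # index of the dominant basis state

print(check_add(4, start=5, const=7), (5 + 7) % 2 ** 4)  # both values should read 12
```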
###Markdown
Using the `qft()` and `subroutine_add_const()` from above, we could define `const_adder()` and `cost_calculation()` as below, but in our final optimized solution we notice that we can do all additions in QFT space, thus having only one QFT transform in each iteration.
###Code
#
# This is how a **single** QFT addition of a const to the
# data register would look like, however we'll not use this method,
# but instead do a single QFT, do all phase rotations there, before
# doing the inverse IQFT transformation.
# (This is actually the main idea in optimizing the 4c solution.)
#
def const_adder(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr_data)
qc.append(qft(data_qubits), qr_data[:])
qc.append(subroutine_add_const(data_qubits, const, to_gate=False).to_gate(), qr_data[:])
qc.append(qft(data_qubits).inverse(), qr_data[:])
return qc.to_gate(label=" [+" + str(const) + "] ") if to_gate else qc
#
# This is how the QRAM cost calculation function would look like, however
# we will not use it directly, but do all additions in QFT space, wrapped
# in a single QFT.
#
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
# note the -1 bellow - the cost would fit in (data_qubits-1) bits,
# and we'll use the most significant bit to flag solutions with an infeasible cost.
for i, (val1, val2) in enumerate(zip(list1, list2)):
qc.append(const_adder(data_qubits - 1, val2).control(1), [qr_index[i]] + qr_data[:-1])
qc.x(qr_index[i])
qc.append(const_adder(data_qubits - 1, val1).control(1), [qr_index[i]] + qr_data[:-1])
qc.x(qr_index[i])
return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
#
# This is how `constraint_testing` could be implemented, when
# using the above `cost_calculation()`.
# (We'll be using a single QFT however - see the next `cost_calculation()` implementation below)
#
def constraint_testing(data_qubits: int, C_max: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
c = data_qubits - 1
w = 2**c - (C_max+1)
qc.append(const_adder(data_qubits, w), qr_data[:]) # offset by w = 2**c -(C_max+1) to flag infeasible costs in MSB
# set the flag qubit when the MSB is set
# qc.cx(qr_data[-1], qr_f) # we'll be using the MSB directly without the explicit qr_f qubit
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
###Output
_____no_output_____
###Markdown
And now we present the final optimized version of `cost_calculation()`, where only a single QFT is used to do all cost additions in QFT space. We'll also perform the addition of the offset factor `w` here, so that we can do all additions with a single QFT. The overall structure of the `cost_calculation()` would be like:```pythonc = data_qubits - 1w = 2 ** c - (C_max + 1) QFTqc.append(qft(data_qubits), qr_data[:]) qft QRAMfor i, (val1, val2) in enumerate(zip(list1, list2)): assert val1 == 0 qc.append(subroutine_add_const(data_qubits, val2).control(1), [qr_index[i]] + qr_data[:]) offset by w to flag infeasible costs directly with the data_qubits MSBqc.append(subroutine_add_const(data_qubits, w), qr_data[:]) inverse QFTqc.append(qft(data_qubits).inverse(), qr_data[:])```We can now note that the data qubits all start in the `|0>` state, and by observing the QFT figure in [arXiv:quant-ph/0008033](https://arxiv.org/pdf/quant-ph/0008033.pdf) we note that the QFT acts on each data qubit by applying an H-Gate first, followed by conditional rotations. With all control qubits still in the `|0>` state, these conditional rotations have no effect. Therefore, this initial QFT (acting on data qubits in the `|0>` state) can be replaced by a single layer of H-Gates (a small verification sketch follows the next code cell).
###Code
#
# This is the actual cost_calculation() used in the final solution:
#
# Here we'll complete the QRAM cost addition to the data register
# in QFT space and also add the w = (2^c - (C_max+1)) offset term,
# so that the most significant bit of the data register is set to 1 whenever
# the cost in the data register exceeds C_max (i.e. cost > C_max).
#
# N.B. even though the paper uses a (cost >= C_max) condition to set a penalty
# of alpha*(cost - C_max), that penalty would be zero for cost == C_max,
# therefore we use the strict inequality.
#
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate: bool = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
## lets mark only cost > C_max
c = data_qubits - 1 # use the MSB in data directly as a flag qubit
w = 2 ** c - (C_max + 1)
###########
## QFT start - do all additions in QFT space
# qc.append(qft(data_qubits), qr_data[:])
qc.h(qr_data[:]) # initially all data qubits are |0> so qft is just Hadamard
# QRAM
for i, (val1, val2) in enumerate(zip(list1, list2)):
assert val1 == 0
qc.append(subroutine_add_const(data_qubits, val2).control(1), [qr_index[i]] + qr_data[:])
qc.append(subroutine_add_const(data_qubits, w), qr_data[:]) # offset by w = 2**c -(C_max+1) to flag infeasible costs in MSB
qc.append(qft(data_qubits).inverse(), qr_data[:])
## QFT end
####################
return qc.to_gate(label=" Cost Constraint Testing ") if to_gate else qc
#
# After returning from QFT space, we can use the MSB of the data register
# to set the flag qubit (for infeasible solutions), but instead
# we'll be using the MSB directly.
#
def constraint_testing(data_qubits: int, C_max: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
# qc.cx(qr_data[-1], qr_f) # we'll be using the MSB directly without the explicit qr_f qubit
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
###Output
_____no_output_____
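A quick verification of the H-for-QFT replacement (a sketch assuming `qiskit.quantum_info.Statevector`): applied to `|0...0>`, the exact QFT and a plain layer of Hadamards produce the same state.
```python
from qiskit.quantum_info import Statevector

n = 4
qc_qft = QuantumCircuit(n)
qc_qft.append(qft(n, approximation_degree=0), list(range(n)))  # exact QFT on |0...0>

qc_h = QuantumCircuit(n)
qc_h.h(list(range(n)))                                         # plain Hadamard layer

print(Statevector.from_instruction(qc_qft).equiv(Statevector.from_instruction(qc_h)))  # expect True
```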
###Markdown
The `penalty_dephasing()` implementation is straightforward, following Figure 13 in [arXiv:1908.02210](https://arxiv.org/abs/1908.02210).
###Code
# penalty part
def penalty_dephasing(data_qubits: int, alpha: float, gamma: float, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
c = data_qubits - 1
#
# we use the qr_data[-1] as a flag directly
#
qr_f = qr_data[-1]
for k in range(c):
qc.cp(alpha * gamma * (2**k), qr_f, qr_data[k])
qc.p(-alpha * gamma * (2**c), qr_f)
return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
###Output
_____no_output_____
###Markdown
We then use `reinitialization()` to invert the calculation on the data and flag (well, we don't use that one) qubits, by applying the inverses of the transforms done so far.
###Code
# penalty part
def reinitialization(index_qubits: int, data_qubits: int, C1: list, C2: list, C_max: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_index, qr_data, qr_f)
# constrain_testing is empty see above
qc.append(constraint_testing(data_qubits, C_max).inverse(), qr_data[:] + qr_f[:])
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2, to_gate=True).inverse(), qr_index[:] + qr_data[:])
return qc.to_gate(label=" Reinitialization ") if to_gate else qc
###Output
_____no_output_____
###Markdown
The `mixing_operator()` is also quite straightforward, following the paper:
###Code
### Mixing Operator ###
def mixing_operator(index_qubits: int, beta: float, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
for ndx in range(index_qubits):
qc.rx(2 * beta, ndx)
return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
###Output
_____no_output_____
###Markdown
Now, before combining all we have so far, let's prepare a Python decorator that we'll be using to decorate our `solver_function()` so it gets optimized by Qiskit:
###Code
OPT_LEVEL = 3
def transpile(qc: QuantumCircuit) -> QuantumCircuit:
# this was added, but can be disabled with setting OPT_LEVEL to -1 above.
if OPT_LEVEL != -1:
qc = qiskit.transpile(qc, basis_gates=['cx', 'u3'], seed_transpiler=42, optimization_level=OPT_LEVEL)
return qc
def transpile_optimize(fn):
def wrapper(*args, **kwargs):
return transpile(fn(*args, **kwargs))
return wrapper
###Output
_____no_output_____
###Markdown
This way we end up with the following `solver_function()` function, also used for the final submission:
###Code
from typing import Union
import math
import numpy as np
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit import Gate
# True for using the explicit flag qubit, False for using the MSB in data register
# when USE_FLAG_QUBIT==True - we need only (data_qubits-1) qubits
USE_SCORING = "new" # new, old
USE_5_QUBITS = True # True to use 1 qubit less than the template suggestion
QFT_AD = 1 # QFT approximation degree (number of terms to omit)
OPT_LEVEL = 3 # transpiler optimization level (0-3) or -1 to disable
if USE_SCORING == 'new':
QFT_AD = 2
USE_5_QUBITS = True
elif USE_SCORING == 'old':
QFT_AD = 1
USE_5_QUBITS = False
else:
raise "Unexpected value for USE_SCORING (expecting one of {new, old})"
def transpile(qc: QuantumCircuit) -> QuantumCircuit:
# this was added, but can be disabled with setting OPT_LEVEL to -1 above.
if OPT_LEVEL != -1:
qc = qiskit.transpile(qc, basis_gates=['cx', 'u3'], seed_transpiler=42, optimization_level=OPT_LEVEL)
return qc
def transpile_optimize(fn):
def wrapper(*args, **kwargs):
return transpile(fn(*args, **kwargs))
return wrapper
@transpile_optimize
def solver_function(L1: list, L2: list, C1: list, C2: list, C_max: int) -> QuantumCircuit:
"""
Solves the 4c knapsack problem (assuming C1<C2, and L1<L2).
"""
# print name and score
author = 'Kamen Petroff (kpe)'
score = 'old:260_084 new:3_482_625'
print(f'{author}: {score}')
# first let's convert it to the equivalent problem with cost (0, C2-C1)
C1, C2 = np.array(C1), np.array(C2)
C1, C2, C_max = C1 - C1, C2 - C1, C_max - C1.sum()
# the number of qubits representing answers
index_qubits = len(L1)
# the maximum possible total cost
max_c = sum([max(l0, l1) for l0, l1 in zip(C1, C2)])
# the number of qubits representing data values can be defined using the maximum possible total cost as follows:
data_qubits = math.ceil(math.log(max_c, 2)) + 1 if not max_c & (max_c - 1) == 0 else math.ceil(math.log(max_c, 2)) + 2
if USE_5_QUBITS:
data_qubits -= 1
### Phase Operator ###
# return part
def phase_return(index_qubits: int, gamma: float, L1: list, L2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
for ndx in range(index_qubits):
qc.p(- gamma * (L2[ndx] - L1[ndx]), qr_index[ndx])
return qc.to_gate(label=" phase return ") if to_gate else qc
#
# QFT implementation (with qiskit bit ordering).
#
def qft(data_qubits: int, approximation_degree: int = QFT_AD, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr)
for target in reversed(range(data_qubits)):
qc.h(target)
for k, control in enumerate(reversed(range(target)), start=2):
if k > (data_qubits - approximation_degree):
continue
qc.cp(2 * np.pi / (2 ** k), control, target)
return qc.to_gate(label=" qft ") if to_gate else qc
#
# Adds a constant to tha data register in QFT space.
# Should be used with the above QFT implementation.
#
def subroutine_add_const(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr_data)
# prepares the const bits in a list
cbits = list(map(int, reversed(f"{np.abs(const):02b}".zfill(data_qubits))))
for target in reversed(range(data_qubits)):
tsum = 0 # cumulative phase for target
for k, control in enumerate(reversed(range(target + 1)), start=1):
cbit = cbits[control]
if cbit:
tsum += 1 / 2 ** k
if tsum > 0:
qc.p(np.sign(const) * 2 * np.pi * tsum, qr_data[target])
return qc.to_gate(label=" [+" + str(const) + "] ") if to_gate else qc
#
# This is how a **single** QFT addition of a const to the
# data register would look like, however we'll not use this method,
# but instead do a single QFT, do all phase rotations there, before
# doing the inverse IQFT transformation.
# (This is actually the main idea in optimizing the 4c solution.)
#
def const_adder(data_qubits: int, const: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits)
qc = QuantumCircuit(qr_data)
qc.append(qft(data_qubits), qr_data[:])
qc.append(subroutine_add_const(data_qubits, const, to_gate=False).to_gate(), qr_data[:])
qc.append(qft(data_qubits).inverse(), qr_data[:])
return qc.to_gate(label=" [+" + str(const) + "] ") if to_gate else qc
#
# This is how the QRAM cost calculation function would look like, however
# we will not use it directly, but do all additions in QFT space, wrapped
# in a single QFT.
#
def _cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
# note the -1 bellow - the cost would fit in (data_qubits-1) bits,
# and we'll use the most significant bit to flag solutions with an infeasible cost.
for i, (val1, val2) in enumerate(zip(list1, list2)):
qc.append(const_adder(data_qubits - 1, val2).control(1), [qr_index[i]] + qr_data[:-1])
qc.x(qr_index[i])
qc.append(const_adder(data_qubits - 1, val1).control(1), [qr_index[i]] + qr_data[:-1])
qc.x(qr_index[i])
return qc.to_gate(label=" Cost Calculation ") if to_gate else qc
#
# This is the actual cost_calculation() used:
#
# Here we'll complete the QRAM cost addition to the data register
# in QFT space and also add a (2^c - (C_max+1)) term, so that the
# most significant bit of the data register is set to 1 whenever
# the cost in the data register exceeds C_max (i.e. cost > C_max).
#
    # N.B. even though the paper uses a (cost >= C_max) condition to set a penalty
    # of alpha*(cost - C_max), that penalty would be zero for cost == C_max,
    # therefore we use a strict inequality.
#
def cost_calculation(index_qubits: int, data_qubits: int, list1: list, list2: list, to_gate: bool = True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qc = QuantumCircuit(qr_index, qr_data)
## lets mark only cost > C_max
c = data_qubits - 1 # use the MSB in data directly as a flag qubit
w = 2 ** c - (C_max + 1)
###########
## QFT start - do all additions in QFT space
# qc.append(qft(data_qubits), qr_data[:])
qc.h(qr_data[:]) # initially all data qubits are |0> so qft is just Hadamard
# QRAM
for i, (val1, val2) in enumerate(zip(list1, list2)):
assert val1 == 0
qc.append(subroutine_add_const(data_qubits, val2).control(1), [qr_index[i]] + qr_data[:])
qc.append(subroutine_add_const(data_qubits, w), qr_data[:]) # offset by w = 2**c -(C_max+1) to flag infeasible costs in MSB
qc.append(qft(data_qubits).inverse(), qr_data[:])
## QFT end
####################
return qc.to_gate(label=" Cost Constraint Testing ") if to_gate else qc
#
# After returning from QFT space, we can use the MSB of the data register
# to set the flag qubit (for infeasible solutions).
#
def constraint_testing(data_qubits: int, C_max: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
# qc.cx(qr_data[-1], qr_f) # we'll be using the MSB directly without the explicit qr_f qubit
return qc.to_gate(label=" Constraint Testing ") if to_gate else qc
# penalty part
def penalty_dephasing(data_qubits: int, alpha: float, gamma: float, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_data, qr_f)
c = data_qubits - 1
#
# we use the qr_data[-1] as a flag directly
#
qr_f = qr_data[-1]
for k in range(c):
qc.cp(alpha * gamma * (2**k), qr_f, qr_data[k])
qc.p(-alpha * gamma * (2**c), qr_f)
return qc.to_gate(label=" Penalty Dephasing ") if to_gate else qc
# penalty part
def reinitialization(index_qubits: int, data_qubits: int, C1: list, C2: list, C_max: int, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qr_data = QuantumRegister(data_qubits, "data")
qr_f = QuantumRegister(1, "flag")
qc = QuantumCircuit(qr_index, qr_data, qr_f)
# constrain_testing is empty see above
qc.append(constraint_testing(data_qubits, C_max).inverse(), qr_data[:] + qr_f[:])
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2, to_gate=True).inverse(), qr_index[:] + qr_data[:])
return qc.to_gate(label=" Reinitialization ") if to_gate else qc
### Mixing Operator ###
def mixing_operator(index_qubits: int, beta: float, to_gate=True) -> Union[Gate, QuantumCircuit]:
qr_index = QuantumRegister(index_qubits, "index")
qc = QuantumCircuit(qr_index)
for ndx in range(index_qubits):
qc.rx(2 * beta, ndx)
return qc.to_gate(label=" Mixing Operator ") if to_gate else qc
#
# everything bellow this line was not touched, with exception
# of the transpiling/optimization in the last line which can be
# controlled using the OPT_LEVEL=-1 define above.
#
##############################
qr_index = QuantumRegister(index_qubits, "index") # index register
qr_data = QuantumRegister(data_qubits, "data") # data register
qr_f = QuantumRegister(1, "flag") # flag register
cr_index = ClassicalRegister(index_qubits, "c_index") # classical register storing the measurement result of index register
qc = QuantumCircuit(qr_index, qr_data, qr_f, cr_index)
### initialize the index register with uniform superposition state ###
qc.h(qr_index)
### DO NOT CHANGE THE CODE BELOW
p = 5
alpha = 1
for i in range(p):
### set fixed parameters for each round ###
beta = 1 - (i + 1) / p
gamma = (i + 1) / p
### return part ###
qc.append(phase_return(index_qubits, gamma, L1, L2), qr_index)
### step 1: cost calculation ###
qc.append(cost_calculation(index_qubits, data_qubits, C1, C2), qr_index[:] + qr_data[:])
### step 2: Constraint testing ###
qc.append(constraint_testing(data_qubits, C_max), qr_data[:] + qr_f[:])
### step 3: penalty dephasing ###
qc.append(penalty_dephasing(data_qubits, alpha, gamma), qr_data[:] + qr_f[:])
### step 4: reinitialization ###
qc.append(reinitialization(index_qubits, data_qubits, C1, C2, C_max), qr_index[:] + qr_data[:] + qr_f[:])
### mixing operator ###
qc.append(mixing_operator(index_qubits, beta), qr_index)
### measure the index ###
### since the default measurement outcome is shown in big endian, it is necessary to reverse the classical bits in order to unify the endian ###
qc.measure(qr_index, cr_index[::-1])
return qc
###Output
_____no_output_____ |
Data_Processing/Analysis.ipynb | ###Markdown
Analysis This Jupyter notebook is the data analysis script used to study the cleaned-up data from the Dyson Smell Station sensors. This script requires the following packages:
###Code
import sys
import pandas as pd
import numpy as np
from pymongo import MongoClient
import matplotlib
from matplotlib import pyplot as plt
import seaborn as sns
import statistics
import config
import math
import datetime
from scipy import stats
###Output
_____no_output_____
###Markdown
Loading Data
###Code
df = pd.read_csv("Export_Data/data.csv")
df_cleaned = pd.read_csv("Export_Data/cleanedData.csv")
df_analysis = pd.read_csv("Export_Data/analysisData.csv")
dailyH2S = pd.read_csv("Export_Data/dailyH2S.csv")
df = df.drop("Unnamed: 0", axis=1)
df_cleaned = df_cleaned.drop("Unnamed: 0", axis=1)
df_analysis = df_analysis.drop("Unnamed: 0", axis=1)
df_analysis.head()
###Output
_____no_output_____
###Markdown
Basic Analytics Basic analytics to get a better idea of the parameters of the data.
###Code
df_stats = pd.DataFrame(columns = ['Name', 'Mean', 'Median', 'Std', 'Variance', 'Min', 'Max', 'Range'])
df_cleaned_stats = pd.DataFrame(columns = ['Name', 'Mean', 'Median', 'Std', 'Variance', 'Min', 'Max', 'Range'])
def round_2dp(val): # renamed to avoid shadowing the built-in round(), which made the original definition recurse forever
    return round(val * 100) / 100
for _index, column in enumerate(df):
if column == "timestamp" or column == "date" or column == "time":
pass
else:
_current_column = df[column]
df_stats.loc[_index] = [column, statistics.mean(_current_column),statistics.median(_current_column),statistics.stdev(_current_column), statistics.variance(_current_column),min(_current_column),max(_current_column),max(_current_column)-min(_current_column)]
_current_column = df_cleaned[column]
df_cleaned_stats.loc[_index] = [column, statistics.mean(_current_column),statistics.median(_current_column),statistics.stdev(_current_column), statistics.variance(_current_column),min(_current_column),max(_current_column),max(_current_column)-min(_current_column)]
df_stats
df_cleaned_stats
###Output
_____no_output_____
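As a cross-check (a sketch; the dropped column names are assumed from the cleaning step above), pandas can produce a comparable per-column summary directly:
```python
summary = df_cleaned.drop(columns=["timestamp", "date", "time"], errors="ignore").describe().T
summary["range"] = summary["max"] - summary["min"]
summary
```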
###Markdown
Initial Analysis Plotting the different potentially correlated data sources to study them visually, together with a basic correlation study.
###Code
_fig = ["bin1","bin2"]
sns.lmplot(x=_fig[0], y='H2S', data=df_cleaned)
sns.lmplot(x=_fig[1], y='H2S', data=df_cleaned)
print("R-value bin1: " + str(stats.pearsonr(df_analysis["bin1"], df_analysis["H2S"])[0] ** 2))
print("R-value bin2: " + str(stats.pearsonr(df_analysis["bin2"], df_analysis["H2S"])[0] ** 2))
def histogram_intersection(a, b):
v = np.minimum(a, b).sum().round(decimals=1)
return v
df_analysis.corr(method=histogram_intersection)
df_analysis = df_analysis.reset_index()
df_analysis = df_analysis.drop("index",axis=1)
sns.heatmap(df_analysis)
# sns.heatmap(df_analysis[df_analysis["date"] == datetime.date(2021, 12, 9)].drop("date",axis=1))
corr = df_analysis[(df_analysis["date"] != datetime.date(2021, 12, 11)) & (df_analysis["date"] != datetime.date(2021, 12, 14)) & (df_analysis["date"] != datetime.date(2021, 12, 15)) & (df_analysis["date"] != datetime.date(2021, 12, 13))].corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, annot=True, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
fig,axn = plt.subplots(3, 3, figsize=(20,15))
for index,ax in enumerate(axn.flat):
# Compute the correlation matrix
corr = df_analysis[df_analysis["date"] == datetime.date(2021, 12, 8+index)].drop("date",axis=1).corr()
# Layout
mask = np.triu(np.ones_like(corr, dtype=bool))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, annot=True, mask=mask, cmap=cmap, ax=ax, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
###Output
_____no_output_____
###Markdown
Peak Analysis Analysing the peak values of the daily H2S reading to identify the time of the highest value.
###Code
dailyH2S = pd.read_csv("Export_Data/dailyH2S.csv")
dailyH2S = dailyH2S.drop("Unnamed: 0", axis=1)
dailyH2S["timestamp"] = dailyH2S["time"]
dailyH2S = dailyH2S.drop("time", axis=1)
dailyH2S.head()
from datetime import datetime
largest_values = []
figure, axis = plt.subplots(3, 3,figsize=(12,12))
for _index, _column in enumerate(dailyH2S):
if _column == "hour_minute" or _column == "timestamp":
pass
else:
largest_values.append(dailyH2S[_column].idxmax())
print(dailyH2S.loc[dailyH2S[_column].idxmax(),_column])
axis[_index//3, _index%3].plot(dailyH2S["timestamp"], dailyH2S[_column])
axis[_index//3, _index%3].set_title(_column)
peak = plt.Circle((dailyH2S.loc[largest_values[_index], "timestamp"],dailyH2S.loc[largest_values[_index], _column]),100,color='r')
axis[_index//3, _index%3].add_patch(peak)
now = datetime.fromtimestamp(dailyH2S.loc[largest_values[_index], "timestamp"])
# largest_values
for _index, _column in enumerate(dailyH2S):
print(largest_values[_index])
print(dailyH2S.loc[largest_values[_index], "H2S"])
df.describe()
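# A more compact alternative (sketch, non-sensor column names assumed): the timestamp of each
# column's daily maximum can also be found directly with idxmax().
peak_idx = dailyH2S.drop(columns=["hour_minute", "timestamp"], errors="ignore").idxmax()
print(dailyH2S.loc[peak_idx, "timestamp"])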
###Output
_____no_output_____ |
text-summarization-with-seq2seq-model.ipynb | ###Markdown
**Seq2Seq LSTM Modelling**
###Code
pre['text'][:10]
###Output
_____no_output_____
###Markdown
> **Perform Data Cleansing**
###Code
import re
#Removes non-alphabetic characters:
def text_strip(column):
for row in column:
#ORDER OF REGEX IS VERY VERY IMPORTANT!!!!!!
        row=re.sub("(\\t)", ' ', str(row)).lower() #remove escape characters
row=re.sub("(\\r)", ' ', str(row)).lower()
row=re.sub("(\\n)", ' ', str(row)).lower()
        row=re.sub("(__+)", ' ', str(row)).lower() #remove _ if it occurs more than one time consecutively
        row=re.sub("(--+)", ' ', str(row)).lower() #remove - if it occurs more than one time consecutively
        row=re.sub("(~~+)", ' ', str(row)).lower() #remove ~ if it occurs more than one time consecutively
        row=re.sub("(\+\++)", ' ', str(row)).lower() #remove + if it occurs more than one time consecutively
        row=re.sub("(\.\.+)", ' ', str(row)).lower() #remove . if it occurs more than one time consecutively
row=re.sub(r"[<>()|&©ø\[\]\'\",;?~*!]", ' ', str(row)).lower() #remove <>()|&©ø"',;?~*!
row=re.sub("(mailto:)", ' ', str(row)).lower() #remove mailto:
row=re.sub(r"(\\x9\d)", ' ', str(row)).lower() #remove \x9* in text
row=re.sub("([iI][nN][cC]\d+)", 'INC_NUM', str(row)).lower() #replace INC nums to INC_NUM
row=re.sub("([cC][mM]\d+)|([cC][hH][gG]\d+)", 'CM_NUM', str(row)).lower() #replace CM# and CHG# to CM_NUM
row=re.sub("(\.\s+)", ' ', str(row)).lower() #remove full stop at end of words(not between)
row=re.sub("(\-\s+)", ' ', str(row)).lower() #remove - at end of words(not between)
row=re.sub("(\:\s+)", ' ', str(row)).lower() #remove : at end of words(not between)
        row=re.sub("(\s+.\s+)", ' ', str(row)).lower() #remove any single characters hanging between 2 spaces
#Replace any url as such https://abc.xyz.net/browse/sdf-5327 ====> abc.xyz.net
try:
url = re.search(r'((https*:\/*)([^\/\s]+))(.[^\s]+)', str(row))
repl_url = url.group(3)
row = re.sub(r'((https*:\/*)([^\/\s]+))(.[^\s]+)',repl_url, str(row))
except:
pass #there might be emails with no url in them
row = re.sub("(\s+)",' ',str(row)).lower() #remove multiple spaces
#Should always be last
        row=re.sub("(\s+.\s+)", ' ', str(row)).lower() #remove any single characters hanging between 2 spaces
yield row
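# Illustrative check (not in the original notebook; the sample string is made up): run text_strip()
# on one toy sentence to see the effect of the clean-up, roughly "visit abc.xyz.net now limited offer".
print(next(text_strip(["Visit https://abc.xyz.net/browse/sdf-5327 NOW!!  (limited--offer)\t"])))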
brief_cleaning1 = text_strip(pre['text'])
brief_cleaning2 = text_strip(pre['summary'])
from time import time
import spacy
nlp = spacy.load('en', disable=['ner', 'parser']) # disabling Named Entity Recognition and the parser for speed
#Taking advantage of spaCy .pipe() method to speed-up the cleaning process:
#If data loss seems to be happening (i.e. len(text) = 50 instead of 75, etc.) in this cell, decrease the batch_size parameter
t = time()
text = [str(doc) for doc in nlp.pipe(brief_cleaning1, batch_size=1000, n_threads=-1)]
#Takes 7-8 mins
print('Time to clean up everything: {} mins'.format(round((time() - t) / 60, 2)))
#Taking advantage of spaCy .pipe() method to speed-up the cleaning process:
t = time()
summary = ['_START_ '+ str(doc) + ' _END_' for doc in nlp.pipe(brief_cleaning2, batch_size=1000, n_threads=-1)]
#Takes 7-8 mins
print('Time to clean up everything: {} mins'.format(round((time() - t) / 60, 2)))
text[100]
summary[100]
pre['cleaned_text'] = pd.Series(text)
pre['cleaned_summary'] = pd.Series(summary)
text_count = []
summary_count = []
for sent in pre['cleaned_text']:
text_count.append(len(sent.split()))
for sent in pre['cleaned_summary']:
summary_count.append(len(sent.split()))
graph_df= pd.DataFrame()
graph_df['text']=text_count
graph_df['summary']=summary_count
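# Optional sanity check (a sketch, not part of the original run): plot the
# word-count distributions to confirm that the 100/15 length cut-offs chosen
# below cover most of the data. Assumes matplotlib is installed.
import matplotlib.pyplot as plt
graph_df.hist(bins=30, figsize=(12, 4))
plt.show()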
#The model will summarize texts of up to 100 words into summaries of up to 15 words
max_text_len=100
max_summary_len=15
#Keep only the summary/text pairs that fall within the max lengths defined above
cleaned_text =np.array(pre['cleaned_text'])
cleaned_summary=np.array(pre['cleaned_summary'])
short_text=[]
short_summary=[]
for i in range(len(cleaned_text)):
if(len(cleaned_summary[i].split())<=max_summary_len and len(cleaned_text[i].split())<=max_text_len):
short_text.append(cleaned_text[i])
short_summary.append(cleaned_summary[i])
post_pre=pd.DataFrame({'text':short_text,'summary':short_summary})
post_pre.head(2)
#Add the sostok and eostok tokens at the start and end of each summary
post_pre['summary'] = post_pre['summary'].apply(lambda x : 'sostok '+ x + ' eostok')
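# Quick check (a sketch, not part of the original run): each processed summary
# should now read 'sostok _START_ ... _END_ eostok'.
print(post_pre['summary'].iloc[0])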
###Output
_____no_output_____
###Markdown
**SEQ2SEQ MODEL BUILDING** Split the data into TRAIN and VALIDATION sets
###Code
from sklearn.model_selection import train_test_split
x_tr,x_val,y_tr,y_val=train_test_split(np.array(post_pre['text']),np.array(post_pre['summary']),test_size=0.1,random_state=0,shuffle=True)
#Let's tokenize the text to get the vocabulary count; you could also use spaCy here
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
#prepare a tokenizer for reviews on training data
x_tokenizer = Tokenizer()
x_tokenizer.fit_on_texts(list(x_tr))
###Output
_____no_output_____
###Markdown
**RARE WORD ANALYSIS FOR X i.e. 'text'**
* tot_cnt gives the size of the vocabulary (the number of unique words in the text)
* cnt gives the number of rare words whose count falls below the threshold
* tot_cnt - cnt gives the number of the most common words
###Code
thresh=4
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in x_tokenizer.word_counts.items():
tot_cnt=tot_cnt+1
tot_freq=tot_freq+value
if(value<thresh):
cnt=cnt+1
freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for reviews on training data
x_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
x_tokenizer.fit_on_texts(list(x_tr))
#convert text sequences into integer sequences (each word is replaced by its index)
x_tr_seq = x_tokenizer.texts_to_sequences(x_tr)
x_val_seq = x_tokenizer.texts_to_sequences(x_val)
#pad with zeros up to the maximum length
x_tr = pad_sequences(x_tr_seq, maxlen=max_text_len, padding='post')
x_val = pad_sequences(x_val_seq, maxlen=max_text_len, padding='post')
#size of vocabulary ( +1 for padding token)
x_voc = x_tokenizer.num_words + 1
print("Size of vocabulary in X = {}".format(x_voc))
###Output
Size of vocabulary in X = 33412
###Markdown
**RARE WORD ANALYSIS FOR Y i.e. 'summary'**
* tot_cnt gives the size of the vocabulary (the number of unique words)
* cnt gives the number of rare words whose count falls below the threshold
* tot_cnt - cnt gives the number of the most common words
###Code
#prepare a tokenizer for reviews on training data
y_tokenizer = Tokenizer()
y_tokenizer.fit_on_texts(list(y_tr))
thresh=6
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in y_tokenizer.word_counts.items():
tot_cnt=tot_cnt+1
tot_freq=tot_freq+value
if(value<thresh):
cnt=cnt+1
freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for reviews on training data
y_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
y_tokenizer.fit_on_texts(list(y_tr))
#convert text sequences into integer sequences (each word in Y is replaced by its index)
y_tr_seq = y_tokenizer.texts_to_sequences(y_tr)
y_val_seq = y_tokenizer.texts_to_sequences(y_val)
#pad with zeros up to the maximum length
y_tr = pad_sequences(y_tr_seq, maxlen=max_summary_len, padding='post')
y_val = pad_sequences(y_val_seq, maxlen=max_summary_len, padding='post')
#size of vocabulary
y_voc = y_tokenizer.num_words +1
print("Size of vocabulary in Y = {}".format(y_voc))
###Output
Size of vocabulary in Y = 11581
###Markdown
We will now drop the training and validation pairs whose summary (Y) contains only the _START_ and _END_ tokens
###Code
ind=[]
for i in range(len(y_tr)):
cnt=0
for j in y_tr[i]:
if j!=0:
cnt=cnt+1
if(cnt==2):
ind.append(i)
y_tr=np.delete(y_tr,ind, axis=0)
x_tr=np.delete(x_tr,ind, axis=0)
ind=[]
for i in range(len(y_val)):
cnt=0
for j in y_val[i]:
if j!=0:
cnt=cnt+1
if(cnt==2):
ind.append(i)
y_val=np.delete(y_val,ind, axis=0)
x_val=np.delete(x_val,ind, axis=0)
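# Quick check (a sketch, not part of the original run): confirm how many pairs
# remain after dropping the summaries that held only the start/end tokens.
print(x_tr.shape, y_tr.shape, x_val.shape, y_val.shape)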
from keras import backend as K
import gensim
from numpy import *
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
import warnings
pd.set_option("display.max_colwidth", 200)
warnings.filterwarnings("ignore")
print("Size of vocabulary from the w2v model = {}".format(x_voc))
K.clear_session()
latent_dim = 300
embedding_dim=200
# Encoder
encoder_inputs = Input(shape=(max_text_len,))
#embedding layer
enc_emb = Embedding(x_voc, embedding_dim,trainable=True)(encoder_inputs)
#encoder lstm 1
encoder_lstm1 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
#encoder lstm 2
encoder_lstm2 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
#encoder lstm 3
encoder_lstm3=LSTM(latent_dim, return_state=True, return_sequences=True,dropout=0.4,recurrent_dropout=0.4)
encoder_outputs, state_h, state_c= encoder_lstm3(encoder_output2)
# Set up the decoder, using the encoder's final states as the initial state.
decoder_inputs = Input(shape=(None,))
#embedding layer
dec_emb_layer = Embedding(y_voc, embedding_dim,trainable=True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True,dropout=0.4,recurrent_dropout=0.2)
decoder_outputs,decoder_state_h,decoder_state_c = decoder_lstm(dec_emb,initial_state=[state_h, state_c])
#dense layer
decoder_dense = TimeDistributed(Dense(y_voc, activation='softmax'))
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',metrics=['sparse_categorical_accuracy'] )
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,patience=2)
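# Optional (a sketch, not used in the original run): a ModelCheckpoint callback can
# be added alongside early stopping so the best weights are kept on disk.
# The file name 'summarizer_best.h5' is an arbitrary assumption.
from tensorflow.keras.callbacks import ModelCheckpoint
mc = ModelCheckpoint('summarizer_best.h5', monitor='val_loss', mode='min', save_best_only=True, verbose=1)
# To use it, pass callbacks=[es, mc] to model.fit in the next cell.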
###Output
_____no_output_____
###Markdown
**Start fitting the model with the data**
###Code
history=model.fit([x_tr,y_tr[:,:-1]], y_tr.reshape(y_tr.shape[0],y_tr.shape[1], 1)[:,1:] ,
epochs=10,callbacks=[es],batch_size=128,
validation_data=([x_val,y_val[:,:-1]], y_val.reshape(y_val.shape[0],y_val.shape[1], 1)[:,1:]))
###Output
Epoch 1/10
692/692 [==============================] - 790s 1s/step - loss: 5.1226 - sparse_categorical_accuracy: 0.3522 - val_loss: 4.7997 - val_sparse_categorical_accuracy: 0.3805
Epoch 2/10
692/692 [==============================] - 772s 1s/step - loss: 4.7341 - sparse_categorical_accuracy: 0.3820 - val_loss: 4.5658 - val_sparse_categorical_accuracy: 0.3939
Epoch 3/10
692/692 [==============================] - 774s 1s/step - loss: 4.4980 - sparse_categorical_accuracy: 0.3959 - val_loss: 4.3545 - val_sparse_categorical_accuracy: 0.4059
Epoch 4/10
692/692 [==============================] - 763s 1s/step - loss: 4.2882 - sparse_categorical_accuracy: 0.4082 - val_loss: 4.1704 - val_sparse_categorical_accuracy: 0.4168
Epoch 5/10
692/692 [==============================] - 754s 1s/step - loss: 4.0942 - sparse_categorical_accuracy: 0.4205 - val_loss: 4.0206 - val_sparse_categorical_accuracy: 0.4271
Epoch 6/10
692/692 [==============================] - 751s 1s/step - loss: 3.9452 - sparse_categorical_accuracy: 0.4311 - val_loss: 3.9016 - val_sparse_categorical_accuracy: 0.4368
Epoch 7/10
692/692 [==============================] - 752s 1s/step - loss: 3.8225 - sparse_categorical_accuracy: 0.4408 - val_loss: 3.8095 - val_sparse_categorical_accuracy: 0.4448
Epoch 8/10
692/692 [==============================] - 754s 1s/step - loss: 3.7218 - sparse_categorical_accuracy: 0.4495 - val_loss: 3.7352 - val_sparse_categorical_accuracy: 0.4509
Epoch 9/10
692/692 [==============================] - 754s 1s/step - loss: 3.6324 - sparse_categorical_accuracy: 0.4574 - val_loss: 3.6720 - val_sparse_categorical_accuracy: 0.4573
Epoch 10/10
692/692 [==============================] - 756s 1s/step - loss: 3.5537 - sparse_categorical_accuracy: 0.4647 - val_loss: 3.6190 - val_sparse_categorical_accuracy: 0.4611
###Markdown
**Visualize the model learning**
###Code
from matplotlib import pyplot
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
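# Optional (a sketch, not part of the original run): the same history object also
# holds the accuracy curves for the metric tracked above.
pyplot.plot(history.history['sparse_categorical_accuracy'], label='train')
pyplot.plot(history.history['val_sparse_categorical_accuracy'], label='test')
pyplot.legend()
pyplot.show()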
###Output
_____no_output_____
###Markdown
**Next, let's build the dictionaries that convert an index back to a word for the target and source vocabularies:**
###Code
reverse_target_word_index=y_tokenizer.index_word
reverse_source_word_index=x_tokenizer.index_word
target_word_index=y_tokenizer.word_index
# Encode the input sequence to get the feature vector
encoder_model = Model(inputs=encoder_inputs,outputs=[encoder_outputs, state_h, state_c])
# Decoder setup
# Below tensors will hold the states of the previous time step
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_hidden_state_input = Input(shape=(max_text_len,latent_dim))
# Get the embeddings of the decoder sequence
dec_emb2= dec_emb_layer(decoder_inputs)
# To predict the next word in the sequence, set the initial states to the states from the previous time step
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c])
# A dense softmax layer to generate prob dist. over the target vocabulary
decoder_outputs2 = decoder_dense(decoder_outputs2)
# Final decoder model
decoder_model = Model(
[decoder_inputs] + [decoder_hidden_state_input,decoder_state_input_h, decoder_state_input_c],
[decoder_outputs2] + [state_h2, state_c2])
###Output
_____no_output_____
###Markdown
**Below we define the function that implements the inference (greedy decoding) process**
###Code
def decode_sequence(input_seq):
# Encode the input as state vectors.
e_out, e_h, e_c = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
# Populate the first word of target sequence with the start word.
target_seq[0, 0] = target_word_index['sostok']
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + [e_out, e_h, e_c])
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_token = reverse_target_word_index[sampled_token_index]
if(sampled_token!='eostok'):
decoded_sentence += ' '+sampled_token
# Exit condition: either hit max length or find stop word.
if (sampled_token == 'eostok' or len(decoded_sentence.split()) >= (max_summary_len-1)):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update internal states
e_h, e_c = h, c
return decoded_sentence
###Output
_____no_output_____
###Markdown
**Let us define the functions that convert an integer sequence back into a word sequence, for the summaries as well as the reviews:**
###Code
def seq2summary(input_seq):
newString=''
for i in input_seq:
if((i!=0 and i!=target_word_index['sostok']) and i!=target_word_index['eostok']):
newString=newString+reverse_target_word_index[i]+' '
return newString
def seq2text(input_seq):
newString=''
for i in input_seq:
if(i!=0):
newString=newString+reverse_source_word_index[i]+' '
return newString
###Output
_____no_output_____
###Markdown
**Run the model over the data to see the results**
###Code
for i in range(0,100):
print("Review:",seq2text(x_tr[i]))
print("Original summary:",seq2summary(y_tr[i]))
print("Predicted summary:",decode_sequence(x_tr[i].reshape(1,max_text_len)))
print("\n")
###Output
Review: pope francis on tuesday called for respect for each ethnic group in speech delivered in myanmar avoiding reference to the rohingya minority community as the nation works to restore peace the healing of wounds must be priority he said the pope myanmar visit comes amid the country military crackdown resulting in the rohingya refugee crisis
Original summary: start pope avoids mention of rohingyas in key myanmar speech end
Predicted summary: start pope calls for not rohingya muslims end
Review: students of government school in uttar pradesh sambhal were seen washing dishes at in school premises on being approached basic shiksha adhikari virendra pratap singh said yes have also received this complaint from elsewhere we are inquiring and action will be taken against those found guilty
Original summary: start students seen washing dishes at govt school in up end
Predicted summary: start up school students protest over sexual assault cases end
Review: apple india profit surged by 140 in 2017 18 to crore compared to ã¢ââ¹373 crore in the previous fiscal the indian unit of the us based company posted 12 growth in revenue last fiscal at ã¢ââ¹13 crore apple share of the indian smartphone market dropped to 1 in the second quarter of 2018 according to counterpoint research
Original summary: start apple india profit rises 140 to nearly ã¢ââ¹900 crore in fy18 end
Predicted summary: start apple india profit rises to ã¢ââ¹1 crore in fy18 end
Review: uber has launched its electric scooter service in santa monica us at 1 to unlock and then 15 cents per minute to ride it comes after uber acquired the bike sharing startup jump for reported amount of 200 million uber said it is branding the scooters with jump for the sake of consistency for its other personal electric vehicle services
Original summary: start uber launches electric scooter service in us at 1 per ride end
Predicted summary: start uber launches its self driving car in self driving car end
Review: around 80 people were injured in accidents related to kite flying during celebrations of makar sankranti in rajasthan jaipur officials said the victims included those who fell while flying kites and those injured by glass coated kite string officials added meanwhile around 100 birds were reported to be injured by between january 13 and 15
Original summary: start 80 people injured in flying related accidents in jaipur end
Predicted summary: start 12 dead as fire breaks out in uttarakhand end
Review: uk entrepreneur richard browning has announced the launch of his startup gravity which has created flight jet powered suit that will be priced at about ã¢ââ¹1 3 crore the suit has custom built exoskeleton with six attached micro jet engines fuelled by kerosene from backpack browning claims the can travel at speed of up to 450 kmph
Original summary: start startup makes ã¢ââ¹1 3 crore jet powered flying suit end
Predicted summary: start uk firm launches its first ever flight end
Review: andhra pradesh chief minister chandrababu naidu on monday announced that his government will provide 100 units free power to most backward classes he added that the government would also give aid of up to ã¢ââ¹15 lakh to backward classes for foreign education we will spread out the poverty eradication program under pro basis he further said n
Original summary: start most backward classes to get 100 units free power andhra cm end
Predicted summary: start govt to launch new new state of 2019 govt end
Review: taking dig at pm modi congress president rahul gandhi tweeted while our pm around his garden making yoga videos india leads afghanistan syria in rape violence against women this comes after thomson reuters foundation survey declared india as world most dangerous country for women pm modi shared video of himself doing yoga and other exercises last week
Original summary: start pm modi makes yoga videos while india leads in rape rahul end
Predicted summary: start india is the world first ever pm modi end
Review: external affairs minister sushma swaraj on saturday called upon the united nations to pass the comprehensive convention on international terrorism to end pakistan sponsored terrorism proposed by india in 1996 aims to arrive at universal definition of terrorism ban all terror groups prosecute terrorists under special laws and make cross border terrorism an offence
Original summary: start india calls on un to pass global anti terror convention end
Predicted summary: start pakistan calls for terror attacks on terror attacks end
Review: the 23 richest indians in the 500 member bloomberg billionaires index saw wealth erosion of 21 billion this year lakshmi mittal who controls the world largest steelmaker arcelormittal lost 5 6 billion or 29 of his net worth followed by sun pharma founder dilip shanghvi whose wealth declined 4 6 billion asia richest person mukesh ambani added 4 billion to his fortune
Original summary: start lakshmi mittal lost 10 bn in 2018 ambani added 4 bn end
Predicted summary: start world richest richest man in 10 years end
Review: the haryana police have arrested 19 year old for killing 75 year old woman after attempting rape on her in village in the state the accused gagged the victim with scarf and hit brick on her head when she shouted for help the teenager mother then washed the blood from their house and outside to save him
Original summary: start teen kills lady after rape attempt mother cleans blood to save him end
Predicted summary: start woman kills self in school over rape case end
Review: a in pakistan has been jailed for 24 years for blackmailing and harassing nearly 200 lady doctors and nurses he introduced himself as military intelligence official according to the complainant who filed the case he was arrested in 2015 for hacking the whatsapp accounts of lady doctors blackmailing them with objectionable content and extorting money from them
Original summary: start pak jailed for harassing 200 lady doctors nurses end
Predicted summary: start pak man jailed for refusing to enter sabarimala end
Review: a doctor at andhra pradesh government hospital allegedly attacked the hospital superintendent with an injection containing hiv infected blood while the attempt to inject the infected blood was thwarted he managed to spray the blood on the senior dress the accused who said he only wanted to scare the superintendent was reportedly upset over being reprimanded by him
Original summary: start andhra doc tries to senior with hiv infected blood end
Predicted summary: start man who stole from car in andhra pradesh end
Review: after congress vice president rahul gandhi slammed pm narendra modi while talking at the university of california party leader anand sharma backed him saying it justified to condemn prime minister in democracy if they bjp leaders think we ll ask them before making any statement they are wrong they should be prepared for this kind of attacks he added
Original summary: start pm justified in democracy cong on rahul remark end
Predicted summary: start pm modi is not the pm modi on rahul gandhi end
Review: former england cricket team spinner ashley giles will succeed former captain andrew strauss as the managing director of england men cricket in january the ecb has confirmed tom harrison the chief executive described giles as the candidate amongst very strong field giles was part of england 2005 ashes winning team
Original summary: start appointed director of cricket end
Predicted summary: start bcci ceo appointed as coach of bcci end
Review: a case has been registered against raja chauhan for firing gunshots in violence during bharat bandh called by dalit groups in madhya pradesh gwalior on monday video showing chauhan alleged to be bjp worker firing during the protest had surfaced online of the 12 people killed during the nationwide protests at least three were from gwalior
Original summary: start case filed man who fired in dalit protests end
Predicted summary: start bjp leader shot dead by police in up end
Review: there are believed to be 80 different ways to spell the name of english playwright william shakespeare including and shakespeare is known to have signed his name using variations such as and due to no proper documentation april 23 is regarded as his birth date and april 23 as his death date
Original summary: start there are 80 recorded ways to spell shakespeare end
Predicted summary: start i am the most expensive in the year end
Review: actor shahid kapoor while speaking about ranveer singh portrayal of alauddin khilji in the film padmaavat said he would have played the character differently we are two different actors and our style of acting is different he added earlier ranveer had said he would have played shahid character in the film better than shahid
Original summary: start i would have played khilji differently shahid kapoor end
Predicted summary: start i am not the best actor in the deepika ranveer end
Review: minors cannot opt out of aadhaar after turning 18 year old since it not permissible under the aadhaar act uidai has told the supreme court they can lock their biometrics permanently and can unlock it temporarily if needed for biometric authentication further the sc was told the biometrics failure rate at the national level was 6 for fingerprints and 8 54 for iris
Original summary: start minors can opt out of aadhaar after turning 18 uidai end
Predicted summary: start aadhaar not mandatory for aadhaar sc end
Review: producer ekta kapoor slammed karni sena threat that they will cut deepika padukone nose over the row on padmavati and asked aren we getting these jailed for open threats and attacks in the tweet she also wrote about an incident wherein people threw bottles at her at an event in jaipur over her tv series akbar
Original summary: start aren those threatening deepika getting jailed asks ekta end
Predicted summary: start deepika is not to be deepika on padmavati row end
Review: talking about being ranked 7th on forbes list of world highest paid actors 2018 akshay kumar said it feels good but never take these lists too seriously they keep changing like seasons he further said the idea is only to do good better and best work but of course forever grateful when things like these come my way
Original summary: start feels good akshay on being 7th highest paid actor end
Predicted summary: start i was the world best actor in world best actor end
Review: an indian origin couple was killed by their daughter ex boyfriend in an apparent revenge crime in the us on friday the 24 year old suspect mirza fatally shot naren prabhu silicon valley tech executive and his wife in san jose the police called the swat team after stand off with who was later killed after bullet hit him
Original summary: start indian origin couple killed in revenge crime in us end
Predicted summary: start us man who killed wife wife on his death end
Review: a 64 year old father and his 35 year old daughter flew their last flight together as british airways pilots on thursday david said he felt mixed emotions at retiring and was glad his daughter kat would continue their legacy the two had flown together numerous times and kat said she believed their father daughter bond helped them in the cockpit
Original summary: start father and daughter fly last flight together as ba pilots end
Predicted summary: start woman who was the world bank to be auctioned end
Review: pakistan chief selector inzamam ul haq nephew imam ul haq collided with wicketkeeper brien and kane while completing single off ireland first ever test delivery on saturday debutant imam fell flat on his back after his head into brien hip while trying to make his crease uncle would ve just both fielders user tweeted reacting to the incident
Original summary: start imam collides with two on ireland first test ball end
Predicted summary: start pak pacer takes catch with catch on field end
Review: reacting to apple launch event on wednesday several users took to twitter saying dual sim they are really going after indians here tweet read the iphone xs max will come in smaller model ipad mini while another mocked weakening rupee against dollar saying apple watch series can detect fall someone make the rupee wear it
Original summary: start dual sim they re going after indians twitter on new iphones end
Predicted summary: start apple is the most important for the time apple ceo end
Review: pakistan on sunday rejected india request for consular access to kulbhushan jadhav for the time saying he is not an ordinary citizen pakistan foreign office claimed that he was sent by an indian intelligence agency and that he killed several innocent pakistanis jadhav was sentenced to death by pakistani military court in april on charges of espionage
Original summary: start pak denies india consular access to kulbhushan for 18th time end
Predicted summary: start pak not to pak pakistan over jadhav row end
Review: a 9 year old south african child has become only the third known case worldwide to show signs of aids virus long term remission without any drugs the child was given anti aids medicine for 10 months till the age of one which helped lower hiv levels from very high to the kid was then taken off drugs as part of trial
Original summary: start 9 year old found to control hiv without any treatment end
Predicted summary: start teen who was the first child sex in the age of 2017 end
Review: a 23 year old female finnish tourist was found dead at hotel in tamil nadu chennai on wednesday the police who retrieved drugs from the hotel room suspect that the woman died of drug overdose an investigation has been launched into the incident and the police are questioning the woman boyfriend
Original summary: start finnish tourist found dead in tn drug overdose suspected end
Predicted summary: start woman killed in car in car crash end
Review: us president donald trump has told his south korean counterpart moon jae in that his country is open to talks with north korea at the appropriate time under the right circumstances trump also assured jae in that there would be no us military action against north korea as long as south korea is having dialogue with the reclusive nation
Original summary: start us open to talk to north korea under right end
Predicted summary: start trump to be nuclear war with north korea end
Review: actress yami gautam has said rajkummar rao would have been amazing as the lead character vicky in the 2012 film vicky donor the character was originally portrayed by ayushmann khurrana who made his bollywood debut in the film while yami played the female lead when asked who would have played yami character well ayushmann had earlier taken bhumi pednekar name
Original summary: start rajkummar would have been amazing as vicky in vicky donor yami end
Predicted summary: start i will be part of film on film with rajkummar rao end
Review: an east german secret police identity card belonging to russian president vladimir putin when he was soviet spy has been found in police archives in the city of dresden the card was issued in 1985 when putin was mid ranking soviet spy stationed in dresden in communist east germany which was then under russian occupation
Original summary: start putin spy id card found in germany end
Predicted summary: start us prez putin putin putin end
Review: bangladesh pm sheikh hasina has accused myanmar of finding new excuses to delay the return of over lakh rohingya muslims who fled myanmar to enter bangladesh over the past year adding that under no would the refugees remain permanently in bangladesh hasina said already have 16 crore people in my country can take any other burden
Original summary: start myanmar delaying tactics blocking rohingya return desh end
Predicted summary: start pak pm may be held for rohingya violence end
Review: surat police has issued summons to former gujarat bjp vice president jayanti bhanushali in connection with rape case against him 21 year old girl from surat has accused him of raping her multiple times since november 2017 after promising to get her admitted to fashion designing institute bhanushali resigned from his position following the rape accusation
Original summary: start police ex gujarat bjp vice president in rape case end
Predicted summary: start bjp mla booked for raping woman in rape case end
Review: technology giant google will reportedly prioritise articles in the search results from those publications which are subscribed by users the company will also start sharing search data that show the users which are most likely to buy subscription this is part of the technology giant efforts to help media companies find and retain paying readers
Original summary: start google may prioritise stories for paid news subscribers end
Predicted summary: start google to launch feature in new york end
Review: india is model for peacefully resolving maritime disputes and strong provider of security us navy secretary richard spencer has said praising india for peacefully resolving maritime border dispute with the us in the indo pacific region the relationship between the us and india is based on our shared values and desire to preserve peace the us official added
Original summary: start india model for maritime disputes us end
Predicted summary: start india is the world biggest us president end
Review: the archaeological site rani ki vav that features on the rear of new ã¢ââ¹100 notes is unesco world heritage site in gujarat patan the 11th century site is which was built by the solanki dynasty queen as memorial for her deceased husband in 2016 it was awarded the cleanest iconic place in india title
Original summary: start what is the importance of rani ki featured on new ã¢ââ¹100 note end
Predicted summary: start indian origin woman gets its first ever end
Review: the us has accused china of pointing military grade blinding lasers at its pilots in djibouti in nearly 10 such incidents in the past few weeks two us military pilots suffered minor eye injuries in one case officials said while the us has military base in djibouti since 2001 china opened its first overseas base in the region last year
Original summary: start us accuses china of lasers at its pilots eyes end
Predicted summary: start china accuses us of missile in south china sea end
Review: a fake news update is being shared on whatsapp which claims that the government provides compensation in case of death due to accident it claims that if person dies in an accident and has been filing his income tax since the last three years the government will provide compensation equivalent to 10 times his average annual income
Original summary: start news about money from government in accidental deaths fake end
Predicted summary: start facebook to pay over fake news of fake news end
Review: sridevi starrer song hawa hawai from the 1987 film mr india has been recreated for vidya balan film tumhari sulu the vocals of kavita krishnamurthy who sang the original song have been retained and the new version has been recreated and programmed by tanishk bagchi directed by suresh triveni tumhari sulu is scheduled to release on november 17
Original summary: start hawa hawai recreated for tumhari sulu end
Predicted summary: start release date of akshay kumar starrer released end
Review: the us state department has approved the sale of 160 missiles to the united arab emirates for an estimated 2 billion over ã¢ââ¹12 800 crore the pentagon said on thursday describing uae as force for political stability in the middle east the pentagon stated this proposed sale will contribute to the foreign policy and national security of the united states
Original summary: start us approves sale of missiles worth ã¢ââ¹12 800 crore to uae end
Predicted summary: start us sanctions to buy nuclear weapons end
Review: iran would not comply with the fully illegal us sanctions and would not discuss the volume or destination of its oil exports amidst the sanctions iranian oil minister zanganeh said on thursday the us sanctions targeting iran key economic sectors like oil and gas shipping and banking were imposed following the us withdrawal from the 2015 iran nuclear deal
Original summary: start iran says it will not comply with illegal us sanctions end
Predicted summary: start iran to iran nuclear deal with iran end
Review: pornstar stormy daniels has been ordered to pay us president donald trump nearly 293 000 in legal fees and sanctions after her defamation suit against him was dismissed daniels alleges she was paid to hide her alleged affair with trump in 2016 she sued trump for defamation after he called her claims total con job in tweet
Original summary: start pornstar stormy daniels ordered to pay trump 000 end
Predicted summary: start trump son sues us for over us report end
Review: windies all rounder dwayne bravo compared team india and rcb captain virat kohli to footballer cristiano ronaldo admire the talent he has the passion and talent that he has for the sport and the way he plays bravo added notably ronaldo is champions league all time top goalscorer with 120 goals and is the leading scorer this season with 15 goals
Original summary: start virat kohli is cristiano ronaldo of cricket dwayne bravo end
Predicted summary: start kohli is the best indian team to play in ipl 2018 end
Review: singer ariana grande visited young children in hospital who were injured after blast at her concert in manchester uk in may the 23 year old singer presented t shirts and gifts to the young children ariana along with justin bieber miley cyrus and coldplay will perform on june to raise funds for the victims of the terrorist attack at the concert
Original summary: start ariana grande visits kids injured after blast at her concert end
Predicted summary: start video shows couple who killed in car accident end
Review: a former uber manager robert miller had reportedly warned uberã¢ââs executives about safety issues before the fatal self driving car crash in arizona in march ã¢ââthe cars are routinely in accidents resulting in damage this is not how we should be operating ã¢ââ miller had said he added ã¢ââseveral of the drivers appear to not have been properly vetted or trained ã¢ââ
Original summary: start ex uber exec warned staff before self driving crash report end
Predicted summary: start uber ceo sues us for over 100 million end
Review: in chhattisgarh bharatpur assembly constituency village there are only four voters with three belonging to the same family to reach the forest village situated 15 kilometres away from the main road six kilometre rocky path and river have to be crossed the election officials will reach the village day before and erect tent for the voters
Original summary: start chhattisgarh village has only voters from same family end
Predicted summary: start up in pictures of 10 000 in days in india end
Review: aimim president asaduddin owaisi has said that no beef ban in three northeast states which will go to polls next year reflects dual standards and hypocrisy in up bjp is doing appeasement of hindutva forces he added his remark came after the bjp clarified that it will not impose beef ban in northeastern states if voted to power
Original summary: start no beef ban in poll bound northeast shows owaisi end
Predicted summary: start will be allowed to be in us govt to fight end
Review: taking dig at bjp for its proposed rath yatra west bengal cm mamata banerjee on friday said that rath yatras are not carried out to kill people those who carry out yatras to kill common people indulge in yatras she added this comes after calcutta high court recently put stay on bjp rath yatra in the state
Original summary: start rath yatras are not carried out to kill people wb cm end
Predicted summary: start no one of muslims in india are not rss end
Review: a picture of russian mp natalya leaning against wall ahead of vladimir putin inauguration has gone viral reacting to the picture twitter user wrote current mood natalya other users tweeted am natalya at every party and maybe she was ordered to open and close the door
Original summary: start pic of mp leaning against wall before putin oath goes viral end
Predicted summary: start video shows russian prez putin in us end
Review: actor purab kohli has said that the release of films on fridays is like the appraisal period for actors just like employees in corporate offices have their annual appraisal period where they get nervous about what will happen with their salaries we actors too feel the first friday said purab he added that ultimately audience is the best judge
Original summary: start friday releases are like period for actors end
Predicted summary: start i am not the film of the film industry end
Review: a new trailer of the hindi version of deadpool has been released where ranveer singh is seen voicing ryan reynolds who plays the lead role of deadpool bollywood desi cool is india deadpool tweeted fox star india the distributors of the film in india earlier another trailer had been released which featured different voice artiste
Original summary: start ranveer singh voices reynolds in deadpool hindi version end
Predicted summary: start trailer of the film released end
Review: the archaeological survey of india asi has banned selfie sticks at its 46 site museums across the country including taj museum in agra and indian war memorial museum in delhi further the visitors would need to get permission for using flash multiple lenses and carrying large photography bags 15 days before the scheduled visit
Original summary: start selfie sticks banned at 46 site across india end
Predicted summary: start india first indian army to get its own end
Review: indian commodity exchange on monday commenced operations as the world first diamond futures exchange indian manufacturers most require this type of financial product md prasad said adding shall offer fair transparent and nationwide market bringing in large market participation into diamond trade the exchange is backed by companies like reliance capital and
Original summary: start worldã¢ââs 1st diamond futures exchange starts trading in india end
Predicted summary: start india 1st indian firm to become world largest end
Review: the us state department on thursday imposed sanctions against the chinese military for buying russian sukhoi su 35 fighter jets and s 400 air defence missile systems the us also blacklisted 33 people and entities associated with the russian intelligence china has called on the us to withdraw the sanctions or bear the consequences
Original summary: start us sanctions china for buying russian jets missiles end
Predicted summary: start us approves us sanctions on trade war with china end
Review: the mumbai police on wednesday cancelled the conditional licences granted to the three of the city dance bars for failing to comply with fire safety norms the establishments didn respond to repeated show cause notices and failed to produce the required documents during the hearing police said in 2016 the supreme court had permitted these bars to operate after the ban
Original summary: start licence of mumbai only three dance bars cancelled end
Predicted summary: start police police to get police station in schools end
Review: congress president rahul gandhi has told party leaders to strengthen the organisation in such fashion that it is not candidate but the party that will fight the election party mp pl punia claimed state incharges have been told to identify the strong and weak booths while special attention is being given to shakti congress interaction platform for workers
Original summary: start rahul told us party will fight polls not candidate cong mp end
Predicted summary: start bjp should not be bjp leader on rahul gandhi end
Review: japanese conglomerate softbank is still considering if it should sell its reported 20 22 stake in indian e commerce startup flipkart to walmart as per reports softbank ceo masayoshi son will take call in the next 7 10 days the reports added softbank which invested 2 5 billion in flipkart last year would sell the stake for 4 billion reports had earlier suggested
Original summary: start softbank still considering selling flipkart stake reports end
Predicted summary: start flipkart may buy flipkart stake in flipkart reports end
Review: the bjp has crossed the halfway mark leading in 112 seats in the ongoing karnataka assembly elections for 222 electoral constituencies while the congress is leading in 56 seats the jd has secured lead in 38 seats the voting for two constituencies was postponed due to candidate demise in jayanagar and the voter id row in rr nagar
Original summary: start bjp crosses halfway mark leads in 112 seats in taka polls end
Predicted summary: start bjp mlas in gujarat assembly polls in 2019 polls end
Review: kartik aaryan is coming today for live interactive session on huawei mate 20 pro touted in media reports as the king of smartphones at ambience mall gurugram pm onwards first buyer will get chance to the smartphone with kartik as while the next will get to take group selfie with him
Original summary: start kartik aaryan comes to flag off offline strategy end
Predicted summary: start tv show features on cover of india end
Review: days after bjp and its allies sealed seat sharing arrangement in bihar ahead of 2019 lok sabha polls bjp mp gopal narayan singh said like all over india in bihar also our main brand is pm narendra modiji claiming both jd and bjp are equal partners singh added if bjp needs cm nitish kumar then he also needs bjp
Original summary: start pm modi is our main brand in bihar says bjp mp end
Predicted summary: start bjp will be bjp if bjp alliance with bjp end
Review: a mysterious carcass washed ashore an indonesian island suspected of being giant or elephant has been identified by experts us based biologist believes the bloodied creature was decomposing whale other scientists agreed that the remains of plates most likely belonged to the whale indonesia marine authorities said to carry sample to confirm its identity
Original summary: start dead sea washed up on indonesian coast identified end
Predicted summary: start scientists find human species in japan end
Review: a farmer in karnataka has filed complaint against bjp workers for allegedly destroying his farmland for constructing helipad for party president amit shah he claimed the bjp workers had entered the land forcefully and abused him when he questioned them bjp spokesperson prakash said that any high handedness by local leaders will be looked into
Original summary: start farmer claims bjp workers destroyed land for shah end
Predicted summary: start bjp mla booked for bjp leader in taka end
Review: the supreme court has banned all construction activities in maharashtra madhya pradesh uttarakhand and chandigarh till october the interim order came after the states and the union territory failed to comply with the court order to come up with policy on solid waste management the attitude of the states union territories is pathetic to say the least the court said
Original summary: start sc bans construction in maharashtra mp uttarakhand end
Predicted summary: start sc orders centre to centre over liquor ban on end
Review: mumbai based startup taxi fabric co founder has been accused by his former colleague swapna nair of sending her ck pics even after she asked him not to send them he repeatedly kept talking about how my dark skin would make him hard instantly said swapna tried to change the conversation but it was impossible without his ck interrupting she added
Original summary: start taxi fabric founder accused of sending ck pics to ex colleague end
Predicted summary: start twitter users slam uk for sexual harassment claims end
Review: after passenger who travelled in air india newark mumbai flight complained of bed bugs in their business class seats the airline claimed may have happened due to the current weather conditions apologising for the inconvenience air india also offered to refund 75 of the passenger fare the passenger had tweeted photo of bite marks all over her arm
Original summary: start air india blames weather for bed bugs in business class end
Predicted summary: start air india flight to be held for air india flight end
Review: the bombay high court on monday accepted maharashtra government proposal that it wouldn allow children below 14 years of age to participate in the dahi handi festival which witnesses several injuries to youngsters however the bench refused to impose any restriction on the height of human pyramid formations during the festival notably since 2014 children below 18 years weren allowed
Original summary: start hc accepts maha govt 14 yrs age restriction for dahi handi end
Predicted summary: start hc orders centre to stay in schools in schools end
Review: the rajasthan high court has issued notice to the centre asking why condom advertisements cannot be shown on tv between am and 10 pm the court was hearing petition filed by nonprofit organisation which has called the order arbitrary and meaningless earlier this month the ministry had issued an advisory restricting timings of condom ads
Original summary: start raj hc issues notice to centre over condom ads restrictions end
Predicted summary: start sc asks centre to ban on govt order on end
Review: an 80 year old woman threw coins into the engine of plane at shanghai airport for good luck on tuesday all 150 passengers were evacuated and the flight was delayed for nearly six hours after one of the nine coins she threw entered the engine the woman has been exempted from jail because she is aged over 70 the police said
Original summary: start woman throws coins into plane engine for luck delays flight end
Predicted summary: start woman falls off plane to fly after plane crash end
Review: actress margot best known for playing the role of lois lane in the superman film franchise of the 1970s and 1980s passed away on sunday at the age of 69 an autopsy will be performed to determine the cause of her death margot who suffered from car accident in 1990 had experienced long term mental health issues
Original summary: start actress who portrayed lane in superman film passes away end
Predicted summary: start actor who was the best actor in the world end
Review: zaheer khan fiancãâ actor sagarika ghatge posted picture on instagram of the couple in which zaheer new clean look sagarika captioned the picture came back home to this stranger doing rather well cricketers including ravindra jadeja hardik pandya rohit sharma and ajinkya rahane also posted selfies on instagram earlier
Original summary: start zaheer broke the beard rather well says fiancãâ sagarika end
Predicted summary: start sonam shares pic with his daughter wedding end
Review: for the first time bcci anti corruption unit has restricted cricketers from meeting outsiders in private during the two ipl matches on may 10 and 13 in kanpur players will only be allowed to interact with outsiders in the presence of the hotel manager in the lobby additionally phone records of players and franchise owners would be checked daily
Original summary: start players barred from meeting outsiders during ipl end
Predicted summary: start bcci to play in ipl 2018 for ipl 2018 end
Review: kareena kapoor has said that shah rukh khan is india biggest romantic hero when you go to any corner of the world when you say india the first thing that comes to your mind is shah rukh khan she added kareena further said he has touched people lives with his roles his passion his genuine love and his charm
Original summary: start shah rukh khan is india biggest romantic hero kareena end
Predicted summary: start i am the world most important for world bank end
Review: aligarh mayor mohammed on wednesday confessed that he did not remember the national anthem completely but that he respected it listen to the national anthem every day stand and pay respect to it he added reporters had asked to recite the national anthem after addressing his first press conference after assuming charge as the city mayor
Original summary: start don remember the national anthem completely aligarh mayor end
Predicted summary: start women should be allowed to women in the world cup end
Review: reacting to kylie jenner naming her baby stormi webster twitter user wrote if ever have child ll be naming it north chicago storm weather forecast commented another user while referring to north and chicago the daughters of kylie half sister kim kardashian another tweet read it wasn going to be called something boring like brenda
Original summary: start will name kid tweets user on kylie naming kid end
Predicted summary: start twitter users to be made to us tweets user on end
Review: the union home ministry has warned that threat to prime minister narendra modi is at an all time high the home ministry has laid down security protocol which includes even ministers and officers to be cleared by special protection group spg staff before they can come close to pm modi this comes after maharashtra police seized documents mentioning rajiv gandhi type incident
Original summary: to pm at all time high even ministers need clearance to meet him end
Predicted summary: start govt to give centre to centre on rafale deal end
Review: the pakistan cricket board welcomed ab de villiers to the pakistan super league with tweet that read the goat from south africa is now part of psl warm welcome to the former south african international announced his arrival with video captioned it time for so there going to be party in february
Original summary: start goat now part of psl pcb welcomes ab de villiers to psl end
Predicted summary: start pak captain imran khan to be imran khan end
Review: us senator elizabeth warren has proposed new wealth tax which would cost the world richest person jeff bezos 4 1 billion in the first year warren proposed wealth tax of 2 on americans with assets above 50 million and 3 on all fortunes above 1 billion almost all of bezos 137 1 billion wealth is tied up in amazon stock
Original summary: start us senator proposes new tax that would cost bezos in first year end
Predicted summary: start us richest firm to become world largest firm end
Review: producers of john abraham and diana penty starrer parmanu the story of pokhran have announced its release date as march clashing with anushka sharma pari there are 000 theatres so it not about clashing for me it like bringing two films of different genres together the co producer of both the films prernaa arora said
Original summary: start john starrer parmanu to clash with anushka pari end
Predicted summary: start teaser of rajinikanth starrer starrer released end
Review: a nasa backed study has found tremendous amounts of soot lofted into air from global wildfires following the dinosaur killing asteroid strike 66 million years ago would have plunged earth into darkness for nearly two years this would have shut down photosynthesis and drastically cooled the planet leading to mass extinction of three of species on earth back then the study added
Original summary: start dino killing asteroid might have brought years of end
Predicted summary: start nasa shares pic of earth from space after being hit by end
Review: terror attacks persist in jammu and kashmir despite prime minister narendra modi threats to pakistan for being lenient towards terror outfits an editorial in shiv sena mouthpiece saamana has said refuting the centre assurance regarding the situation being under control at the india pakistan border the editorial said the area was still
Original summary: start terror attacks in despite pm modi threat sena end
Predicted summary: start pm modi is the country to pak pm imran khan end
Review: nagaland under 19 girls team was dismissed for two runs in 17 overs while playing against kerala in the ongoing bcci women under 19 one day league on friday nagaland opener top scored for the team with run while the other run came off an extra kerala hit boundary on the first ball winning with 299 balls to spare
Original summary: start nagaland team all out for runs rivals win on 1st ball end
Predicted summary: start women beat pakistan to win their first ever win in ipl end
Review: the cemetery in city slovenia has unveiled tombstones with 48 inch interactive screens that can show pictures videos and other digital content the company claims the tombstones are weather proof and cannot be vandalised the tombstones which cost ã¢ââ¬3 000 ã¢ââ¹2 lakh each activate their sensors when someone stands near them
Original summary: start cemetery gets digital that can play videos end
Predicted summary: start scientists make how can be used to make end
Review: terming congress allegations of corruption in the rafale deal as manufactured union minister of state for defence subhash bhamre on thursday said don try to fool people as they are educated enough he added there have been numerous scams during congress rule and the pm modi led government made sure there were no big scams in these last years
Original summary: start don try to fool people govt to congress over rafale deal end
Predicted summary: start govt should be out of india govt on rafale deal end
Review: indo canadian youtube star lilly singh also known as superwoman said if you watch something and it makes you laugh great if it doesn that comedy isn wrong it just not suitable for you calling comedy very lilly added the best thing you can do is promote what you love and not bash what you hate
Original summary: start if it doesn make you laugh that comedy isn wrong end
Predicted summary: start i am not the best actor on the film end
Review: scientists have discovered the fossils of an elephant sized mammal that lived alongside dinosaurs during the triassic period about 200 million years ago named the four legged creature belonged to group of mammal like reptiles called we think itã¢ââs one of the most unexpected fossil discoveries from the triassic of europe one of the scientists said
Original summary: start fossils of like reptile that lived with dinosaurs found end
Predicted summary: start scientists find how did human species end
Review: the german parliament lower house has approved draft law that partially bans the full face islamic veil for public servants including election officials military and judicial staff while at work the law however allows exceptions such as for health workers protecting themselves against infections or police officers concealing their identity it will now go to the upper house for approval
Original summary: start german parliament approves partial ban on islamic veil end
Predicted summary: start un bans new york of rohingya muslims end
Review: manchester united players including paul pogba marouane fellaini and juan mata met game of thrones actors john bradley west and joe during the team pre season training camp in los angeles john who plays and joe who plays also met josãâ mourinho and posed with united europa league trophy john sported the team away jersey during the interaction
Original summary: start manchester united players meet game of thrones actors in usa end
Predicted summary: start fans slam game for thrones to play football match end
Review: former apple executive neha rastogi who accused her husband and ceo abhishek gattani of domestic abuse has disclosed audio recordings of gattani abusing and beating her in court statement rastogi said he hit me multiple times on my face arms head belly and abused me gattani was allowed plea agreement and faces 30 days in jail
Original summary: start ex apple engineer says husband abused hit her several times end
Predicted summary: start apple ceo denies sexual harassment claims end
Review: former ufc fighter tim hague has passed away aged 34 after suffering knockout in boxing fight the canadian mixed martial artist was taken to hospital on friday after being knocked out by adam but died on sunday hague fought 34 mixed martial arts contests in his career including five ufc fights and held an overall 21 13 record
Original summary: start former ufc fighter dies after in boxing fight end
Predicted summary: start former former footballer dies after being hit by end
Review: rape convict ram rahim singh adopted daughter honeypreet insan has denied claims of having sexual relationship with her father canã¢âât father put his hand on his head isnã¢âât the father daughter relation sacred relation she questioned honeypreet who is on haryana most wanted list further denied claims of absconding after ram rahim conviction
Original summary: start can father touch daughter head asks honeypreet end
Predicted summary: start rape accused of rape victim to be rahim end
Review: talking about modern startup investing silicon valley investor has said we are in the middle of an enormous multi kind of ponzi scheme adding that the investors pressurise the startups to do well he also said they investors aren people writing cheques out of their own balance sheet these are people doing job with other people money
Original summary: start startup investing is ponzi scheme silicon valley investor end
Predicted summary: start we are not the right to be microsoft ceo end
Review: former india cricketer sachin tendulkar paid surprise visit to child care centre in mumbai dressed as santa claus on the occasion of christmas on tuesday sachin spent time with the underprivileged children gave them gifts and also played cricket with them the joy on their innocent faces was just priceless he tweeted
Original summary: start sachin dresses up as santa claus surprises underprivileged kids end
Predicted summary: start sachin tendulkar meets his wedding in mumbai end
Review: four sisters were among the 20 killed when limousine crashed into another vehicle in new york in the deadliest transportation accident in the us since 2009 three of the sisters were accompanied by their husbands who also died in the accident you can wrap your head around such tragedy relative of the sisters said
Original summary: start 4 sisters among 20 killed in crash in new york end
Predicted summary: start 2 killed in us military crash in us end
Review: the madhya pradesh high court has said that the cases related to the vyapam scam will be heard by seven special courts in four major cities of the state earlier the cases were being heard in 16 special courts notably more than 40 people associated with the scam have died under mysterious circumstances since the story broke in 2013
Original summary: start vyapam scam cases to be heard by seven special courts hc end
Predicted summary: start delhi govt to probe into corruption case end
Review: cars24 has enabled car owners to sell their cars in less than hours with instant payment in their account the company also handles all paperwork including rc transfer cars24 has over 97 branches across 17 cities pan india and having bought over lakh cars in years it has become the market leader in used car transaction space
Original summary: start enables car owners to sell their cars in less than hours end
Predicted summary: start startup makes phone that can sell its own car end
Review: the hearing to decide the quantum of punishment to dera chief gurmeet ram rahim singh will take place through video conferencing on august 28 panchkula cbi court had convicted singh on charges of rape on friday as per reports the punishment can be jail term not less than seven years but may even extend to life imprisonment
Original summary: start ram rahim to get rape punishment through video conference end
Predicted summary: start ram rahim to be given to be held for sc end
Review: us defence research agency darpa has launched persistent aquatic living sensors program that will study the viability of using both natural and modified sea organisms to detect underwater vehicles the system would aim to translate the biological responses of sea life into usable data to warn ships without needing hardware that could alert the enemy of its detection
Original summary: start us military using sea life to spot threats end
Predicted summary: start us launches its first ever space station end
Review: kidambi srikanth and pv sindhu india top ranked male and female shuttlers respectively were knocked out of the bwf china open tournament in the quarterfinal stage on friday srikanth was beaten by japan kento momota in straight games while sindhu lost to china chen yufei with scoreline that read 11 21 21 11 15 21
Original summary: start sindhu srikanth both knocked out in china open end
Predicted summary: start india wins world cup final for 1st time in years end
Review: delhi daredevils batsman shreyas iyer said that coach ricky ponting speech on their first day of training gave the team members goosebumps iyer who also compared the coaching styles of former dd coach rahul dravid and ponting said dravid is calm and cool he likes to follow the process and nurture the kids
Original summary: start coach ponting speech gave us dd player iyer end
Predicted summary: start bcci should be given to play test cricket coach end
Review: a man in uttar pradesh lucknow built drone in hours to rescue puppy after he saw it drowning in drain the man who makes robots for living assembled an ai controlled robotic arm and attached it to the drone he said that initially he asked people for help but they advised him to let it die
Original summary: start lucknow techie builds drone in hrs to save drowning puppy end
Predicted summary: start man falls into train to avoid it in car end
###Markdown
**Resources**
1. https://www.analyticsvidhya.com/blog/2019/06/comprehensive-guide-text-summarization-using-deep-learning-python/
2. https://www.analyticsvidhya.com/blog/2018/11/introduction-text-summarization-textrank-python/
3. https://towardsdatascience.com/understand-text-summarization-and-create-your-own-summarizer-in-python-b26a9f09fc70
4. https://github.com/aravindpai/How-to-build-own-text-summarizer-using-deep-learning
###Code
summary = pd.read_csv('/kaggle/input/news-summary/news_summary.csv', encoding='iso-8859-1')
raw = pd.read_csv('/kaggle/input/news-summary/news_summary_more.csv', encoding='iso-8859-1')
# news_summary_more.csv already provides (headlines, text) pairs
pre1 = raw.iloc[:,0:2].copy()
# pre1['head + text'] = pre1['headlines'].str.cat(pre1['text'], sep =" ")

# news_summary.csv: stitch author, date, read_more, text and ctext into a single source text
pre2 = summary.iloc[:,0:6].copy()
pre2['text'] = pre2['author'].str.cat(pre2['date'].str.cat(pre2['read_more'].str.cat(pre2['text'].str.cat(pre2['ctext'], sep = " "), sep =" "),sep= " "), sep = " ")
pre = pd.DataFrame()
pre['text'] = pd.concat([pre1['text'], pre2['text']], ignore_index=True)
pre['summary'] = pd.concat([pre1['headlines'],pre2['headlines']],ignore_index = True)
pre.head(2)
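# Illustrative check (not part of the original notebook): how many
# (text, summary) pairs the merged dataset contains.
print("total records:", len(pre))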
###Output
_____no_output_____
###Markdown
**Seq2Seq LSTM Modelling**
###Code
#Seq2Seq LSTM (an attention layer could optionally be added on top, e.g. via keras-self-attention)
#pip install keras-self-attention
pre['text'][:10]
###Output
_____no_output_____
###Markdown
> **Perform Data Cleansing**
###Code
import re
#Removes non-alphabetic characters:
def text_strip(column):
    for row in column:
        #ORDER OF REGEX IS VERY VERY IMPORTANT!!!!!!
        row=re.sub("(\\t)", ' ', str(row)).lower() #remove escape characters
        row=re.sub("(\\r)", ' ', str(row)).lower()
        row=re.sub("(\\n)", ' ', str(row)).lower()
        row=re.sub("(__+)", ' ', str(row)).lower()   #remove _ if it occurs more than one time consecutively
        row=re.sub("(--+)", ' ', str(row)).lower()   #remove - if it occurs more than one time consecutively
        row=re.sub("(~~+)", ' ', str(row)).lower()   #remove ~ if it occurs more than one time consecutively
        row=re.sub("(\+\++)", ' ', str(row)).lower() #remove + if it occurs more than one time consecutively
        row=re.sub("(\.\.+)", ' ', str(row)).lower() #remove . if it occurs more than one time consecutively
        row=re.sub(r"[<>()|&©ø\[\]\'\",;?~*!]", ' ', str(row)).lower() #remove <>()|&©ø"',;?~*!
        row=re.sub("(mailto:)", ' ', str(row)).lower() #remove mailto:
        row=re.sub(r"(\\x9\d)", ' ', str(row)).lower() #remove \x9* in text
        row=re.sub("([iI][nN][cC]\d+)", 'INC_NUM', str(row)).lower() #replace INC nums with INC_NUM
        row=re.sub("([cC][mM]\d+)|([cC][hH][gG]\d+)", 'CM_NUM', str(row)).lower() #replace CM# and CHG# with CM_NUM
        row=re.sub("(\.\s+)", ' ', str(row)).lower() #remove full stop at end of words (not between)
        row=re.sub("(\-\s+)", ' ', str(row)).lower() #remove - at end of words (not between)
        row=re.sub("(\:\s+)", ' ', str(row)).lower() #remove : at end of words (not between)
        row=re.sub("(\s+.\s+)", ' ', str(row)).lower() #remove any single characters hanging between 2 spaces
        #Replace any url such as https://abc.xyz.net/browse/sdf-5327 ====> abc.xyz.net
        try:
            url = re.search(r'((https*:\/*)([^\/\s]+))(.[^\s]+)', str(row))
            repl_url = url.group(3)
            row = re.sub(r'((https*:\/*)([^\/\s]+))(.[^\s]+)',repl_url, str(row))
        except:
            pass #there might be emails with no url in them
        row = re.sub("(\s+)",' ',str(row)).lower() #remove multiple spaces
        #Should always be last
        row=re.sub("(\s+.\s+)", ' ', str(row)).lower() #remove any single characters hanging between 2 spaces
        yield row
brief_cleaning1 = text_strip(pre['text'])
brief_cleaning2 = text_strip(pre['summary'])
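# Illustrative sanity check (not part of the original pipeline): pull a single
# record through the text_strip generator to eyeball the cleaning before the
# much slower spaCy pass below.
print(next(text_strip(pre['text'][:1])))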
from time import time
import spacy
nlp = spacy.load('en', disable=['ner', 'parser']) # disable NER and the dependency parser for speed (newer spaCy releases name this model 'en_core_web_sm')
#Taking advantage of spaCy .pipe() method to speed-up the cleaning process:
#If data loss seems to be happening (i.e. len(text) = 50 instead of 75, etc.) in this cell, decrease the batch_size parameter
t = time()
#Batch the data points into 5000 and run on all cores for faster preprocessing
text = [str(doc) for doc in nlp.pipe(brief_cleaning1, batch_size=5000, n_threads=-1)]
#Takes 7-8 mins
print('Time to clean up everything: {} mins'.format(round((time() - t) / 60, 2)))
#Taking advantage of spaCy .pipe() method to speed-up the cleaning process:
t = time()
#Batch the data points into 5000 and run on all cores for faster preprocessing
summary = ['_START_ '+ str(doc) + ' _END_' for doc in nlp.pipe(brief_cleaning2, batch_size=5000, n_threads=-1)]
#Takes 7-8 mins
print('Time to clean up everything: {} mins'.format(round((time() - t) / 60, 2)))
text[0]
summary[0]
pre['cleaned_text'] = pd.Series(text)
pre['cleaned_summary'] = pd.Series(summary)
text_count = []
summary_count = []
for sent in pre['cleaned_text']:
    text_count.append(len(sent.split()))
for sent in pre['cleaned_summary']:
    summary_count.append(len(sent.split()))
graph_df= pd.DataFrame()
graph_df['text']=text_count
graph_df['summary']=summary_count
import matplotlib.pyplot as plt
graph_df.hist(bins = 5)
plt.show()
#Check what % of summaries have 0-15 words
cnt=0
for i in pre['cleaned_summary']:
    if(len(i.split())<=15):
        cnt=cnt+1
print(cnt/len(pre['cleaned_summary']))
#Check what % of texts have 0-100 words
cnt=0
for i in pre['cleaned_text']:
    if(len(i.split())<=100):
        cnt=cnt+1
print(cnt/len(pre['cleaned_text']))
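# Illustrative alternative to the hard-coded thresholds (not in the original
# notebook): read the cut-offs off the length distribution directly.
print("95th percentile text length   :", graph_df['text'].quantile(0.95))
print("95th percentile summary length:", graph_df['summary'].quantile(0.95))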
#The model will summarize texts of up to 100 words into summaries of up to 15 words
max_text_len=100
max_summary_len=15
#Select the Summaries and Text within the max lengths defined above
import numpy as np  # needed for np.array here and np.delete later

cleaned_text = np.array(pre['cleaned_text'])
cleaned_summary = np.array(pre['cleaned_summary'])
short_text=[]
short_summary=[]
for i in range(len(cleaned_text)):
    if(len(cleaned_summary[i].split())<=max_summary_len and len(cleaned_text[i].split())<=max_text_len):
        short_text.append(cleaned_text[i])
        short_summary.append(cleaned_summary[i])
post_pre=pd.DataFrame({'text':short_text,'summary':short_summary})
post_pre.head(2)
#Add the sostok and eostok tokens at the start and end of each summary
post_pre['summary'] = post_pre['summary'].apply(lambda x : 'sostok '+ x + ' eostok')
post_pre.head(2)
###Output
_____no_output_____
###Markdown
**SEQ2SEQ MODEL BUILDING**

Split the data into TRAIN and VALIDATION sets
###Code
from sklearn.model_selection import train_test_split
x_tr,x_val,y_tr,y_val=train_test_split(np.array(post_pre['text']),np.array(post_pre['summary']),test_size=0.1,random_state=0,shuffle=True)
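# Quick illustrative check (not in the original notebook): confirm the 90/10
# train/validation split sizes produced above.
print("train samples:", x_tr.shape[0], " validation samples:", x_val.shape[0])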
#Let's tokenize the text to get the vocab count; spaCy could be used here as well
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
#prepare a tokenizer for reviews on training data
x_tokenizer = Tokenizer()
x_tokenizer.fit_on_texts(list(x_tr))
###Output
Using TensorFlow backend.
###Markdown
**RARE WORD ANALYSIS FOR X i.e. 'text'**
* tot_cnt gives the size of the vocabulary (i.e. every unique word in the text)
* cnt gives the number of rare words whose count falls below the threshold
* tot_cnt - cnt gives the number of the most common words that are kept
###Code
thresh=4
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in x_tokenizer.word_counts.items():
    tot_cnt=tot_cnt+1
    tot_freq=tot_freq+value
    if(value<thresh):
        cnt=cnt+1
        freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for reviews on training data
x_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
x_tokenizer.fit_on_texts(list(x_tr))
#convert text sequences into integer sequences (each word is replaced by its integer id)
x_tr_seq = x_tokenizer.texts_to_sequences(x_tr)
x_val_seq = x_tokenizer.texts_to_sequences(x_val)
#padding zero upto maximum length
x_tr = pad_sequences(x_tr_seq, maxlen=max_text_len, padding='post')
x_val = pad_sequences(x_val_seq, maxlen=max_text_len, padding='post')
#size of vocabulary ( +1 for padding token)
x_voc = x_tokenizer.num_words + 1
print("Size of vocabulary in X = {}".format(x_voc))
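# Illustrative peek (an addition, not part of the captured output below): a few
# (id, word) pairs from the fitted tokenizer; lower ids correspond to more frequent words.
print(list(x_tokenizer.index_word.items())[:5])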
###Output
Size of vocabulary in X = 33412
###Markdown
**RARE WORD ANALYSIS FOR Y i.e. 'summary'**
* tot_cnt gives the size of the vocabulary (i.e. every unique word in the summaries)
* cnt gives the number of rare words whose count falls below the threshold
* tot_cnt - cnt gives the number of the most common words that are kept
###Code
#prepare a tokenizer for the summaries on training data
y_tokenizer = Tokenizer()
y_tokenizer.fit_on_texts(list(y_tr))
thresh=6
cnt=0
tot_cnt=0
freq=0
tot_freq=0
for key,value in y_tokenizer.word_counts.items():
    tot_cnt=tot_cnt+1
    tot_freq=tot_freq+value
    if(value<thresh):
        cnt=cnt+1
        freq=freq+value
print("% of rare words in vocabulary:",(cnt/tot_cnt)*100)
print("Total Coverage of rare words:",(freq/tot_freq)*100)
#prepare a tokenizer for the summaries on training data
y_tokenizer = Tokenizer(num_words=tot_cnt-cnt)
y_tokenizer.fit_on_texts(list(y_tr))
#convert summary sequences into integer sequences (each word is replaced by its integer id)
y_tr_seq = y_tokenizer.texts_to_sequences(y_tr)
y_val_seq = y_tokenizer.texts_to_sequences(y_val)
#padding zero upto maximum length
y_tr = pad_sequences(y_tr_seq, maxlen=max_summary_len, padding='post')
y_val = pad_sequences(y_val_seq, maxlen=max_summary_len, padding='post')
#size of vocabulary
y_voc = y_tokenizer.num_words +1
print("Size of vocabulary in Y = {}".format(y_voc))
###Output
Size of vocabulary in Y = 11581
###Markdown
We will now remove the summaries (Y, in both the train and validation sets) that contain only the start and end tokens, i.e. effectively empty summaries, together with their corresponding texts.
###Code
ind=[]
for i in range(len(y_tr)):
    cnt=0
    for j in y_tr[i]:
        if j!=0:
            cnt=cnt+1
    if(cnt==2):
        ind.append(i)
y_tr=np.delete(y_tr,ind, axis=0)
x_tr=np.delete(x_tr,ind, axis=0)
ind=[]
for i in range(len(y_val)):
    cnt=0
    for j in y_val[i]:
        if j!=0:
            cnt=cnt+1
    if(cnt==2):
        ind.append(i)
y_val=np.delete(y_val,ind, axis=0)
x_val=np.delete(x_val,ind, axis=0)
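# Illustrative check (not in the original notebook): how many sequence pairs
# remain after dropping summaries that contained only the start/end tokens.
print("remaining train pairs:", x_tr.shape[0], " validation pairs:", x_val.shape[0])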
from keras import backend as K
import gensim
from numpy import *
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense, Concatenate, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping
import warnings
pd.set_option("display.max_colwidth", 200)
warnings.filterwarnings("ignore")
print("Size of vocabulary from the w2v model = {}".format(x_voc))
K.clear_session()
latent_dim = 300
embedding_dim=200
# Encoder
encoder_inputs = Input(shape=(max_text_len,))
#embedding layer
enc_emb = Embedding(x_voc, embedding_dim,trainable=True)(encoder_inputs)
#encoder lstm 1
encoder_lstm1 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output1, state_h1, state_c1 = encoder_lstm1(enc_emb)
#encoder lstm 2
encoder_lstm2 = LSTM(latent_dim,return_sequences=True,return_state=True,dropout=0.4,recurrent_dropout=0.4)
encoder_output2, state_h2, state_c2 = encoder_lstm2(encoder_output1)
#encoder lstm 3
encoder_lstm3=LSTM(latent_dim, return_state=True, return_sequences=True,dropout=0.4,recurrent_dropout=0.4)
encoder_outputs, state_h, state_c= encoder_lstm3(encoder_output2)
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
#embedding layer
dec_emb_layer = Embedding(y_voc, embedding_dim,trainable=True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True,dropout=0.4,recurrent_dropout=0.2)
decoder_outputs,decoder_fwd_state, decoder_back_state = decoder_lstm(dec_emb,initial_state=[state_h, state_c])
#dense layer
decoder_dense = TimeDistributed(Dense(y_voc, activation='softmax'))
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.summary()
model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1,patience=2)
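# Optional extension (not used in the original run; the file name is hypothetical):
# a ModelCheckpoint callback would also persist the best weights so the trained
# model can be reloaded later without re-training, e.g.
#   from tensorflow.keras.callbacks import ModelCheckpoint
#   mc = ModelCheckpoint('summarizer_best.h5', monitor='val_loss', mode='min',
#                        save_best_only=True, verbose=1)
# and then pass callbacks=[es, mc] to model.fit below.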
###Output
_____no_output_____
###Markdown
**Start fitting the model to the data**
###Code
history=model.fit([x_tr,y_tr[:,:-1]], y_tr.reshape(y_tr.shape[0],y_tr.shape[1], 1)[:,1:] ,epochs=50,callbacks=[es],batch_size=128, validation_data=([x_val,y_val[:,:-1]], y_val.reshape(y_val.shape[0],y_val.shape[1], 1)[:,1:]))
###Output
Train on 88517 samples, validate on 9836 samples
Epoch 1/50
88517/88517 [==============================] - 429s 5ms/sample - loss: 5.0780 - val_loss: 4.7440
Epoch 2/50
88517/88517 [==============================] - 418s 5ms/sample - loss: 4.6248 - val_loss: 4.4052
Epoch 3/50
88517/88517 [==============================] - 416s 5ms/sample - loss: 4.3215 - val_loss: 4.1795
Epoch 4/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 4.1256 - val_loss: 4.0277
Epoch 5/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.9889 - val_loss: 3.9205
Epoch 6/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.8782 - val_loss: 3.8323
Epoch 7/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 3.7849 - val_loss: 3.7577
Epoch 8/50
88517/88517 [==============================] - 416s 5ms/sample - loss: 3.7057 - val_loss: 3.6956
Epoch 9/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 3.6399 - val_loss: 3.6538
Epoch 10/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.5814 - val_loss: 3.6122
Epoch 11/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 3.5281 - val_loss: 3.5727
Epoch 12/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.4786 - val_loss: 3.5425
Epoch 13/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 3.4315 - val_loss: 3.5075
Epoch 14/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 3.3871 - val_loss: 3.4756
Epoch 15/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 3.3451 - val_loss: 3.4456
Epoch 16/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.3073 - val_loss: 3.4204
Epoch 17/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 3.2720 - val_loss: 3.4078
Epoch 18/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 3.2413 - val_loss: 3.3854
Epoch 19/50
88517/88517 [==============================] - 416s 5ms/sample - loss: 3.2117 - val_loss: 3.3672
Epoch 20/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 3.1830 - val_loss: 3.3594
Epoch 21/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 3.1546 - val_loss: 3.3387
Epoch 22/50
88517/88517 [==============================] - 415s 5ms/sample - loss: 3.1270 - val_loss: 3.3299
Epoch 23/50
88517/88517 [==============================] - 412s 5ms/sample - loss: 3.1007 - val_loss: 3.3107
Epoch 24/50
88517/88517 [==============================] - 412s 5ms/sample - loss: 3.0755 - val_loss: 3.3052
Epoch 25/50
88517/88517 [==============================] - 411s 5ms/sample - loss: 3.0562 - val_loss: 3.2930
Epoch 26/50
88517/88517 [==============================] - 409s 5ms/sample - loss: 3.0374 - val_loss: 3.2885
Epoch 27/50
88517/88517 [==============================] - 410s 5ms/sample - loss: 3.0191 - val_loss: 3.2698
Epoch 28/50
88517/88517 [==============================] - 410s 5ms/sample - loss: 3.0018 - val_loss: 3.2640
Epoch 29/50
88517/88517 [==============================] - 410s 5ms/sample - loss: 2.9838 - val_loss: 3.2614
Epoch 30/50
88517/88517 [==============================] - 411s 5ms/sample - loss: 2.9696 - val_loss: 3.2493
Epoch 31/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.9553 - val_loss: 3.2444
Epoch 32/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.9437 - val_loss: 3.2459
Epoch 33/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.9336 - val_loss: 3.2389
Epoch 34/50
88517/88517 [==============================] - 412s 5ms/sample - loss: 2.9238 - val_loss: 3.2413
Epoch 35/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.9126 - val_loss: 3.2296
Epoch 36/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.8987 - val_loss: 3.2272
Epoch 37/50
88517/88517 [==============================] - 411s 5ms/sample - loss: 2.8868 - val_loss: 3.2318
Epoch 38/50
88517/88517 [==============================] - 411s 5ms/sample - loss: 2.8730 - val_loss: 3.2142
Epoch 39/50
88517/88517 [==============================] - 412s 5ms/sample - loss: 2.8599 - val_loss: 3.2121
Epoch 40/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.8482 - val_loss: 3.2088
Epoch 41/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 2.8362 - val_loss: 3.2009
Epoch 42/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 2.8247 - val_loss: 3.1948
Epoch 43/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 2.8125 - val_loss: 3.1916
Epoch 44/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 2.8004 - val_loss: 3.1873
Epoch 45/50
88517/88517 [==============================] - 413s 5ms/sample - loss: 2.7681 - val_loss: 3.1801
Epoch 48/50
88517/88517 [==============================] - 412s 5ms/sample - loss: 2.7575 - val_loss: 3.1739
Epoch 49/50
88517/88517 [==============================] - 410s 5ms/sample - loss: 2.7497 - val_loss: 3.1694
Epoch 50/50
88517/88517 [==============================] - 414s 5ms/sample - loss: 2.7386 - val_loss: 3.1677
###Markdown
**Visualize the model learning**
###Code
from matplotlib import pyplot
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
###Output
_____no_output_____
###Markdown
**Next, let's build the dictionaries to convert indices back to words for the target and source vocabularies:**
###Code
reverse_target_word_index=y_tokenizer.index_word
reverse_source_word_index=x_tokenizer.index_word
target_word_index=y_tokenizer.word_index
# Encode the input sequence to get the feature vector
encoder_model = Model(inputs=encoder_inputs,outputs=[encoder_outputs, state_h, state_c])
# Decoder setup
# Below tensors will hold the states of the previous time step
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_hidden_state_input = Input(shape=(max_text_len,latent_dim))
# Get the embeddings of the decoder sequence
dec_emb2= dec_emb_layer(decoder_inputs)
# To predict the next word in the sequence, set the initial states to the states from the previous time step
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=[decoder_state_input_h, decoder_state_input_c])
# A dense softmax layer to generate prob dist. over the target vocabulary
decoder_outputs2 = decoder_dense(decoder_outputs2)
# Final decoder model
decoder_model = Model(
[decoder_inputs] + [decoder_hidden_state_input,decoder_state_input_h, decoder_state_input_c],
[decoder_outputs2] + [state_h2, state_c2])
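# Optional (illustrative, file names are hypothetical): persist the inference
# models so summaries can be generated later without rebuilding the graph.
#   encoder_model.save('encoder_inference.h5')
#   decoder_model.save('decoder_inference.h5')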
###Output
_____no_output_____
###Markdown
**Below we define a function that implements the inference (greedy decoding) process**
###Code
def decode_sequence(input_seq):
    # Encode the input as state vectors.
    e_out, e_h, e_c = encoder_model.predict(input_seq)

    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1,1))

    # Populate the first word of target sequence with the start word.
    target_seq[0, 0] = target_word_index['sostok']

    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_seq] + [e_out, e_h, e_c])

        # Sample a token
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_token = reverse_target_word_index[sampled_token_index]

        if(sampled_token!='eostok'):
            decoded_sentence += ' '+sampled_token

        # Exit condition: either hit max length or find stop word.
        if (sampled_token == 'eostok' or len(decoded_sentence.split()) >= (max_summary_len-1)):
            stop_condition = True

        # Update the target sequence (of length 1).
        target_seq = np.zeros((1,1))
        target_seq[0, 0] = sampled_token_index

        # Update internal states
        e_h, e_c = h, c

    return decoded_sentence
###Output
_____no_output_____
###Markdown
**Let us define the functions to convert an integer sequence to a word sequence for summary as well as the reviews:**
###Code
def seq2summary(input_seq):
    newString=''
    for i in input_seq:
        if((i!=0 and i!=target_word_index['sostok']) and i!=target_word_index['eostok']):
            newString=newString+reverse_target_word_index[i]+' '
    return newString

def seq2text(input_seq):
    newString=''
    for i in input_seq:
        if(i!=0):
            newString=newString+reverse_source_word_index[i]+' '
    return newString
###Output
_____no_output_____
###Markdown
**Run the model over the data to see the results**
###Code
for i in range(0,100):
    print("Review:",seq2text(x_tr[i]))
    print("Original summary:",seq2summary(y_tr[i]))
    print("Predicted summary:",decode_sequence(x_tr[i].reshape(1,max_text_len)))
    print("\n")
###Output
Review: pope francis on tuesday called for respect for each ethnic group in speech delivered in myanmar avoiding reference to the rohingya minority community as the nation works to restore peace the healing of wounds must be priority he said the pope myanmar visit comes amid the country military crackdown resulting in the rohingya refugee crisis
Original summary: start pope avoids mention of rohingyas in key myanmar speech end
Predicted summary: start pope urges un to give speech on rohingya refugees end
Review: students of government school in uttar pradesh sambhal were seen washing dishes at in school premises on being approached basic shiksha adhikari virendra pratap singh said yes have also received this complaint from elsewhere we are inquiring and action will be taken against those found guilty
Original summary: start students seen washing dishes at govt school in up end
Predicted summary: start school students fall ill after being raped by up school end
Review: apple india profit surged by 140 in 2017 18 to crore compared to ã¢ââ¹373 crore in the previous fiscal the indian unit of the us based company posted 12 growth in revenue last fiscal at ã¢ââ¹13 crore apple share of the indian smartphone market dropped to 1 in the second quarter of 2018 according to counterpoint research
Original summary: start apple india profit rises 140 to nearly ã¢ââ¹900 crore in fy18 end
Predicted summary: start apple profit rises to ã¢ââ¹1 crore in march quarter end
Review: uber has launched its electric scooter service in santa monica us at 1 to unlock and then 15 cents per minute to ride it comes after uber acquired the bike sharing startup jump for reported amount of 200 million uber said it is branding the scooters with jump for the sake of consistency for its other personal electric vehicle services
Original summary: start uber launches electric scooter service in us at 1 per ride end
Predicted summary: start uber launches 1 2 million in electric car startup end
Review: around 80 people were injured in accidents related to kite flying during celebrations of makar sankranti in rajasthan jaipur officials said the victims included those who fell while flying kites and those injured by glass coated kite string officials added meanwhile around 100 birds were reported to be injured by between january 13 and 15
Original summary: start 80 people injured in flying related accidents in jaipur end
Predicted summary: start 1 killed injured in rajasthan due to heavy rains end
Review: uk entrepreneur richard browning has announced the launch of his startup gravity which has created flight jet powered suit that will be priced at about ã¢ââ¹1 3 crore the suit has custom built exoskeleton with six attached micro jet engines fuelled by kerosene from backpack browning claims the can travel at speed of up to 450 kmph
Original summary: start startup makes ã¢ââ¹1 3 crore jet powered flying suit end
Predicted summary: start startup launches ã¢ââ¹1 crore to make flying taxis end
Review: andhra pradesh chief minister chandrababu naidu on monday announced that his government will provide 100 units free power to most backward classes he added that the government would also give aid of up to ã¢ââ¹15 lakh to backward classes for foreign education we will spread out the poverty eradication program under pro basis he further said n
Original summary: start most backward classes to get 100 units free power andhra cm end
Predicted summary: start andhra to get 100 000 free from cm yogi adityanath end
Review: taking dig at pm modi congress president rahul gandhi tweeted while our pm around his garden making yoga videos india leads afghanistan syria in rape violence against women this comes after thomson reuters foundation survey declared india as world most dangerous country for women pm modi shared video of himself doing yoga and other exercises last week
Original summary: start pm modi makes yoga videos while india leads in rape rahul end
Predicted summary: start pm modi wishes rahul on women in india end
Review: external affairs minister sushma swaraj on saturday called upon the united nations to pass the comprehensive convention on international terrorism to end pakistan sponsored terrorism proposed by india in 1996 aims to arrive at universal definition of terrorism ban all terror groups prosecute terrorists under special laws and make cross border terrorism an offence
Original summary: start india calls on un to pass global anti terror convention end
Predicted summary: start un asks india to pak to help terrorism terrorism end
Review: the 23 richest indians in the 500 member bloomberg billionaires index saw wealth erosion of 21 billion this year lakshmi mittal who controls the world largest steelmaker arcelormittal lost 5 6 billion or 29 of his net worth followed by sun pharma founder dilip shanghvi whose wealth declined 4 6 billion asia richest person mukesh ambani added 4 billion to his fortune
Original summary: start lakshmi mittal lost 10 bn in 2018 ambani added 4 bn end
Predicted summary: start indian economy richest man loses 50 billion in one day end
Review: the haryana police have arrested 19 year old for killing 75 year old woman after attempting rape on her in village in the state the accused gagged the victim with scarf and hit brick on her head when she shouted for help the teenager mother then washed the blood from their house and outside to save him
Original summary: start teen kills lady after rape attempt mother cleans blood to save him end
Predicted summary: start haryana man kills self for raping killing her end
Review: a in pakistan has been jailed for 24 years for blackmailing and harassing nearly 200 lady doctors and nurses he introduced himself as military intelligence official according to the complainant who filed the case he was arrested in 2015 for hacking the whatsapp accounts of lady doctors blackmailing them with objectionable content and extorting money from them
Original summary: start pak jailed for harassing 200 lady doctors nurses end
Predicted summary: start pak doctor jailed for 30 yrs for stealing end
Review: a doctor at andhra pradesh government hospital allegedly attacked the hospital superintendent with an injection containing hiv infected blood while the attempt to inject the infected blood was thwarted he managed to spray the blood on the senior dress the accused who said he only wanted to scare the superintendent was reportedly upset over being reprimanded by him
Original summary: start andhra doc tries to senior with hiv infected blood end
Predicted summary: start andhra hospital doctors surgery with hiv blood end
Review: after congress vice president rahul gandhi slammed pm narendra modi while talking at the university of california party leader anand sharma backed him saying it justified to condemn prime minister in democracy if they bjp leaders think we ll ask them before making any statement they are wrong they should be prepared for this kind of attacks he added
Original summary: start pm justified in democracy cong on rahul remark end
Predicted summary: start pm modi should be used to remove cong leader end
Review: former england cricket team spinner ashley giles will succeed former captain andrew strauss as the managing director of england men cricket in january the ecb has confirmed tom harrison the chief executive described giles as the candidate amongst very strong field giles was part of england 2005 ashes winning team
Original summary: start appointed director of cricket end
Predicted summary: start ex england captain to be charged with icc end
Review: a case has been registered against raja chauhan for firing gunshots in violence during bharat bandh called by dalit groups in madhya pradesh gwalior on monday video showing chauhan alleged to be bjp worker firing during the protest had surfaced online of the 12 people killed during the nationwide protests at least three were from gwalior
Original summary: start case filed man who fired in dalit protests end
Predicted summary: start fir against cop for refusing to visit sabarimala end
Review: there are believed to be 80 different ways to spell the name of english playwright william shakespeare including and shakespeare is known to have signed his name using variations such as and due to no proper documentation april 23 is regarded as his birth date and april 23 as his death date
Original summary: start there are 80 recorded ways to spell shakespeare end
Predicted summary: start how did the name of the name of the year end
Review: actor shahid kapoor while speaking about ranveer singh portrayal of alauddin khilji in the film padmaavat said he would have played the character differently we are two different actors and our style of acting is different he added earlier ranveer had said he would have played shahid character in the film better than shahid
Original summary: start i would have played khilji differently shahid kapoor end
Predicted summary: start shahid will be made in padmaavat shahid kapoor end
Review: minors cannot opt out of aadhaar after turning 18 year old since it not permissible under the aadhaar act uidai has told the supreme court they can lock their biometrics permanently and can unlock it temporarily if needed for biometric authentication further the sc was told the biometrics failure rate at the national level was 6 for fingerprints and 8 54 for iris
Original summary: start minors can opt out of aadhaar after turning 18 uidai end
Predicted summary: start aadhaar can be used for not aadhaar linking uidai end
Review: producer ekta kapoor slammed karni sena threat that they will cut deepika padukone nose over the row on padmavati and asked aren we getting these jailed for open threats and attacks in the tweet she also wrote about an incident wherein people threw bottles at her at an event in jaipur over her tv series akbar
Original summary: start aren those threatening deepika getting jailed asks ekta end
Predicted summary: start deepika slams padmavati for not wearing hijab in padmavati end
Review: talking about being ranked 7th on forbes list of world highest paid actors 2018 akshay kumar said it feels good but never take these lists too seriously they keep changing like seasons he further said the idea is only to do good better and best work but of course forever grateful when things like these come my way
Original summary: start feels good akshay on being 7th highest paid actor end
Predicted summary: start i have never paid more than akshay on nepotism end
Review: an indian origin couple was killed by their daughter ex boyfriend in an apparent revenge crime in the us on friday the 24 year old suspect mirza fatally shot naren prabhu silicon valley tech executive and his wife in san jose the police called the swat team after stand off with who was later killed after bullet hit him
Original summary: start indian origin couple killed in revenge crime in us end
Predicted summary: start indian origin woman shot dead in us end
Review: a 64 year old father and his 35 year old daughter flew their last flight together as british airways pilots on thursday david said he felt mixed emotions at retiring and was glad his daughter kat would continue their legacy the two had flown together numerous times and kat said she believed their father daughter bond helped them in the cockpit
Original summary: start father and daughter fly last flight together as ba pilots end
Predicted summary: start father daughter flight makes baby harassed by her end
Review: pakistan chief selector inzamam ul haq nephew imam ul haq collided with wicketkeeper brien and kane while completing single off ireland first ever test delivery on saturday debutant imam fell flat on his back after his head into brien hip while trying to make his crease uncle would ve just both fielders user tweeted reacting to the incident
Original summary: start imam collides with two on ireland first test ball end
Predicted summary: start shoaib akhtar was once hit by his 1st ball in the end
Review: reacting to apple launch event on wednesday several users took to twitter saying dual sim they are really going after indians here tweet read the iphone xs max will come in smaller model ipad mini while another mocked weakening rupee against dollar saying apple watch series can detect fall someone make the rupee wear it
Original summary: start dual sim they re going after indians twitter on new iphones end
Predicted summary: start apple india tweets user on iphone with end
Review: pakistan on sunday rejected india request for consular access to kulbhushan jadhav for the time saying he is not an ordinary citizen pakistan foreign office claimed that he was sent by an indian intelligence agency and that he killed several innocent pakistanis jadhav was sentenced to death by pakistani military court in april on charges of espionage
Original summary: start pak denies india consular access to kulbhushan for 18th time end
Predicted summary: start pak rejects india plea for jadhav to jadhav end
Review: a 9 year old south african child has become only the third known case worldwide to show signs of aids virus long term remission without any drugs the child was given anti aids medicine for 10 months till the age of one which helped lower hiv levels from very high to the kid was then taken off drugs as part of trial
Original summary: start 9 year old found to control hiv without any treatment end
Predicted summary: start teen who raped 10 yr old in korea end
Review: a 23 year old female finnish tourist was found dead at hotel in tamil nadu chennai on wednesday the police who retrieved drugs from the hotel room suspect that the woman died of drug overdose an investigation has been launched into the incident and the police are questioning the woman boyfriend
Original summary: start finnish tourist found dead in tn drug overdose suspected end
Predicted summary: start tn tourist found dead at hotel in tamil nadu end
Review: us president donald trump has told his south korean counterpart moon jae in that his country is open to talks with north korea at the appropriate time under the right circumstances trump also assured jae in that there would be no us military action against north korea as long as south korea is having dialogue with the reclusive nation
Original summary: start us open to talk to north korea under right end
Predicted summary: start trump will open to korea if korea is new chief end
Review: actress yami gautam has said rajkummar rao would have been amazing as the lead character vicky in the 2012 film vicky donor the character was originally portrayed by ayushmann khurrana who made his bollywood debut in the film while yami played the female lead when asked who would have played yami character well ayushmann had earlier taken bhumi pednekar name
Original summary: start rajkummar would have been amazing as vicky in vicky donor yami end
Predicted summary: start rajkummar rao to star in rajkummar rao end
Review: an east german secret police identity card belonging to russian president vladimir putin when he was soviet spy has been found in police archives in the city of dresden the card was issued in 1985 when putin was mid ranking soviet spy stationed in dresden in communist east germany which was then under russian occupation
Original summary: start putin spy id card found in germany end
Predicted summary: start german spy putin caught with putin end
Review: bangladesh pm sheikh hasina has accused myanmar of finding new excuses to delay the return of over lakh rohingya muslims who fled myanmar to enter bangladesh over the past year adding that under no would the refugees remain permanently in bangladesh hasina said already have 16 crore people in my country can take any other burden
Original summary: start myanmar delaying tactics blocking rohingya return desh end
Predicted summary: start bangladesh accuses myanmar of rohingya return to myanmar end
Review: surat police has issued summons to former gujarat bjp vice president jayanti bhanushali in connection with rape case against him 21 year old girl from surat has accused him of raping her multiple times since november 2017 after promising to get her admitted to fashion designing institute bhanushali resigned from his position following the rape accusation
Original summary: start police ex gujarat bjp vice president in rape case end
Predicted summary: start gujarat police release ex bjp leader in rape case end
Review: technology giant google will reportedly prioritise articles in the search results from those publications which are subscribed by users the company will also start sharing search data that show the users which are most likely to buy subscription this is part of the technology giant efforts to help media companies find and retain paying readers
Original summary: start google may prioritise stories for paid news subscribers end
Predicted summary: start google to report on users search results end
Review: india is model for peacefully resolving maritime disputes and strong provider of security us navy secretary richard spencer has said praising india for peacefully resolving maritime border dispute with the us in the indo pacific region the relationship between the us and india is based on our shared values and desire to preserve peace the us official added
Original summary: start india model for maritime disputes us end
Predicted summary: start india us has been an indian american model end
Review: the archaeological site rani ki vav that features on the rear of new ã¢ââ¹100 notes is unesco world heritage site in gujarat patan the 11th century site is which was built by the solanki dynasty queen as memorial for her deceased husband in 2016 it was awarded the cleanest iconic place in india title
Original summary: start what is the importance of rani ki featured on new ã¢ââ¹100 note end
Predicted summary: start jacqueline launches new ã¢ââ¹100 crore club in delhi end
Review: the us has accused china of pointing military grade blinding lasers at its pilots in djibouti in nearly 10 such incidents in the past few weeks two us military pilots suffered minor eye injuries in one case officials said while the us has military base in djibouti since 2001 china opened its first overseas base in the region last year
Original summary: start us accuses china of lasers at its pilots eyes end
Predicted summary: start us accuses china of its troops in military end
Review: a fake news update is being shared on whatsapp which claims that the government provides compensation in case of death due to accident it claims that if person dies in an accident and has been filing his income tax since the last three years the government will provide compensation equivalent to 10 times his average annual income
Original summary: start news about money from government in accidental deaths fake end
Predicted summary: start fake news on whatsapp death case to help govt end
Review: sridevi starrer song hawa hawai from the 1987 film mr india has been recreated for vidya balan film tumhari sulu the vocals of kavita krishnamurthy who sang the original song have been retained and the new version has been recreated and programmed by tanishk bagchi directed by suresh triveni tumhari sulu is scheduled to release on november 17
Original summary: start hawa hawai recreated for tumhari sulu end
Predicted summary: start vidya balan starrer kedarnath song end
Review: the us state department has approved the sale of 160 missiles to the united arab emirates for an estimated 2 billion over ã¢ââ¹12 800 crore the pentagon said on thursday describing uae as force for political stability in the middle east the pentagon stated this proposed sale will contribute to the foreign policy and national security of the united states
Original summary: start us approves sale of missiles worth ã¢ââ¹12 800 crore to uae end
Predicted summary: start us approves sale of uae for arms sale to malaysia end
Review: iran would not comply with the fully illegal us sanctions and would not discuss the volume or destination of its oil exports amidst the sanctions iranian oil minister zanganeh said on thursday the us sanctions targeting iran key economic sectors like oil and gas shipping and banking were imposed following the us withdrawal from the 2015 iran nuclear deal
Original summary: start iran says it will not comply with illegal us sanctions end
Predicted summary: start iran won us sanctions against iran over sanctions end
Review: pornstar stormy daniels has been ordered to pay us president donald trump nearly 293 000 in legal fees and sanctions after her defamation suit against him was dismissed daniels alleges she was paid to hide her alleged affair with trump in 2016 she sued trump for defamation after he called her claims total con job in tweet
Original summary: start pornstar stormy daniels ordered to pay trump 000 end
Predicted summary: start pornstar pay for trump over fake news of lawsuit end
Review: windies all rounder dwayne bravo compared team india and rcb captain virat kohli to footballer cristiano ronaldo admire the talent he has the passion and talent that he has for the sport and the way he plays bravo added notably ronaldo is champions league all time top goalscorer with 120 goals and is the leading scorer this season with 15 goals
Original summary: start virat kohli is cristiano ronaldo of cricket dwayne bravo end
Predicted summary: start chris gayle kohli to be named after ronaldo end
Review: singer ariana grande visited young children in hospital who were injured after blast at her concert in manchester uk in may the 23 year old singer presented t shirts and gifts to the young children ariana along with justin bieber miley cyrus and coldplay will perform on june to raise funds for the victims of the terrorist attack at the concert
Original summary: start ariana grande visits kids injured after blast at her concert end
Predicted summary: start singer chris gayle steals kids shot dead in rajasthan end
Review: a former uber manager robert miller had reportedly warned uberã¢ââs executives about safety issues before the fatal self driving car crash in arizona in march ã¢ââthe cars are routinely in accidents resulting in damage this is not how we should be operating ã¢ââ miller had said he added ã¢ââseveral of the drivers appear to not have been properly vetted or trained ã¢ââ
Original summary: start ex uber exec warned staff before self driving crash report end
Predicted summary: start ex uber ceo claims self driving cars report end
Review: in chhattisgarh bharatpur assembly constituency village there are only four voters with three belonging to the same family to reach the forest village situated 15 kilometres away from the main road six kilometre rocky path and river have to be crossed the election officials will reach the village day before and erect tent for the voters
Original summary: start chhattisgarh village has only voters from same family end
Predicted summary: start only 10 lakh votes in up to have electricity end
Review: aimim president asaduddin owaisi has said that no beef ban in three northeast states which will go to polls next year reflects dual standards and hypocrisy in up bjp is doing appeasement of hindutva forces he added his remark came after the bjp clarified that it will not impose beef ban in northeastern states if voted to power
Original summary: start no beef ban in poll bound northeast shows owaisi end
Predicted summary: start no ban on cow slaughter ban in 2019 owaisi end
Review: taking dig at bjp for its proposed rath yatra west bengal cm mamata banerjee on friday said that rath yatras are not carried out to kill people those who carry out yatras to kill common people indulge in yatras she added this comes after calcutta high court recently put stay on bjp rath yatra in the state
Original summary: start rath yatras are not carried out to kill people wb cm end
Predicted summary: start no death toll in wb cm mamata end
Review: a picture of russian mp natalya leaning against wall ahead of vladimir putin inauguration has gone viral reacting to the picture twitter user wrote current mood natalya other users tweeted am natalya at every party and maybe she was ordered to open and close the door
Original summary: start pic of mp leaning against wall before putin oath goes viral end
Predicted summary: start video shows mp cm goes viral after being putin end
Review: actor purab kohli has said that the release of films on fridays is like the appraisal period for actors just like employees in corporate offices have their annual appraisal period where they get nervous about what will happen with their salaries we actors too feel the first friday said purab he added that ultimately audience is the best judge
Original summary: start friday releases are like period for actors end
Predicted summary: start film industry is the biggest film industry end
Review: a new trailer of the hindi version of deadpool has been released where ranveer singh is seen voicing ryan reynolds who plays the lead role of deadpool bollywood desi cool is india deadpool tweeted fox star india the distributors of the film in india earlier another trailer had been released which featured different voice artiste
Original summary: start ranveer singh voices reynolds in deadpool hindi version end
Predicted summary: start new trailer of ranveer singh starrer october released end
Review: the archaeological survey of india asi has banned selfie sticks at its 46 site museums across the country including taj museum in agra and indian war memorial museum in delhi further the visitors would need to get permission for using flash multiple lenses and carrying large photography bags 15 days before the scheduled visit
Original summary: start selfie sticks banned at 46 site across india end
Predicted summary: start indian museum banned from delhi museum end
Review: indian commodity exchange on monday commenced operations as the world first diamond futures exchange indian manufacturers most require this type of financial product md prasad said adding shall offer fair transparent and nationwide market bringing in large market participation into diamond trade the exchange is backed by companies like reliance capital and
Original summary: start worldã¢ââs 1st diamond futures exchange starts trading in india end
Predicted summary: start indian origin malaysia adds first ever to be valued at end
Review: the us state department on thursday imposed sanctions against the chinese military for buying russian sukhoi su 35 fighter jets and s 400 air defence missile systems the us also blacklisted 33 people and entities associated with the russian intelligence china has called on the us to withdraw the sanctions or bear the consequences
Original summary: start us sanctions china for buying russian jets missiles end
Predicted summary: start us sanctions against russia over military missiles end
Review: the mumbai police on wednesday cancelled the conditional licences granted to the three of the city dance bars for failing to comply with fire safety norms the establishments didn respond to repeated show cause notices and failed to produce the required documents during the hearing police said in 2016 the supreme court had permitted these bars to operate after the ban
Original summary: start licence of mumbai only three dance bars cancelled end
Predicted summary: start mumbai police arrest warrant against dog for end
Review: congress president rahul gandhi has told party leaders to strengthen the organisation in such fashion that it is not candidate but the party that will fight the election party mp pl punia claimed state incharges have been told to identify the strong and weak booths while special attention is being given to shakti congress interaction platform for workers
Original summary: start rahul told us party will fight polls not candidate cong mp end
Predicted summary: start rahul gandhi not to fight in polls congress mp end
Review: japanese conglomerate softbank is still considering if it should sell its reported 20 22 stake in indian e commerce startup flipkart to walmart as per reports softbank ceo masayoshi son will take call in the next 7 10 days the reports added softbank which invested 2 5 billion in flipkart last year would sell the stake for 4 billion reports had earlier suggested
Original summary: start softbank still considering selling flipkart stake reports end
Predicted summary: start softbank may sell flipkart stake in flipkart for 20 yrs end
Review: the bjp has crossed the halfway mark leading in 112 seats in the ongoing karnataka assembly elections for 222 electoral constituencies while the congress is leading in 56 seats the jd has secured lead in 38 seats the voting for two constituencies was postponed due to candidate demise in jayanagar and the voter id row in rr nagar
Original summary: start bjp crosses halfway mark leads in 112 seats in taka polls end
Predicted summary: start bjp crosses 100 seats in taka polls end
Review: kartik aaryan is coming today for live interactive session on huawei mate 20 pro touted in media reports as the king of smartphones at ambience mall gurugram pm onwards first buyer will get chance to the smartphone with kartik as while the next will get to take group selfie with him
Original summary: start kartik aaryan comes to flag off offline strategy end
Predicted summary: start katy perry reacts to live in music festival end
Review: days after bjp and its allies sealed seat sharing arrangement in bihar ahead of 2019 lok sabha polls bjp mp gopal narayan singh said like all over india in bihar also our main brand is pm narendra modiji claiming both jd and bjp are equal partners singh added if bjp needs cm nitish kumar then he also needs bjp
Original summary: start pm modi is our main brand in bihar says bjp mp end
Predicted summary: start bjp is in bihar bjp mp on india end
Review: a mysterious carcass washed ashore an indonesian island suspected of being giant or elephant has been identified by experts us based biologist believes the bloodied creature was decomposing whale other scientists agreed that the remains of plates most likely belonged to the whale indonesia marine authorities said to carry sample to confirm its identity
Original summary: start dead sea washed up on indonesian coast identified end
Predicted summary: start dead bodies of indonesia tsunami found in indonesia end
Review: a farmer in karnataka has filed complaint against bjp workers for allegedly destroying his farmland for constructing helipad for party president amit shah he claimed the bjp workers had entered the land forcefully and abused him when he questioned them bjp spokesperson prakash said that any high handedness by local leaders will be looked into
Original summary: start farmer claims bjp workers destroyed land for shah end
Predicted summary: start farmer uses own land to amit shah for karnataka temple end
Review: the supreme court has banned all construction activities in maharashtra madhya pradesh uttarakhand and chandigarh till october the interim order came after the states and the union territory failed to comply with the court order to come up with policy on solid waste management the attitude of the states union territories is pathetic to say the least the court said
Original summary: start sc bans construction in maharashtra mp uttarakhand end
Predicted summary: start sc bans construction of maharashtra mp uttarakhand cm end
Review: mumbai based startup taxi fabric co founder has been accused by his former colleague swapna nair of sending her ck pics even after she asked him not to send them he repeatedly kept talking about how my dark skin would make him hard instantly said swapna tried to change the conversation but it was impossible without his ck interrupting she added
Original summary: start taxi fabric founder accused of sending ck pics to ex colleague end
Predicted summary: start mumbai startup accused of molestation case end
Review: after passenger who travelled in air india newark mumbai flight complained of bed bugs in their business class seats the airline claimed may have happened due to the current weather conditions apologising for the inconvenience air india also offered to refund 75 of the passenger fare the passenger had tweeted photo of bite marks all over her arm
Original summary: start air india blames weather for bed bugs in business class end
Predicted summary: start air india flight delayed by may have been end
Review: the bombay high court on monday accepted maharashtra government proposal that it wouldn allow children below 14 years of age to participate in the dahi handi festival which witnesses several injuries to youngsters however the bench refused to impose any restriction on the height of human pyramid formations during the festival notably since 2014 children below 18 years weren allowed
Original summary: start hc accepts maha govt 14 yrs age restriction for dahi handi end
Predicted summary: start hc dismisses plea to school kids in maharashtra end
Review: the rajasthan high court has issued notice to the centre asking why condom advertisements cannot be shown on tv between am and 10 pm the court was hearing petition filed by nonprofit organisation which has called the order arbitrary and meaningless earlier this month the ministry had issued an advisory restricting timings of condom ads
Original summary: start raj hc issues notice to centre over condom ads restrictions end
Predicted summary: start hc notice to ban on fake news on rajasthan govt end
Review: an 80 year old woman threw coins into the engine of plane at shanghai airport for good luck on tuesday all 150 passengers were evacuated and the flight was delayed for nearly six hours after one of the nine coins she threw entered the engine the woman has been exempted from jail because she is aged over 70 the police said
Original summary: start woman throws coins into plane engine for luck delays flight end
Predicted summary: start woman stuck in flight gets stuck in prison end
Review: actress margot best known for playing the role of lois lane in the superman film franchise of the 1970s and 1980s passed away on sunday at the age of 69 an autopsy will be performed to determine the cause of her death margot who suffered from car accident in 1990 had experienced long term mental health issues
Original summary: start actress who portrayed lane in superman film passes away end
Predicted summary: start actress passes away at cannes end
Review: zaheer khan fiancãâ actor sagarika ghatge posted picture on instagram of the couple in which zaheer new clean look sagarika captioned the picture came back home to this stranger doing rather well cricketers including ravindra jadeja hardik pandya rohit sharma and ajinkya rahane also posted selfies on instagram earlier
Original summary: start zaheer broke the beard rather well says fiancãâ sagarika end
Predicted summary: start zaheer khan shares pic with his wedding reception end
Review: for the first time bcci anti corruption unit has restricted cricketers from meeting outsiders in private during the two ipl matches on may 10 and 13 in kanpur players will only be allowed to interact with outsiders in the presence of the hotel manager in the lobby additionally phone records of players and franchise owners would be checked daily
Original summary: start players barred from meeting outsiders during ipl end
Predicted summary: start players players for ipl 2018 at ipl 2018 end
Review: kareena kapoor has said that shah rukh khan is india biggest romantic hero when you go to any corner of the world when you say india the first thing that comes to your mind is shah rukh khan she added kareena further said he has touched people lives with his roles his passion his genuine love and his charm
Original summary: start shah rukh khan is india biggest romantic hero kareena end
Predicted summary: start i am the biggest indian tweets srk on big films end
Review: aligarh mayor mohammed on wednesday confessed that he did not remember the national anthem completely but that he respected it listen to the national anthem every day stand and pay respect to it he added reporters had asked to recite the national anthem after addressing his first press conference after assuming charge as the city mayor
Original summary: start don remember the national anthem completely aligarh mayor end
Predicted summary: start i don want to visit cow to make me owaisi end
Review: reacting to kylie jenner naming her baby stormi webster twitter user wrote if ever have child ll be naming it north chicago storm weather forecast commented another user while referring to north and chicago the daughters of kylie half sister kim kardashian another tweet read it wasn going to be called something boring like brenda
Original summary: start will name kid tweets user on kylie naming kid end
Predicted summary: start twitter users react to baby girl who trolled end
Review: the union home ministry has warned that threat to prime minister narendra modi is at an all time high the home ministry has laid down security protocol which includes even ministers and officers to be cleared by special protection group spg staff before they can come close to pm modi this comes after maharashtra police seized documents mentioning rajiv gandhi type incident
Original summary: to pm at all time high even ministers need clearance to meet him end
Predicted summary: start pm modi calls for security in 26 11 attacks end
Review: the pakistan cricket board welcomed ab de villiers to the pakistan super league with tweet that read the goat from south africa is now part of psl warm welcome to the former south african international announced his arrival with video captioned it time for so there going to be party in february
Original summary: start goat now part of psl pcb welcomes ab de villiers to psl end
Predicted summary: start shoaib akhtar calls for his third place in pakistan end
Review: us senator elizabeth warren has proposed new wealth tax which would cost the world richest person jeff bezos 4 1 billion in the first year warren proposed wealth tax of 2 on americans with assets above 50 million and 3 on all fortunes above 1 billion almost all of bezos 137 1 billion wealth is tied up in amazon stock
Original summary: start us senator proposes new tax that would cost bezos in first year end
Predicted summary: start world richest man bezos net worth tax on first day end
Review: producers of john abraham and diana penty starrer parmanu the story of pokhran have announced its release date as march clashing with anushka sharma pari there are 000 theatres so it not about clashing for me it like bringing two films of different genres together the co producer of both the films prernaa arora said
Original summary: start john starrer parmanu to clash with anushka pari end
Predicted summary: start john abraham starrer pad man to release in goa end
Review: a nasa backed study has found tremendous amounts of soot lofted into air from global wildfires following the dinosaur killing asteroid strike 66 million years ago would have plunged earth into darkness for nearly two years this would have shut down photosynthesis and drastically cooled the planet leading to mass extinction of three of species on earth back then the study added
Original summary: start dino killing asteroid might have brought years of end
Predicted summary: start asteroid could be shot at earth in years study end
Review: terror attacks persist in jammu and kashmir despite prime minister narendra modi threats to pakistan for being lenient towards terror outfits an editorial in shiv sena mouthpiece saamana has said refuting the centre assurance regarding the situation being under control at the india pakistan border the editorial said the area was still
Original summary: start terror attacks in despite pm modi threat sena end
Predicted summary: start terror attack in pak pm modi to terror attack end
Review: nagaland under 19 girls team was dismissed for two runs in 17 overs while playing against kerala in the ongoing bcci women under 19 one day league on friday nagaland opener top scored for the team with run while the other run came off an extra kerala hit boundary on the first ball winning with 299 balls to spare
Original summary: start nagaland team all out for runs rivals win on 1st ball end
Predicted summary: start kerala women beat to win over 50 runs in the first end
Review: the cemetery in city slovenia has unveiled tombstones with 48 inch interactive screens that can show pictures videos and other digital content the company claims the tombstones are weather proof and cannot be vandalised the tombstones which cost ã¢ââ¬3 000 ã¢ââ¹2 lakh each activate their sensors when someone stands near them
Original summary: start cemetery gets digital that can play videos end
Predicted summary: start video shows how to make it is end
Review: terming congress allegations of corruption in the rafale deal as manufactured union minister of state for defence subhash bhamre on thursday said don try to fool people as they are educated enough he added there have been numerous scams during congress rule and the pm modi led government made sure there were no big scams in these last years
Original summary: start don try to fool people govt to congress over rafale deal end
Predicted summary: start rafale deal not enough to rafale deal cong end
Review: indo canadian youtube star lilly singh also known as superwoman said if you watch something and it makes you laugh great if it doesn that comedy isn wrong it just not suitable for you calling comedy very lilly added the best thing you can do is promote what you love and not bash what you hate
Original summary: start if it doesn make you laugh that comedy isn wrong end
Predicted summary: start if you can you re like you re you re shahid end
Review: scientists have discovered the fossils of an elephant sized mammal that lived alongside dinosaurs during the triassic period about 200 million years ago named the four legged creature belonged to group of mammal like reptiles called we think itã¢ââs one of the most unexpected fossil discoveries from the triassic of europe one of the scientists said
Original summary: start fossils of like reptile that lived with dinosaurs found end
Predicted summary: start scientists find dinosaur era fossil shows how end
Review: the german parliament lower house has approved draft law that partially bans the full face islamic veil for public servants including election officials military and judicial staff while at work the law however allows exceptions such as for health workers protecting themselves against infections or police officers concealing their identity it will now go to the upper house for approval
Original summary: start german parliament approves partial ban on islamic veil end
Predicted summary: start germany bans use of parliament to migrants end
Review: manchester united players including paul pogba marouane fellaini and juan mata met game of thrones actors john bradley west and joe during the team pre season training camp in los angeles john who plays and joe who plays also met josãâ mourinho and posed with united europa league trophy john sported the team away jersey during the interaction
Original summary: start manchester united players meet game of thrones actors in usa end
Predicted summary: start manchester united players utd players play football match end
Review: former apple executive neha rastogi who accused her husband and ceo abhishek gattani of domestic abuse has disclosed audio recordings of gattani abusing and beating her in court statement rastogi said he hit me multiple times on my face arms head belly and abused me gattani was allowed plea agreement and faces 30 days in jail
Original summary: start ex apple engineer says husband abused hit her several times end
Predicted summary: start apple ceo accused of stealing wife harassed her end
Review: former ufc fighter tim hague has passed away aged 34 after suffering knockout in boxing fight the canadian mixed martial artist was taken to hospital on friday after being knocked out by adam but died on sunday hague fought 34 mixed martial arts contests in his career including five ufc fights and held an overall 21 13 record
Original summary: start former ufc fighter dies after in boxing fight end
Predicted summary: start former wwe passes away aged ferrari end
Review: rape convict ram rahim singh adopted daughter honeypreet insan has denied claims of having sexual relationship with her father canã¢âât father put his hand on his head isnã¢âât the father daughter relation sacred relation she questioned honeypreet who is on haryana most wanted list further denied claims of absconding after ram rahim conviction
Original summary: start can father touch daughter head asks honeypreet end
Predicted summary: start ram rahim daughter denies rape by daughter end
Review: talking about modern startup investing silicon valley investor has said we are in the middle of an enormous multi kind of ponzi scheme adding that the investors pressurise the startups to do well he also said they investors aren people writing cheques out of their own balance sheet these are people doing job with other people money
Original summary: start startup investing is ponzi scheme silicon valley investor end
Predicted summary: start we are startups in the app of the year end
Review: former india cricketer sachin tendulkar paid surprise visit to child care centre in mumbai dressed as santa claus on the occasion of christmas on tuesday sachin spent time with the underprivileged children gave them gifts and also played cricket with them the joy on their innocent faces was just priceless he tweeted
Original summary: start sachin dresses up as santa claus surprises underprivileged kids end
Predicted summary: start sachin tendulkar pays tribute to late child end
Review: four sisters were among the 20 killed when limousine crashed into another vehicle in new york in the deadliest transportation accident in the us since 2009 three of the sisters were accompanied by their husbands who also died in the accident you can wrap your head around such tragedy relative of the sisters said
Original summary: start 4 sisters among 20 killed in crash in new york end
Predicted summary: start 3 killed in crash in us end
Review: the madhya pradesh high court has said that the cases related to the vyapam scam will be heard by seven special courts in four major cities of the state earlier the cases were being heard in 16 special courts notably more than 40 people associated with the scam have died under mysterious circumstances since the story broke in 2013
Original summary: start vyapam scam cases to be heard by seven special courts hc end
Predicted summary: start cases of those who will be in jail in rajasthan hc end
Review: cars24 has enabled car owners to sell their cars in less than hours with instant payment in their account the company also handles all paperwork including rc transfer cars24 has over 97 branches across 17 cities pan india and having bought over lakh cars in years it has become the market leader in used car transaction space
Original summary: start enables car owners to sell their cars in less than hours end
Predicted summary: start toyota unveils car that can be sold for cars end
Review: the hearing to decide the quantum of punishment to dera chief gurmeet ram rahim singh will take place through video conferencing on august 28 panchkula cbi court had convicted singh on charges of rape on friday as per reports the punishment can be jail term not less than seven years but may even extend to life imprisonment
Original summary: start ram rahim to get rape punishment through video conference end
Predicted summary: start ram rahim to be in jail in ram rahim case end
Review: us defence research agency darpa has launched persistent aquatic living sensors program that will study the viability of using both natural and modified sea organisms to detect underwater vehicles the system would aim to translate the biological responses of sea life into usable data to warn ships without needing hardware that could alert the enemy of its detection
Original summary: start us military using sea life to spot threats end
Predicted summary: start us defence ministry reveals its first ever end
Review: kidambi srikanth and pv sindhu india top ranked male and female shuttlers respectively were knocked out of the bwf china open tournament in the quarterfinal stage on friday srikanth was beaten by japan kento momota in straight games while sindhu lost to china chen yufei with scoreline that read 11 21 21 11 15 21
Original summary: start sindhu srikanth both knocked out in china open end
Predicted summary: start sindhu reaches india open final for women silver at china end
Review: delhi daredevils batsman shreyas iyer said that coach ricky ponting speech on their first day of training gave the team members goosebumps iyer who also compared the coaching styles of former dd coach rahul dravid and ponting said dravid is calm and cool he likes to follow the process and nurture the kids
Original summary: start coach ponting speech gave us dd player iyer end
Predicted summary: start coach was the first ever coach of england coach end
Review: a man in uttar pradesh lucknow built drone in hours to rescue puppy after he saw it drowning in drain the man who makes robots for living assembled an ai controlled robotic arm and attached it to the drone he said that initially he asked people for help but they advised him to let it die
Original summary: start lucknow techie builds drone in hrs to save drowning puppy end
Predicted summary: start up man jumps to death after drone hits the theatres end
|
notebooks/session_05-record_linkage/probabilistic_record_linkage.ipynb | ###Markdown
Probabilistic Record Linkage Table of Contents- [Introduction](Introduction)- [Setup](Setup) - [Setup - Imports](Setup---Imports) - [Setup - Database connection](Setup---Database-connection) - [Data Definition](Data-Definition)- [String Comparators](String-Comparators)- [Fellegi-Sunter Record Linkage](Fellegi-Sunter-Record-Linkage) - [Value Comparison](Value-Comparison) - [Record Comparison](Record-Comparison)- [Appendix - String Comparators](Appendix---String-Comparators)- [References & Further Readings](References-&-Further-Readings) Introduction- Back to the [Table of Contents](Table-of-Contents)In this lesson we will learn the basic idea behind probabilistic record linkage. We will use two datasets for this example. The first will be a list of people culled from "`ildoc_admit`" and "`ildoc_exit`". The second is a set of wage records from "`il_wage`". Probabilistic record linkage is somewhat different from deterministic record linkage. It takes into account a wider range of potential identifiers, and those identifiers do not have to be unique, which is why this method is also known as fuzzy matching/merging. It is a method that uses properties of variables common to different datasets to determine the probability that two records refer to the same entity. Examples of the types of data items that might be compared in this method include gender, date of birth, age, and parts of a name. It computes weights for each identifier used in the linkage based on its estimated ability to correctly identify a match or a non-match. Then, using the estimated weights, a probability is calculated that two given records are the same entity. The analyst sets the threshold for this probability to determine when a pairing is defined as a match. Fellegi-Sunter ApproachThis is a popular method used in probabilistic record linkage. Let's walk through an example of how it works:- Let's assume each person's wage record matches to one person record in the inmate data and we have 100,000 inmates in our inmate data. Then the odds for a match at random are 1:99,999- M, the reliability, is the probability that a common variable agrees on a matched pair. Approx. 1 - error rate- U, the discriminating power, is the probability that a common variable agrees on an unmatched pair. Approx. the probability of agreeing by chance- If first name is the same: m=0.9, u=0.01, ratio: 90:1, this means that the odds for a match are now: 1:99,999 x 90:1 = 1:1,111- If last name is the same: m=0.9, u=0.04, ratio: 22:1, this means that the odds for a match are now: 1:1,111 x 22:1 = 1:51- And you can add as many variables as possible, such as sex, age, date of birth, etc., as long as they are in both datasets. Setup- Back to the [Table of Contents](Table-of-Contents) Setup - Imports- Back to [Table of Contents](Table-of-Contents)Before we start the Probabilistic Record Linkage example, we need to import the packages we will be using. Please run the following code cell:
###Code
# Importing the modules required in this workbook
import datetime
import jellyfish
import math
import numpy
import pandas
import re
import six
import sqlalchemy
import string
# Database connection packages - one or the other will be imported below:
import psycopg2
import psycopg2.extras
print( "imports loaded at " + str( datetime.datetime.now() ) )
###Output
_____no_output_____
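###Markdown
Before we connect to the database, here is a small numeric sketch of the odds-update arithmetic from the Fellegi-Sunter walkthrough above. The m and u values below are the illustrative numbers quoted in the introduction, not estimates derived from our data.
###Code
# Illustrative odds-update sketch for the Fellegi-Sunter example above.
# m and u are the example values from the introduction, not estimates from our data.
prior_odds = 1.0 / 99999.0 # one true match among 100,000 candidate records
# agreement on first name: m = 0.9, u = 0.01, likelihood ratio of 90:1
odds_after_first_name = prior_odds * 90.0
# agreement on last name: m = 0.9, u = 0.04, likelihood ratio of roughly 22:1
odds_after_last_name = odds_after_first_name * 22.0
print( "prior odds of a match: 1 to " + str( int( round( 1.0 / prior_odds ) ) ) )
print( "after first name agrees: 1 to " + str( int( round( 1.0 / odds_after_first_name ) ) ) )
print( "after last name also agrees: 1 to " + str( int( round( 1.0 / odds_after_last_name ) ) ) )
###Output
_____no_output_____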
###Markdown
Setup - Database connection- Back to [Table of Contents](Table-of-Contents)`Pandas` uses a database engine to connect to databases (via the `SQLAlchemy` Python package). In the code cell below we create a `SQLAlchemy` database engine connected to our class database server for Pandas to use.
###Code
# set up database credentials
pandas_db = None
db_host = "10.10.2.10"
db_database = "appliedda"
# Create database connection for pandas.
connection_string = "postgresql://" + db_host + "/" + db_database
pandas_db = sqlalchemy.create_engine( connection_string )
print( "sqlalchemy postgresql database engine created at " + str( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
Data Definition- Back to the [Table of Contents](Table-of-Contents)Before we begin the task of record linkage, it's important that we understand the variables in our data. In this workbook, we will take a cursory look at some of the values in our data and compute some simple statistics to ensure that the content makes sense. Begin by loading a subset of the data from "`person`" and "`il_wage`" into pandas data frames. After we load the two data sets, we call the `head` method on the first data set to examine the first few records.
###Code
# Load person data
person_df = pandas.read_sql( 'SELECT * FROM person LIMIT 1000;', con = pandas_db )
print( "person data loaded at " + str( datetime.datetime.now() ) )
# Load wage data
il_wage_df = pandas.read_sql( 'SELECT * FROM il_wage WHERE year = 2015 LIMIT 1000;', con = pandas_db )
print( "il_wage data loaded at " + str( datetime.datetime.now() ) )
# Don't forget to close the connection (or "dispose" for a SQLAlchemy engine).
pandas_db.dispose()
print( "pandas database engine dispose()-ed at " + str( datetime.datetime.now() ) )
###Output
_____no_output_____
###Markdown
Let's perform some quick summaries of the fields in the person data.To get a list of the unique values in a pandas column/Series, call the `.unique()` method on it - like `.value_counts()` from last assignment, only not sorted by frequency of use.
###Code
# Get the first few records from the person file.
person_df.head()
# print the distinct values in the birth_year, race, and sex columns
print("Distinct years = ", person_df["birth_year"].unique())
print("Distinct race = ", person_df["race"].unique())
print("Distinct sex = ", person_df["sex"].unique())
# Print the total number of rows and the number of rows with valid SSN,
# first name, middle name, and last name.
print( "Total rows = ", len( person_df ) )
ssn_hash = person_df[ "ssn_hash" ]
print( "Rows with valid SSN = " + str( len( ssn_hash[ ~ pandas.isnull( ssn_hash ) ] ) ) )
name_first_hash = person_df[ "name_first_hash" ]
print( "Rows with valid name_first_hash = " + str( len( name_first_hash[ ~ pandas.isnull( name_first_hash ) ] ) ) )
name_middle_hash = person_df[ "name_middle_hash" ]
print( "Rows with valid name_middle_hash = " + str( len( name_middle_hash[ ~ pandas.isnull( name_middle_hash ) ] ) ) )
name_last_hash = person_df[ "name_last_hash" ]
print( "Rows with valid name_last_hash = " + str( len( name_last_hash[ ~ pandas.isnull( name_last_hash ) ] ) ) )
###Output
_____no_output_____
###Markdown
Next we'll take a look at the second data set.
###Code
il_wage_df.head()
###Output
_____no_output_____
###Markdown
Let's perform some quick summaries of the fields in the wage data.
###Code
# Print the total number of rows and the number of rows with valid SSN,
# first name, middle name, and last name.
print( "Total rows = ", len( il_wage_df ) )
ssn = il_wage_df[ "ssn" ]
print( "Rows with valid SSN = " + str( len( ssn[ ~ pandas.isnull( ssn ) ] ) ) )
name_first = il_wage_df[ "name_first" ]
print( "Rows with valid name_first = " + str( len( name_first[ ~ pandas.isnull( name_first ) ] ) ) )
name_middle = il_wage_df[ "name_middle" ]
print( "Rows with valid name_middle = " + str( len( name_middle[ ~ pandas.isnull( name_middle ) ] ) ) )
name_last = il_wage_df[ "name_last" ]
print( "Rows with valid name_last = " + str( len( name_last[ ~ pandas.isnull( name_last ) ] ) ) )
###Output
_____no_output_____
###Markdown
Fellegi-Sunter Record Linkage- Back to the [Table of Contents](Table-of-Contents) Fellegi-Sunter Record Linkage is a probabilistic method that uses comparisons of fields that contain the same substantive types of data between records to calculate a weighted probability that records in different data sets refer to the same entity. Examples of the types of data items that might be compared in this method include gender, date of birth, age, and parts of a name. In this section we will "manually" perform the steps in Fellegi-Sunter record linkage. Our goal is to illustrate the Fellegi-Sunter algorithm by breaking it into bite-size pieces. In our example we will compare first names and last names using Jaro-Winkler distance. In the Fellegi-Sunter algorithm, the result of a field comparison is assumed to follow a multinomial distribution. That means it can only take on finitely many values. Therefore we will define a function that compares two strings and returns the value 2, 1, or 0 to indicate an exact match, a nearly exact match, or anything else. Value Comparison- Back to the [Table of Contents](Table-of-Contents)In this section, we will implement the value comparison stage of Fellegi-Sunter Record Linkage. You will implement a function named "`fuzzy_string_comparator`" that accepts two strings and returns one of the following match levels:- 2 - exact match- 1 - close match- 0 - not a match To assess whether the two strings passed in match, we convert both strings to capital letters, decode them into unicode, then calculate the Jaro-Winkler distance between the two strings (Jaro-Winkler distance is a fast-to-compute string distance based on common letters between two words). We then assign a match level based on where the resulting match score falls in the following ranges:- 2 - exact match - score greater than or equal to ( >= ) 0.92- 1 - close match - score less than 0.92 but greater than or equal to 0.85.- 0 - not a match - score less than 0.85. Finally, we'll return that match level. A function that implements this is below.
###Code
# Please complete the following function that tells us how different two input strings are.
# It returns a match level with value 2, 1 or 0 (larger value means higher similarity)
# Calculate Jaro-Winkler distance after converting two strings into capital characters.
# Please use these three criteria, >=0.92, >=0.85, <0.85, to determine match level.
def fuzzy_string_comparator( string_1_IN, string_2_IN ):
'''
string_1_IN : input string No.1
string_2_IN : input string No.2
'''
# return reference
match_level_OUT = -1
# Check if they are all strings
    if ( ( not isinstance( string_1_IN, six.string_types ) ) or ( not isinstance( string_2_IN, six.string_types ) ) ):

        # not two strings - return "not a match" right away rather than falling through.
        match_level_OUT = 0
        return match_level_OUT

    #-- END check to see if strings are actually strings. --#
### BEGIN SOLUTION
# declare variables
cleaned_string_1 = ""
cleaned_string_2 = ""
distance = -1
# convert strings to upper case, then to unicode
# string 1
cleaned_string_1 = string_1_IN.upper()
cleaned_string_1 = six.text_type( cleaned_string_1 )
# string 2
cleaned_string_2 = string_2_IN.upper()
cleaned_string_2 = six.text_type( cleaned_string_2 )
# Calculate Jaro-Winkler distance after converting two strings into capital characters.
distance = jellyfish.jaro_winkler( cleaned_string_1, cleaned_string_2 )
# According to different thresholds, return the match level
if distance >= 0.92:
match_level_OUT = 2
elif distance >= 0.85:
match_level_OUT = 1
else:
match_level_OUT = 0
#-- END conditional to set match level. --#
### END SOLUTION
return match_level_OUT
#-- END function fuzzy_string_comparator --#
print( "==> Defined function fuzzy_string_comparator() at " + str( datetime.datetime.now() ) + "." )
# Let's see how the fuzzy_string_comparator works
score1 = fuzzy_string_comparator( "joshua", "joshua" )
score2 = fuzzy_string_comparator( "joshua", "joshau" )
score3 = fuzzy_string_comparator( "joshua", "todd" )
print( "Match level for joshua-joshua: " + str( score1 ) )
print( "Match level for joshua-joshau: " + str( score2 ) )
print( "Match level for joshua-todd: " + str( score3 ) )
# tests for our grading program:
assert score1 == 2
assert score2 == 2
assert score3 == 0
###Output
_____no_output_____
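###Markdown
To see where those match levels come from, the optional cell below prints the raw Jaro-Winkler scores for the same three pairs of names. It is purely for inspection - the thresholds of 0.92 and 0.85 inside `fuzzy_string_comparator()` are what turn these raw scores into levels 2, 1, or 0.
###Code
# Peek at the raw Jaro-Winkler scores behind the match levels above.
for name_1, name_2 in [ ( "joshua", "joshua" ), ( "joshua", "joshau" ), ( "joshua", "todd" ) ]:

    # clean the names the same way fuzzy_string_comparator() does.
    raw_score = jellyfish.jaro_winkler( six.text_type( name_1.upper() ), six.text_type( name_2.upper() ) )
    print( name_1 + " - " + name_2 + ": " + str( raw_score ) )

#-- END loop over name pairs. --#
###Output
_____no_output_____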
###Markdown
The above function compares *text fields* in a record (other types of data would need different means of comparison). Next, we define a function that compares *records*. This record comparison function assumes that records will have the form of a tuple: (identifier, first name, last name). It returns a length 2 tuple that gives the result of applying a string comparator to the first name and to the last name._Note: Since our name values are hashed, we've replaced the fuzzy match call here with a simple equality test, since hash values being close together doesn't indicate that the names from which they were generated are close._
###Code
# Comparison_vector compare a pair of records, which consists of first name and last name.
# It returns a tuple with 2 match levels.
def compare_records( record_1_IN, record_2_IN ):
'''
record_1_IN : input record No.1
record_2_IN : input record No.2
'''
# return reference
results_OUT = None
# declare variables
value_1 = None
value_2 = None
field_1_match_level = -1
field_2_match_level = -1
# record_1_IN and record_2_IN have the form (id, first name, last name)
# m, n store the comparing outcomes of first name and last name.
value_1 = record_1_IN[ 1 ]
value_2 = record_2_IN[ 1 ]
if ( ( value_1 is not None ) and ( value_2 is not None ) ):
#field_1_match_level = fuzzy_string_comparator( value_1, value_2 )
# just check equality since hash.
if ( value_1 == value_2 ):
field_1_match_level = 2
else:
field_1_match_level = 0
#-- END check to see if equal or not. --#
else:
field_1_match_level = 0
#-- END check to see if empty values. --#
value_1 = record_1_IN[ 2 ]
value_2 = record_2_IN[ 2 ]
if ( ( value_1 is not None ) and ( value_2 is not None ) ):
#field_2_match_level = fuzzy_string_comparator( value_1, value_2 )
# just check equality since hash.
if ( value_1 == value_2 ):
field_2_match_level = 2
else:
field_2_match_level = 0
#-- END check to see if equal or not. --#
else:
field_2_match_level = 0
#-- END check to see if empty values. --#
results_OUT = ( field_1_match_level, field_2_match_level )
return results_OUT
#-- END function compare_records() --#
print( "==> Defined function compare_records() at " + str( datetime.datetime.now() ) + "." )
# Try out the compare_records function
print( compare_records( ( 1, "joshua", "tokle" ), ( 2, "joshua", "smith") ) )
print( compare_records( ( 3, "joshua", "tokle" ), ( 4, "josue", "tolke") ) )
###Output
_____no_output_____
###Markdown
Record Comparison- Back to the [Table of Contents](Table-of-Contents)Next, we'll work on implementing the section of Fellegi-Sunter Record Linkage that calculates a weighted probability that two records from different data sets refer to the same entity. Fellegi-Sunter Record Linkage uses two different sets of probabilities per pair of data items as weights in this step: m-weights and u-weights. For a given pair of data items that represent the same conceptual thing, for each match level:- An **m-weight** is the probability of seeing a particular match level if we assume that we are comparing two records that represent the same individual.- A **u-weight** is the probability of seeing a particular match level if we assume that we are comparing two records that do *not* represent the same individual. For example, thinking of probabilities when two records are the same (m-weights), if two records represent the same person, the first names and last names should match with high probability - match level 2. So, the m-weight for first name and last name having a match level of 2 when two records refer to the same person should be large. On the other hand, in the context of probabilities when two records are different (u-weights), suppose we had month of birth in our data set. The probability that two random individuals will have the same month of birth is about 1/12, so for records that are not the same person, we would assign a u-weight of about 1/12 to the birth month being identical (where for a field like social security number, the u-weight of two different people having the same social security number is essentially 0). Let's assign some preliminary and arbitrary m- and u-weights for first name and last name.- first name - m-weights: - match level 0: **_0.01_** (very unlikely the same person will have different first names) - match level 1: **_0.14_** (also pretty unlikely that first names for a person will be only mostly the same) - match level 2: **_0.85_** (very likely that a person's first names will match exactly) - u-weights: - match level 0: **_0.88_** (probability that different people will have different first names) - match level 1: **_0.10_** (probability that different people's first names will be mostly the same) - match level 2: **_0.02_** (probability that different people will have the same first name)- last name - m-weights: - match level 0: **_0.01_** (very unlikely the same person will have different last names) - match level 1: **_0.09_** (also pretty unlikely that last names for a person will be only mostly the same) - match level 2: **_0.90_** (very likely that a person's last names will match exactly) - u-weights: - match level 0: **_0.91_** (probability that different people will have different last names) - match level 1: **_0.08_** (probability that different people's last names will be mostly the same) - match level 2: **_0.01_** (probability that different people will have the same last name) In practice you would likely start with guesses or very general estimates like these, but then try to better estimate them by fitting them to a model or at least tweaking them after seeing preliminary output. In this case, we are just going to run with our initial estimates so we can move efficiently through this process. In the cell below, we create dictionaries that contain the m- and u-weights for our two columns.
###Code
# Make dictionaries to hold m_weights and u-weights. In each dictionary, the weights for
# a given field are mapped to a string label for that field ("first_name" and "last_name").
# Weights are captured in tuples of length 3, with the index in the tuple matching each of the
# match levels that can be returned by the fuzzy string comparator.
# In this tuple, we go from match level 0 (not the same), to match level 2 (identical)
# as we move from left to right in the tuple, with each position in the tuple holding the
# corresponding weight for that match level.
m_weights_dict = {}
m_weights_dict[ "first_name" ] = ( 0.01, 0.14, 0.85 ) # m-weights corresponding to first name
m_weights_dict[ "last_name" ] = ( 0.01, 0.09, 0.90 ) # m-weights corresponding to last name
u_weights_dict = {}
u_weights_dict[ "first_name" ] = ( 0.88, 0.10, 0.02 ) # u-weights corresponding to first name
u_weights_dict[ "last_name" ] = ( 0.91, 0.08, 0.01 ) # u-weights corresponding to last name
print( "==> Created m- and u-weight dictionaries at " + str( datetime.datetime.now() ) + "." )
###Output
_____no_output_____
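###Markdown
As a quick sanity check on the dictionaries we just built: because each field's comparison result is modeled as a multinomial over the three match levels, the m-weights for a field should sum to 1, and so should its u-weights. The short cell below just verifies that.
###Code
# Sanity check: each weight tuple should describe a full probability distribution
# over the three match levels, so it should sum to (approximately) 1.
for field_name in m_weights_dict:

    print( field_name + " - m-weights sum: " + str( sum( m_weights_dict[ field_name ] ) )
        + "; u-weights sum: " + str( sum( u_weights_dict[ field_name ] ) ) )

#-- END loop over fields. --#
###Output
_____no_output_____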
###Markdown
Now, for exercise 3, we are going to define a function that uses these weights to compare two records and return their record-level match score. In Fellegi-Sunter Record Linkage the match score for a given pair of records is the value that captures a weighted probability that two records from different data sets refer to the same entity. The match_score function starts by comparing the two records passed in and retrieving a match level for the first name and last name. The function then needs to:- get tuples of m- and u-weights for first name and last name.- retrieve the m- and u-weights for the particular match level calculated for first and last name.- calculate the "log probability" of each of the weights retrieved in the previous step. The "log probability" of a probability is the natural logarithm of that probability - so, the result of calling the "`math.log()`" function on a probability (in this case, on each of the weights we retrieved in the step above).- use these log probabilities to calculate a match score for the first name and the last name. Our algorithm for this match score: - sum the log probabilities of the records being a match (the log probabilities of the m-weights). - sum the log probabilities of the records not being a match (the log probabilities of the u-weights). - subtract the non-match (u-weight) sum from the match (m-weight) sum. - store your match score value in the variable "`score_OUT`" so that it is returned by the function.
###Code
def match_score( record_1_IN, record_2_IN ):
'''
record_1_IN: input record No.1
record_2_IN: input record No.2
'''
# return reference
score_OUT = -1
# declare variables
match_level_tuple = None
match_level_first_name = -1
match_level_last_name = -1
# Calulate the similarity level using compare_records
match_level_tuple = compare_records( record_1_IN, record_2_IN )
match_level_first_name = match_level_tuple[ 0 ]
match_level_last_name = match_level_tuple[ 1 ]
# Use match levels and m- and u-weights to calculate a match score for this record.
### BEGIN SOLUTION
# declare variables
m_weights_list_first_name = None
m_weights_list_last_name = None
u_weights_list_first_name = None
u_weights_list_last_name = None
first_name_m_weight = -1
first_name_u_weight = -1
last_name_m_weight = -1
last_name_u_weight = -1
log_prob_given_match = None
log_prob_given_nonmatch = None
# get lists of m- and u- weights for each field from weights dictionaries defined above.
m_weights_list_first_name = m_weights_dict[ "first_name" ]
m_weights_list_last_name = m_weights_dict[ "last_name" ]
u_weights_list_first_name = u_weights_dict[ "first_name" ]
u_weights_list_last_name = u_weights_dict[ "last_name" ]
# get weights for match levels returned by compare_records.
first_name_m_weight = m_weights_list_first_name[ match_level_first_name ]
first_name_u_weight = u_weights_list_first_name[ match_level_first_name ]
last_name_m_weight = m_weights_list_last_name[ match_level_last_name ]
last_name_u_weight = u_weights_list_last_name[ match_level_last_name ]
# calculate log-probabilities for each field assuming a match (m-weight),
# and assuming a non-match (u-weight). Log-probability is the natural
# log (math.log() in Python) of the probability of a given match level.
log_prob_first_name_given_match = math.log( first_name_m_weight )
log_prob_last_name_given_match = math.log( last_name_m_weight )
log_prob_first_name_given_no_match = math.log( first_name_u_weight )
log_prob_last_name_given_no_match = math.log( last_name_u_weight )
# For match and no-match, sum the log-probabilities for each field.
# What's the log-probability of seeing this comparison vector if the records are a match?
log_prob_given_match = log_prob_first_name_given_match + log_prob_last_name_given_match
# What's the log-probability of seeing this comparison vector if the records are a nonmatch?
log_prob_given_nonmatch = log_prob_first_name_given_no_match + log_prob_last_name_given_no_match
# match score is the sum of the log probabilities given a match
# minus the sum of the log probabilites given no match.
score_OUT = log_prob_given_match - log_prob_given_nonmatch
### END SOLUTION
# return match score.
return score_OUT
#-- END function match_score() --#
print( "==> Defined function match_score() at " + str( datetime.datetime.now() ) + "." )
# Have a rough look at its sample output
print(match_score((1, "joshua", "tokle"), (2, "joshua", "smith")))
print(match_score((1, "joshua", "tokle"), (4, "joshu", "tolke")))
print(match_score((1, "joshua", "tokle"), (7, "christina", "jones")))
###Output
_____no_output_____
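###Markdown
To make the log-probability arithmetic concrete, the optional cell below recomputes the first demo score by hand. The pair `( 1, "joshua", "tokle" )` vs. `( 2, "joshua", "smith" )` gets match levels ( 2, 0 ) - first names agree exactly, last names do not - so the score is the sum of the logged m-weights for those levels minus the sum of the logged u-weights. The result should equal what `match_score()` printed above.
###Code
# Recompute the first demo score by hand to check the arithmetic.
record_1 = ( 1, "joshua", "tokle" )
record_2 = ( 2, "joshua", "smith" )
first_name_level, last_name_level = compare_records( record_1, record_2 )
manual_score = ( math.log( m_weights_dict[ "first_name" ][ first_name_level ] )
    + math.log( m_weights_dict[ "last_name" ][ last_name_level ] )
    - math.log( u_weights_dict[ "first_name" ][ first_name_level ] )
    - math.log( u_weights_dict[ "last_name" ][ last_name_level ] ) )
print( "match levels: " + str( ( first_name_level, last_name_level ) ) )
print( "manual score: " + str( manual_score ) )
print( "match_score(): " + str( match_score( record_1, record_2 ) ) )
###Output
_____no_output_____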
###Markdown
Finally, we can try to link people in our "`person`" table with "`il_wage`" records.We use our "`match_score()`" function to compare each name in the person list with each name in the wage record list, storing only matches whose score exceeds a threshold (0.5, to start) in a separate list of potential matches._Note: This code cell might take quite a few minutes to run._
###Code
# This cell takes a few minutes to execute
# configuration
match_score_min = 0.5
# Create an empty list to save the outputs
potential_matches = []
# Loop over uc data frame by row
person_counter = 0
for person_index, person_row in person_df.iterrows():
# increment counter
person_counter += 1
# Get ID, FirstName and LastName of uc
current_person_id = person_row[ "id" ]
current_person_first_name = person_row[ "name_first_hash" ]
current_person_last_name = person_row[ "name_last_hash" ]
# store in tuple for comparison
current_person_record = ( current_person_id, current_person_first_name, current_person_last_name )
#print( "==> Processing UC person " + str( person_counter ) + ": " + str( current_person_record ) )
# Loop over wage data frame by lines
wage_counter = 0
for wage_index, wage_row in il_wage_df.iterrows():
# increment counter
wage_counter += 1
# Get id, name_first and name_last from NSF row.
current_wage_id = wage_row[ "id" ] # no person ID, so using id.
current_wage_first_name = wage_row[ "name_first" ]
current_wage_last_name = wage_row[ "name_last" ]
# store in tuple for comparison
current_wage_record = ( current_wage_id, current_wage_first_name, current_wage_last_name )
# print( "====> Processing wage person " + str( wage_counter ) + ": " + str( current_wage_record ) )
# Calculate the match score of each pair of records
score = match_score( current_person_record, current_wage_record )
# Save those pairs with score equal to or greater than 0.5
if score >= match_score_min:
# good enough - add to potential_matches
potential_matches.append( ( score, current_person_record, current_wage_record ) )
#-- END conditional to see if score is above our threshold --#
#-- END loop over wage data frame rows. --#
# output a little message every 1000 rows.
if ( person_counter % 1000 == 0 ):
print( "==> Processed " + str( current_wage_record ) + " of " + str( len( person_df ) ) + " person records." )
#-- END check to see if we've processed another thousand rows yet. --#
#-- END loop over UC data frame rows. --#
# Sort the output so the best matches appear at the top
# define lambda to retrieve score from each tuple (first item in tuple)
lambda_get_score = lambda x: x[0]
# sort
potential_matches = sorted( potential_matches, key = lambda_get_score, reverse = True )
# output matches:
print( "Matches, in order of descending match score:" )
for current_match in potential_matches:
# print current match.
print( "==> " + str( current_match ) )
#-- END output loop. --#
# How did we do?
###Output
_____no_output_____
###Markdown
And finally, we assess the potential matches displayed above - how did our algorithm do? Appendix - String Comparators- Back to the [Table of Contents](Table-of-Contents)In this section we will demonstrate different string comparison algorithms provided by the [jellyfish](https://github.com/sunlightlabs/jellyfish) package ( [https://github.com/sunlightlabs/jellyfish](https://github.com/sunlightlabs/jellyfish) ).For each method we examine, we'll write a function that accepts a name that we want to find matches for and a list of names in which we should look, and that returns a list of the names in that list that are most similar to the name of interest.We will start by creating a `set` of unique first names from the "`il_wage`" data. The `name_first_hash` field is missing some values which are represented as NaN in the data frame. To prevent errors later on, we only include valid character strings (which have type `str`) in our list of unique names.
###Code
# If we were starting from scratch, we'd need to import jellyfish
# import jellyfish
# Make a set storing the unique first name with respect to the nsf dataset
unique_first_names = set( name for name in il_wage_df[ "name_first_hash" ] if type( name ) == six.text_type )
###Output
_____no_output_____
###Markdown
Next, we will define our function "`closest_names`" that:* Accepts a string name we are interested in matching as input argument "`name_IN`".+ Accepts a list of names in which we want to look for "`name_IN`", in argument "`find_name_in_list_IN`".* Accepts an optional number of results we want returned as input argument "`result_count_IN`".* Compares the name in `name_IN` to each name in `unique_first_names` and calculates the "distance" between the two strings using the Levenshtein Distance string comparator from `jellyfish`.* Return a list of size `result_count_IN` of names in `uniq_first_names` that are "closest" to `name_IN`.From wikipedia, the Levenshtein Distance is defined as:> "In information theory and computer science, the Levenshtein distance is a string metric for measuring the difference between two sequences. Informally, the Levenshtein distance between two words is the minimum number of single-character edits (i.e. insertions, deletions or substitutions) required to change one word into the other. It is named after Vladimir Levenshtein, who considered this distance in 1965."> — [https://en.wikipedia.org/wiki/Levenshtein_distance](https://en.wikipedia.org/wiki/Levenshtein_distance)_Note that in the comparison we capitalize both names being compared, so that letter case doesn't affect the final distance._
###Code
def closest_names( name_IN, find_name_in_list_IN, result_count_IN = 10 ):
# return reference
results_OUT = []
# declare variables
other_name = ""
cleaned_name = ""
cleaned_other_name = ""
get_distance_lambda = None
# first, standardize the name - convert to upper case and to unicode.
cleaned_name = name_IN.upper()
cleaned_name = six.text_type( cleaned_name )
# First create a list of tuples (other_name, distance), where other_name is taken from uniq_first_names
distances = []
# loop over unique_first_names to calculate and store distances
for other_name in find_name_in_list_IN:
# standardize the other name.
cleaned_other_name = other_name.upper()
cleaned_other_name = six.text_type( cleaned_other_name )
# get distance from name to other_name (converted to upper case so we are case-insensitive.)
distance_value = jellyfish.levenshtein_distance( cleaned_name, cleaned_other_name )
# add tuple to distances
current_tuple = ( other_name, distance_value )
distances.append( current_tuple )
#-- END loop over unique_first_names --#
# Sort distances by the second element in the tuple.
# define lambda function to retrieve the distance (the second item in the tuple)
# and return it. Lambda functions are little one line functions. More information:
# https://docs.python.org/2/reference/expressions.html#lambda
get_distance_lambda = lambda distance_tuple_list_IN : distance_tuple_list_IN[ 1 ]
# sort matching names by distance
results_OUT = sorted( distances, key = get_distance_lambda )
# get the number of results requested using Python's slice notation.
results_OUT = results_OUT[ : result_count_IN ]
# return results
return results_OUT
'''
# For reference, compacted version - you can do this, but please don't.
# First create a list of tuples (other_name, distance), where other_name is taken from uniq_first_names
distances = [ ( other_name, jellyfish.levenshtein_distance( unicode( name_IN.upper() ), unicode( other_name.upper() ) ) )
for other_name in unique_first_names ]
# Sort distances by the second element in the tuple, and return the top n values
return sorted(distances, key=lambda x: x[1])[:result_count_IN]
'''
#-- END function closest_names() --#
print( "==> Defined function closest_names() at " + str( datetime.datetime.now() ) + "." )
###Output
_____no_output_____
###Markdown
Let's try it out on some names.
###Code
# Experiment the function with several names.
print( closest_names( "Jennifer", unique_first_names ) )
print( closest_names( "Sonya", unique_first_names ) )
print( closest_names( "Wai Tong", unique_first_names ) )
###Output
_____no_output_____
###Markdown
Recall that Levenshtein distance is a kind of edit distance. Edit distances count the number of edit operations needed to change one word to another, and different edit distances count different edit operations as valid. In the case of Levenshtein distance, the valid edit operations are inserting a letter, deleting a letter, or changing a letter. It would be interesting to compare this output to the output from other string comparators included in the jellyfish package:* **`jellyfish.levenshtein_distance`** - _Levenshtein distance_: edit distance where the valid operations are inserting a letter, deleting a letter, or changing a letter* **`jellyfish.damerau_levenshtein_distance`** - _Levenshtein-Damerau distance_: edit distance which includes the same operations as Levenshtein distance but also allows transposing two adjacent letters. This can be useful for finding words with typos.* **`jellyfish.jaro_winkler`** - _Jaro-Winkler distance_: a fast-to-compute string distance based on common letters between two words_Note: For edit distance smaller numbers indicate closer strings, but for Jaro-Winkler distance larger values indicate closer strings._Let's update our `closest_names` function so that we can specify the string comparator we want to use. Changes from previous function:- add ability to pass in the string distance calculation function you want to use as an argument, named "`string_comparator_function_IN`". - just pass the name of the function, not in quotation marks, and not followed by parentheses (just like they are shown in the list of functions above).- add ability to reverse sort order for returning "closest" strings to name passed in. New parameter "`reverse_sort_IN`" defaults to `False` (to match distance scores where a larger number indicates two strings being further apart). Set it to `True` for distance scores like Jaro-Winkler distance where a larger number indicates two strings are closer together.
###Code
def closest_names_2( string_comparator_function_IN, name_IN, find_name_in_list_IN, reverse_sort_IN = False, result_count_IN = 10 ):
# return reference
results_OUT = []
# declare variables
other_name = ""
cleaned_name = ""
cleaned_other_name = ""
get_distance_lambda = None
# first, standardize the name - convert to upper case and to unicode.
cleaned_name = name_IN.upper()
cleaned_name = six.text_type( cleaned_name )
# First create a list of tuples (other_name, distance), where other_name is taken from uniq_first_names
distances = []
# loop over unique_first_names to calculate and store distances
for other_name in find_name_in_list_IN:
# standardize the other name.
cleaned_other_name = other_name.upper()
cleaned_other_name = six.text_type( cleaned_other_name )
# get distance from name to other_name (converted to upper case so we are case-insensitive.)
distance_value = string_comparator_function_IN( cleaned_name, cleaned_other_name )
# add tuple to distances
current_tuple = ( other_name, distance_value )
distances.append( current_tuple )
#-- END loop over unique_first_names --#
# Sort distances by the second element in the tuple.
# define lambda function to retrieve the distance (the second item in the tuple)
# and return it. Lambda functions are little one line functions. More information:
# https://docs.python.org/2/reference/expressions.html#lambda
get_distance_lambda = lambda distance_tuple_list_IN : distance_tuple_list_IN[ 1 ]
# sort matching names by distance
results_OUT = sorted( distances, key = get_distance_lambda, reverse = reverse_sort_IN )
# get the number of results requested using Python's slice notation.
results_OUT = results_OUT[ : result_count_IN ]
# return results
return results_OUT
'''
# For reference, compacted version - you can do this, but please don't.
# First create a list of tuples (other_name, distance), where other_name is taken from uniq_first_names
distances = [(other_name, string_comparator(unicode(name.upper()), unicode(other_name.upper())))
for other_name in uniq_first_names]
# Sort distances by the second element in the tuple, and return the top n values
return sorted(distances, key=lambda x: x[1], reverse=reverse_sort)[:n]
'''
#-- END function closest_names_2() --#
print( "==> Defined function closest_names_2() at " + str( datetime.datetime.now() ) + "." )
# Try it!
print( "Closest names for \"William\" using Levenshtein-Damerau distance:" )
print( closest_names_2( jellyfish.damerau_levenshtein_distance, "William", unique_first_names ) )
print( "\n\nClosest names for \"William\" using Levenshtein-Damerau distance:" )
print( closest_names_2( jellyfish.jaro_winkler, "Wiliam", unique_first_names, reverse_sort_IN = True ) )
###Output
_____no_output_____ |
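###Markdown
As a final optional illustration, the cell below runs all three jellyfish comparators on one made-up pair of names ("MARTHA" and "MARHTA", a single transposition apart) so the direction of each score is easy to see side by side: for the two edit distances a smaller number means closer, while for Jaro-Winkler a larger value means closer.
###Code
# Compare the three comparators on a single (made-up) pair of names.
name_1 = six.text_type( "MARTHA" )
name_2 = six.text_type( "MARHTA" )
print( "levenshtein_distance: " + str( jellyfish.levenshtein_distance( name_1, name_2 ) ) )
print( "damerau_levenshtein_distance: " + str( jellyfish.damerau_levenshtein_distance( name_1, name_2 ) ) )
print( "jaro_winkler: " + str( jellyfish.jaro_winkler( name_1, name_2 ) ) )
###Output
_____no_output_____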
Machine_Learning.ipynb | ###Markdown
Machine Learning **Machine learning is the subfield of computer science that, according to Arthur Samuel in 1959, gives "computers the ability to learn without being explicitly programmed". Developed from the study of pattern recognition and computational learning theory in artificial intelligence, machine learning explores the study and construction of algorithms that can learn and make predictions - such algorithms go beyond strictly static program instructions by making data-driven predictions or decisions, through building a model from sample inputs.**Applications: Data Security; Trading; Healthcare; Personalized Marketing; Fraud Detection; Shopping Recommendations; Computer Vision; Online Search; Natural Language Processing (NLP); Smart Cars. Types of Machine Learning **Supervised Learning** -- The computer is presented with example inputs (features) and the desired outputs, given by a "teacher", and the goal is to learn a general rule that maps inputs to outputs.**Unsupervised Learning** -- No desired output is given to the learning algorithm, leaving it on its own to find structure in its input.**Reinforcement Learning** -- The agent/algorithm interacts with a dynamic environment in which it must accomplish a certain goal. Scikit-Learn
###Code
from __future__ import absolute_import, division, print_function
# Supervised learning
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
print(boston.data[0])
print(boston.target[0])
# Use only one feature (column 5: average number of rooms)
X = boston.data[:,5]
# Price (target)
y = boston.target
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(10,10))
plt.scatter(X, y)
plt.xlabel("Media de quartos na vizinhanca")
plt.ylabel("Preco")
plt.title("Relacao entre preco e numero de quartos")
plt.show()
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Reshaping
X = X.reshape(-1, 1)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Reshaping
X = X.reshape(-1, 1)
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Reshaping
#X = X.reshape(-1, 1)
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# Create the model
reg = LinearRegression()
# Training
reg.fit(X_train, y_train)
# Make predictions (on the training data, for plotting the fitted line)
y_pred = reg.predict(X_train)
plt.figure(figsize=(10,10))
plt.scatter(X, y)
# Plot the trained model
plt.plot(X_train, y_pred, color='red')
plt.xlabel("Media de quartos na vizinhanca")
plt.ylabel("Preco")
plt.title("Relacao entre preco e numero de quartos")
plt.show()
# Evaluate the model
R2 = reg.score(X_test, y_test)
print("O R2 é {}".format(R2))
# Use all the features
X = boston.data
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# Create the model
reg = LinearRegression()
# Training
reg.fit(X_train, y_train)
# Make predictions
y_pred = reg.predict(X_train)
# Evaluate the model on the test set
R2 = reg.score(X_test, y_test)
print("O R2 é {}".format(R2))
###Output
O R2 é 0.762076524299
###Markdown
 Spam Filter

A natural language processing (NLP) problem; supervised learning; classification.

Bag of Words

**The bag-of-words model is a simplified representation used in natural language processing. In this model, a text (a sentence or a document) is represented as the bag of its words, disregarding grammar and even word order, but keeping multiplicity.**

**Example**

(1) John likes to watch movies. Mary likes movies too.
(2) John also likes to watch football games.

Vocabulary: [ "John", "likes", "to", "watch", "movies", "also", "football", "games", "Mary", "too"]

**Vector Representation**

(1) [1, 2, 1, 1, 2, 0, 0, 0, 1, 1]
(2) [1, 1, 1, 1, 0, 1, 1, 1, 0, 0]
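A minimal sketch (not part of the original notebook) that reproduces the two-sentence example above with scikit-learn's CountVectorizer. CountVectorizer lowercases tokens and orders its vocabulary alphabetically, so the column order differs from the hand-built vocabulary, but the per-sentence counts convey the same idea.

###Code
# Sketch: bag-of-words counts for the two example sentences above.
from sklearn.feature_extraction.text import CountVectorizer

docs = ["John likes to watch movies. Mary likes movies too.",
        "John also likes to watch football games."]
bow = CountVectorizer()
counts = bow.fit_transform(docs)
print(bow.vocabulary_)   # term -> column index
print(counts.toarray())  # one row of word counts per sentence
###Output
_____no_output_____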
###Code
import pandas as pd
import seaborn as sns
data = pd.read_csv('./smsspamcollection/SMSSpamCollection', delimiter='\t', header=None)
plt.figure(figsize=(20,10))
ax = plt.axes()
sns.countplot(x=0, data=data, ax=ax);
data.head(10)
###Output
_____no_output_____
###Markdown
 Preprocessing

**Remove stopwords** -- noise
**Replace digits with a special token (NUM)**
**Remove special characters**
**Replace ham with 0 and spam with 1**
###Code
# NLP module
import nltk
nltk.data.path.append('./nltk_data')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import re
def preprocess(string):
    # Tokenize the string. Example: string = "I love my dog." --> ["I", "love", "my", "dog", "."]
    tokens = word_tokenize(string.decode('utf-8'))
    # Remove stopwords and lowercase the remaining tokens.
    low_tokens = [token.lower() for token in tokens \
                  if token not in stopwords.words('english')]
    # Join the tokens back into a single string
    string = " ".join(low_tokens)
    # Replace numbers with NUM. Example: 4 becomes NUM, 15 becomes NUM, 10000 becomes NUM.
    string = re.sub(r'\d{1,}','NUM',string)
    # Remove special characters (note: only a-z and whitespace are kept, so the uppercase NUM token inserted above is stripped as well)
    string = re.sub(r'[^a-z\s]','',string)
return string
def class2int(classes):
if classes == 'ham':
classes = 0
if classes == 'spam':
classes = 1
return classes
# Apply preprocessing
data[1] = data[1].apply(preprocess)
data[0] = data[0].apply(class2int)
data.head(10)
###Output
_____no_output_____
###Markdown
SMS to vectors
###Code
from sklearn.feature_extraction.text import CountVectorizer
sms = data[1]
y = data[0]
vect = CountVectorizer(ngram_range=(1,1), max_features=1000)
vetores = vect.fit_transform(sms)
vetores
# Plot the SMS vectors (5572 messages) in a 2-dimensional space
from sklearn.manifold import TSNE
# t-SNE object
tsne = TSNE()
# Fit and reduce to 2 dimensions
vet2dims = tsne.fit_transform(vetores.toarray())
# Arrange the 2D coordinates and classes in a dataframe
df = pd.DataFrame(dict(x=vet2dims[:,0], y=vet2dims[:,1], label=y))
# Group by class
groups = df.groupby('label')
# Plot the SMS messages
fig, ax = plt.subplots()
ax.margins(0.05)
for name, group in groups:
ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, label=name)
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
 Classification

**Naive Bayes**

http://scikit-learn.org/stable/modules/naive_bayes.html
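For reference (added), the decision rule behind the Naive Bayes classifier used below assigns each message the class that maximizes the posterior under the conditional-independence assumption:

$$\hat{c} = \arg\max_{c \in \{\text{ham},\, \text{spam}\}} P(c) \prod_{i=1}^{n} P(w_i \mid c)$$

where $w_1, \dots, w_n$ are the word features of the message. BernoulliNB, used in the pipeline below, models each feature as a binary present/absent indicator.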
###Code
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import Pipeline
# Split data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(sms, y, train_size=0.7, random_state=0)
# Create the pipeline (vectorizer + Bernoulli Naive Bayes)
clf = Pipeline([('vect', vect), ('nb', BernoulliNB())])
clf.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
 Performance
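A quick reference (added) for the metrics computed below, in terms of true/false positives and negatives:

$$\text{Accuracy} = \frac{TP + TN}{TP + TN + FP + FN}, \qquad TPR = \frac{TP}{TP + FN}, \qquad FPR = \frac{FP}{FP + TN}$$

The ROC curve below is traced by (FPR, TPR) pairs, and the reported AUC is the area under that curve: 0.5 corresponds to random guessing, 1.0 to a perfect ranking.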
###Code
# Accuracy
accuracy = clf.score(X_test, y_test)
print("Accuracy is {}".format(accuracy))
from sklearn.metrics import auc, roc_curve
pred = clf.predict(X_test)
# ROC curve
fpr, tpr, thresholds = roc_curve(y_test, pred, pos_label=1)
area_under_curve = auc(fpr, tpr)
print("Area under the curve is {}".format(area_under_curve))
import numpy as np
# Confusion matrix
def plot_confusion_matrix(cm, classes,title='Confusion matrix', cmap=plt.cm.Blues, rotation=False):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
bal = classes
tick_marks = np.arange(len(bal))
if rotation:
plt.xticks(tick_marks, bal, rotation=60)
else:
plt.xticks(tick_marks, bal)
plt.yticks(tick_marks, bal)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
from sklearn.metrics import confusion_matrix
y_prediction = clf.predict(X_test)
cm = confusion_matrix(y_test, y_prediction)
print(cm)
plot_confusion_matrix(cm, ['ham', 'spam'])
# Save the trained model to disk
from sklearn.externals import joblib
joblib.dump(clf, 'spam_filter.pkl')
###Output
_____no_output_____
###Markdown
###Code
%tensorflow_version 2.x # this line is not required unless you are in a notebook
import tensorflow as tf # now import the tensorflow module
print(tf.version) # make sure the version is 2.x
###Output
`%tensorflow_version` only switches the major version: 1.x or 2.x.
You set: `2.x # this line is not required unless you are in a notebook`. This will be interpreted as: `2.x`.
TensorFlow 2.x selected.
<module 'tensorflow._api.v2.version' from '/usr/local/lib/python3.6/dist-packages/tensorflow/_api/v2/version/__init__.py'>
###Markdown
 Machine Learning and Statistics Project - Boston House Price Dataset

The Boston Housing dataset contains US census data published in 1978 on houses in various parts of Boston. The dataset contains information on 506 samples with 13 measurements (features) for each sample. The samples are contained in the rows of the dataset and the measurements in the columns. The following is an analysis of the dataset using descriptive and inferential statistics:
###Code
import numpy as np # import numnerical python with shorthand
import matplotlib.pyplot as plt # shorthand import for matplotlib library
import seaborn as sns # import seaborn for advanced graphics
import pandas as pd # import pandas for data structures and operations for manipulating numerical tables and time series
import sklearn # for Machine Learning in Python
import scipy.stats as stats # statisitcal function
import statsmodels.api as sm # conducting statistical tests, and statistical data exploration
%matplotlib inline
from matplotlib import rcParams # improved plots from matplotlib
from sklearn.model_selection import train_test_split #Split arrays or matrices into random train and test subsets
from sklearn import svm # support vector classification
import sklearn.preprocessing as pre # For encoding categorical variables.
import sklearn.model_selection as mod # For splitting into training and test sets.
import sklearn.decomposition as dec
# make the plots bigger
plt.rcParams['figure.figsize'] = (20.0, 10.0)
###Output
_____no_output_____
###Markdown
The Boston House Price Dataset is loaded into the jupyter notebook from the scikit learn library
###Code
# Load the Boston House Price dataset from scikit-learn library
from sklearn.datasets import load_boston
boston_dataset = load_boston()
#Print the values of the Boston dataset
print(boston_dataset.keys())
###Output
dict_keys(['data', 'target', 'feature_names', 'DESCR', 'filename'])
###Markdown
 data: contains the information for the various houses
target: prices of the houses
feature_names: names of the features
DESCR: describes the dataset

Describe the shape of the dataset (rows, columns)
###Code
# Give the description of the dataset
print(boston_dataset.data.shape)
print(boston_dataset.target.shape)
###Output
(506,)
###Markdown
Print the first five rows of the dataset
###Code
#Print the header of the Boston dataset
boston = pd.DataFrame(boston_dataset.data, columns=boston_dataset.feature_names)
boston.head()
###Output
_____no_output_____
###Markdown
Give a description of what is contained in each column
###Code
print(boston_dataset.DESCR)
###Output
.. _boston_dataset:
Boston house prices dataset
---------------------------
**Data Set Characteristics:**
:Number of Instances: 506
:Number of Attributes: 13 numeric/categorical predictive. Median Value (attribute 14) is usually the target.
:Attribute Information (in order):
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per $10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in $1000's
:Missing Attribute Values: None
:Creator: Harrison, D. and Rubinfeld, D.L.
This is a copy of UCI ML housing dataset.
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
This dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.
The Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic
prices and the demand for clean air', J. Environ. Economics & Management,
vol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics
...', Wiley, 1980. N.B. Various transformations are used in the table on
pages 244-261 of the latter.
The Boston house-price data has been used in many machine learning papers that address regression
problems.
.. topic:: References
- Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.
- Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.
###Markdown
Append the MEDV value from target to the dataset
###Code
boston['MEDV'] = boston_dataset.target
# Add MEDV value to the dataset
print(boston.head())
# Dataframe with price
###Output
CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX \
0 0.00632 18.0 2.31 0.0 0.538 6.575 65.2 4.0900 1.0 296.0
1 0.02731 0.0 7.07 0.0 0.469 6.421 78.9 4.9671 2.0 242.0
2 0.02729 0.0 7.07 0.0 0.469 7.185 61.1 4.9671 2.0 242.0
3 0.03237 0.0 2.18 0.0 0.458 6.998 45.8 6.0622 3.0 222.0
4 0.06905 0.0 2.18 0.0 0.458 7.147 54.2 6.0622 3.0 222.0
PTRATIO B LSTAT MEDV
0 15.3 396.90 4.98 24.0
1 17.8 396.90 9.14 21.6
2 17.8 392.83 4.03 34.7
3 18.7 394.63 2.94 33.4
4 18.7 396.90 5.33 36.2
###Markdown
 Descriptive Statistics

Calculate the minimum, maximum, mean, median, and standard deviation of 'MEDV', which is stored in target. MEDV is the median value of owner-occupied homes in $1000's.
###Code
minimum_price = np.min(boston.MEDV) # Minimum price of the data
maximum_price = np.max(boston.MEDV) # maximum price of the data
mean_price = np.mean(boston.MEDV) # mean price of the data
median_price = np.median(boston.MEDV) # median price of the data
std_price = np.std(boston.MEDV) # standard deviation of the house prices
# Show the calculated statistics
print ("The lowest house price is:", minimum_price)
print ("The highest house price is:", maximum_price)
print ("The average house price is:", mean_price)
print ("The median house price is:", median_price)
print ("The standard deviation of houses prices is:", std_price)
boston.describe().T
###Output
_____no_output_____
###Markdown
Histograms of variables Plot histograms of the variables within the dataset as a visual representation
###Code
boston.hist(bins=50, figsize=(20,15))
plt.show()
###Output
_____no_output_____
###Markdown
Plot the house prices to show distribution of prices
###Code
# Histogram of house prices in 1000's
plt.hist(boston.MEDV)
plt.show
###Output
_____no_output_____
###Markdown
Distribution plot of house prices
###Code
sns.set(rc={'figure.figsize':(12,8)})
sns.distplot(boston['MEDV'], bins=10)
plt.show()
###Output
_____no_output_____
###Markdown
 Correlation Heatmap

Heatmap showing the correlation matrix, measuring the linear relationships between the variables of the dataset.
###Code
correlation_matrix = boston.corr().round(2)
# annot = True to print the values inside the square
sns.heatmap(data=correlation_matrix, annot=True)
###Output
_____no_output_____
###Markdown
 Observations

The correlation coefficient runs on a scale of -1 to 1: values near 1 indicate strong positive correlation, values near -1 indicate strong negative correlation, and values near 0 indicate no linear correlation. MEDV shows a strong positive correlation with RM, indicating a higher house price for a dwelling with a larger number of rooms. It shows a negative correlation with LSTAT, the percentage of lower status of the population.

Linear Regression

LSTAT - Percentage of lower status of the population

LSTAT was selected for the linear regression model as it showed the strongest negative correlation with MEDV (median value of owner-occupied homes in $1000s) in the correlation matrix. House prices are negatively and approximately linearly correlated with this measure of status within the population: more "lower class" areas have lower prices.
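As a rough illustration (added; the original analysis only scatter-plots this relationship before fitting a multi-feature model later), a one-feature linear regression of MEDV on LSTAT can be fit directly on the boston dataframe defined above:

###Code
# Sketch: fit MEDV ~ LSTAT with a single-feature linear model.
from sklearn.linear_model import LinearRegression

lstat = boston[['LSTAT']].values  # single predictor
medv = boston['MEDV'].values      # target
simple_reg = LinearRegression().fit(lstat, medv)
print("Slope:", simple_reg.coef_[0])        # expected to be negative
print("Intercept:", simple_reg.intercept_)
print("R2 on the full data:", simple_reg.score(lstat, medv))
###Output
_____no_output_____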
###Code
plt.figure(figsize=(20, 5))
features = ['LSTAT']
target = boston['MEDV']
for i, col in enumerate(features):
plt.subplot(1, len(features) , i+1)
x = boston[col]
y = target
plt.scatter(x, y, marker='o')
plt.title(col)
plt.xlabel(col)
plt.ylabel('MEDV')
###Output
_____no_output_____
###Markdown
 Positive Correlation

RM - Average number of rooms per dwelling

There is a strong positive correlation with MEDV (median value of owner-occupied homes in $1000s) in the correlation matrix. There is a higher MEDV for a higher RM, as more rooms imply more space and therefore a higher price, holding all other factors constant.
###Code
plt.figure(figsize=(20, 5))
features = ['RM']
target = boston['MEDV']
for i, col in enumerate(features):
plt.subplot(1, len(features) , i+1)
x = boston[col]
y = target
plt.scatter(x, y, marker='o')
plt.title(col)
plt.xlabel(col)
plt.ylabel('MEDV')
###Output
_____no_output_____
###Markdown
 Pupil-teacher ratio by town

Prices are lower in areas with a high student-to-teacher ratio, reflecting lower demand for houses in such areas. A high student-to-teacher ratio results in less attention dedicated to each student and potentially impacts their performance.
###Code
plt.figure(figsize=(20, 5))
features = ['PTRATIO']
target = boston['MEDV']
for i, col in enumerate(features):
plt.subplot(1, len(features) , i+1)
x = boston[col]
y = target
plt.scatter(x, y, marker='o')
plt.title(col)
plt.xlabel(col)
plt.ylabel('MEDV')
###Output
_____no_output_____
###Markdown
 =======================================================================================================================

Charles River Analysis

The following section uses inferential statistics to analyse the difference in median house prices between houses that bound the Charles River and those that do not. The following tools are used in the analysis:
- Boxplots
- Histogram
- ANOVA
- T test

The CHAS variable in the dataset is the Charles River dummy variable. The variable is equal to one if the tract bounds the Charles River and zero if it does not. This section analyzes the data based on the CHAS value and its impact on the respective MEDV value.
###Code
# Split the Chas data into along the river and not along the river
Along_The_River=boston.loc[boston['CHAS'] != 0]
Not_Along_The_River=boston.loc[boston['CHAS'] != 1]
chas = boston.CHAS
chas
###Output
_____no_output_____
###Markdown
 Histogram

Display a histogram to show how many houses in the dataset bound the Charles River and how many do not.
###Code
plt.hist(boston.CHAS)
plt.show
###Output
_____no_output_____
###Markdown
 The histogram displays the number of houses which do not bound the river (0) and the number of houses which do bound the river (1). A significantly higher number of houses do not bound the river. For further analysis the median price for each category will be compared.

Boxplots

Visual comparison of the median values for the houses which bound the river versus those that do not.
###Code
boston.boxplot(column='MEDV', by='CHAS')
plt.show()
###Output
_____no_output_____
###Markdown
 The boxplot shows that the median price for the houses which bound the river was higher in Boston at the time the dataset was generated.

ANOVA

Analysis of variance (ANOVA) is a collection of statistical models and their associated estimation procedures (such as the "variation" among and between groups) used to analyze the differences among group means in a sample. (https://en.wikipedia.org/wiki/Analysis_of_variance)
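The cell below summarises and plots normal PDFs for the two groups rather than running an ANOVA itself; a minimal sketch of an actual one-way ANOVA on the two groups defined above (added here) could look like this:

###Code
# Sketch: one-way ANOVA comparing MEDV for houses that do and do not bound the river.
import scipy.stats as stats

f_stat, p_value = stats.f_oneway(Along_The_River['MEDV'], Not_Along_The_River['MEDV'])
print("F statistic: {:.3f}".format(f_stat))
print("p-value: {:.6f}".format(p_value))
# With only two groups this F test is equivalent to the independent-samples t test below (F = t**2).
###Output
_____no_output_____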
###Code
atrm = np.mean(Along_The_River) # mean of Along_The_River
atrs = np.std(Along_The_River) # std of Along_The_River
natrm = np.mean(Not_Along_The_River) # mean of Not_Along_The_River
natrs = np.std(Not_Along_The_River) # std of Not_Along_The_River
print (atrm['MEDV'])
print (atrs['MEDV'])
print (natrm['MEDV'])
print (natrs['MEDV'])
# x values for plotting, spanning the observed range of MEDV.
x = np.linspace(0, 55, 1000)
# The probability density functions (PDFs) for the two populations.
pdfA = stats.norm.pdf(x, atrm['MEDV'], atrs['MEDV'])
pdfB = stats.norm.pdf(x, natrm['MEDV'], natrs['MEDV'])
# Plot the population PDFs as shaded regions.
plt.fill_between(x, pdfA, color='g', alpha=0.25, label="PDF Along_The_River") # Fill the area between two horizontal curves.
plt.fill_between(x, pdfB, color='b', alpha=0.25, label="PDF Not_Along_The_River") # Fill the area between two horizontal curves.
# Plot histograms of the two samples.
plt.hist(Along_The_River['MEDV'], density=True, color='g', alpha=0.25, label="Along_The_River")
plt.hist(Not_Along_The_River['MEDV'], density=True, color='b', alpha=0.25, label="Not_Along_The_River")
# Display a legend.
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
 The probability density function (PDF) is calculated to find probabilities associated with a continuous random variable; on its own it is a descriptive tool rather than a hypothesis test, so a t-test is applied to the Charles River data next.

Independent Samples T Test

https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html

When performing an independent samples t-test we assume a given difference between the means of the two populations, usually a difference of zero (the null hypothesis). The t-statistic measures how different the two samples are, and the p-value indicates the probability of seeing a t-statistic at least this extreme under that hypothesised difference. If the t-statistic is too extreme then we cannot accept the null hypothesis. If the p-value is large (> 0.05 or 0.1) we accept the null hypothesis; if the p-value is smaller than the chosen threshold, e.g. 1%, 5% or 10%, then we reject the null hypothesis of equal averages.

T test of median prices for houses along the river versus those that are not:
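For reference (added), the statistic computed by scipy.stats.ttest_ind with its default settings (equal variances assumed) is

$$t = \frac{\bar{x}_1 - \bar{x}_2}{s_p \sqrt{\frac{1}{n_1} + \frac{1}{n_2}}}, \qquad s_p^2 = \frac{(n_1 - 1)s_1^2 + (n_2 - 1)s_2^2}{n_1 + n_2 - 2}$$

where $\bar{x}_i$, $s_i^2$ and $n_i$ are the sample mean, variance and size of each group.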
###Code
import scipy.stats as ss
ss.ttest_ind(Along_The_River['MEDV'], Not_Along_The_River['MEDV'])
###Output
_____no_output_____
###Markdown
 As the p-value is 0.0000739, the null hypothesis of equal means is rejected.

T test of median prices against the CHAS dummy variable:
###Code
ss.ttest_ind(boston['MEDV'], boston['CHAS'])
###Output
_____no_output_____
###Markdown
 Houses along the Charles River have a higher median price than those that are not along the river.

=================================================================================================================

Neural Network

Train the dataset

Training Dataset: the sample of data used to fit the model - the actual dataset that we use to train the model (weights and biases in the case of a neural network). The model sees and learns from this data. (https://towardsdatascience.com/train-validation-and-test-sets-72cb40cba9e7)
###Code
from sklearn.linear_model import LinearRegression # used to train the model on both the training and test sets.
###Output
_____no_output_____
###Markdown
Prepare the data for training the model
###Code
X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns = ['LSTAT','RM'])
Y = boston['MEDV']
###Output
_____no_output_____
###Markdown
Split the data into training and testing sets. 80% of the samples are used to train the model and the remaining 20% are used to test
###Code
from sklearn.model_selection import train_test_split # used to split the data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
###Output
(404, 2)
(102, 2)
(404,)
(102,)
###Markdown
 Scikit-learn's LinearRegression is used to train the model on the training set; the fitted model is then evaluated on both the training and test sets.
###Code
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
lin_model = LinearRegression()
lin_model.fit(X_train, Y_train)
###Output
_____no_output_____
###Markdown
 Model Evaluation

The model is evaluated using RMSE and the R2 score.
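For reference (added), with $y_i$ the observed values, $\hat{y}_i$ the predictions and $\bar{y}$ the mean of the observed values:

$$\text{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}, \qquad R^2 = 1 - \frac{\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}{\sum_{i=1}^{n}(y_i - \bar{y})^2}$$

An $R^2$ of 1 means the predictions explain all of the variance in MEDV; 0 means the model does no better than always predicting the mean.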
###Code
from sklearn.metrics import r2_score
# model evaluation for training set
y_train_predict = lin_model.predict(X_train)
rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict)))
r2 = r2_score(Y_train, y_train_predict)
print("The model performance for training set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
print("\n")
# model evaluation for testing set
y_test_predict = lin_model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))
r2 = r2_score(Y_test, y_test_predict)
print("The model performance for testing set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
###Output
The model performance for training set
--------------------------------------
RMSE is 5.637129335071195
R2 score is 0.6300745149331701
The model performance for testing set
--------------------------------------
RMSE is 5.137400784702912
R2 score is 0.6628996975186952
###Markdown
 Scatter plot

Scatter plot to visualize the difference between actual and predicted values, using the X_test values.
###Code
lm = LinearRegression()
lm.fit(X_train, Y_train)
Y_pred = lm.predict(X_test)
plt.scatter(Y_test, Y_pred)
plt.xlabel("Actual Price: $Y_i$")
plt.ylabel("Predicted price: $\hat{Y}_i$")
plt.title("Actual price vs Predicted prices: $Y_i$ vs $\hat{Y}_i$")
###Output
_____no_output_____
###Markdown
Create a neural network to predict the median house price based on the other variables in the dataset. The features, 'RM', 'LSTAT', and 'PTRATIO', give us quantitative information about each data point. The target variable, 'MEDV', will be the variable we seek to predict. An artificial neural network is an interconnected group of nodes, inspired by a simplification of neurons in a brain. Here, each circular node represents an artificial neuron and an arrow represents a connection from the output of one artificial neuron to the input of another.(https://en.wikipedia.org/wiki/Artificial_neural_network)
###Code
# Inputs from the dataset
inputs = pd.DataFrame(boston, columns=['RM','LSTAT','PTRATIO'])
inputs
###Output
_____no_output_____
###Markdown
Pre-processing refers to the transformations applied to our data before feeding it to the algorithm. Data Preprocessing is a technique that is used to convert the raw data into a clean data set. In other words, whenever the data is gathered from different sources it is collected in raw format which is not feasible for the analysis. (https://www.geeksforgeeks.org/data-preprocessing-machine-learning-python/)
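The scaling applied in the next cell standardizes each feature column; as a reminder (added), each value is transformed as

$$z = \frac{x - \mu}{\sigma}$$

where $\mu$ and $\sigma$ are the column's mean and standard deviation, so every scaled feature has zero mean and unit variance.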
###Code
# https://scikit-learn.org/stable/modules/preprocessing.html preprocessing
xscale = pd.DataFrame(pre.scale(inputs), columns=inputs.columns)
xscale
output = boston['MEDV'] #seek to output a house price value
output
###Output
_____no_output_____
###Markdown
 Build The Model

Using keras to create a neural network that can predict the median house price based on the other variables in the dataset. To build the model I will use the following subset of the dataset:
- RM: average number of rooms per dwelling
- PTRATIO: pupil-teacher ratio by town
- LSTAT: % lower status of the population
- MEDV: median value of owner-occupied homes in $1000's
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras as kr
# Create a new neural network.
m = kr.models.Sequential()
# Add neurons.
# m.add(kr.layers.Dense(1, input_dim=1, activation="linear"))
# Add a hidden layer with x neurons and an input layer with 3.
m.add(kr.layers.Dense(units=10, input_dim=3, activation="linear"))
m.add(kr.layers.Dense(10, activation="linear"))
m.add(kr.layers.Dense(1, activation='linear'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="adam")
###Output
_____no_output_____
###Markdown
 The number of epochs is a hyperparameter that defines the number of times that the learning algorithm will work through the entire training dataset. (https://machinelearningmastery.com/difference-between-a-batch-and-an-epoch/)

Split
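As a quick worked example (added), with the 404 training rows produced by the 80/20 split of the 506 samples, a batch size of 20 and 500 epochs (the values used further below), training performs

$$\left\lceil \frac{404}{20} \right\rceil = 21 \ \text{weight updates per epoch}, \qquad 21 \times 500 = 10{,}500 \ \text{updates in total.}$$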
###Code
# Split the inputs and outputs into training and test sets.
inputs_train, inputs_test, output_train, output_test = mod.train_test_split(inputs, output, test_size=0.2)
inputs_test.iloc[0]
m.predict(inputs_test.as_matrix()[0:1])
###Output
C:\Users\Frank\Anaconda4\lib\site-packages\ipykernel_launcher.py:1: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
"""Entry point for launching an IPython kernel.
###Markdown
Train
###Code
# Train the neural network.
m.fit(inputs_train, output_train, epochs=500, batch_size=20)
# Train the model.
# m.fit(inputs, output, epochs=15, batch_size=128)
# https://stackoverflow.com/questions/37232782/nan-loss-when-training-regression-network
###Output
_____no_output_____
###Markdown
Predict
###Code
p = m.predict(inputs_test.as_matrix())
p
#Plot of actual v predicted price of first 100 data points
plt.figure(figsize=(12, 12))
plt.plot(p[:100], 'r-', label ='Predicted MEDV')
plt.plot(output[:100], 'b', label = 'Actual MEDV')
plt.title("Predicted Prices vs Actual Prices")
plt.ylabel("Price(1000's)")
plt.xlabel("Zone")
plt.legend(["Prediction", "Actual"], loc="upper left")
plt.show()
###Output
_____no_output_____
###Markdown
 Preprocessing the Data

Putting the data into a suitable format for analysis helps the neural network make better predictions. Principal Components Analysis (PCA) is a dimensionality reduction algorithm that can be used to significantly speed up your unsupervised feature learning algorithm. More importantly, understanding PCA will enable us to later implement whitening, which is an important pre-processing step for many algorithms. (http://ufldl.stanford.edu/tutorial/unsupervised/PCAWhitening/)

Whitening

Whitening of data is a way to preprocess the data. The idea behind whitening is to remove the underlying correlation in the data. It is a process usually done after the data is projected onto the eigenvectors as a result of PCA. (https://www.quora.com/What-is-whitening-of-data-in-Neural-Networks)
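In matrix form (added for reference), with the data centred by its mean $\mu$, $U$ the matrix of eigenvectors of the covariance of the inputs and $\Lambda$ the diagonal matrix of the corresponding eigenvalues, PCA whitening computes

$$X_{\text{white}} = (X - \mu)\, U\, \Lambda^{-1/2}$$

so that the transformed features are uncorrelated with unit variance, which is what PCA(whiten=True) does in the cell below.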
###Code
# https://scikit-learn.org/stable/modules/preprocessing.html#preprocessing
xscale = pd.DataFrame(pre.scale(inputs), columns=inputs.columns)
xscale
scaler = pre.StandardScaler()
scaler.fit(inputs)
scaler.mean_, scaler.scale_
xscale = pd.DataFrame(scaler.transform(inputs), columns=inputs.columns)
xscale
inputs.corr()
pca = dec.PCA(n_components=3, whiten=True)
pca.fit(inputs)
inputs_white = pd.DataFrame(pca.transform(inputs), columns=inputs.columns)
inputs_white
inputs_white.corr().round()
inputs_white.mean().round()
inputs_white.std().round()
# Add neurons.
# Re-create the network so the new layers are not stacked onto the already-trained model.
m = kr.models.Sequential()
# Add a hidden layer with 10 neurons and an input layer with 3.
m.add(kr.layers.Dense(units=10, input_dim=3, activation="linear"))
m.add(kr.layers.Dense(10, activation="linear"))
m.add(kr.layers.Dense(1, activation='linear'))
# Compile the model.
m.compile(loss="mean_squared_error", optimizer="adam")
# Whiten the training inputs with the PCA fitted above.
inputs_train_white = pd.DataFrame(pca.transform(inputs_train), columns=inputs.columns)
# Train the neural network on the whitened inputs.
m.fit(inputs_train_white, output_train, epochs=500, batch_size=20)
w = m.predict(inputs_white.as_matrix())
w
#Plot of actual v whitened predicted price of first 100 data points
plt.figure(figsize=(12, 12))
plt.plot(w[:100], 'r-', label ='WhitePredicted MEDV')
plt.plot(output[:100], 'b', label = 'Actual MEDV')
plt.title("White Predicted Prices vs Actual Prices")
plt.ylabel("Price(1000's)")
plt.xlabel("Zone")
plt.legend(["WhitePrediction", "Actual"], loc="upper left")
plt.show()
###Output
_____no_output_____
###Markdown
**Linear Regression**
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('./data/weight-height.csv')
df.head()
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
# Here we're plotting the red line 'by hand' with fixed values
# We'll try to learn this line with an algorithm below
plt.plot([55, 78], [75, 250], color='red', linewidth=3)
def line(x, w=0, b=0):
return x * w + b
x = np.linspace(55, 80, 100)
x
yhat = line(x, w=0, b=0)
yhat
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
plt.plot(x, yhat, color='red', linewidth=3)
###Output
_____no_output_____
###Markdown
Cost Function
###Code
def mean_squared_error(y_true, y_pred):
s = (y_true - y_pred)**2
return s.mean()
X = df[['Height']].values
y_true = df['Weight'].values
y_true
y_pred = line(X)
y_pred
mean_squared_error(y_true, y_pred.ravel())
plt.figure(figsize=(10, 5))
# we are going to draw 2 plots in the same figure
# first plot, data and a few lines
ax1 = plt.subplot(121)
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults', ax=ax1)
# let's explore the cost function for a few values of b between -100 and +150
bbs = np.array([-100, -50, 0, 50, 100, 150])
mses = [] # we will append the values of the cost here, for each line
for b in bbs:
y_pred = line(X, w=2, b=b)
mse = mean_squared_error(y_true, y_pred)
mses.append(mse)
plt.plot(X, y_pred)
# second plot: Cost function
ax2 = plt.subplot(122)
plt.plot(bbs, mses, 'o-')
plt.title('Cost as a function of b')
plt.xlabel('b')
###Output
_____no_output_____
###Markdown
Linear Regression with Keras
###Code
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam, SGD
model = Sequential()
model.add(Dense(1, input_shape=(1,)))
model.summary()
model.compile(Adam(lr=0.8), 'mean_squared_error')
model.fit(X, y_true, epochs=40)
y_pred = model.predict(X)
df.plot(kind='scatter',
x='Height',
y='Weight',
title='Weight and Height in adults')
plt.plot(X, y_pred, color='red')
W, B = model.get_weights()
W
B
###Output
Epoch 1/40
10000/10000 [==============================] - 0s 33us/step - loss: 744.2330
Epoch 2/40
10000/10000 [==============================] - 0s 25us/step - loss: 559.9940
Epoch 3/40
10000/10000 [==============================] - 0s 24us/step - loss: 488.2442
Epoch 4/40
10000/10000 [==============================] - 0s 23us/step - loss: 451.8103
Epoch 5/40
10000/10000 [==============================] - 0s 22us/step - loss: 421.0326
Epoch 6/40
10000/10000 [==============================] - 0s 24us/step - loss: 362.7149
Epoch 7/40
10000/10000 [==============================] - 0s 23us/step - loss: 326.6601
Epoch 8/40
10000/10000 [==============================] - 0s 22us/step - loss: 299.1790
Epoch 9/40
10000/10000 [==============================] - 0s 23us/step - loss: 280.8071
Epoch 10/40
10000/10000 [==============================] - 0s 22us/step - loss: 258.3090
Epoch 11/40
10000/10000 [==============================] - 0s 23us/step - loss: 248.8697
Epoch 12/40
10000/10000 [==============================] - 0s 23us/step - loss: 225.1109
Epoch 13/40
10000/10000 [==============================] - 0s 23us/step - loss: 208.2274
Epoch 14/40
10000/10000 [==============================] - 0s 22us/step - loss: 211.9454
Epoch 15/40
10000/10000 [==============================] - 0s 22us/step - loss: 197.6752
Epoch 16/40
10000/10000 [==============================] - 0s 22us/step - loss: 188.5390
Epoch 17/40
10000/10000 [==============================] - 0s 22us/step - loss: 197.1329
Epoch 18/40
10000/10000 [==============================] - 0s 22us/step - loss: 175.0691
Epoch 19/40
10000/10000 [==============================] - 0s 22us/step - loss: 183.5077
Epoch 20/40
10000/10000 [==============================] - 0s 22us/step - loss: 184.5312
Epoch 21/40
10000/10000 [==============================] - 0s 22us/step - loss: 195.0342
Epoch 22/40
10000/10000 [==============================] - 0s 22us/step - loss: 175.4520
Epoch 23/40
10000/10000 [==============================] - 0s 23us/step - loss: 177.7562
Epoch 24/40
10000/10000 [==============================] - 0s 22us/step - loss: 171.5894
Epoch 25/40
10000/10000 [==============================] - 0s 22us/step - loss: 207.4876
Epoch 26/40
10000/10000 [==============================] - 0s 23us/step - loss: 183.3164
Epoch 27/40
10000/10000 [==============================] - 0s 22us/step - loss: 177.4042
Epoch 28/40
10000/10000 [==============================] - 0s 23us/step - loss: 164.8412
Epoch 29/40
10000/10000 [==============================] - 0s 22us/step - loss: 176.6916
Epoch 30/40
10000/10000 [==============================] - 0s 22us/step - loss: 185.1806
Epoch 31/40
10000/10000 [==============================] - 0s 22us/step - loss: 175.8753
Epoch 32/40
10000/10000 [==============================] - 0s 24us/step - loss: 165.2096
Epoch 33/40
10000/10000 [==============================] - 0s 23us/step - loss: 179.7428
Epoch 34/40
10000/10000 [==============================] - 0s 23us/step - loss: 178.1815
Epoch 35/40
10000/10000 [==============================] - 0s 22us/step - loss: 180.3435
Epoch 36/40
10000/10000 [==============================] - 0s 23us/step - loss: 192.8079
Epoch 37/40
10000/10000 [==============================] - 0s 23us/step - loss: 174.1851
Epoch 38/40
10000/10000 [==============================] - 0s 22us/step - loss: 168.8030
Epoch 39/40
10000/10000 [==============================] - 0s 23us/step - loss: 169.8194
Epoch 40/40
10000/10000 [==============================] - 0s 23us/step - loss: 176.9974
###Markdown
Evaluating Model Performance
###Code
from sklearn.metrics import r2_score
print("The R2 score is {:0.3f}".format(r2_score(y_true, y_pred)))
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y_true,
test_size=0.2)
len(X_train)
len(X_test)
W[0, 0] = 0.0
B[0] = 0.0
model.set_weights((W, B))
model.fit(X_train, y_train, epochs=50, verbose=0)
y_train_pred = model.predict(X_train).ravel()
y_test_pred = model.predict(X_test).ravel()
from sklearn.metrics import mean_squared_error as mse
print("The Mean Squared Error on the Train set is:\t{:0.1f}".format(mse(y_train, y_train_pred)))
print("The Mean Squared Error on the Test set is:\t{:0.1f}".format(mse(y_test, y_test_pred)))
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
###Output
The R2 score on the Train set is: 0.851
The R2 score on the Test set is: 0.856
###Markdown
Classification
###Code
df = pd.read_csv('./data/user_visit_duration.csv')
!git clone https://github.com/Dataweekends/zero_to_deep_learning_video.git
!cp -r zero_to_deep_learning_video/data ./
from google.colab import drive
drive.mount('/content/drive')
df.plot(kind='scatter', x='Time (min)', y='Buy')
model = Sequential()
model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
model.compile(SGD(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
model.summary()
X = df[['Time (min)']].values
y = df['Buy'].values
model.fit(X, y, epochs=25)
ax = df.plot(kind='scatter', x='Time (min)', y='Buy',
title='Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, model.predict(temp), color='orange')
plt.legend(['model', 'data'])
temp_class = model.predict(temp) > 0.5
ax = df.plot(kind='scatter', x='Time (min)', y='Buy',
title='Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, temp_class, color='orange')
plt.legend(['model', 'data'])
y_pred = model.predict(X)
y_class_pred = y_pred > 0.5
from sklearn.metrics import accuracy_score
print("The accuracy score is {:0.3f}".format(accuracy_score(y, y_class_pred)))
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Train/Test split
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
len(X_train)
len(X_test)
W[0,0] = 0.0
B[0] = 0.0
model.set_weights((W, B))
# Train first, then compute predictions on the train and test sets.
model.fit(X_train, y_train, epochs=50, verbose=0)
y_train_pred = model.predict(X_train).ravel()
y_test_pred = model.predict(X_test).ravel()
from sklearn.metrics import mean_squared_error as mse
print("The Mean Squared Error on the Train set is:\t{:0.1f}".format(mse(y_train, y_train_pred)))
print("The Mean Squared Error on the Test set is:\t{:0.1f}".format(mse(y_test, y_test_pred)))
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
df = pd.read_csv('./data/user_visit_duration.csv')
df.head()
df.plot(kind='scatter', x='Time (min)', y='Buy')
model = Sequential()
model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
model.compile(SGD(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
model.summary()
X = df[['Time (min)']].values
y = df['Buy'].values
model.fit(X, y, epochs=25)
ax = df.plot(kind='scatter', x='Time (min)', y='Buy',
title='Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, model.predict(temp), color='orange')
plt.legend(['model', 'data'])
temp_class = model.predict(temp) > 0.5
ax = df.plot(kind='scatter', x='Time (min)', y='Buy',
title='Purchase behavior VS time spent on site')
temp = np.linspace(0, 4)
ax.plot(temp, temp_class, color='orange')
plt.legend(['model', 'data'])
y_pred = model.predict(X)
y_class_pred = y_pred > 0.5
from sklearn.metrics import accuracy_score
print("The accuracy score is {:0.3f}".format(accuracy_score(y, y_class_pred)))
###Output
The accuracy score is 0.790
###Markdown
Train/Test split
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
params = model.get_weights()
params = [np.zeros(w.shape) for w in params]
model.set_weights(params)
print("The accuracy score is {:0.3f}".format(accuracy_score(y, model.predict(X) > 0.5)))
model.fit(X_train, y_train, epochs=25, verbose=0)
print("The train accuracy score is {:0.3f}".format(accuracy_score(y_train, model.predict(X_train) > 0.5)))
print("The test accuracy score is {:0.3f}".format(accuracy_score(y_test, model.predict(X_test) > 0.5)))
###Output
The train accuracy score is 0.863
The test accuracy score is 0.750
###Markdown
Cross Validation
###Code
from keras.wrappers.scikit_learn import KerasClassifier
def build_logistic_regression_model():
model = Sequential()
model.add(Dense(1, input_shape=(1,), activation='sigmoid'))
model.compile(SGD(lr=0.5),
'binary_crossentropy',
metrics=['accuracy'])
return model
model = KerasClassifier(build_fn=build_logistic_regression_model,
epochs=25,
verbose=0)
from sklearn.model_selection import cross_val_score, KFold
cv = KFold(3, shuffle=True)
scores = cross_val_score(model, X, y, cv=cv)
scores
print("The cross validation accuracy is {:0.4f} ± {:0.4f}".format(scores.mean(), scores.std()))
###Output
_____no_output_____
###Markdown
Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix
confusion_matrix(y, y_class_pred)
def pretty_confusion_matrix(y_true, y_pred, labels=["False", "True"]):
cm = confusion_matrix(y_true, y_pred)
pred_labels = ['Predicted '+ l for l in labels]
df = pd.DataFrame(cm, index=labels, columns=pred_labels)
return df
pretty_confusion_matrix(y, y_class_pred, ['Not Buy', 'Buy'])
from sklearn.metrics import precision_score, recall_score, f1_score
print("Precision:\t{:0.3f}".format(precision_score(y, y_class_pred)))
print("Recall: \t{:0.3f}".format(recall_score(y, y_class_pred)))
print("F1 Score:\t{:0.3f}".format(f1_score(y, y_class_pred)))
from sklearn.metrics import classification_report
print(classification_report(y, y_class_pred))
###Output
_____no_output_____
###Markdown
Feature Preprocessing
###Code
df = pd.read_csv('./data/weight-height.csv')
df.head()
df['Gender'].unique()
pd.get_dummies(df['Gender'], prefix='Gender').head()
df['Height (feet)'] = df['Height']/12.0
df['Weight (100 lbs)'] = df['Weight']/100.0
df.describe().round(2)
from sklearn.preprocessing import MinMaxScaler
mms = MinMaxScaler()
df['Weight_mms'] = mms.fit_transform(df[['Weight']])
df['Height_mms'] = mms.fit_transform(df[['Height']])
df.describe().round(2)
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
df['Weight_ss'] = ss.fit_transform(df[['Weight']])
df['Height_ss'] = ss.fit_transform(df[['Height']])
df.describe().round(2)
plt.figure(figsize=(15, 5))
for i, feature in enumerate(['Height', 'Height (feet)', 'Height_mms', 'Height_ss']):
plt.subplot(1, 4, i+1)
df[feature].plot(kind='hist', title=feature)
plt.xlabel(feature)
###Output
_____no_output_____
###Markdown
 **1. Define Machine Learning?**

Machine learning is the science of programming computers so they can learn from data.

**2. Can you name four types of problems where it shines?**

• Problems for which existing solutions require a lot of hand-tuning or long lists of rules: one Machine Learning algorithm can often simplify code and perform better.
• Complex problems for which there is no good solution at all using a traditional approach: the best Machine Learning techniques can find a solution.
• Fluctuating environments: a Machine Learning system can adapt to new data.
• Getting insights about complex problems and large amounts of data.

**3. What is a labeled training set?**

In supervised learning, the training data you feed to the algorithm includes the desired solutions, called labels.

**4. What are the two most common supervised tasks?**

A typical supervised learning task is classification; the spam filter is a good example of this. Another typical task is to predict a target numeric value, such as the price of a car, given a set of features (mileage, age, brand, etc.) called predictors. This sort of task is called regression.

**5. Can you name four common unsupervised tasks?**

Clustering; anomaly detection and novelty detection; visualization and dimensionality reduction; association rule learning.

**6. What type of Machine Learning algorithm would you use to allow a robot to walk in various unknown terrains?**
**7. What type of algorithm would you use to segment your customers into multiple groups?**
**8. Would you frame the problem of spam detection as a supervised learning problem or an unsupervised learning problem?**
**9. What is an online learning system?**
**10. What is out-of-core learning?**
**11. What type of learning algorithm relies on a similarity measure to make predictions?**
**12. What is the difference between a model parameter and a learning algorithm’s hyperparameter?**
**13. What do model-based learning algorithms search for? What is the most common strategy they use to succeed? How do they make predictions?**
**14. Can you name four of the main challenges in Machine Learning?**
**15. If your model performs great on the training data but generalizes poorly to new instances, what is happening? Can you name three possible solutions?**
**16. What is a test set and why would you want to use it?**
**17. What is the purpose of a validation set?**
**18. What can go wrong if you tune hyperparameters using the test set?**
**19. What is repeated cross-validation and why would you prefer it to using a single validation set?**
###Code
import pandas as pd
import numpy as np
import os
import tarfile
from six.moves import urllib
housing = pd.read_csv('https://raw.githubusercontent.com/ageron/handson-ml2/master/datasets/housing/housing.csv')
housing.head(10)
housing.describe()
housing.info()
housing['ocean_proximity'].value_counts()
import matplotlib.pyplot as plt
%matplotlib inline
housing.hist(figsize=(20,15), bins = 50)
plt.show()
#Create a Test Set
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices],data.iloc[test_indices]
train_set, test_set = split_train_test(housing, 0.2)
len(train_set)
len(test_set)
from zlib import crc32
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2 ** 32
def split_train_test_by_id(data, test_ratio, id_column):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
return data.loc[~in_test_set], data.loc[in_test_set]
housing_with_id = housing.reset_index() # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
from sklearn.model_selection import train_test_split
train_set,test_set = train_test_split(housing, test_size = 0.2, random_state = 42)
housing['income_cat'] = pd.cut(housing['median_income'], bins = [0, 1.5, 3.0, 4.5, 6, np.inf], labels= [1, 2, 3, 4, 5])
housing['income_cat'].hist();
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2, random_state = 42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
for set_ in (strat_train_set, strat_test_set):
set_.drop("income_cat", axis=1, inplace=True)
housing = strat_train_set.copy()
housing.plot(kind = 'scatter', x='longitude', y= 'latitude');
#can clearly see the high-density areas, namely the Bay Area and around Los Angeles and San Diego
housing.plot(kind = 'scatter', x= 'longitude', y = 'latitude', alpha = 0.1, figsize = (10,7));
#let's do a map with color represents the prices
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
s = housing['population']/100, label='population', figsize=(10,8),
c = 'median_house_value', cmap = plt.get_cmap('jet'), colorbar = True,)
plt.legend();
#standard correlation coefficient (called Pearson’s )
corr_matrix = housing.corr()
#close of 0,there is no linear correlation, close to 1 has a strong correlation, close to -1, has a strong negative correlation
corr_matrix['median_house_value'].sort_values(ascending= False)
#check for correlation between attributes using pandas
from pandas.plotting import scatter_matrix
attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']
scatter_matrix(housing[attributes], figsize=(13,8));
housing.head()
housing.plot(kind = 'scatter', x = 'median_income', y = 'median_house_value', alpha = 0.1);
#Experimenting with Attribute Combinations
housing['rooms_per_household'] = housing['total_rooms']/housing['households']
housing['bedrooms_per_room'] = housing['total_bedrooms']/housing['total_rooms']
housing['population_per_household'] = housing['population']/ housing['households']
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
#Prepare the Data for Machine Learning Algorithms
housing = strat_train_set.drop('median_house_value', axis = 1)
housing_labels = strat_train_set['median_house_value'].copy()
#Data Cleaning
housing.dropna(subset=['total_bedrooms']).head()
#use SimpleImputer
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy = 'median')
housing_num = housing.drop('ocean_proximity', axis= 1)
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
X = imputer.transform(housing_num)
#for pandas dataframe
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
#Handling Text and Categorical Attributes
housing_cat = housing[['ocean_proximity']]
housing_cat.head(10)
#convert these categories from text to numbers
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
housing_cat_encoded[:10]
ordinal_encoder.categories_
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot
housing_cat_1hot.toarray()
housing_cat_1hot.toarray()
cat_encoder.categories_
###Output
_____no_output_____
###Markdown
 I'm studying Machine Learning using the book **Python Data Science Handbook** by Jake VanderPlas.

What is machine learning?

Machine learning for data science can be seen as a means of building models of data. Machine learning involves building mathematical models to understand data; these models can be trained with "tunable parameters", and afterwards they are used to predict and understand aspects of the data. It's like a human brain, which learns by training on data. Machine learning can be categorized into supervised learning, where we have measured features of the data and labels associated with the data; unsupervised learning, which has no reference to any label and lets the dataset speak for itself; and semi-supervised learning, where only incomplete labels are available.
###Code
###Output
_____no_output_____
###Markdown
 1: Introduction to Machine Learning

Machine Learning

It is important for Data Scientists to understand the basics of Machine Learning in order to understand what is happening under the hood. One of the first questions students wonder is "Why is Machine Learning used synonymously with Data Science?". We shall try to answer this foremost question in this brief course on Machine Learning, abbreviated as ML.

Definition

Machine Learning can be described as an area of AI that involves learning or intelligence by computers.

Artificial Intelligence, Pattern Recognition, Statistical Learning

These are the other terms that are commonly used by Data Scientists. There are important differences that are worth knowing as we progress to understand Data Science.

Artificial Intelligence (AI)

The field of AI research was founded at a conference on the campus of Dartmouth College in the summer of 1956. Those who attended would become the leaders of AI research for decades. Many of them predicted that a machine as intelligent as a human being would exist in no more than a generation and they were given millions of dollars to make this vision come true [1]. AI is now used to describe algorithms or mathematical techniques that can imitate intelligence involving activities such as learning, inference, predictions and decision making.

Pattern Recognition

Pattern Recognition is an area of AI which involves inference of patterns in a complex data set.

Statistical Learning

Statistical Learning involves aspects of statistical analysis of the data set and results, as well as theories of statistics, along with the intelligence aspect of Pattern Recognition.

Classes of Problems in ML

The types of problems in ML can be categorized into Supervised Learning, Unsupervised Learning, Reinforcement Learning and Recommendation.

Supervised Learning

Supervised Learning refers to a class of learning where data with the resulting outputs for certain scenarios are available. The ML algorithms learn from the known data sets and their results. The ML algorithms are termed supervised as we can evaluate how good they are depending on their ability to produce output similar to what is already known. There are mainly 2 categories of Supervised Learning problems: Prediction and Classification.
* Prediction - This type of ML algorithm is involved in prediction, such as prediction of weather, stocks, etc.
* Classification - Image Classification and Character Recognition are usually the type of problems that fall into this category.

Unsupervised Learning

The category of problems that involve extracting meaningful information from the data, such as clustering, is called Unsupervised Learning. This is because no target is involved in the operations of the ML algorithms.

Recommendation

Recommendations of movies and shopping lists are examples of ML algorithms that fall into this category.

Reinforcement Learning

This class of algorithms solves decision making steps in scenarios by taking various actions while maximizing a reward. Robotics, to a large extent, is a field that uses Reinforcement Learning.

References

[1] https://en.wikipedia.org/wiki/History_of_artificial_intelligence

1. Exercise

Instructions
* Given the data set of college majors with information as to who secured a job and who didn't, what is this class of ML problem?

College Majors

Major       | Grade | Internship | Sports | Job at Graduation
Engineering | A     | No         | No     | No
Arts        | B     | Yes        | Yes    | Yes
Mathematics | B     | B          | No     | Yes

[20, 10, 5, 4], [5, 4, 39, 3]]
###Code
# Print the answer
print("")
###Output
_____no_output_____
###Markdown
 1. Solution

2: Supervised Learning

What is Supervised Learning?

In this lesson, we shall try to understand supervised learning and define it in a mathematical language. Suppose we have data with points (x, y) that are generated by a process that looks like this when we plot:

As humans, we can infer that the points follow a straight line. How do we explain the process mathematically? We can do so by solving for the straight line equation:

y = mx + c

Let us solve for this equation by assuming that one of the points, (2, 11), lies on the graph:

11 = 2m + c ...(1)

Solving for c,

c = 11 - 2m

We need another point to solve this equation since there are 2 variables. Let us now consider another point, say (7, 26):

26 = 7m + c ...(2)

With the equations (1) and (2), we can solve for m = 3 and c = 5. Hence, we can write the equation for the line as:

y = 3x + 5

We can also verify for the known points in x = {0, 1, 2, ..., 10} that the y generated is indeed correct, and predict beyond them: for example, for the point 13 on the x-axis, y = 3*13 + 5 = 44.

Once we know the equation of the line, we can say that we can predict the values for any point on the x-axis. We have now learnt the process that generated the points. Given these points, providing a mathematical technique to discover the line equation would be termed as learning, as with the known parameters, m & c, we can predict the future values of y, given any x outside of the known dataset.

Terminologies in Machine Learning

The terminologies in ML are going to be important to Data Science as these are used frequently.
* In the above example, the known data set of x and the corresponding y for each point in x is called Ground Truth or Training Data.
* The set of points of y is called the Target Vector.
* The function y = mx + c is called the Model and m, c are known as the parameters of the model.
* The process of solving the equations is called Learning or Training the Model.
* Computing the value of y for an x is called prediction.
* A set of values of x for which y needs to be predicted, that are outside of the known values used in Training, is termed test data.

2. Exercise

Instructions
* Given a line y = 5*x + 3,
* compute predictions for x = {1, 5, 10, 12} and assign it to variable y.
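A small sketch (added; not part of the original lesson) that checks the derivation above with numpy by solving the two linear equations and predicting a new point:

###Code
# Sketch: solve 11 = 2m + c and 26 = 7m + c for m and c, then predict y at x = 13.
import numpy as np

A = np.array([[2.0, 1.0],   # coefficients of m and c in equation (1)
              [7.0, 1.0]])  # coefficients of m and c in equation (2)
b = np.array([11.0, 26.0])
m, c = np.linalg.solve(A, b)
print(m, c)        # expected: 3.0 and 5.0
print(m * 13 + c)  # expected: 44.0
###Output
_____no_output_____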
###Code
# Compute y for x, define x below.
import numpy as np

x = np.array([1, 5, 10, 12])
y = 5 * x + 3
print(y)  # [ 8 28 53 63]
###Output
_____no_output_____
###Markdown
Import Statements
###Code
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
%matplotlib inline
import mpld3
mpld3.enable_notebook()
from sklearn.decomposition import PCA
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.neighbors import KNeighborsClassifier
import sklearn.metrics as mt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.metrics import confusion_matrix, f1_score, classification_report
from sklearn.model_selection import cross_val_score
df=pd.read_csv("Extracted_Features.csv")
labels=df["label"]
data=df.drop(['filename',"label"],axis=1)
data.head()
###Output
_____no_output_____
###Markdown
Dimensionality Reduction (PCA)
###Code
sc=StandardScaler()
data_scaled=sc.fit_transform(data)
data=pd.DataFrame(data_scaled)
pc = PCA(n_components=30)
pc.fit(data)
pc.explained_variance_
print(sum(pc.explained_variance_))
var= pc.explained_variance_ratio_
var1=np.cumsum(np.round(pc.explained_variance_ratio_, decimals=4)*100)
plt.figure(figsize=(8, 4))
plt.plot(range(30),var1,'ro-', linewidth=2)
plt.title('Cumulative Explained Variance')
plt.ylabel('Cumulative explained variance ratio')
plt.xlabel('Principal components')
plt.show()
plt.figure(figsize=(8,4))
plt.plot(range(30),var, 'ro-', linewidth=2)
plt.title('Scree Plot')
plt.xlabel('Principal Component')
plt.ylabel('Proportion of Variance Explained')
plt.show()
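# Sketch: pick the number of components from the cumulative-variance curve.
# The 95% target below is an assumption for illustration; the notebook keeps 20 components.
if (var1 >= 95).any():
    print('Components needed for >=95% cumulative variance:', int(np.argmax(var1 >= 95) + 1))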
pca=PCA(n_components=20)
data_new=pca.fit_transform(data)
data_pca=pd.DataFrame(data_new)
data_pca.columns=[f"PCA_{i+1}" for i in data_pca.columns]
dataset=pd.concat([data_pca,labels],axis=1)
dataset = shuffle(dataset)
dataset.shape
###Output
_____no_output_____
###Markdown
Model Preparation * Label encoding * Train and Test Split
###Code
instru_list = dataset.iloc[:, -1]
encoder = LabelEncoder()
y = encoder.fit_transform(instru_list)
X=dataset.iloc[:, :-1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=1234456)
X_train.shape
X_test.shape
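# Diagnostic sketch (not part of the original notebook): show what LabelEncoder
# mapped each class to, and check the class balance of the two splits.
print(dict(zip(encoder.classes_, encoder.transform(encoder.classes_))))
print(np.bincount(y_train), np.bincount(y_test))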
###Output
_____no_output_____
###Markdown
Support Vector Machines
###Code
svclassifier = SVC(kernel='rbf',random_state=259641)
svclassifier.fit(X_train, y_train)
predicted_labels = svclassifier.predict(X_test)
print(mt.classification_report(y_test,predicted_labels))
cm=confusion_matrix(y_test, predicted_labels)
cm
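# Sketch: visualise the SVM confusion matrix with seaborn (imported above as `sn`);
# tick labels come from the LabelEncoder fitted earlier in this notebook.
plt.figure(figsize=(5, 4))
sn.heatmap(cm, annot=True, fmt='d',
           xticklabels=encoder.classes_, yticklabels=encoder.classes_)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()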
###Output
_____no_output_____
###Markdown
Random Forest
###Code
RFClassifier = RandomForestClassifier(random_state=128649)
RFClassifier.fit(X_train, y_train)
predicted_labels = RFClassifier.predict(X_test)
print(mt.classification_report(y_test,RFClassifier.predict(X_test)))
cm1=confusion_matrix(y_test, predicted_labels)
cm1
###Output
_____no_output_____
###Markdown
Xgboost
###Code
XGBclassifier = XGBClassifier(random_state=1289564,use_label_encoder=False)
XGBclassifier.fit(X_train, y_train)
predicted_labels = XGBclassifier.predict(X_test)
print(mt.classification_report(y_test,predicted_labels))
cm2=confusion_matrix(y_test, predicted_labels)
cm2
###Output
_____no_output_____
###Markdown
K Nearest Neighbor
###Code
classifier = KNeighborsClassifier(n_neighbors = 66)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
cm3 = confusion_matrix(y_test, y_pred)
cm3
print(mt.classification_report(y_test,y_pred))
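# Sketch: the choice of n_neighbors=66 could be checked with 5-fold cross-validation
# (the candidate k values below are assumptions; the notebook itself uses k=66).
for k in [5, 15, 35, 66]:
    scores = cross_val_score(KNeighborsClassifier(n_neighbors=k), X_train, y_train, cv=5)
    print(k, round(scores.mean(), 3))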
###Output
precision recall f1-score support
0 0.75 0.84 0.79 120
1 0.88 0.43 0.58 116
2 0.66 0.91 0.76 118
accuracy 0.73 354
macro avg 0.76 0.73 0.71 354
weighted avg 0.76 0.73 0.71 354
|
module3-random-forests/random_forests_ordinal_encoding.ipynb | ###Markdown
_Lambda School Data Science — Classification 1_ This sprint, your project is about water pumps in Tanzania. Can you predict which water pumps are faulty? Random Forests, Ordinal Encoding Objectives- do feature engineering with dates- use scikit-learn for random forests- understand how tree ensembles reduce overfitting compared to a single decision tree with unlimited depth- do ordinal encoding with high-cardinality categoricals- understand how categorical encodings affect trees differently compared to linear models Summary Try Tree Ensembles when you do machine learning with labeled, tabular data- "Tree Ensembles" means Random Forest or Gradient Boosting models. - [Tree Ensembles often have the best predictive accuracy](https://arxiv.org/abs/1708.05070) with labeled, tabular data.- Why? Because trees can fit non-linear, non-[monotonic](https://en.wikipedia.org/wiki/Monotonic_function) relationships, and [interactions](https://christophm.github.io/interpretable-ml-book/interaction.html) between features.- A single decision tree, grown to unlimited depth, will [overfit](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/). We solve this problem by ensembling trees, with bagging (Random Forest) or boosting (Gradient Boosting).- Random Forest's advantage: may be less sensitive to hyperparameters. Gradient Boosting's advantage: may get better predictive accuracy. One-hot encoding isn’t the only way, and may not be the best way, of categorical encoding for tree ensembles.- For example, tree ensembles can work with arbitrary "ordinal" encoding! (Randomly assigning an integer to each category.) Compared to one-hot encoding, the dimensionality will be lower, and the predictive accuracy may be just as good or even better. Libraries category_encoders- Local, Anaconda: `conda install -c conda-forge category_encoders`- Google Colab: `pip install category_encoders` graphviz- Local, Anaconda: `conda install python-graphviz`- Google Colab: `!pip install graphviz` `!apt-get install graphviz` Solution example- Do feature engineering with dates- Clean data with outliers- Impute missing values- Use scikit-learn for decision trees- Get and interpret feature importances of a tree-based model
###Code
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
pd.set_option('display.float_format', '{:.2f}'.format)
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('https://drive.google.com/uc?export=download&id=14ULvX0uOgftTB2s97uS8lIx1nHGQIB0P'),
pd.read_csv('https://drive.google.com/uc?export=download&id=1r441wLr7gKGHGLyPpKauvCuUOU556S2f'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('https://drive.google.com/uc?export=download&id=1wvsYl9hbRbZuIuoaLWCsW_kbcxCdocHz')
sample_submission = pd.read_csv('https://drive.google.com/uc?export=download&id=1kfJewnmhowpUo381oSn3XqsQ6Eto23XV')
# Split train into train & val
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['status_group'], random_state=42)
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
X = X.drop(columns=['recorded_by', 'id'])
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
# For categoricals with missing values, fill with the category 'MISSING'
categoricals = X.select_dtypes(exclude='number').columns
for col in categoricals:
X[col] = X[col].fillna('MISSING')
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all categorical features with cardinality <= 50
categorical_features = cardinality[cardinality <= 50].index.tolist()
# Combine the lists
features = numeric_features + categorical_features
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Make pipeline!
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(strategy='median'),
DecisionTreeClassifier(max_depth=20, random_state=42)
)
# Fit on train, score on val, predict on test
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_test)
# Write submission csv file
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('submission-03.csv', index=False)
# Get feature importances
encoder = pipeline.named_steps['onehotencoder']
tree = pipeline.named_steps['decisiontreeclassifier']
feature_names = encoder.transform(X_val).columns
importances = pd.Series(tree.feature_importances_, feature_names)
# Plot feature importances
n = 20
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
import graphviz
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,
out_file=None,
max_depth=2,
feature_names=feature_names,
class_names=tree.classes_,
impurity=False,
filled=True,
proportion=True,
rotate=True,
rounded=True)
graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
Use scikit-learn for random forests [Scikit-Learn User Guide: Random Forests](https://scikit-learn.org/stable/modules/ensemble.html#random-forests) Do ordinal encoding with high-cardinality categoricals http://contrib.scikit-learn.org/categorical-encoding/ordinal.html Understand how tree ensembles reduce overfitting compared to a single decision tree with unlimited depth Interlude: [predicting golf putts](https://statmodeling.stat.columbia.edu/2008/12/04/the_golf_puttin/) (1 feature, non-linear, regression)
###Code
putts = pd.DataFrame(
columns=['distance', 'tries', 'successes'],
data = [[2, 1443, 1346],
[3, 694, 577],
[4, 455, 337],
[5, 353, 208],
[6, 272, 149],
[7, 256, 136],
[8, 240, 111],
[9, 217, 69],
[10, 200, 67],
[11, 237, 75],
[12, 202, 52],
[13, 192, 46],
[14, 174, 54],
[15, 167, 28],
[16, 201, 27],
[17, 195, 31],
[18, 191, 33],
[19, 147, 20],
[20, 152, 24]]
)
putts['rate of success'] = putts['successes'] / putts['tries']
putts_X = putts[['distance']]
putts_y = putts['rate of success']
from ipywidgets import interact
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
def putt_trees(max_depth=1, n_estimators=1):
models = [DecisionTreeRegressor(max_depth=max_depth),
RandomForestRegressor(max_depth=max_depth, n_estimators=n_estimators)]
for model in models:
name = model.__class__.__name__
model.fit(putts_X, putts_y)
ax = putts.plot('distance', 'rate of success', kind='scatter', title=name)
ax.step(putts_X, model.predict(putts_X), where='mid')
plt.show()
interact(putt_trees, max_depth=(1,6,1), n_estimators=(10,40,10));
###Output
_____no_output_____
###Markdown
What's "random" about random forests?1. Each tree trains on a random bootstrap sample of the data. (In scikit-learn, for `RandomForestRegressor` and `RandomForestClassifier`, the `bootstrap` parameter's default is `True`.) This type of ensembling is called Bagging.2. Each split considers a random subset of the features. (In scikit-learn, when the `max_features` parameter is not `None`.) For extra randomness, you can try ["extremely randomized trees"](https://scikit-learn.org/stable/modules/ensemble.htmlextremely-randomized-trees)!>In extremely randomized trees (see [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html) and [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html) classes), randomness goes one step further in the way splits are computed. As in random forests, a random subset of candidate features is used, but instead of looking for the most discriminative thresholds, thresholds are drawn at random for each candidate feature and the best of these randomly-generated thresholds is picked as the splitting rule. This usually allows to reduce the variance of the model a bit more, at the expense of a slightly greater increase in bias Bagging demo, with golf putts datahttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html
###Code
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
def diy_bagging(max_depth=1, n_estimators=1):
y_preds = []
for i in range(n_estimators):
title = f'Tree {i+1}'
bootstrap_sample = putts.sample(n=len(putts), replace=True).sort_values(by='distance')
bootstrap_X = bootstrap_sample[['distance']]
bootstrap_y = bootstrap_sample['rate of success']
tree = DecisionTreeRegressor(max_depth=max_depth)
tree.fit(bootstrap_X, bootstrap_y)
y_pred = tree.predict(bootstrap_X)
y_preds.append(y_pred)
ax = bootstrap_sample.plot('distance', 'rate of success', kind='scatter', title=title)
ax.step(bootstrap_X, y_pred, where='mid')
plt.show()
ensembled = np.vstack(y_preds).mean(axis=0)
title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
ax = putts.plot('distance', 'rate of success', kind='scatter', title=title)
ax.step(putts_X, ensembled, where='mid')
plt.show()
interact(diy_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
###Output
_____no_output_____
###Markdown
Go back to Tanzania Waterpumps ... viz2D helper function
###Code
def viz2D(fitted_model, X, feature1, feature2, num=100, title=''):
"""
Visualize model predictions as a 2D heatmap
For regression or binary classification models, fitted on 2 features
Parameters
----------
fitted_model : scikit-learn model, already fitted
X : pandas dataframe, which was used to fit model
feature1 : string, name of feature 1
feature2 : string, name of feature 2
target : string, name of target
num : int, number of grid points for each feature
Returns
-------
predictions: numpy array, predictions/predicted probabilities at each grid point
References
----------
https://scikit-learn.org/stable/auto_examples/classification/plot_classification_probability.html
https://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html
"""
x1 = np.linspace(X[feature1].min(), X[feature1].max(), num)
x2 = np.linspace(X[feature2].min(), X[feature2].max(), num)
X1, X2 = np.meshgrid(x1, x2)
X = np.c_[X1.flatten(), X2.flatten()]
if hasattr(fitted_model, 'predict_proba'):
predicted = fitted_model.predict_proba(X)[:,0]
else:
predicted = fitted_model.predict(X)
plt.imshow(predicted.reshape(num, num), cmap='viridis')
plt.title(title)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.show()
return predicted
###Output
_____no_output_____
###Markdown
Compare Decision Tree, Random Forest, Logistic Regression
###Code
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'longitude'
feature2 = 'latitude'
from sklearn.linear_model import LogisticRegression
def get_X_y(df, feature1, feature2, target):
features = [feature1, feature2]
X = df[features]
y = df[target]
X = X.fillna(X.median())
X = ce.OrdinalEncoder().fit_transform(X)
return X, y
def compare_models(max_depth=1, n_estimators=1):
models = [DecisionTreeClassifier(max_depth=max_depth),
RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators),
LogisticRegression(solver='lbfgs', multi_class='auto')]
for model in models:
name = model.__class__.__name__
model.fit(X, y)
viz2D(model, X, feature1, feature2, title=name)
X, y = get_X_y(train, feature1, feature2, target='status_group')
interact(compare_models, max_depth=(1,6,1), n_estimators=(10,40,10));
###Output
_____no_output_____
###Markdown
Bagging
###Code
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'longitude'
feature2 = 'latitude'
def waterpumps_bagging(max_depth=1, n_estimators=1):
predicteds = []
for i in range(n_estimators):
title = f'Tree {i+1}'
bootstrap_sample = train.sample(n=len(train), replace=True)
X, y = get_X_y(bootstrap_sample, feature1, feature2, target='status_group')
tree = DecisionTreeClassifier(max_depth=max_depth)
tree.fit(X, y)
predicted = viz2D(tree, X, feature1, feature2, title=title)
predicteds.append(predicted)
ensembled = np.vstack(predicteds).mean(axis=0)
title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
plt.imshow(ensembled.reshape(100, 100), cmap='viridis')
plt.title(title)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.show()
interact(waterpumps_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
###Output
_____no_output_____
###Markdown
Understand how categorical encodings affect trees differently compared to linear models Categorical exploration, 1 feature at a time. Change `feature`, then re-run these cells!
###Code
feature = 'quantity'
X_train[feature].value_counts()
sns.barplot(x=train[feature],
y=train['status_group']=='functional');
X_train[feature].head()
###Output
_____no_output_____
###Markdown
[One Hot Encoding](http://contrib.scikit-learn.org/categorical-encoding/onehot.html)> Onehot (or dummy) coding for categorical features, produces one feature per category, each binary.Warning: May run slow, or run out of memory, with high cardinality categoricals!
###Code
encoder = ce.OneHotEncoder(use_cat_names=True)
encoded = encoder.fit_transform(X_train[[feature]])
print(f'{len(encoded.columns)} columns')
encoded.head()
###Output
_____no_output_____
###Markdown
[Ordinal Encoding](http://contrib.scikit-learn.org/categorical-encoding/ordinal.html)> Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed in; in this case, we use the knowledge that there is some true order to the classes themselves. Otherwise, the classes are assumed to have no true order and integers are selected at random.
###Code
encoder = ce.OrdinalEncoder()
encoded = encoder.fit_transform(X_train[[feature]])
print(f'1 column, {encoded[feature].nunique()} unique values')
encoded.head()
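# Sketch: how the two encodings of this single feature affect a linear model vs. a tree.
# The max_iter and max_depth values below are illustrative assumptions, not tuned settings.
from sklearn.linear_model import LogisticRegression

for enc in [ce.OneHotEncoder(use_cat_names=True), ce.OrdinalEncoder()]:
    X_tr = enc.fit_transform(X_train[[feature]])
    X_va = enc.transform(X_val[[feature]])
    lr = LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000).fit(X_tr, y_train)
    dt = DecisionTreeClassifier(max_depth=5, random_state=42).fit(X_tr, y_train)
    print(type(enc).__name__,
          'logistic:', round(lr.score(X_va, y_val), 3),
          'tree:', round(dt.score(X_va, y_val), 3))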
###Output
_____no_output_____
###Markdown
_Lambda School Data Science — Classification 1_ This sprint, your project is about water pumps in Tanzania. Can you predict which water pumps are faulty? Random Forests, Ordinal Encoding Objectives- do feature engineering with dates- use scikit-learn for random forests- understand how tree ensembles reduce overfitting compared to a single decision tree with unlimited depth- do ordinal encoding with high-cardinality categoricals- understand how categorical encodings affect trees differently compared to linear models Summary Try Tree Ensembles when you do machine learning with labeled, tabular data- "Tree Ensembles" means Random Forest or Gradient Boosting models. - [Tree Ensembles often have the best predictive accuracy](https://arxiv.org/abs/1708.05070) with labeled, tabular data.- Why? Because trees can fit non-linear, non-[monotonic](https://en.wikipedia.org/wiki/Monotonic_function) relationships, and [interactions](https://christophm.github.io/interpretable-ml-book/interaction.html) between features.- A single decision tree, grown to unlimited depth, will [overfit](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/). We solve this problem by ensembling trees, with bagging (Random Forest) or boosting (Gradient Boosting).- Random Forest's advantage: may be less sensitive to hyperparameters. Gradient Boosting's advantage: may get better predictive accuracy. One-hot encoding isn’t the only way, and may not be the best way, of categorical encoding for tree ensembles.- For example, tree ensembles can work with arbitrary "ordinal" encoding! (Randomly assigning an integer to each category.) Compared to one-hot encoding, the dimensionality will be lower, and the predictive accuracy may be just as good or even better. Libraries category_encoders- Local, Anaconda: `conda install -c conda-forge category_encoders`- Google Colab: `pip install category_encoders` graphviz- Local, Anaconda: `conda install python-graphviz`- Google Colab: `!pip install graphviz` `!apt-get install graphviz` Solution example- Do feature engineering with dates- Clean data with outliers- Impute missing values- Use scikit-learn for decision trees- Get and interpret feature importances of a tree-based model
###Code
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
pd.set_option('display.float_format', '{:.2f}'.format)
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('https://drive.google.com/uc?export=download&id=14ULvX0uOgftTB2s97uS8lIx1nHGQIB0P'),
pd.read_csv('https://drive.google.com/uc?export=download&id=1r441wLr7gKGHGLyPpKauvCuUOU556S2f'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('https://drive.google.com/uc?export=download&id=1wvsYl9hbRbZuIuoaLWCsW_kbcxCdocHz')
sample_submission = pd.read_csv('https://drive.google.com/uc?export=download&id=1kfJewnmhowpUo381oSn3XqsQ6Eto23XV')
# Split train into train & val
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['status_group'], random_state=42)
def wrangle(X):
"""Wrangles train, validate, and test sets in the same way"""
X = X.copy()
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
# Engineer feature: how many years from construction_year to date_recorded
X['years'] = X['year_recorded'] - X['construction_year']
# Drop recorded_by (never varies) and id (always varies, random)
X = X.drop(columns=['recorded_by', 'id'])
# Drop duplicate columns
duplicate_columns = ['quantity_group']
X = X.drop(columns=duplicate_columns)
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these like null values
X['latitude'] = X['latitude'].replace(-2e-08, np.nan)
# When columns have zeros and shouldn't, they are like null values
cols_with_zeros = ['construction_year', 'longitude', 'latitude', 'gps_height', 'population']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
# For categoricals with missing values, fill with the category 'MISSING'
categoricals = X.select_dtypes(exclude='number').columns
for col in categoricals:
X[col] = X[col].fillna('MISSING')
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all categorical features with cardinality <= 50
categorical_features = cardinality[cardinality <= 50].index.tolist()
# Combine the lists
features = numeric_features + categorical_features
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Make pipeline!
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(strategy='median'),
DecisionTreeClassifier(max_depth=20, random_state=42)
)
# Fit on train, score on val, predict on test
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_test)
# Get feature importances
encoder = pipeline.named_steps['onehotencoder']
tree = pipeline.named_steps['decisiontreeclassifier']
feature_names = encoder.transform(X_val).columns
importances = pd.Series(tree.feature_importances_, feature_names)
# Plot feature importances
n = 20
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
import graphviz
from sklearn.tree import export_graphviz
dot_data = export_graphviz(tree,
out_file=None,
max_depth=2,
feature_names=feature_names,
class_names=tree.classes_,
impurity=False,
filled=True,
proportion=True,
rotate=True,
rounded=True)
graphviz.Source(dot_data)
###Output
_____no_output_____
###Markdown
Use scikit-learn for random forests [Scikit-Learn User Guide: Random Forests](https://scikit-learn.org/stable/modules/ensemble.html#random-forests)
###Code
%%time
from sklearn.ensemble import RandomForestClassifier
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
print('X_train shape before encoding', X_train.shape)
encoder = pipeline.named_steps['onehotencoder']
shape = encoder.transform(X_train).shape
print('X_train shape after encoding', shape)
###Output
X_train shape before encoding (47520, 33)
X_train shape after encoding (47520, 183)
###Markdown
Do ordinal encoding with high-cardinality categoricals http://contrib.scikit-learn.org/categorical-encoding/ordinal.html
###Code
%%time
from sklearn.ensemble import RandomForestClassifier
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
print('X_train shape before encoding', X_train.shape)
encoder = pipeline.named_steps['ordinalencoder']
shape = encoder.transform(X_train).shape
print('X_train shape after encoding', shape)
###Output
_____no_output_____
###Markdown
Understand how tree ensembles reduce overfitting compared to a single decision tree with unlimited depth Interlude: [predicting golf putts](https://statmodeling.stat.columbia.edu/2008/12/04/the_golf_puttin/)(1 feature, non-linear, regression)
###Code
putts = pd.DataFrame(
columns=['distance', 'tries', 'successes'],
data = [[2, 1443, 1346],
[3, 694, 577],
[4, 455, 337],
[5, 353, 208],
[6, 272, 149],
[7, 256, 136],
[8, 240, 111],
[9, 217, 69],
[10, 200, 67],
[11, 237, 75],
[12, 202, 52],
[13, 192, 46],
[14, 174, 54],
[15, 167, 28],
[16, 201, 27],
[17, 195, 31],
[18, 191, 33],
[19, 147, 20],
[20, 152, 24]]
)
putts['rate of success'] = putts['successes'] / putts['tries']
putts_X = putts[['distance']]
putts_y = putts['rate of success']
from ipywidgets import interact
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
def putt_trees(max_depth=1, n_estimators=1):
models = [DecisionTreeRegressor(max_depth=max_depth),
RandomForestRegressor(max_depth=max_depth, n_estimators=n_estimators)]
for model in models:
name = model.__class__.__name__
model.fit(putts_X, putts_y)
ax = putts.plot('distance', 'rate of success', kind='scatter', title=name)
ax.step(putts_X, model.predict(putts_X), where='mid')
plt.show()
interact(putt_trees, max_depth=(1,6,1), n_estimators=(10,40,10));
###Output
_____no_output_____
###Markdown
What's "random" about random forests?1. Each tree trains on a random bootstrap sample of the data. (In scikit-learn, for `RandomForestRegressor` and `RandomForestClassifier`, the `bootstrap` parameter's default is `True`.) This type of ensembling is called Bagging.2. Each split considers a random subset of the features. (In scikit-learn, when the `max_features` parameter is not `None`.) For extra randomness, you can try ["extremely randomized trees"](https://scikit-learn.org/stable/modules/ensemble.htmlextremely-randomized-trees)!>In extremely randomized trees (see [ExtraTreesClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html) and [ExtraTreesRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html) classes), randomness goes one step further in the way splits are computed. As in random forests, a random subset of candidate features is used, but instead of looking for the most discriminative thresholds, thresholds are drawn at random for each candidate feature and the best of these randomly-generated thresholds is picked as the splitting rule. This usually allows to reduce the variance of the model a bit more, at the expense of a slightly greater increase in bias Bagging demo, with golf putts datahttps://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html
###Code
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
def diy_bagging(max_depth=1, n_estimators=1):
y_preds = []
for i in range(n_estimators):
title = f'Tree {i+1}'
bootstrap_sample = putts.sample(n=len(putts), replace=True).sort_values(by='distance')
bootstrap_X = bootstrap_sample[['distance']]
bootstrap_y = bootstrap_sample['rate of success']
tree = DecisionTreeRegressor(max_depth=max_depth)
tree.fit(bootstrap_X, bootstrap_y)
y_pred = tree.predict(bootstrap_X)
y_preds.append(y_pred)
ax = bootstrap_sample.plot('distance', 'rate of success', kind='scatter', title=title)
ax.step(bootstrap_X, y_pred, where='mid')
plt.show()
ensembled = np.vstack(y_preds).mean(axis=0)
title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
ax = putts.plot('distance', 'rate of success', kind='scatter', title=title)
ax.step(putts_X, ensembled, where='mid')
plt.show()
interact(diy_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
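# Sketch: "extremely randomized trees", mentioned above, fitted on the same putts data.
# The hyperparameters here are assumptions chosen only for illustration.
from sklearn.ensemble import ExtraTreesRegressor

extra = ExtraTreesRegressor(n_estimators=50, max_depth=3, random_state=42)
extra.fit(putts_X, putts_y)
ax = putts.plot('distance', 'rate of success', kind='scatter', title='ExtraTreesRegressor')
ax.step(putts_X, extra.predict(putts_X), where='mid')
plt.show()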
###Output
_____no_output_____
###Markdown
Go back to Tanzania Waterpumps ... viz2D helper function
###Code
def viz2D(fitted_model, X, feature1, feature2, num=100, title=''):
"""
Visualize model predictions as a 2D heatmap
For regression or binary classification models, fitted on 2 features
Parameters
----------
fitted_model : scikit-learn model, already fitted
X : pandas dataframe, which was used to fit model
feature1 : string, name of feature 1
feature2 : string, name of feature 2
target : string, name of target
num : int, number of grid points for each feature
Returns
-------
predictions: numpy array, predictions/predicted probabilities at each grid point
References
----------
https://scikit-learn.org/stable/auto_examples/classification/plot_classification_probability.html
https://jakevdp.github.io/PythonDataScienceHandbook/04.04-density-and-contour-plots.html
"""
x1 = np.linspace(X[feature1].min(), X[feature1].max(), num)
x2 = np.linspace(X[feature2].min(), X[feature2].max(), num)
X1, X2 = np.meshgrid(x1, x2)
X = np.c_[X1.flatten(), X2.flatten()]
if hasattr(fitted_model, 'predict_proba'):
predicted = fitted_model.predict_proba(X)[:,0]
else:
predicted = fitted_model.predict(X)
plt.imshow(predicted.reshape(num, num), cmap='viridis')
plt.title(title)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.show()
return predicted
###Output
_____no_output_____
###Markdown
Compare Decision Tree, Random Forest, Logistic Regression
###Code
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'longitude'
feature2 = 'latitude'
from sklearn.linear_model import LogisticRegression
def get_X_y(df, feature1, feature2, target):
features = [feature1, feature2]
X = df[features]
y = df[target]
X = X.fillna(X.median())
X = ce.OrdinalEncoder().fit_transform(X)
return X, y
def compare_models(max_depth=1, n_estimators=1):
models = [DecisionTreeClassifier(max_depth=max_depth),
RandomForestClassifier(max_depth=max_depth, n_estimators=n_estimators),
LogisticRegression(solver='lbfgs', multi_class='auto')]
for model in models:
name = model.__class__.__name__
model.fit(X, y)
viz2D(model, X, feature1, feature2, title=name)
X, y = get_X_y(train, feature1, feature2, target='status_group')
interact(compare_models, max_depth=(1,6,1), n_estimators=(10,40,10));
###Output
_____no_output_____
###Markdown
Bagging
###Code
# Do-it-yourself Bagging Ensemble of Decision Trees (like a Random Forest)
# Instructions
# 1. Choose two features
# 2. Run this code cell
# 3. Interact with the widget sliders
feature1 = 'longitude'
feature2 = 'latitude'
def waterpumps_bagging(max_depth=1, n_estimators=1):
predicteds = []
for i in range(n_estimators):
title = f'Tree {i+1}'
bootstrap_sample = train.sample(n=len(train), replace=True)
X, y = get_X_y(bootstrap_sample, feature1, feature2, target='status_group')
tree = DecisionTreeClassifier(max_depth=max_depth)
tree.fit(X, y)
predicted = viz2D(tree, X, feature1, feature2, title=title)
predicteds.append(predicted)
ensembled = np.vstack(predicteds).mean(axis=0)
title = f'Ensemble of {n_estimators} trees, with max_depth={max_depth}'
plt.imshow(ensembled.reshape(100, 100), cmap='viridis')
plt.title(title)
plt.xlabel(feature1)
plt.ylabel(feature2)
plt.xticks([])
plt.yticks([])
plt.colorbar()
plt.show()
interact(waterpumps_bagging, max_depth=(1,6,1), n_estimators=(2,5,1));
###Output
_____no_output_____
###Markdown
Understand how categorical encodings affect trees differently compared to linear models Categorical exploration, 1 feature at a time. Change `feature`, then re-run these cells!
###Code
feature = 'quantity'
X_train[feature].value_counts()
sns.barplot(x=train[feature],
y=train['status_group']=='functional');
X_train[feature].head()
###Output
_____no_output_____
###Markdown
[One Hot Encoding](http://contrib.scikit-learn.org/categorical-encoding/onehot.html)> Onehot (or dummy) coding for categorical features, produces one feature per category, each binary.Warning: May run slow, or run out of memory, with high cardinality categoricals!
###Code
encoder = ce.OneHotEncoder(use_cat_names=True)
encoded = encoder.fit_transform(X_train[[feature]])
print(f'{len(encoded.columns)} columns')
encoded.head()
###Output
5 columns
###Markdown
[Ordinal Encoding](http://contrib.scikit-learn.org/categorical-encoding/ordinal.html)> Ordinal encoding uses a single column of integers to represent the classes. An optional mapping dict can be passed in; in this case, we use the knowledge that there is some true order to the classes themselves. Otherwise, the classes are assumed to have no true order and integers are selected at random.
###Code
encoder = ce.OrdinalEncoder()
encoded = encoder.fit_transform(X_train[[feature]])
print(f'1 column, {encoded[feature].nunique()} unique values')
encoded.head()
###Output
1 column, 5 unique values
|
DSX.ipynb | ###Markdown
Running QISKit Jupyter Notebooks using the IBM Data Science ExperienceThis article shows you how to set up and run [QISKit](https://www.qiskit.org/) Jupyter notebooks using [IBM Data Science Experience](https://datascience.ibm.com/) (IBM DSX), thanks to Doug McClure ([@dtmcclure](https://github.com/dtmcclure)) and Ninad Sathaye ([@sathayen](https://github.com/sathayen/)). The latest version of this notebook is available [here](https://github.com/QISKit/qiskit-tutorial), where you can find all the other QISKit tutorials. About IBM Data Science ExperienceIBM DSX is an interactive, collaborative, cloud-based environment where data scientists can use multiple tools to activate their insights. If you are just starting with QISKit and looking for a webhosted Jupyter notebook environment, IBM DSX is an excellent option. It is fairly easy to get your quantum notebook running in the Jupyter notebook environment provided by IBM DSX. Moreover, it provides a platform where you can create your notebooks, invite fellow researchers to collaborate, or simply share your work within the community. Prerequisites- You must sign up for the IBM Data Science Experience ([IBM DSX](https://datascience.ibm.com/)). Tip: It is often convenient to use an IBM ID when signing up. If you do not have one already, [create it first](https://www.ibm.com/account/us-en/signup/register.html).- You also need an account for the IBM Quantum Experience ([IBM QX](https://quantumexperience.ng.bluemix.net/qx)). You can optionally use the IBM ID here as well. This account is required so that you can connect to the API using an 'API TOKEN'. Steps1. [Setup QISKit](setup)2. [Test QISKit](test)3. [Run QISKit](run) 1. Setting up QISKit in your DSX account 1. Download this file ([DSX.ipynb](DSX.ipynb))2. Log into the [IBM DSX](https://datascience.ibm.com/)3. Create a new notebook from your downloaded DSX.ipynb4. Get your IBM QX API token - Logon to the [IBM QX](https://quantumexperience.ng.bluemix.net/qx) - Click on your username on the top right, and select *My Account* - Copy the token displayed under *Advanced*. Make sure it is enclosed in quotes ""5. Update the value *`"PUT_YOUR_API_TOKEN_HERE"`* in the code cell with the API token you just copied in previous step - **NOTE:** If you are sharing this notebook, MAKE SURE TO REMOVE the API token string before sharing! 6. Run the following code cell to install QISKit on your DSX account and connect to the IBM QX.
###Code
# This cell does some preparatory work to set QISKit up on the IBM Data Science Experience.
# --------------------------------------------------------------------------------------------------
# IMPORTANT NOTES:
# 1) Your QX API token can be found in the *Advanced* section of the *My Account* page of the QX.
# Copy-Paste it into the QX_API_TOKEN variable below. BE SURE TO ENCLOSE IN QUOTES ("")
# 2) If you are sharing any notebooks, MAKE SURE TO REMOVE the API token string before sharing!
# 3) This both creates an IBMQE_API environment variable as well as a Qconfig.py file for connecting
# to IBM QX.
# --------------------------------------------------------------------------------------------------
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
# DO NOT CHANGE THE FOLLOWING assertions
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
# Install qiskit
!pip install qiskit
# Create Qconfig.py for connecting to QX
import importlib
import urllib.request
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
qconfig_template_path = "https://raw.githubusercontent.com/QISKit/qiskit-tutorial/master/Qconfig.py.template"
# We need visibility to Qconfig.py module. Add home dir in your sys.path just to be sure.
if home_dir not in sys.path:
sys.path.append(home_dir)
# First check if the Qconfig module has already been loaded (may happen if you are executing this cell more than once)
if 'Qconfig' in sys.modules:
# The module has been imported already. Reload it to make sure we get the latest API token
try:
importlib.reload(Qconfig)
Qconfig.update_token(os.environ["IBMQE_API"])
except AttributeError:
print("Qconfig reload failed. This could be due to missing Qconfig.py file.")
print("Try restarting the Jupyter notebook kernel!")
raise
except AssertionError:
print("Have you set a valid APItoken?")
raise
except:
print("Qconfig reload or API token update failed.")
print("Try updating the token and restarting the Jupyter notebook kernel")
raise
else:
# Try importing Qconfig module. If it doesn't exist, then download it from the qiskit-tutorials repo.
try:
import Qconfig
except ImportError:
urllib.request.urlretrieve ("{}".format(qconfig_template_path), "{}".format(qconfig_filepath))
# chmod the file. For Python 3, need to prefix the permission with 0o (zero and character small oh)
os.chmod(qconfig_filepath , 0o664)
import Qconfig
except:
print("Unexpected error!")
raise
###Output
_____no_output_____
###Markdown
2. Testing your QISKit setupRun the following *code cell* to test your QISKit setup. It should print `COMPLETED`.
###Code
# Trivial program to test if the QISKit setup was successful.
from qiskit import *
qp = QuantumProgram()
qp.set_api(Qconfig.APItoken, Qconfig.config['url'])
# Create a 2-qubit Quantum Register, "qr"
qr = qp.create_quantum_register("qr", 2)
# Create a Classical Register, "cr", with 2 bits
cr = qp.create_classical_register("cr", 2)
# Create the circuit
qc = qp.create_circuit('Bell', [qr], [cr])
# add measure to see the state
qc.measure(qr, cr)
result = qp.execute('Bell', backend='ibmqx_qasm_simulator', shots=1, seed=88)
# Show the results
print(result)
###Output
_____no_output_____
###Markdown
3. Running other QISKit NotebooksUpload other tutorial notebooks into IBM DSX, copy the following *code cell* after updating the API token and paste it at the beginning of the *other* notebooks.
###Code
# Create IBMQE_API environment variable - remember to remove token if sharing any notebooks
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
assert sys.version_info[0:2] >= (3, 5) , "This code requires Python 3.5 or beyond!"
# Add path to and import QConfig.py
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
if home_dir not in sys.path:
sys.path.append(home_dir)
import Qconfig
# NOTE:
# If you ever change the value of environment variable os.environ["IBMQE_API"] AFTER executing this cell,
# you can call the following code to update its value.
# Qconfig.update_token(os.environ["IBMQE_API"])
# Or just set it as : Qconfig.APItoken = os.environ["IBMQE_API"]
###Output
_____no_output_____
###Markdown
Running QISKit Jupyter Notebooks using the IBM Data Science ExperienceThis article shows you how to set up and run [QISKit](https://www.qiskit.org/) Jupyter notebooks using [IBM Data Science Experience](https://datascience.ibm.com/) (IBM DSX), thanks to Doug McClure ([@dtmcclure](https://github.com/dtmcclure)) and Ninad Sathaye ([@sathayen](https://github.com/sathayen/)). The latest version of this notebook is available [here](https://github.com/QISKit/qiskit-tutorial), where you can find all the other QISKit tutorials. About IBM Data Science ExperienceIBM DSX is an interactive, collaborative, cloud-based environment where data scientists can use multiple tools to activate their insights. If you are just starting with QISKit and looking for a webhosted Jupyter notebook environment, IBM DSX is an excellent option. It is fairly easy to get your quantum notebook running in the Jupyter notebook environment provided by IBM DSX. Moreover, it provides a platform where you can create your notebooks, invite fellow researchers to collaborate or simply share your work within the community. Prerequisites- You need to signup for the IBM Data Science Experience ([IBM DSX](https://datascience.ibm.com/)). Tip: It is often convenient to just use an IBM ID for signing up. If you do not have one already, [create it first](https://www.ibm.com/account/us-en/signup/register.html).- You also need an account for the IBM Quantum Experience ([IBM QX](https://quantumexperience.ng.bluemix.net/qx)). You can optionally use the IBM ID here as well. This account is required so that you can connect to the API using an 'API TOKEN'. Steps1. [Setup QISKit](setup)2. [Test QISKit](test)3. [Run QISKit](run) 1. Setting up QISKit in your DSX account 1. Download this file ([DSX.ipynb](DSX.ipynb))2. Log into the [IBM DSX](https://datascience.ibm.com/)3. Create a new notebook from your downloaded DSX.ipynb4. Get your IBM QX API token - Logon to the [IBM QX](https://quantumexperience.ng.bluemix.net/qx) - Click on your username on the top right, and select *My Account* - Copy the token displayed under *Advanced*. Make sure it's enclosed in quotes ""5. Update the value *`"PUT_YOUR_API_TOKEN_HERE"`* in the code cell with the API token you just copied in previous step - **NOTE:** If you are sharing this notebook, MAKE SURE TO REMOVE the API token string before sharing! 6. Run the following code cell to install QISKit on your DSX account and connect to the IBM QX.
###Code
# This cell does some preparatory work to set QISKit up on the IBM Data Science Experience.
# --------------------------------------------------------------------------------------------------
# IMPORTANT NOTES:
# 1) Your QX API token can be found in the *Advanced* section of the *My Account* page of the QX.
# Copy-Paste it into the QX_API_TOKEN variable below. BE SURE TO ENCLOSE IN QUOTES ("")
# 2) If you are sharing any notebooks, MAKE SURE TO REMOVE the API token string before sharing!
# 3) This both creates an IBMQE_API environment variable as well as a Qconfig.py file for connecting
# to IBM QX.
# --------------------------------------------------------------------------------------------------
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
# DO NOT CHANGE THE FOLLOWING assertions
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
assert sys.version_info[0:2] >= (3, 5) , "This code requires Python 3.5 or beyond!"
# Install qiskit
!pip install qiskit
# Create Qconfig.py for connecting to QX
import importlib
import urllib.request
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
# TODO: UPDATE the Qconfig template path to point to the main qiskit-tutorials repo.
qconfig_template_path = "https://raw.githubusercontent.com/jaygambetta/qiskit-tutorial/master/Qconfig.py.template"
# We need visibility to Qconfig.py module. Add home dir in your sys.path just to be sure.
if home_dir not in sys.path:
sys.path.append(home_dir)
# First check if the Qconfig module has already been loaded (may happen if you are executing this cell more than once)
if 'Qconfig' in sys.modules:
# The module has been imported already. Reload it to make sure we get the latest API token
try:
importlib.reload(Qconfig)
Qconfig.update_token(os.environ["IBMQE_API"])
except AttributeError:
print("Qconfig reload failed. This could be due to missing Qconfig.py file.")
print("Try restarting the Jupyter notebook kernel!")
raise
except AssertionError:
print("Have you set a valid APItoken?")
raise
except:
print("Qconfig reload or API token update failed.")
print("Try updating the token and restarting the Jupyter notebook kernel")
raise
else:
# Try importing Qconfig module. If it doesn't exist, then download it from the qiskit-tutorials repo.
try:
import Qconfig
except ImportError:
urllib.request.urlretrieve ("{}".format(qconfig_template_path), "{}".format(qconfig_filepath))
# chmod the file. For Python 3, need to prefix the permission with 0o (zero and character small oh)
os.chmod(qconfig_filepath , 0o664)
import Qconfig
except:
print("Unexpected error!")
raise
###Output
_____no_output_____
###Markdown
2. Testing your QISKit setupRun the following *code cell* to test your QISKit setup. It should print `COMPLETED`.
###Code
# Trivial program to test if the QISKit setup was successful.
from qiskit import *
qp = QuantumProgram()
qp.set_api(Qconfig.APItoken, Qconfig.config['url'])
# Create a 2-qubit Quantum Register, "qr"
qr = qp.create_quantum_register("qr", 2)
# Create a Classical Register, "cr", with 2 bits
cr = qp.create_classical_register("cr", 2)
# Create the circuit
qc = qp.create_circuit('Bell', [qr], [cr])
# add measure to see the state
qc.measure(qr, cr)
result = qp.execute('Bell', backend='ibmqx_qasm_simulator', shots=1, seed=88)
# Show the results
print(result)
###Output
_____no_output_____
###Markdown
3. Running other QISKit NotebooksUpload other tutorial notebooks into IBM DSX, copy the following *code cell* after updating the API token and paste it at the beginning of the *other* notebooks.
###Code
# Create IBMQE_API environment variable - remember to remove token if sharing any notebooks
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
assert sys.version_info[0:2] >= (3, 5) , "This code requires Python 3.5 or beyond!"
# Add path to and import QConfig.py
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
if home_dir not in sys.path:
sys.path.append(home_dir)
import Qconfig
# NOTE:
# If you ever change the value of environment variable os.environ["IBMQE_API"] AFTER executing this cell,
# you can call the following code to update its value.
# Qconfig.update_token(os.environ["IBMQE_API"])
# Or just set it as : Qconfig.APItoken = os.environ["IBMQE_API"]
###Output
_____no_output_____
###Markdown
Running QISKit Jupyter Notebooks using the IBM Data Science ExperienceThis article shows you how to set up and run [QISKit](https://www.qiskit.org/) Jupyter notebooks using [IBM Data Science Experience](https://datascience.ibm.com/) (IBM DSX), thanks to Doug McClure ([@dtmcclure](https://github.com/dtmcclure)) and Ninad Sathaye ([@sathayen](https://github.com/sathayen/)). The latest version of this notebook is available [here](https://github.com/QISKit/qiskit-tutorial), where you can find all the other QISKit tutorials. About IBM Data Science ExperienceIBM DSX is an interactive, collaborative, cloud-based environment where data scientists can use multiple tools to activate their insights. If you are just starting with QISKit and looking for a webhosted Jupyter notebook environment, IBM DSX is an excellent option. It is fairly easy to get your quantum notebook running in the Jupyter notebook environment provided by IBM DSX. Moreover, it provides a platform where you can create your notebooks, invite fellow researchers to collaborate, or simply share your work within the community. Prerequisites- You must sign up for the IBM Data Science Experience ([IBM DSX](https://datascience.ibm.com/)). Tip: It is often convenient to use an IBM ID when signing up. If you do not have one already, [create it first](https://www.ibm.com/account/us-en/signup/register.html).- You also need an account for the IBM Quantum Experience ([IBM QX](https://quantumexperience.ng.bluemix.net/qx)). You can optionally use the IBM ID here as well. This account is required so that you can connect to the API using an 'API TOKEN'. Steps1. [Setup QISKit](setup)2. [Test QISKit](test)3. [Run QISKit](run) 1. Setting up QISKit in your DSX account 1. Download this file ([DSX.ipynb](DSX.ipynb))2. Log into the [IBM DSX](https://datascience.ibm.com/)3. Create a new notebook from your downloaded DSX.ipynb4. Get your IBM QX API token - Logon to the [IBM QX](https://quantumexperience.ng.bluemix.net/qx) - Click on your username on the top right, and select *My Account* - Copy the token displayed under *Advanced*. Make sure it is enclosed in quotes ""5. Update the value *`"PUT_YOUR_API_TOKEN_HERE"`* in the code cell with the API token you just copied in previous step - **NOTE:** If you are sharing this notebook, MAKE SURE TO REMOVE the API token string before sharing! 6. Run the following code cell to install QISKit on your DSX account and connect to the IBM QX.
###Code
# This cell does some preparatory work to set QISKit up on the IBM Data Science Experience.
# --------------------------------------------------------------------------------------------------
# IMPORTANT NOTES:
# 1) Your QX API token can be found in the *Advanced* section of the *My Account* page of the QX.
# Copy-Paste it into the QX_API_TOKEN variable below. BE SURE TO ENCLOSE IN QUOTES ("")
# 2) If you are sharing any notebooks, MAKE SURE TO REMOVE the API token string before sharing!
# 3) This both creates an IBMQE_API environment variable as well as a Qconfig.py file for connecting
# to IBM QX.
# --------------------------------------------------------------------------------------------------
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
# DO NOT CHANGE THE FOLLOWING assertions
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
# Install qiskit
!pip install qiskit
# Create Qconfig.py for connecting to QX
import importlib
import urllib.request
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
qconfig_template_path = "https://raw.githubusercontent.com/QISKit/qiskit-tutorial/master/Qconfig.py.template"
# We need visibility to Qconfig.py module. Add home dir in your sys.path just to be sure.
if home_dir not in sys.path:
sys.path.append(home_dir)
# First check if the Qconfig module has already been loaded (may happen if you are executing this cell more than once)
if 'Qconfig' in sys.modules:
# The module has been imported already. Reload it to make sure we get the latest API token
try:
importlib.reload(Qconfig)
Qconfig.update_token(os.environ["IBMQE_API"])
except AttributeError:
print("Qconfig reload failed. This could be due to missing Qconfig.py file.")
print("Try restarting the Jupyter notebook kernel!")
raise
except AssertionError:
print("Have you set a valid APItoken?")
raise
except:
print("Qconfig reload or API token update failed.")
print("Try updating the token and restarting the Jupyter notebook kernel")
raise
else:
# Try importing Qconfig module. If it doesn't exist, then download it from the qiskit-tutorials repo.
try:
import Qconfig
except ImportError:
urllib.request.urlretrieve ("{}".format(qconfig_template_path), "{}".format(qconfig_filepath))
# chmod the file. For Python 3, need to prefix the permission with 0o (zero and character small oh)
os.chmod(qconfig_filepath , 0o664)
import Qconfig
except:
print("Unexpected error!")
raise
###Output
_____no_output_____
###Markdown
2. Testing your QISKit setupRun the following *code cell* to test your QISKit setup. It should print `COMPLETED`.
###Code
# Trivial program to test if the QISKit setup was successful.
from qiskit import *
qp = QuantumProgram()
qp.set_api(Qconfig.APItoken, Qconfig.config['url'])
# Create a 2-qubit Quantum Register, "qr"
qr = qp.create_quantum_register("qr", 2)
# Create a Classical Register, "cr", with 2 bits
cr = qp.create_classical_register("cr", 2)
# Create the circuit
qc = qp.create_circuit('Bell', [qr], [cr])
# add measure to see the state
qc.measure(qr, cr)
result = qp.execute('Bell', backend='ibmqx_qasm_simulator', shots=1, seed=88)
# Show the results
print(result)
###Output
_____no_output_____
###Markdown
3. Running other QISKit NotebooksUpload other tutorial notebooks into IBM DSX, copy the following *code cell* after updating the API token and paste it at the beginning of the *other* notebooks.
###Code
# Create IBMQE_API environment variable - remember to remove token if sharing any notebooks
import os, sys
os.environ["IBMQE_API"] = "PUT_YOUR_API_TOKEN_HERE"
assert os.environ["IBMQE_API"] != "PUT_YOUR_API_TOKEN_HERE", "QX_API_TOKEN not updated!"
assert sys.version_info[0:2] >= (3, 5) , "This code requires Python 3.5 or beyond!"
# Add path to and import QConfig.py
home_dir = os.path.expanduser('~')
qconfig_filepath = os.path.join(home_dir, 'Qconfig.py')
if home_dir not in sys.path:
sys.path.append(home_dir)
import Qconfig
# NOTE:
# If you ever change the value of environment variable os.environ["IBMQE_API"] AFTER executing this cell,
# you can call the following code to update its value.
# Qconfig.update_token(os.environ["IBMQE_API"])
# Or just set it as : Qconfig.APItoken = os.environ["IBMQE_API"]
###Output
_____no_output_____ |
fabric_ground_up/Fabric_Groundup.ipynb | ###Markdown
Goal: to use fabric to setup entire workflow on Raspberry piinspired by https://www.youtube.com/watch?v=mzP-QYxW9Vs&list=PL2hxivbr6C_2lMqtZi4uVAOta2b5dp1yN&index=28&t=0s&ab_channel=DavyWybiral 3. Enable SSH on a headless Raspberry Pi (add file to SD card on another machine)For headless setup, SSH can be enabled by placing a file named ssh, without any extension, onto the boot partition of the SD card from another computer. When the Pi boots, it looks for the ssh file. If it is found, SSH is enabled and the file is deleted. The content of the file does not matter; it could contain text, or nothing at all.If you have loaded Raspberry Pi OS onto a blank SD card, you will have two partitions. The first one, which is the smaller one, is the boot partition. Place the file into this one.
###Code
# !pip uninstall fabric # Not used
# !pip install fabric2
# !pip install python-dotenv
from dotenv import load_dotenv
import os
load_dotenv()
pwd = os.getenv('PI_PSWD')
pi1 = os.getenv('PI_IP1')
pi_uid = os.getenv('PI_UID')
pi_gid = os.getenv('PI_GID')
###Output
_____no_output_____
###Markdown
Using Fabric
###Code
from fabric2 import Connection, Config
config = Config({'user': 'pi', 'connect_kwargs': {'password': pwd}})
cxn = Connection(pi1, config = config)
cxn.run('whoami')
def reboot():
# reboot hosts
cxn.sudo('shutdown -r now')
def shutdown():
# shutdown hosts
cxn.sudo('shutdown -h now')
def update():
# apt update & apt dist-upgrade hosts
cxn.sudo('apt-get update')
cxn.sudo('apt-get dist-upgrade -y')
def change_uid_gid():
#TODO create a new user and change the pi user's UID GID
cxn.sudo(f'usermod -u {pi_uid} pi')
cxn.sudo(f'groupmod -g {pi_gid} pi')
    # Alternatively, edit the files below by hand (useful when processes owned by the current UID are still running):
    #   sudo nano /etc/passwd
    #   sudo nano /etc/group
    # After changing pi's UID in /etc/passwd, ssh in again so sudo still works for editing /etc/group.
    # Once both files are updated, restart the Pi: get root access with `sudo su`, then run
    #   systemctl reboot -i
def linux_setup():
for i in ['apt-get install -y python3-pip tree htop nfs-common',
'rm -rf /var/lib/apt/lists/*']:
cxn.sudo(i)
def setup_nas():
nas_ip = os.getenv('NAS_IP')
cxn.sudo('mkdir -p /home/pi/pi_projects')
mount_to_pi = f'mount {nas_ip}:/volume1/pi_projects /home/pi'
cxn.sudo(mount_to_pi)
def add_mount_nas_to_restart():
    # add the NFS mount to /etc/fstab so it persists across reboots
    nas_ip = os.getenv('NAS_IP')  # needed here as well; it was only defined locally in setup_nas()
    cxn.sudo("chmod 777 /etc/fstab")
cxn.sudo(f"echo '{nas_ip}:/volume1/pi_projects /home/pi nfs rsize=8192,wsize=8192,timeo=14,intr' >> /etc/fstab ")
cxn.run('tail /etc/fstab')
def docker_setup():
cxn.sudo('apt-get install apt-transport-https ca-certificates software-properties-common -y')
cxn.sudo('curl -fsSL get.docker.com -o get-docker.sh && sh get-docker.sh')
cxn.sudo('usermod -aG docker pi')
cxn.sudo('curl https://download.docker.com/linux/raspbian/gpg')
cxn.sudo("chmod 777 /etc/apt/sources.list")
cxn.sudo("""echo "deb https://download.docker.com/linux/raspbian/ stretch stable" >> /etc/apt/sources.list""")
cxn.run('tail /etc/apt/sources.list')
cxn.sudo('pip3 install docker-compose')
def docker_start():
cxn.sudo('systemctl start docker.service')
cxn.run('docker info')
def docker_file_upload():
cxn.run('cd /home/pi/Docker')
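    # note: each cxn.run() opens a fresh shell, so this cd does not persist between calls;
    # the put() calls below therefore give the remote directory explicitly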
cxn.put('Dockerfile', '/home/pi/Docker')
cxn.put('requirements.txt', '/home/pi/Docker')
cxn.put('docker-compose.yml', '/home/pi/Docker')
#======================
# reboot()
# update()
# linux_setup()
# setup_nas()
# docker_setup()
# docker_start()
# rq_setup()
# add_mount_nas_to_restart()
docker_file_upload()
cxn.run('tree /home/pi')
###Output
_____no_output_____
###Markdown
Install a linux browser
###Code
# cxn.sudo('sudo apt-get install w3m w3m-img')
###Output
_____no_output_____ |
docs/training.ipynb | ###Markdown
Stage 2: Model training
###Code
import anndata
import itertools
import networkx as nx
import numpy as np
import pandas as pd
import scanpy as sc
import scglue
from matplotlib import rcParams
scglue.plot.set_publication_params()
rcParams["figure.figsize"] = (4, 4)
###Output
_____no_output_____
###Markdown
Read preprocessed data First, read the preprocessed data as produced by [stage 1](preprocessing.ipynb).
###Code
rna = anndata.read_h5ad("rna_preprocessed.h5ad")
atac = anndata.read_h5ad("atac_preprocessed.h5ad")
graph = nx.read_graphml("prior.graphml.gz")
###Output
_____no_output_____
###Markdown
Configure data > (Estimated time: negligible) Before model training, we need to configure the datasets using [scglue.models.configure_dataset](api/scglue.models.scglue.configure_dataset.rst). For each dataset to be integrated, we specify the probabilistic generative model to use. Here we model the raw counts of both scRNA-seq and scATAC-seq using the negative binomial distribution (`"NB"`).Optionally, we can specify whether only the highly variable features should be used (`use_highly_variable`), what data layer to use (`use_layer`), as well as what preprocessing embedding (`use_rep`) to use as first encoder transformation.* For the scRNA-seq data, we use the [previously backed up](preprocessing.ipynbPreprocess-scRNA-seq-data) raw counts in the "raw" layer, and use the PCA embedding as the first encoder transformation.* For the scATAC-seq data, the raw counts are just `atac.X`, so it's unnecessary to specify `use_layer`. We use the LSI embedding as the first encoder transformation.
###Code
scglue.models.configure_dataset(
rna, "NB", use_highly_variable=True,
use_layer="raw", use_rep="X_pca"
)
scglue.models.configure_dataset(
atac, "NB", use_highly_variable=True,
use_rep="X_lsi"
)
###Output
_____no_output_____
###Markdown
Accordingly, we also subset the prior graph to retain highly variable features only.
###Code
graph = graph.subgraph(itertools.chain(
rna.var.query("highly_variable").index,
atac.var.query("highly_variable").index
))
###Output
_____no_output_____
###Markdown
Build and train GLUE model > (Estimated time: 5-30 min, depending on computation device) Next we initialize a [GLUE model](api/scglue.models.scglue.SCGLUEModel.rst) for integrating the two omics layers.* The datasets to be integrated are specified as a `dict`, where the keys are domain names. The domain names can be set at your discretion, as long as they are kept consistent (see below).* The graph nodes are specified as a list. Here we use a sorted list to guarantee a consistent order.
###Code
glue = scglue.models.SCGLUEModel(
{"rna": rna, "atac": atac}, sorted(graph.nodes),
random_seed=0
)
###Output
[INFO] autodevice: Using GPU 6 as computation device.
###Markdown
To prepare the model for training, we ["compile"](api/scglue.models.scglue.SCGLUEModel.compile.rst) it to initialize model optimizers.
###Code
glue.compile()
###Output
_____no_output_____
###Markdown
Now we can train the model using the [fit](api/scglue.models.scglue.SCGLUEModel.fit.rst) method.* The data to be integrated are again specified as a `dict`, where the keys should match those used in model initialization.* The prior graph and attribute keys for edge weight and edge sign are specified.* We also specify a directory to store checkpoints and tensorboard logs.
###Code
glue.fit(
{"rna": rna, "atac": atac},
graph, edge_weight="weight", edge_sign="sign",
directory="glue"
)
###Output
[INFO] SCGLUEModel: Setting `graph_batch_size` = 27025
[INFO] SCGLUEModel: Setting `align_burnin` = 62
[INFO] SCGLUEModel: Setting `max_epochs` = 248
[INFO] SCGLUEModel: Setting `patience` = 31
[INFO] SCGLUEModel: Setting `reduce_lr_patience` = 16
[INFO] SCGLUETrainer: Using training directory: "glue"
[INFO] SCGLUETrainer: [Epoch 10] train={'g_nll': 0.472, 'g_kl': 0.004, 'g_elbo': 0.476, 'x_rna_nll': 0.171, 'x_rna_kl': 0.007, 'x_rna_elbo': 0.178, 'x_atac_nll': 0.041, 'x_atac_kl': 0.001, 'x_atac_elbo': 0.042, 'dsc_loss': 0.688, 'gen_loss': 0.226}, val={'g_nll': 0.48, 'g_kl': 0.004, 'g_elbo': 0.484, 'x_rna_nll': 0.169, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.175, 'x_atac_nll': 0.042, 'x_atac_kl': 0.001, 'x_atac_elbo': 0.043, 'dsc_loss': 0.699, 'gen_loss': 0.223}, 2.8s elapsed
[INFO] SCGLUETrainer: [Epoch 20] train={'g_nll': 0.432, 'g_kl': 0.005, 'g_elbo': 0.436, 'x_rna_nll': 0.166, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.172, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.691, 'gen_loss': 0.216}, val={'g_nll': 0.449, 'g_kl': 0.005, 'g_elbo': 0.454, 'x_rna_nll': 0.165, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.171, 'x_atac_nll': 0.041, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.042, 'dsc_loss': 0.692, 'gen_loss': 0.217}, 2.8s elapsed
[INFO] SCGLUETrainer: [Epoch 30] train={'g_nll': 0.412, 'g_kl': 0.004, 'g_elbo': 0.417, 'x_rna_nll': 0.164, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.17, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.213}, val={'g_nll': 0.441, 'g_kl': 0.004, 'g_elbo': 0.445, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.694, 'gen_loss': 0.213}, 2.8s elapsed
[INFO] SCGLUETrainer: [Epoch 40] train={'g_nll': 0.398, 'g_kl': 0.004, 'g_elbo': 0.402, 'x_rna_nll': 0.163, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.17, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.212}, val={'g_nll': 0.438, 'g_kl': 0.004, 'g_elbo': 0.442, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.169, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.694, 'gen_loss': 0.213}, 2.9s elapsed
[INFO] SCGLUETrainer: [Epoch 50] train={'g_nll': 0.386, 'g_kl': 0.004, 'g_elbo': 0.39, 'x_rna_nll': 0.163, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.169, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.211}, val={'g_nll': 0.438, 'g_kl': 0.004, 'g_elbo': 0.442, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.695, 'gen_loss': 0.212}, 2.7s elapsed
[INFO] SCGLUETrainer: [Epoch 60] train={'g_nll': 0.375, 'g_kl': 0.004, 'g_elbo': 0.379, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.209}, val={'g_nll': 0.44, 'g_kl': 0.004, 'g_elbo': 0.444, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.695, 'gen_loss': 0.211}, 2.9s elapsed
[INFO] SCGLUETrainer: [Epoch 70] train={'g_nll': 0.364, 'g_kl': 0.004, 'g_elbo': 0.368, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.209}, val={'g_nll': 0.444, 'g_kl': 0.004, 'g_elbo': 0.449, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.211}, 2.9s elapsed
[INFO] SCGLUETrainer: [Epoch 80] train={'g_nll': 0.353, 'g_kl': 0.004, 'g_elbo': 0.357, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.208}, val={'g_nll': 0.45, 'g_kl': 0.004, 'g_elbo': 0.454, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.212}, 2.9s elapsed
[INFO] SCGLUETrainer: [Epoch 90] train={'g_nll': 0.343, 'g_kl': 0.004, 'g_elbo': 0.347, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.208}, val={'g_nll': 0.459, 'g_kl': 0.004, 'g_elbo': 0.463, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.166, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.211}, 2.8s elapsed
Epoch 96: reducing learning rate of group 0 to 2.0000e-04.
Epoch 96: reducing learning rate of group 0 to 2.0000e-04.
[INFO] SCGLUETrainer: [Epoch 100] train={'g_nll': 0.332, 'g_kl': 0.004, 'g_elbo': 0.336, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.206}, val={'g_nll': 0.467, 'g_kl': 0.004, 'g_elbo': 0.471, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.166, 'x_atac_nll': 0.041, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.693, 'gen_loss': 0.212}, 2.8s elapsed
[INFO] SCGLUETrainer: [Epoch 110] train={'g_nll': 0.329, 'g_kl': 0.004, 'g_elbo': 0.333, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.206}, val={'g_nll': 0.47, 'g_kl': 0.004, 'g_elbo': 0.475, 'x_rna_nll': 0.162, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.168, 'x_atac_nll': 0.041, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.693, 'gen_loss': 0.214}, 2.8s elapsed
Epoch 118: reducing learning rate of group 0 to 2.0000e-05.
Epoch 118: reducing learning rate of group 0 to 2.0000e-05.
[INFO] SCGLUETrainer: [Epoch 120] train={'g_nll': 0.327, 'g_kl': 0.004, 'g_elbo': 0.331, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.166, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.205}, val={'g_nll': 0.475, 'g_kl': 0.004, 'g_elbo': 0.479, 'x_rna_nll': 0.16, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.166, 'x_atac_nll': 0.041, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.041, 'dsc_loss': 0.692, 'gen_loss': 0.212}, 2.8s elapsed
[INFO] SCGLUETrainer: [Epoch 130] train={'g_nll': 0.326, 'g_kl': 0.004, 'g_elbo': 0.331, 'x_rna_nll': 0.161, 'x_rna_kl': 0.006, 'x_rna_elbo': 0.167, 'x_atac_nll': 0.039, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.693, 'gen_loss': 0.206}, val={'g_nll': 0.475, 'g_kl': 0.004, 'g_elbo': 0.479, 'x_rna_nll': 0.159, 'x_rna_kl': 0.005, 'x_rna_elbo': 0.165, 'x_atac_nll': 0.04, 'x_atac_kl': 0.0, 'x_atac_elbo': 0.04, 'dsc_loss': 0.692, 'gen_loss': 0.211}, 2.9s elapsed
###Markdown
If you have tensorboard installed, you can monitor the training progress by running `tensorboard --logdir=glue` at the command line. After convergence, the trained model can be saved and loaded as ".dill" files.
###Code
glue.save("glue/final.dill")
# glue = scglue.models.load_model("glue/final.dill")
###Output
_____no_output_____
###Markdown
Apply model for cell and feature embedding > (Estimated time: ~2 min) With the trained model, we can use the [encode_data](api/scglue.models.scglue.SCGLUEModel.encode_data.rst) method to project the single-cell omics data to cell embeddings. The first argument to [encode_data](api/scglue.models.scglue.SCGLUEModel.encode_data.rst) specifies the domain to encode (one of the previous domain names), and the second specifies the dataset to be encoded. By convention, we store the cell embeddings in the [obsm](https://anndata.readthedocs.io/en/latest/anndata.AnnData.obsm.htmlanndata.AnnData.obsm) slot, with name `"X_glue"`.
###Code
rna.obsm["X_glue"] = glue.encode_data("rna", rna)
atac.obsm["X_glue"] = glue.encode_data("atac", atac)
###Output
_____no_output_____
###Markdown
To jointly visualize the cell embeddings from two omics layers, we construct a combined dataset containing their cell metadata and embeddings.
###Code
combined = anndata.AnnData(
obs=pd.concat([rna.obs, atac.obs], join="inner"),
obsm={"X_glue": np.concatenate([rna.obsm["X_glue"], atac.obsm["X_glue"]])}
)
###Output
_____no_output_____
###Markdown
Then we use UMAP to visualize the aligned embeddings. We can see that the two omics layers are now correctly aligned.
###Code
sc.pp.neighbors(combined, use_rep="X_glue", metric="cosine")
sc.tl.umap(combined)
sc.pl.umap(combined, color=["cell_type", "domain"], wspace=0.65)
###Output
... storing 'domain' as categorical
###Markdown
To obtain feature embeddings, we can use the [encode_graph](api/scglue.models.scglue.SCGLUEModel.encode_graph.rst) method.
###Code
feature_embeddings = glue.encode_graph(graph, edge_weight="weight", edge_sign="sign")
feature_embeddings = pd.DataFrame(feature_embeddings, index=glue.vertices)
feature_embeddings.iloc[:5, :5]
###Output
_____no_output_____ |
Notebooks/Linear regression.ipynb | ###Markdown
Linear regression We want to plot a randomly generated correlated data set. Then we want to calculate the mean and plot that as a point. Between each point and the mean we will draw a line. Drawing a line
###Code
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
def newline(p1, p2):
ax = plt.gca()
xmin, xmax = ax.get_xbound()
if(p2[0] == p1[0]):
xmin = xmax = p1[0]
ymin, ymax = ax.get_ybound()
else:
ymax = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmax-p1[0])
ymin = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmin-p1[0])
l = mlines.Line2D([xmin,xmax], [ymin,ymax])
ax.add_line(l)
return l
###Output
_____no_output_____
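###Markdown
A quick usage sketch (not part of the original notebook): `newline` draws the infinite line through two given points across the current axes bounds.
###Code
# example: the line through (0, 0) and (1, 1) on a fresh set of axes
plt.figure()
newline((0, 0), (1, 1))
plt.show()
###Output
_____no_output_____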
###Markdown
Generating random dataset We can change how this data set will look by changing:* $\text{xx}=$ x range (min, max)* $\text{yy}=$ y range (min, max)* $\text{r}=$ correlation (corr)* $N=$ number of samples
###Code
import numpy as np
from matplotlib.pyplot import scatter, plot, fill_between, text
xx = np.array([0, 10])
yy = np.array([0, 10])
corr = 0.625
N = 50
means = [xx.mean(), yy.mean()]
stds = [xx.std() / 3, yy.std() / 3]
covs = [[stds[0]**2 , stds[0]*stds[1]*corr],
[stds[0]*stds[1]*corr, stds[1]**2]]
m1 = np.random.multivariate_normal(means, covs, N).T
scatter(m1[0], m1[1]);
###Output
_____no_output_____
###Markdown
Regression
###Code
fit = np.polyfit(m1[0], m1[1], 1)
def lin_reg(x):
return x * fit[0] + fit[1]
linspace = np.linspace(np.min(m1[0])-1, np.max(m1[0])+1)
fitted = lin_reg(linspace)
###Output
_____no_output_____
###Markdown
Plotting everything
###Code
# We use stdev to calculate bandwidths
stdev = np.std(fitted)
plt.figure(figsize=(14,8))
# Random points
scatter(m1[0], m1[1], zorder=3, c='#F57C00');
# Mean
scatter(means[0], means[1], lw=8, c='#E65100', marker='.', zorder=2)
# Draw lines between the points and the mean
for i in range(0, len(m1[0])-1):
plot([5,m1[0][i]],[5,m1[1][i]], color='#FFB74D', zorder=1)
# Draw linear regression line
plot(linspace, fitted, '--', c='gray',zorder=1, alpha=0.5)
# Draw 1-sigma band
upper = fitted + stdev * 1
lower = fitted - stdev * 1
fill_between(linspace, lower, upper, facecolor='lightgray', alpha=0.5);
# Draw 2-sigma band
upper = fitted + stdev * 2
lower = fitted - stdev * 2
fill_between(linspace, lower, upper, facecolor='lightgray', alpha=0.25);
# Annotation to display sigma and correlation
text(0.5, 0.5,'$\sigma_y=%1.2f$\n$r=%1.2f$' % (round(stdev,3), round(corr,3)),
horizontalalignment='center',
verticalalignment='center');
#grid(linestyle='--');
###Output
_____no_output_____ |
labs/DA0101EN-Review-Data_Wrangling-py-v4pp.ipynb | ###Markdown
Data Analysis with Python Data Wrangling Welcome!By the end of this notebook, you will have learned the basics of Data Wrangling! Table of content Identify and handle missing values Identify missing values Deal with missing values Correct data format Data standardization Data Normalization (centering/scaling) Binning Indicator variable Estimated Time Needed: 30 min What is the purpose of Data Wrangling? Data Wrangling is the process of converting data from the initial format to a format that may be better for analysis. What is the fuel consumption (L/100k) rate for the diesel car? Import dataYou can find the "Automobile Data Set" from the following link: https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data. We will be using this data set throughout this course. Import pandas
###Code
import pandas as pd
import matplotlib.pylab as plt
###Output
_____no_output_____
###Markdown
Reading the data set from the URL and adding the related headers. URL of the dataset: this dataset was hosted on IBM Cloud Object Storage.
###Code
filename = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/auto.csv"
###Output
_____no_output_____
###Markdown
Python list headers containing name of headers
###Code
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
"drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
"num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
"peak-rpm","city-mpg","highway-mpg","price"]
###Output
_____no_output_____
###Markdown
Use the Pandas method read_csv() to load the data from the web address. Set the parameter "names" equal to the Python list "headers".
###Code
df = pd.read_csv(filename, names = headers)
###Output
_____no_output_____
###Markdown
Use the method head() to display the first five rows of the dataframe.
###Code
# To see what the data set looks like, we'll use the head() method.
df.head()
###Output
_____no_output_____
###Markdown
As we can see, several question marks appeared in the dataframe; those are missing values which may hinder our further analysis. So, how do we identify all those missing values and deal with them? How to work with missing data? Steps for working with missing data: identify missing data, deal with missing data, correct the data format. Identify and handle missing values. Identify missing values. Convert "?" to NaN. In the car dataset, missing data comes with the question mark "?". We replace "?" with NaN (Not a Number), which is Python's default missing value marker, for reasons of computational speed and convenience. Here we use the function: .replace(A, B, inplace = True) to replace A by B.
###Code
import numpy as np
# replace "?" to NaN
df.replace("?", np.nan, inplace = True)
df.head(5)
###Output
_____no_output_____
###Markdown
Evaluating for Missing Data. The missing values are converted to Python's default. We use Python's built-in functions to identify these missing values. There are two methods to detect missing data: .isnull() and .notnull(). The output is a boolean value indicating whether the value that is passed into the argument is in fact missing data.
###Code
missing_data = df.isnull()
missing_data.head(5)
###Output
_____no_output_____
###Markdown
"True" stands for missing value, while "False" stands for not missing value. Count missing values in each columnUsing a for loop in Python, we can quickly figure out the number of missing values in each column. As mentioned above, "True" represents a missing value, "False" means the value is present in the dataset. In the body of the for loop the method ".value_counts()" counts the number of "True" values.
###Code
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
###Output
_____no_output_____
###Markdown
Based on the summary above, each column has 205 rows of data, seven columns containing missing data: "normalized-losses": 41 missing data "num-of-doors": 2 missing data "bore": 4 missing data "stroke" : 4 missing data "horsepower": 2 missing data "peak-rpm": 2 missing data "price": 4 missing data Deal with missing dataHow to deal with missing data? drop data a. drop the whole row b. drop the whole column replace data a. replace it by mean b. replace it by frequency c. replace it based on other functions Whole columns should be dropped only if most entries in the column are empty. In our dataset, none of the columns are empty enough to drop entirely.We have some freedom in choosing which method to replace data; however, some methods may seem more reasonable than others. We will apply each method to many different columns:Replace by mean: "normalized-losses": 41 missing data, replace them with mean "stroke": 4 missing data, replace them with mean "bore": 4 missing data, replace them with mean "horsepower": 2 missing data, replace them with mean "peak-rpm": 2 missing data, replace them with meanReplace by frequency: "num-of-doors": 2 missing data, replace them with "four". Reason: 84% sedans is four doors. Since four doors is most frequent, it is most likely to occur Drop the whole row: "price": 4 missing data, simply delete the whole row Reason: price is what we want to predict. Any data entry without price data cannot be used for prediction; therefore any row now without price data is not useful to us Calculate the average of the column
###Code
avg_norm_loss = df["normalized-losses"].astype("float").mean(axis=0)
print("Average of normalized-losses:", avg_norm_loss)
###Output
_____no_output_____
###Markdown
Replace "NaN" by mean value in "normalized-losses" column
###Code
df["normalized-losses"].replace(np.nan, avg_norm_loss, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'bore' column
###Code
avg_bore=df['bore'].astype('float').mean(axis=0)
print("Average of bore:", avg_bore)
###Output
_____no_output_____
###Markdown
Replace NaN by mean value
###Code
df["bore"].replace(np.nan, avg_bore, inplace=True)
###Output
_____no_output_____
###Markdown
Question 1: According to the example above, replace NaN in "stroke" column by mean.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below: calculate the mean vaule for "stroke" columnavg_stroke = df["stroke"].astype("float").mean(axis = 0)print("Average of stroke:", avg_stroke) replace NaN by mean value in "stroke" columndf["stroke"].replace(np.nan, avg_stroke, inplace = True)--> Calculate the mean value for the 'horsepower' column:
###Code
avg_horsepower = df['horsepower'].astype('float').mean(axis=0)
print("Average horsepower:", avg_horsepower)
###Output
_____no_output_____
###Markdown
Replace "NaN" by mean value:
###Code
df['horsepower'].replace(np.nan, avg_horsepower, inplace=True)
###Output
_____no_output_____
###Markdown
Calculate the mean value for 'peak-rpm' column:
###Code
avg_peakrpm=df['peak-rpm'].astype('float').mean(axis=0)
print("Average peak rpm:", avg_peakrpm)
###Output
_____no_output_____
###Markdown
Replace NaN by mean value:
###Code
df['peak-rpm'].replace(np.nan, avg_peakrpm, inplace=True)
###Output
_____no_output_____
###Markdown
To see which values are present in a particular column, we can use the ".value_counts()" method:
###Code
df['num-of-doors'].value_counts()
###Output
_____no_output_____
###Markdown
We can see that four doors are the most common type. We can also use the ".idxmax()" method to calculate for us the most common type automatically:
###Code
df['num-of-doors'].value_counts().idxmax()
###Output
_____no_output_____
###Markdown
The replacement procedure is very similar to what we have seen previously
###Code
#replace the missing 'num-of-doors' values by the most frequent
df["num-of-doors"].replace(np.nan, "four", inplace=True)
###Output
_____no_output_____
###Markdown
Finally, let's drop all rows that do not have price data:
###Code
# simply drop whole row with NaN in "price" column
df.dropna(subset=["price"], axis=0, inplace=True)
# reset index, because we droped two rows
df.reset_index(drop=True, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Good! Now we obtain the dataset with no missing values. Correct data format. We are almost there! The last step in data cleaning is checking and making sure that all data is in the correct format (int, float, text or other). In Pandas, we use .dtype() to check the data type and .astype() to change the data type. Let's list the data types for each column.
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
As we can see above, some columns are not of the correct data type. Numerical variables should have type 'float' or 'int', and variables with strings such as categories should have type 'object'. For example, 'bore' and 'stroke' variables are numerical values that describe the engines, so we should expect them to be of the type 'float' or 'int'; however, they are shown as type 'object'. We have to convert data types into a proper format for each column using the "astype()" method. Convert data types to proper format
###Code
df[["bore", "stroke"]] = df[["bore", "stroke"]].astype("float")
df[["normalized-losses"]] = df[["normalized-losses"]].astype("int")
df[["price"]] = df[["price"]].astype("float")
df[["peak-rpm"]] = df[["peak-rpm"]].astype("float")
###Output
_____no_output_____
###Markdown
Let us list the columns after the conversion
###Code
df.dtypes
###Output
_____no_output_____
###Markdown
Wonderful! Now we finally obtain the cleaned dataset with no missing values and all data in its proper format. Data Standardization. Data is usually collected from different agencies in different formats. (Data standardization is also a term for a particular type of data normalization, where we subtract the mean and divide by the standard deviation.) What is standardization? Standardization is the process of transforming data into a common format which allows the researcher to make meaningful comparisons. Example: transform mpg to L/100km. In our dataset, the fuel consumption columns "city-mpg" and "highway-mpg" are represented in mpg (miles per gallon) units. Assume we are developing an application for a country that uses the L/100km standard for fuel consumption. We will need to apply a data transformation to convert mpg into L/100km. The formula for unit conversion is L/100km = 235 / mpg. We can do many mathematical operations directly in Pandas.
###Code
df.head()
# Convert mpg to L/100km by mathematical operation (235 divided by mpg)
df['city-L/100km'] = 235/df["city-mpg"]
# check your transformed data
df.head()
###Output
_____no_output_____
###Markdown
Question 2: According to the example above, transform mpg to L/100km in the column of "highway-mpg", and change the name of column to "highway-L/100km".
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below: transform mpg to L/100km by mathematical operation (235 divided by mpg)df["highway-mpg"] = 235/df["highway-mpg"] rename column name from "highway-mpg" to "highway-L/100km"df.rename(columns={'"highway-mpg"':'highway-L/100km'}, inplace=True) check your transformed data df.head()--> Data NormalizationWhy normalization?Normalization is the process of transforming values of several variables into a similar range. Typical normalizations include scaling the variable so the variable average is 0, scaling the variable so the variance is 1, or scaling variable so the variable values range from 0 to 1ExampleTo demonstrate normalization, let's say we want to scale the columns "length", "width" and "height" Target:would like to Normalize those variables so their value ranges from 0 to 1.Approach: replace original value by (original value)/(maximum value)
###Code
# replace (original value) by (original value)/(maximum value)
df['length'] = df['length']/df['length'].max()
df['width'] = df['width']/df['width'].max()
###Output
_____no_output_____
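###Markdown
As a side note (not part of the original lab), the other two normalization approaches mentioned above -- min-max scaling and z-score standardization -- could be sketched as follows; the column choice is arbitrary and the results are not stored back into the dataframe.
###Code
# min-max scaling: values range from 0 to 1
length_minmax = (df['length'] - df['length'].min()) / (df['length'].max() - df['length'].min())
# z-score standardization: mean 0, variance 1
length_zscore = (df['length'] - df['length'].mean()) / df['length'].std()
length_minmax.head(), length_zscore.head()
###Output
_____no_output_____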
###Markdown
Question 3: According to the example above, normalize the column "height".
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below:df['height'] = df['height']/df['height'].max() show the scaled columnsdf[["length","width","height"]].head()--> Here we can see, we've normalized "length", "width" and "height" in the range of [0,1]. BinningWhy binning? Binning is a process of transforming continuous numerical variables into discrete categorical 'bins', for grouped analysis.Example: In our dataset, "horsepower" is a real valued variable ranging from 48 to 288, it has 57 unique values. What if we only care about the price difference between cars with high horsepower, medium horsepower, and little horsepower (3 types)? Can we rearrange them into three ‘bins' to simplify analysis? We will use the Pandas method 'cut' to segment the 'horsepower' column into 3 bins Example of Binning Data In Pandas Convert data to correct format
###Code
df["horsepower"]=df["horsepower"].astype(int, copy=True)
###Output
_____no_output_____
###Markdown
Lets plot the histogram of horspower, to see what the distribution of horsepower looks like.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
plt.pyplot.hist(df["horsepower"])
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
We would like 3 bins of equal size bandwidth, so we use numpy's linspace(start_value, end_value, numbers_generated) function. Since we want to include the minimum value of horsepower, we set start_value = min(df["horsepower"]). Since we want to include the maximum value of horsepower, we set end_value = max(df["horsepower"]). Since we are building 3 bins of equal length, there should be 4 dividers, so numbers_generated = 4. We build a bin array from the minimum value to the maximum value, with the bandwidth calculated above. The bin edges will be the values used to determine when one bin ends and another begins.
###Code
bins = np.linspace(min(df["horsepower"]), max(df["horsepower"]), 4)
bins
###Output
_____no_output_____
###Markdown
We set group names:
###Code
group_names = ['Low', 'Medium', 'High']
###Output
_____no_output_____
###Markdown
We apply the function "cut" the determine what each value of "df['horsepower']" belongs to.
###Code
df['horsepower-binned'] = pd.cut(df['horsepower'], bins, labels=group_names, include_lowest=True )
df[['horsepower','horsepower-binned']].head(20)
###Output
_____no_output_____
###Markdown
Lets see the number of vehicles in each bin.
###Code
df["horsepower-binned"].value_counts()
###Output
_____no_output_____
###Markdown
Lets plot the distribution of each bin.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
pyplot.bar(group_names, df["horsepower-binned"].value_counts())
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
Check the dataframe above carefully; you will find that the last column provides the bins for "horsepower" with 3 categories ("Low", "Medium" and "High"). We successfully narrowed the intervals from 57 to 3! Bins visualization. Normally, a histogram is used to visualize the distribution of the bins we created above.
###Code
%matplotlib inline
import matplotlib as plt
from matplotlib import pyplot
a = (0,1,2)
# draw historgram of attribute "horsepower" with bins = 3
plt.pyplot.hist(df["horsepower"], bins = 3)
# set x/y labels and plot title
plt.pyplot.xlabel("horsepower")
plt.pyplot.ylabel("count")
plt.pyplot.title("horsepower bins")
###Output
_____no_output_____
###Markdown
The plot above shows the binning result for attribute "horsepower". Indicator variable (or dummy variable)What is an indicator variable? An indicator variable (or dummy variable) is a numerical variable used to label categories. They are called 'dummies' because the numbers themselves don't have inherent meaning. Why we use indicator variables? So we can use categorical variables for regression analysis in the later modules.Example We see the column "fuel-type" has two unique values, "gas" or "diesel". Regression doesn't understand words, only numbers. To use this attribute in regression analysis, we convert "fuel-type" into indicator variables. We will use the panda's method 'get_dummies' to assign numerical values to different categories of fuel type.
###Code
df.columns
###Output
_____no_output_____
###Markdown
get indicator variables and assign it to data frame "dummy_variable_1"
###Code
dummy_variable_1 = pd.get_dummies(df["fuel-type"])
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
change column names for clarity
###Code
dummy_variable_1.rename(columns={'gas':'fuel-type-gas', 'diesel':'fuel-type-diesel'}, inplace=True)  # get_dummies created columns named 'gas' and 'diesel'
dummy_variable_1.head()
###Output
_____no_output_____
###Markdown
We now have two indicator columns, "fuel-type-gas" and "fuel-type-diesel", where 1 marks the corresponding fuel type and 0 marks the other. We will now insert these columns back into our original dataset.
###Code
# merge data frame "df" and "dummy_variable_1"
df = pd.concat([df, dummy_variable_1], axis=1)
# drop original column "fuel-type" from "df"
df.drop("fuel-type", axis = 1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
The last two columns are now the indicator variable representation of the fuel-type variable. It's all 0s and 1s now. Question 4: As above, create indicator variable to the column of "aspiration": "std" to 0, while "turbo" to 1.
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below: get indicator variables of aspiration and assign it to data frame "dummy_variable_2"dummy_variable_2 = pd.get_dummies(df['aspiration']) change column names for claritydummy_variable_2.rename(columns={'std':'aspiration-std', 'turbo': 'aspiration-turbo'}, inplace=True) show first 5 instances of data frame "dummy_variable_1"dummy_variable_2.head()--> Question 5: Merge the new dataframe to the original dataframe then drop the column 'aspiration'
###Code
# Write your code below and press Shift+Enter to execute
###Output
_____no_output_____
###Markdown
Double-click here for the solution.<!-- The answer is below:merge the new dataframe to the original dataframdf = pd.concat([df, dummy_variable_2], axis=1) drop original column "aspiration" from "df"df.drop('aspiration', axis = 1, inplace=True)--> save the new csv
###Code
df.to_csv('clean_df.csv')
###Output
_____no_output_____ |
Guia_2/Problema_5.ipynb | ###Markdown
In this problem we simulate the occurrence of events of a Poisson process with an occurrence rate (lambda = 5/h) over the interval (t = 3 h). The code exploits the fact that a Poisson variable can be generated from another variable that follows the exponential distribution with the same parameter lambda. This other variable determines the "jumps" along the number line that delimits whatever interval we want. A double loop is used to generate the values of the Poisson variable from draws of this other random variable.
###Code
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from math import factorial
# Plot style
plt.style.use('dark_background')
def Gen_Poisson(tasa, size, time=1):
    """ Return a list of Poisson-distributed counts
    Parameters
    ----------
    tasa : float
        Rate of the Poisson process. Must be positive
    size : int
        Size of the array. Must be positive
    time : float
        Observation time. Default = 1
    Returns
    -------
    N : list
        List of Poisson variates
"""
    # Argument checks
    if size<0:
        raise ValueError('Error: the size argument must be a positive integer')
    if tasa<0:
        raise ValueError('Error: the rate must be a positive real number')
# -------
import numpy as np
from math import factorial
    N = [] # Poisson random variable
    ij = 0
    while ij<size: # Loop to generate "size" values of N
        x = np.random.exponential(scale=time/tasa, size=size)
        CS = np.cumsum(x)
        ik = 0
        while ik<size: # Loop to compute the value of N from the jumps
            S = CS[ik] # Walk through the cumulative sum of jumps
            if S>time: # Once we pass the requested interval, count the events
N.append( len(x[:ik]) )
break
ik = ik + 1
ij = ij + 1
return N
# Generate the Poisson variable with the function above
Ns = Gen_Poisson(tasa=5, size=10000, time=3)
# Histogram
fig, ax = plt.subplots(1, 1, figsize = (14,8))
ax.hist(Ns, color='green', density=True, bins=30)
ax.set_title('Histogram', fontsize=20)
ax.set_ylim(0,1)
ax.set_xlabel('k', fontsize=20)
ax.set_ylabel('Relative frequency', fontsize=20);
###Output
_____no_output_____ |
.ipynb_checkpoints/FinalProject_2021_2022-checkpoint.ipynb | ###Markdown
Consider the following one-dimensional PDE:$$-u_{xx}(x) = f(x)\quad\mathrm{ in }\ \Omega = (0, \pi)$$$$u(x) = 0, \quad\mathrm{ on }\ \partial\Omega = \{0, \pi\}$$Given the following $4^{th}$ order finite difference approximation of the second order derivative:$$u_{xx}(x_i) = \frac{-u_{i-2}+16u_{i-1}-30u_i+16u_{i+1}-u_{i+2}}{12h^2}$$Implement a function that given the domain interval, the forcing function, the number of discretization points, the boundary conditions, returns the matrix $A$ and the the right hand side $b$.
###Code
def finDif(omega,f,n,bc):
dpoints=linspace(omega[0],omega[1],n)
b=zeros(n)
for i in range(1,n-1):
b[i]=f(dpoints[i])
b[0]=bc[0]
b[n-1]=bc[1]
    h=(omega[1]-omega[0])/(n-1)  # grid spacing consistent with linspace(omega[0], omega[1], n)
A = zeros((n,n))
for i in range(2,n-2):
A[i, i-1] = A[i, i+1] = -16/(12*h**2)
A[i, i-2] = A[i, i+2] = +1/(12*h**2)
A[i,i] = +30/(12*h**2)
    # for the second and the second-to-last node a fourth-order scheme is not applicable, so a second-order one is used
A[1,0]=A[1,2]=A[n-2,n-3]=A[n-2,n-1]=-1/h**2
A[1,1]=A[n-2,n-2]=2/h**2
A[0,0] = A[-1,-1]=1
return A, b
###Output
_____no_output_____
###Markdown
Call the function using:
###Code
omega = [0,pi]
f = lambda x : sin(x)
n=100
bc = [0,0]
A, b = finDif(omega, f, n, bc)
###Output
_____no_output_____
###Markdown
Implement two functions that compute the LU and the Cholesky factorization of the system matrix $A$
###Code
def LU(A):
U=copy.deepcopy(A)
n=len(A)
L=eye(n)
for j in range(n-1):
for i in range(j+1,n):
L[i,j]=U[i,j]/U[j,j]
U[i,j:]-=L[i,j]*U[j,j:]
return L, U
def cholesky(A):
n=len(A)
L=zeros([n,n])
for j in range(n):
L[j,j]=(A[j,j]-sum(fromiter((L[j,k]**2 for k in range(j)),float)))**0.5
for i in range(j+1,n):
L[i,j]=(A[i,j]-sum(fromiter((L[i,k]*L[j,k] for k in range(j)),float)))/L[j,j]
return transpose(L)
###Output
_____no_output_____
###Markdown
Implement forward and backward substitution functions to exploit the developed factorization methods to solve the derived linear system of equations.
###Code
def L_solve(L,rhs):
n=len(L)
x=zeros(n)
for i in range(n):
x[i] = rhs[i]
for j in range(i):
x[i] -= x[j]*L[i,j]
x[i] = x[i]/L[i,i]
return x
def U_solve(U,rhs):
n=len(U)
x=zeros(n)
for i in range(n-1, -1, -1):
x[i] = rhs[i]
for j in range(n-1, i, -1):
x[i] -= x[j]*U[i,j]
x[i] = x[i]/U[i,i]
return x
###Output
_____no_output_____
###Markdown
Solve the derived linear system using the implemented functions and plot the computed solution:
###Code
def LU_solve(A,b):
L,U=LU(A)
y=U_solve(U,L_solve(L,b))
return y
xspace=linspace(0,pi,n)
plot(xspace,LU_solve(A,b))
###Output
_____no_output_____
###Markdown
Considering the new domain $\Omega = (0,1)$ and the forcing term $f(x) = x(1-x)$ with B.C. $u(x) = 0$, on $\partial \Omega = {0,1}$ produce a plot and a table where you show the decay of the error w.r.t. the number of grid points.(The analytical solution for the above problems is $u_{an} = \frac{x^4}{12} - \frac{x^3}{6} + \frac{x}{12}$)
###Code
omega = [0,1]
f = lambda x : x*(1-x)
n=100
bc = [0,0]
A, b = finDif(omega, f, n, bc)
L,U=LU(A)
xspace=linspace(0,1,n)
yspace=xspace**4/12-xspace**3/6+xspace/12
y=U_solve(U,L_solve(L,b))
plot(xspace,y)
plot(xspace,yspace)
###Output
_____no_output_____
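###Markdown
A minimal sketch (assumptions: the helpers defined above and the analytical solution $u_{an} = \frac{x^4}{12} - \frac{x^3}{6} + \frac{x}{12}$) of the requested error-decay study: solve for several grid sizes and tabulate the maximum error; a loglog plot of the printed values would show the decay rate.
###Code
# error decay with respect to the number of grid points (sketch)
for n_i in [20, 40, 80, 160, 320]:
    A_i, b_i = finDif([0, 1], lambda x: x*(1-x), n_i, [0, 0])
    x_i = linspace(0, 1, n_i)
    u_exact = x_i**4/12 - x_i**3/6 + x_i/12
    err = max(abs(LU_solve(A_i, b_i) - u_exact))
    print(n_i, err)
###Output
_____no_output_____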
###Markdown
Exploit the derived LU factorizations to compute the condition number of the system's matrix $A$ using the original problem formulation.
###Code
def direct_power_method(A,nmax,tol):
x0=random.rand(len(A))
q = x0/linalg.norm(x0,2)
it = 0
err = tol + 0.1
while it < nmax and err > tol:
x = dot(A,q)
l = dot(q.T,x)
err = linalg.norm(x-l*q,2)
q = x/linalg.norm(x,2)
it += 1
return l,q
def inverse_power_method_noshift(A,nmax,tol):
n=len(A)
L,U=LU(A)
B=A.copy()
I=eye(n)
for i in range(n):
B[:,i]=U_solve(U,L_solve(L,I[:,i]))
l,q=direct_power_method(B,nmax,tol)
return 1/l,q
#Assuming condition number relative to norm 2
def condNumb(A):
l1,q1=direct_power_method(A,500,1e-05)
l2,q2=inverse_power_method_noshift(A,500,1e-05)
return l1/l2
print(condNumb(A))
###Output
53309.90730316458
###Markdown
Implement a preconditioned Conjugate Gradient method to solve the original linear system of equations using an iterative method:
###Code
def conjugate_gradient(A, b, P, nmax, eps=1e-10):
xold = zeros_like(b)
rold=b-dot(A,xold)
zold=LU_solve(P,rold)
pold=zold
tol=eps+1
i=0
while (i<nmax and tol>eps):
temp=dot(A,pold)
alpha=dot(pold.T,rold)/dot(pold.T,temp)
x=xold+alpha*pold
r=rold-alpha*temp
z=LU_solve(P,r)
beta=dot(temp.T,z)/dot(temp.T,pold)
p=z-beta*pold
tol=linalg.norm(r,2)
xold=x
rold=r
pold=p
zold=z
return x
###Output
_____no_output_____
###Markdown
Consider the following time dependent variation of the PDE starting from the orginal problem formulation:$$u'(t)-u_{xx} = \alpha(t)f(x)$$for $t\in [0,T]$, with $\alpha(t) = \cos(t)$ and $T = 6\pi$Use the same finite difference scheme to derive the semi-discrete formulation and solve it using a forward Euler's method.Plot the time dependent solution solution at $x = \pi/2$, $x=1$, $x=\pi$ Formal description the PDEThe method will solve a problem of type:$$\begin{cases} u_{x x}+\alpha(t) f(x)=\frac{\partial u}{\partial t}(t, x) & (t,x) \in (0,T] \times (a,b)\\u(0,x)=g(x) & \forall x \in [a,b]\\u(t,a)=p(t)\\u(t,b)=q(t)\\\end{cases}$$with the necessary continuity assumptions, where $a,b,T \in \mathbb{R}$ Formal description the approximation usedLet's start by discretizing time and substiting:$$u_{x x}+\alpha(t) f(x)=\frac{\partial u}{\partial t}(t, x) \Rightarrow u\left(t_{i+1},x\right)=u\left( t_{i},x\right)+h_{\mathrm{t}} u_{x x}\left( t_{i},x\right)+h_{\mathrm{t}} \alpha\left(t_{i}\right) f(x) \\$$Then let's discretize space$$u_{xx}(t,x_{j})=\frac{-u\left(t, x_{j-2}\right)+16 u\left(t, x_{j-1}\right)-30 u\left(t, x_{j}\right)+16 u\left(t, x_{j+1}\right)-u\left(t, x_{j+2}\right)}{12 h_{x}^{2}}$$After substiting and doing some calculations we arrive at\begin{aligned}u\left(t_{i+1}, x_{j}\right) = h_{\mathrm{t}}\left(\frac{-u\left(t_{i}, x_{j-2}\right)+16 u\left(t_{i},x_{j-1}\right)-\left(30-12 h_{x}^{2} / h_{t}\right) u\left(t_{i},x_{j}\right)+16 u\left(t_{i}, x_{j+1}\right)-u\left(t_{i},x_{j+2}\right)}{12 h_{x}^{2}}+\alpha\left(t_{i}\right) f\left(x_{j}\right)\right)\end{aligned}However this discrete method does not work for $x=1$ and $x=N_{x}-1$. For this points we will use another approximation of x $$u_{xx}(t,x_{j})=\frac{u(t,x_{j-1})-2u(t,x_{j})+u(t,x_{j+1})}{h_{x}^{2}} $$ After substiting we arrive at $$ u\left(t_{i+1}, x_{j}\right) = h_{\mathrm{t}}\left(\frac{ u\left(t_{i},x_{j-1}\right)-\left(2-h_{x}^{2} / h_{t}\right) u\left(t_{i},x_{j}\right)+ u\left(t_{i}, x_{j+1}\right)}{ h_{x}^{2}}+\alpha\left(t_{i}\right) f\left(x_{j}\right)\right) $$Vectorizing on the variable $x$ similarly to the first exercise we arrive at $\exists A$ s.t.$$u(:,t_{i+1})=Au(:,t_{i})+h_{t}f(:)\alpha(t_{i})$$i.e. a dynamical system.
###Code
def PDEsol(f,alpha,nt,nx,a,b,T,g,q,p):
x=linspace(a,b,nx)
t=linspace(0,T,nt)
ht=T/nt
hx=(b-a)/nx
A=zeros((nx,nx),dtype=float)
for i in range(2,nx-2):
A[i, i-1] = A[i, i+1] = 16/12
A[i, i-2] = A[i, i+2] = -1/12
A[i,i] = (-30+12*hx**2/ht)/12
A[1,0]=A[1,2]=A[nx-2,nx-3]=A[nx-2,nx-1]=1
A[1,1]=A[nx-2,nx-2]=(-2+hx**2/ht)
A[0,0] = A[-1,-1]=1
A=ht*A/hx**2
fx=f(x)
alphat=alpha(t)
U=zeros((nt,nx),dtype=float)
U[0,:]=g(x)
U[:,0]=p(t)
U[:,nx-1]=q(t)
for i in range(1,nt):
U[i,:]=dot(A,U[i-1,:])+ht*fx*alphat[i]
return U
a=0
b=pi
T=6*pi
q=lambda x: 0
p=lambda x:0
f = lambda x : sin(x)
alpha= lambda x: cos(x)
g= lambda x: 0
nx=50
nt=100000
bc=[0,0]
sol=PDEsol(f,alpha,nt,nx,a,b,T,g,p,q)
#computation of approximate x
piapp=argmin(abs(linspace(a,b,nx)-pi))*(b-a)/nx
pihapp=argmin(abs(linspace(a,b,nx)-pi/2))*(b-a)/nx
oneapp=argmin(abs(linspace(a,b,nx)-1))*(b-a)/nx
plot(linspace(0,T,nt),sol[:,argmin(abs(linspace(a,b,nx)-pi/2))], '-b', label='x={}'.format(pihapp))
plot(linspace(0,T,nt),sol[:,argmin(abs(linspace(a,b,nx)-1))], '-r',label='x={}'.format(oneapp))
plot(linspace(0,T,nt),sol[:,argmin(abs(linspace(a,b,nx)-pi))], '-g',label='x={}'.format(piapp))
matplotlib.pyplot.legend()
###Output
_____no_output_____
###Markdown
Given the original $Au = b$ system, implement an algorithm to compute the eigenvalues and eigenvectors of the matrix $A$. Exploit the computed LU factorization
###Code
#calculates the nearest eigenvalue to mu and its corresponding eigentvector: this method is not used
def inverse_power_method_shift(A,nmax,tol,mu):
n=len(A)
L,U=LU(A)
I=eye(n)
B=A.copy()-mu*I.copy()
for i in range(n):
B[:,i]=U_solve(U,L_solve(L,I[:,i]))
l,q=direct_power_method(B,nmax,tol)
return (1/l+mu),q
###Output
_____no_output_____
###Markdown
SolutionFor a symmetric diagonalizable matrix $A$, it holds $A=\sum_{i=1}^{n}\lambda_{i}v_{i}v_{i}^{T}$ where $v_{i}$ and $\lambda_{i}$ are the i-th eigenvector and eigenvalue.
###Code
#Works only for symmetric diagonalizable matrices
def eigendecomposition(A,nmax,tol=1e-08):
n=len(A)
d=zeros(n)
V=zeros([n,n])
for i in range(n):
d[i],V[:,i]=direct_power_method(A,nmax,tol)
A=A-d[i]*outer(V[:,i],V[:,i])
return d,V
###Output
_____no_output_____
###Markdown
Determinant using LU
###Code
def LU_det(A):
L,U=LU(A)
return U.diagonal().prod()
###Output
_____no_output_____
###Markdown
Compute the inverse of the matrix A exploiting the derived LU factorization
###Code
def inverse(A):
    n=shape(A)[0]
    L,U=LU(A)  # factorize A here instead of relying on L, U from the global scope
    B=A.copy()
    I=eye(n)
    for i in range(n):
        B[:,i]=U_solve(U,L_solve(L,I[:,i]))
    return B
###Output
_____no_output_____
###Markdown
Consider the following Cauchy problem$$\begin{cases}y'= -ty^2 \quad 0\le t \le 2\\y(0) = 1\end{cases}$$Implement a Backward Euler's method in a suitable function and solve the resulting non-linear equation using a Newton's method.
###Code
#newton method implementation
def newton(f,f1,x0,epsilon=1e-10,max_iter=500):
x = x0
for n in range(0,max_iter):
if abs(f(x)) < epsilon:
return x
if f1(x) == 0:
return None
x = x - f(x)/f1(x)
return x
f=lambda t,y: -t*(y**2)
f1=lambda t,y: -t*2*y
#backward euler implementation
def backward_euler(y0,g,g1,omega,n):
tspace=linspace(omega[0],omega[1],n)
h=(omega[1]-omega[0])/n
f=lambda t,z,x: z-h*g(t,z)-x
f1=lambda t,z,x: 1-h*g1(t,z)
y=zeros(n)
y[0]=y0
for i in range(1,n):
ft=lambda z: f(tspace[i],z,y[i-1])
ft1=lambda z: f1(tspace[i],z,y[i-1])
y[i]=newton(ft,ft1,y[i-1])
return y
n=50
y=backward_euler(1,f,f1,array([0,2]),n)
plot(linspace(0,2,n),y)
plot(linspace(0,2,n),2/(linspace(0,2,n)**2+2))
###Output
_____no_output_____
###Markdown
Tests. Tests on linear algebra are done with random SPD matrices. Tests on the Newton method, the simple ODE and the PDE without the time derivative are done with the arguments requested in the project. For the PDE with the time derivative I was not able to find an explicit solution with the original initial condition, so the test uses $g(x)=0.5\sin(x)$, for which the solution is $u(t,x)=0.5\sin(x)(\cos(t)+\sin(t))$
###Code
n=50
#initialize random spd matrix
A=sklearn.datasets.make_spd_matrix(50)
b=random.rand(1,n)[0]
# the default scipy LU factorization cannot be compared directly because it may permute rows
Ltrue=splu(scipy.sparse.csc_matrix(A), permc_spec = "NATURAL", diag_pivot_thresh=0, options={"SymmetricMode":True}).L.toarray()
Utrue=splu(scipy.sparse.csc_matrix(A), permc_spec = "NATURAL", diag_pivot_thresh=0, options={"SymmetricMode":True}).U.toarray()
L,U=LU(A)
Soltrue=linalg.solve(A,b)
sollu=LU_solve(A,b)
solgrad=conjugate_gradient(A,b,eye(len(A)),len((A)))
Eigtrue=sort(linalg.eigvals(A))
Eig,V=eigendecomposition(A,100000)
Eig=sort(Eig)
print('Tests of manually implemented functions on random sdp and vector:')
print('relative L error in implemented LU factorization is', linalg.norm(L-Ltrue)/linalg.norm(Ltrue))
print('relative U error in implemented LU factorization is', linalg.norm(U-Utrue)/linalg.norm(Utrue))
print('relative error in matrix inverse computation using LU is', linalg.norm(linalg.inv(A)-inverse(A))/linalg.norm(linalg.inv(A)))
print('relative error in cholesky is', linalg.norm(cholesky(A)-scipy.linalg.cholesky(A))/linalg.norm(linalg.cholesky(A)))
print('relative error in LU_solve is', linalg.norm(sollu-Soltrue)/linalg.norm(Soltrue))
print('relative error in conjugant gradient is', linalg.norm(solgrad-Soltrue)/linalg.norm(Soltrue))
print('relative error in eigenvalue calculation is', linalg.norm(Eigtrue-Eig)/linalg.norm(Eigtrue))
print('relative error in determinant is', linalg.norm(LU_det(A)-linalg.det(A))/linalg.norm(linalg.det(A)))
f=lambda x: x**3-x**2+x-1
f1= lambda x: 3*x**2-2*x+1
newtonsol=newton(f,f1,0.9)
print('relative error of newton method is',linalg.norm(1-newtonsol)/linalg.norm(1))
omega = [0,1]
f = lambda x : x*(1-x)
n=3000
bc = [0,0]
A, b = finDif(omega, f, n, bc)
L,U=LU(A)
xspace=linspace(0,1,n)
yspace=xspace**4/12-xspace**3/6+xspace/12
y=U_solve(U,L_solve(L,b))
print('relative error of the pde solution without time derivative is', linalg.norm(y-yspace)/linalg.norm(y))
f=lambda t,y: -t*(y**2)
f1=lambda t,y: -t*2*y
y=backward_euler(1,f,f1,array([0,2]),n)
print('relative error of the ode solution is',linalg.norm(y-2/(linspace(0,2,n)**2+2))/linalg.norm(2/(linspace(0,2,n)**2+2)))
a=0
b=pi
T=6*pi
q=lambda x: 0
p=lambda x:0
f = lambda x : sin(x)
alpha= lambda x: cos(x)
g= lambda x: 0.5*sin(x)
nx=100
nt=100000
sol=PDEsol(f,alpha,nt,nx,a,b,T,g,p,q)
realfun=lambda x,t: 0.5*(cos(t)+sin(t))*sin(x)
rangex=linspace(0,pi,nx)
ranget=linspace(0,T,nt)
realsol=zeros([nt,nx])
for j in range(nx):
realsol[:,j]=0.5*(cos(ranget)+sin(ranget))*sin(rangex[j])
print('relative error of the pde solution with time derivative is', linalg.norm(sol-realsol)/linalg.norm(realsol))
print('Everything works :-)')
###Output
Tests of manually implemented functions on random sdp and vector:
relative L error in implemented LU factorization is 2.029358257832441e-15
relative U error in implemented LU factorization is 6.981192356668348e-16
relative error in matrix inverse computation using LU is 1.8124031419104778e-14
relative error in cholesky is 1.8184329029308924e-12
relative error in LU_solve is 7.057925440051885e-15
relative error in conjugant gradient is 6.53141716359704e-12
relative error in eigenvalue calculation is 7.070210020586287e-16
relative error in determinant is 1.9015317775395346e-15
relative error of newton method is 2.220446049250313e-16
|
module2-polynomial-log-linear-regression/polynomial-log-linear-regression-assignment.ipynb | ###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
##### Your Code Here #####
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv')
df.head()
df.columns
df = df.drop(columns=['Id'])
df.head()
df.select_dtypes(include='number').columns
df['ln_price'] = np.log(df['SalePrice'])
target = 'ln_price'
numeric_columns = df.select_dtypes(include='number').columns
for feature in numeric_columns.drop(target):
sns.scatterplot(x=feature, y=target, data=df, alpha=0.1)
plt.show()
target = 'SalePrice'
# baseline: use every numeric column except the two target columns
features = numeric_columns.drop([target, 'ln_price'])
y = df[target]
X = df[features].fillna(0)  # LinearRegression cannot handle NaNs
def run_linear_model(X, y):
# Split into test and train data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42)
# Fit model using train data
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions using test features
y_pred = model.predict(X_test)
# Compare predictions to test target
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
r2 = r2_score(y_test, y_pred)
print('Root Mean Squared Error', rmse)
print('R^2 Score', r2)
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, X_train.columns)
print(coefficients.to_string())
run_linear_model(X, y)
target = 'SalePrice'
features = ['OverallQual', '1stFlrSF']
y = df[target]
X = df[features]
run_linear_model(X, y)
target = 'ln_price'
features = ['OverallQual', '1stFlrSF']
y = df[target]
X = df[features]
run_linear_model(X, y)
from sklearn.preprocessing import PolynomialFeatures
import warnings
X = df[['YearBuilt', 'SalePrice']]
y = df[target]
interaction = PolynomialFeatures(degree=3, include_bias=False, interaction_only=True)
X_inter = interaction.fit_transform(X)
# Create linear regression
regr = LinearRegression()
# Fit the linear regression
model = regr.fit(X_inter, y)
run_linear_model(X, y)
###Output
Root Mean Squared Error 7.973482426540466e-11
R^2 Score 1.0
Intercept 2.9103830456733704e-10
YearBuilt -6.120213e-14
SalePrice 1.000000e+00
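###Markdown
A rough sketch (not a full solution) of combining the required pieces -- a log-transformed target, polynomial features and interaction terms -- is shown below; the chosen columns and the degree are assumptions, and `run_linear_model` is the helper defined earlier.
###Code
from sklearn.preprocessing import PolynomialFeatures
# assumed feature subset: numeric columns without missing values
base = df[['OverallQual', '1stFlrSF', 'YearBuilt']]
poly = PolynomialFeatures(degree=2, include_bias=False)  # squares + pairwise interaction terms
X_poly = pd.DataFrame(poly.fit_transform(base),
                      columns=poly.get_feature_names_out(base.columns))  # sklearn >= 1.0
run_linear_model(X_poly, df['ln_price'])  # log-transformed target computed earlier
###Output
_____no_output_____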
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
import pandas as pd
data = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv')
pd.set_option('display.max_rows',100)
data.head().T
from sklearn.model_selection import train_test_split
X=data
train, test= train_test_split(X,test_size=.5)
train = train.drop('Id', axis = 1)
test = test.drop('Id', axis = 1)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
train = train.replace({np.nan:0})
train.Street.unique()
sns.scatterplot(x='Street',y='SalePrice',data=train)
train['Street'] = train['Street'].replace({'Pave':2,'Grvl':1})
target = 'SalePrice'
numeric_columns = data.select_dtypes(include='number').columns
for feature in numeric_columns.drop(target):
sns.scatterplot(x=feature, y=target, data=data, alpha=0.2)
plt.show()
# Candidates flagged from the plots above for outlier/zero handling: OverallQual, YearBuilt, 2ndFlrSF (zeros mean no second floor)
train = train.drop(train[(train['OverallQual']>9) & (train['SalePrice']<200000)].index)
sns.scatterplot(x='OverallQual',y=target,data=train,alpha=.5)
sns.scatterplot(x='YearBuilt',y=target,data=train,alpha=.5)
train = train.drop(train[(train['2ndFlrSF']<1500) & (train['SalePrice']>400000)].index)
train['2ndFlrSF']= train['2ndFlrSF'].replace({0:np.nan})
sns.scatterplot(x='2ndFlrSF',y=target,data=train,alpha=.5)
from scipy.stats import norm, skew
from scipy import stats
sns.distplot(train['SalePrice'], fit=norm)
(mu, sigma) = norm.fit(train['SalePrice'])
print( '\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
train['SalePrice'] = np.log1p(train['SalePrice'])
plt.legend(['Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()
###Output
_____no_output_____
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
##### Your Code Here #####
###Output
_____no_output_____
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
# set dataframe display options
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# import data
df = pd.read_csv('https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv')
df.head().T
# Baseline Model
baseline_target = 'SalePrice'
baseline_numeric_columns = df.select_dtypes(include='number').columns
baseline_features = baseline_numeric_columns.drop([baseline_target, 'Id'])
baseline_y = df[baseline_target]
baseline_X = df[baseline_features]
# Deal with NaN values
df[baseline_features].isnull().sum()
df[['LotFrontage', 'MasVnrArea', 'GarageYrBlt']] = df[['LotFrontage', 'MasVnrArea', 'GarageYrBlt']].fillna(df[['LotFrontage', 'MasVnrArea', 'GarageYrBlt']].median())
# Re-slice the baseline features after imputing: baseline_X was taken from df before the
# fillna, so it would otherwise still carry the original NaNs into the fit below.
baseline_X = df[baseline_features]
# split the data
X_train, X_test, y_train, y_test = train_test_split(baseline_X, baseline_y, train_size=0.8, test_size=0.2, random_state=42)
# fit the model
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions using the test features
y_pred = model.predict(X_test)
# Compare predictions to test target
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
r2 = r2_score(y_test, y_pred)
print('Root Mean Squared Error:', rmse)
print('R^2 Score:', r2)
print('Intercept:', model.intercept_)
coefficients = pd.Series(model.coef_, X_train.columns)
print(coefficients.to_string())
# plot scatterplots to check for relationships
for feature in df[baseline_features]:
sns.scatterplot(x=feature, y=baseline_target, data=df, alpha=0.1)
plt.show()
# Interaction variables
df['bedroom_bathroom_ratio'] = df['BedroomAbvGr'] / (df['FullBath'] + df['HalfBath'] + df['BsmtFullBath'] + df['BsmtHalfBath'])
df['bedroom_per_sqft'] = df['BedroomAbvGr'] / (df['1stFlrSF'] + df['2ndFlrSF'] + df['BsmtFinSF1'] + df['BsmtFinSF2'])
df['bathrooms_per_sqft'] = (df['BsmtFullBath'] + df['BsmtHalfBath'] + df['FullBath'] + df['HalfBath']) / (df['1stFlrSF'] + df['2ndFlrSF'] + df['TotalBsmtSF'])
df['dollars_per_sqft_living'] = df['SalePrice'] / df['GrLivArea']
df['dollars_per_sqft_lot'] = df['SalePrice'] / df['LotArea']
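# Caution: the two dollars_per_sqft features above are computed from SalePrice itself,
# so they leak the target into the predictors and make the RMSE/R^2 reported further
# down look much better than an honest out-of-sample estimate would.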
df['age'] = df['YrSold'] - df['YearBuilt']
df['garage_age'] = df['YrSold'] - df['GarageYrBlt']
# Polynomial features
df['overall_qual_squared'] = df['OverallQual'] ** 2
df['overall_cond_squared'] = df['OverallCond'] ** 2
df['GrLivArea_squared'] = df['GrLivArea'] ** 2
df['first_floor_sqft_squared'] = df['1stFlrSF'] ** 2
df['second_floor_sqft_squared'] = df['2ndFlrSF'] ** 2
# convert sale price to natural log
df['ln_price'] = np.log(df['SalePrice'])
target = 'ln_price'
# select all numeric columns; drop target, price, and id
numeric_columns = df.select_dtypes(include='number').columns
features = numeric_columns.drop([target, 'SalePrice', 'Id'])
features
# Separate target and features
y = df[target]
X = df[features]
# split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=42)
# fit the model
model = LinearRegression()
model.fit(X_train, y_train)
# make predictions using test features
y_pred = model.predict(X_test)
# compare predictions to test target
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
r2 = r2_score(y_test, y_pred)
print('Root Mean Squared Error:', rmse)
print('R^2 Score:', r2)
print('Intercept:', model.intercept_)
coefficients = pd.Series(model.coef_, X_train.columns)
print(coefficients.to_string())
###Output
Root Mean Squared Error: 0.0637692885828241
R^2 Score: 0.9782088553683577
Intercept: 12.638601484216046
MSSubClass -2.168385e-05
LotFrontage 2.217168e-04
LotArea 1.947238e-07
OverallQual 5.950067e-02
OverallCond 3.938439e-02
YearBuilt -4.646229e-05
YearRemodAdd 7.201645e-05
MasVnrArea -1.638306e-05
BsmtFinSF1 -5.261704e-06
BsmtFinSF2 -1.084572e-05
BsmtUnfSF 1.791889e-05
TotalBsmtSF 1.835509e-06
1stFlrSF 2.584612e-04
2ndFlrSF 2.367276e-04
LowQualFinSF 2.329534e-04
GrLivArea 7.283137e-04
BsmtFullBath 1.783001e-02
BsmtHalfBath 1.078982e-02
FullBath 2.384190e-02
HalfBath 3.657580e-02
BedroomAbvGr 1.230860e-02
KitchenAbvGr -5.551974e-02
TotRmsAbvGrd -2.084772e-03
Fireplaces 1.199552e-02
GarageYrBlt -5.921310e-04
GarageCars 6.445952e-03
GarageArea 1.478652e-05
WoodDeckSF 1.527767e-05
OpenPorchSF -4.064527e-05
EnclosedPorch 4.593333e-05
3SsnPorch 1.447034e-05
ScreenPorch 4.280876e-05
PoolArea 9.914493e-05
MiscVal -4.821702e-06
MoSold -7.429592e-04
YrSold -9.449519e-04
bedroom_bathroom_ratio 7.122155e-03
bedroom_per_sqft -1.936429e+01
bathrooms_per_sqft -2.041853e+01
overall_qual_squared -4.644716e-03
overall_cond_squared -2.071114e-03
dollars_per_sqft_living 7.563407e-03
dollars_per_sqft_lot 1.754002e-04
age -8.984832e-04
garage_age -3.528218e-04
GrLivArea_squared -1.258532e-07
first_floor_sqft_squared -3.733404e-11
second_floor_sqft_squared 1.767470e-08
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
##### Your Code Here #####
###Output
_____no_output_____
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
df = pd.read_csv("https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv")
pd.set_option('display.max_columns', None)
df.head()
###Output
_____no_output_____
###Markdown
A number of features have the same categorical ranking that would be easy to turn into numeric and be able to include these columns in the model Ex Excellent --> 5 Gd Good --> 4 TA Average/Typical --> 3 Fa Fair --> 2 Po Poor --> 1 NA --> 0
###Code
to_number = {"ExterQual": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"ExterCond": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"BsmtQual": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"BsmtCond": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"HeatingQC": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"KitchenQual": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"FireplaceQu": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"GarageQual": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"GarageCond": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0},
"PoolQC": {"Ex": 5, "Gd": 4, "TA": 3, "Fa": 2, "Po": 1, "NA": 0}
}
df = df.replace(to_number)
cols = ["ExterQual", "ExterCond", "BsmtQual", "BsmtCond", "HeatingQC",
"KitchenQual", "FireplaceQu", "GarageQual", "GarageCond", "PoolQC"]
df[cols] = df[cols].apply(pd.to_numeric, errors='coerce')
target = "SalePrice"
numeric_columns = df.select_dtypes(include='number').columns
for feature in numeric_columns.drop(target):
sns.scatterplot(x=feature, y=target, data=df, alpha=0.2)
plt.show()
#making a squared feature for OverallQual
df["OverallQual_squared"] = df["OverallQual"]**2
for feature in ["OverallQual", "OverallQual_squared"]:
sns.scatterplot(x=feature, y=target, data=df, alpha=0.1)
plt.show()
# function for running the model
def run_linear_model(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
r2 = r2_score(y_test, y_pred)
print('Root Mean Squared Error', rmse)
print('R^2 Score', r2)
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, X_train.columns)
print(coefficients.to_string())
#baseline
features = ['OverallQual']
y = df[target]
X = df[features]
run_linear_model(X, y)
# bivariate using squared column
features = ["OverallQual_squared"]
y = df[target]
X = df[features]
run_linear_model(X, y)
# multiple regression using both features
features = ["OverallQual", "OverallQual_squared"]
y = df[target]
X = df[features]
run_linear_model(X, y)
# trying a higher degree polynomial
df["OverallQual_cubed"] = df["OverallQual"]**3
features = ["OverallQual", "OverallQual_cubed"]
y = df[target]
X = df[features]
run_linear_model(X,y)
# want to see what this looks like
df = df.sort_values(by='OverallQual')
ax = df.plot(x='OverallQual', y=target, kind='scatter', alpha=0.5)
ax.plot(X['OverallQual'], model.predict(X));
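# Note: `model` is local to run_linear_model and never returned, so it is not defined at
# this point. To draw the fitted curve, have run_linear_model end with `return model`
# and capture it, e.g. `model = run_linear_model(X, y)`.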
# log transformed y
features = ["OverallQual"]
y = np.log(df[target])
X = df[features]
run_linear_model(X, y)
# more engineered features
df["yard"] = df["LotArea"] - df["1stFlrSF"]
df["age"] = np.subtract(2019, df["YearBuilt"])
df.head()
df = df.fillna(value=0)
# put it all together
numeric_columns = df.select_dtypes(include='number').columns
features = numeric_columns.drop([target, 'SalePrice', 'Id'])
y = df[target]
X = df[features]
run_linear_model(X, y)
# interaction feature
from sklearn.preprocessing import PolynomialFeatures
features = ["GarageCars", "GarageYrBlt"]
X = df[features]
y = df[target]
poly = PolynomialFeatures(interaction_only=True,include_bias = False)
poly.fit_transform(X)
run_linear_model(X, y)
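# Note: here (and in the next cell) the expanded array returned by poly.fit_transform(X)
# is discarded, so the interaction term never reaches the model. A minimal sketch of
# actually feeding it in (column names spelled out by hand for readability):
X_inter = pd.DataFrame(poly.fit_transform(X), index=X.index,
                       columns=["GarageCars", "GarageYrBlt", "GarageCars_x_GarageYrBlt"])
run_linear_model(X_inter, y)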
#gonna try another
features = ["TotRmsAbvGrd", "1stFlrSF"]
X = df[features]
y = df[target]
poly = PolynomialFeatures(interaction_only=True, include_bias = False)
poly.fit_transform(X)
run_linear_model(X, y)
###Output
Root Mean Squared Error 51428.865551917814
R^2 Score 0.5029097404559604
Intercept -35056.60569579227
TotRmsAbvGrd 16418.975438
1stFlrSF 93.643058
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
##### Your Code Here #####
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
pd.set_option('display.max_columns', 100)
url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv'
df = pd.read_csv(url)
df.head()
df.MSSubClass.isna().sum()
df.plot(x='MSSubClass', y='SalePrice', kind='scatter')
X= df[['YearBuilt']]
y= np.log(df['SalePrice'])
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8, test_size=.2, random_state=42)
model = LinearRegression()
model.fit(X_train,y_train)
y_pred = np.exp(model.predict(X_test))
ax = df.plot(x='YearBuilt', y='SalePrice', kind='scatter', alpha=0.5, logy=True)
ax.plot(X_test, y_pred, color='r')
df['OverallQual **2'] = df['OverallQual'] **2
features = ['OverallQual', 'OverallQual **2']
X = df[features]
y= df['SalePrice']
model = LinearRegression()
model.fit(X,y)
y_pred = model.predict(X)
print('RMSE:', np.sqrt(mean_squared_error(y, y_pred)))
print('R^2:', r2_score(y,y_pred))
ax = df.plot(x='OverallQual', y='SalePrice', kind='scatter', alpha=0.5)
ax.plot(X['OverallQual'], y_pred, color='r')
df['TotRmsAbvGrd **2'] = df['TotRmsAbvGrd'] **2
features = ['TotRmsAbvGrd', 'TotRmsAbvGrd **2']
X = df[features]
y= df['SalePrice']
model = LinearRegression()
model.fit(X,y)
y_pred = model.predict(X)
print('RMSE:', np.sqrt(mean_squared_error(y, y_pred)))
print('R^2:', r2_score(y,y_pred))
ax = df.plot(x='TotRmsAbvGrd', y='SalePrice', kind='scatter', alpha=0.5)
ax.plot(X['TotRmsAbvGrd'], y_pred, color='r')
###Output
_____no_output_____
###Markdown
Intermediate Linear Regression Practice Use a Linear Regression model to get the lowest RMSE possible on the following dataset:[Dataset Folder](https://github.com/ryanleeallred/datasets/tree/master/Ames%20Housing%20Data)[Raw CSV](https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv) Your model must include (at least):- A log-transformed y variable- Two polynomial features- One interaction feature- 10 other engineered featuresWhat is the lowest Root-Mean-Squared Error that you are able to obtain? Share your best RMSEs in Slack!Notes:There may be some data cleaning that you need to do on some features of this dataset. Linear Regression will only accept numeric values and will not acceptNote* There may not be a clear candidate for an interaction term in this dataset. Include one anyway, sometimes it's a good practice for predictive modeling feature engineering in general.
###Code
url = 'https://raw.githubusercontent.com/ryanleeallred/datasets/master/Ames%20Housing%20Data/train.csv'
df = pd.read_csv(url)
#columns to drop bc of NaNs:
drop_cols = ['Alley', 'Fence', 'MiscFeature']
df = df.drop(columns = drop_cols)
#Fill Nans for garage yr blt w/ YearBlt value:
df['GarageYrBlt'] = df['GarageYrBlt'].fillna(df['YearBuilt'])
#drop 8 rows of masonry veneer area Nans:
df.dropna(subset=['MasVnrArea'], inplace=True)
#Fill NaNs for LotFrontage as 0:
df['LotFrontage'] = df['LotFrontage'].fillna(0)
numeric_columns = df.select_dtypes(include='number').drop(columns='Id').columns
numeric_columns
def run_linear_model(X, y):
# Split into test and train data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20, random_state=42)
# Fit model using train data
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions using test features
y_pred = model.predict(X_test)
# Compare predictions to test target
rmse = (np.sqrt(mean_squared_error(y_test, y_pred)))
r2 = r2_score(y_test, y_pred)
print('Root Mean Squared Error', rmse)
print('R^2 Score', r2)
print('Intercept', model.intercept_)
coefficients = pd.Series(model.coef_, X_train.columns)
print(coefficients.to_string())
#Pre-feature engineering baseline regression:
target= 'SalePrice'
X = df[numeric_columns].drop(columns=target)
y = df[target]
run_linear_model(X, y)
###Output
Root Mean Squared Error 30491.521649630755
R^2 Score 0.8477464959400496
Intercept 220589.42269341327
MSSubClass -173.645775
LotFrontage 1.754565
LotArea 0.381321
OverallQual 17393.481114
OverallCond 3663.422892
YearBuilt 313.271387
YearRemodAdd 228.352471
MasVnrArea 39.528342
BsmtFinSF1 8.775940
BsmtFinSF2 -2.772518
BsmtUnfSF -1.446343
TotalBsmtSF 4.557080
1stFlrSF 13.099720
2ndFlrSF 13.393311
LowQualFinSF 7.322930
GrLivArea 33.815960
BsmtFullBath 10711.209239
BsmtHalfBath 2428.199879
FullBath 5700.736127
HalfBath -3383.169793
BedroomAbvGr -9849.144139
KitchenAbvGr -12219.141455
TotRmsAbvGrd 4587.806384
Fireplaces 4654.336873
GarageYrBlt -29.930401
GarageCars 12190.843114
GarageArea -4.618454
WoodDeckSF 25.632564
OpenPorchSF -13.474969
EnclosedPorch 9.782653
3SsnPorch 30.874942
ScreenPorch 37.623401
PoolArea -25.653473
MiscVal -0.623324
MoSold -48.685553
YrSold -637.725691
###Markdown
Engineer Some Features A log-transformed y variable
###Code
df['ln_price'] = np.log(df['SalePrice'])
###Output
_____no_output_____
###Markdown
Two polynomial features Find Curved Data:
###Code
for feature in df[numeric_columns]:
sns.scatterplot(x=feature, y=target, data=df, alpha=0.1)
plt.show()
#candidates for curved data after visual inspection, add squared column:
#-OverallQual
#-GarageCars
df['quality_squared'] = df['OverallQual']**2
df['garage_cars_squared'] = df['GarageCars']**2
###Output
_____no_output_____
###Markdown
Check if squaring the Quality feature increased predictive accuracy:
###Code
X = df[['OverallQual']]
y = df['SalePrice']
run_linear_model(X, y)
X = df[['quality_squared']]
y = df['SalePrice']
run_linear_model(X, y)
###Output
Root Mean Squared Error 42824.467576919145
R^2 Score 0.6996739216639173
Intercept 36248.37116356945
quality_squared 3717.658598
###Markdown
Check if squaring the GarageCars feature increased predictive accuracy:
###Code
X = df[['GarageCars']]
y = df['SalePrice']
run_linear_model(X, y)
X = df[['garage_cars_squared']]
y = df['SalePrice']
run_linear_model(X, y)
###Output
Root Mean Squared Error 53022.91689699418
R^2 Score 0.5395989222623487
Intercept 107204.01645159317
garage_cars_squared 19976.136255
###Markdown
One interaction feature
###Code
#hmmmm
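# A minimal sketch of one interaction feature (column choice assumed): multiply overall
# quality by above-ground living area so the model can price "quality per square foot"
# differently from either column on its own.
df['qual_x_livarea'] = df['OverallQual'] * df['GrLivArea']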
###Output
_____no_output_____
###Markdown
10 other engineered features
###Code
#age:
df['age'] = 2010 - df['YearBuilt']
#renovation_age:
df['renovation_age'] = 2010 - df['YearRemodAdd']
#yard size (ignores deck, pool, etc):
df['yard_size'] = df['LotArea'] - df['1stFlrSF']
#kitchen Quality:
kitchen_map = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1}
df['KitchenQual'] = df['KitchenQual'].replace(kitchen_map)
#fireplace quality:
fireplace_map = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1, np.NaN: 0}
df['FireplaceQu'] = df['FireplaceQu'].replace(fireplace_map)
#garage type:
garage_map = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1, np.NaN: 0}
df['GarageType'] = df['GarageType'].replace(garage_map)
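# Note: GarageType holds location categories such as 'Attchd' and 'Detchd', not Ex/Gd/TA
# quality codes, so this replace only turns NaN into 0 and the column stays non-numeric
# (it is then dropped by the select_dtypes(include='number') step further down).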
#garage quality:
df['GarageQual'] = df['GarageQual'].replace(garage_map)
#garage condition:
df['GarageCond'] = df['GarageCond'].replace(garage_map)
#garage finish:
finish_map = {'Fin':3, 'RFn': 2, 'Unf':1, np.NaN:0}
df['GarageFinish'] = df['GarageFinish'].replace(finish_map)
#Pool Quality:
pool_map = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1, np.NaN: 0}
df['PoolQC'] = df['PoolQC'].replace(pool_map)
#sale type:
sale_map = {'Normal': 3,
'Partial': 3,
'Abnorml': 1,
'Family': 1,
'AdjLand': 2,
'Alloca': 2}
df['sale_type'] = df['SaleCondition'].replace(sale_map)
#Lot shape:
shape_map = {'Reg': 4, 'IR1': 3, 'IR2': 2, 'IR3': 1}
df["LotShape"] = df["LotShape"].replace(shape_map)
#Street:
street_map = {'Grvl':1, 'Pave':2}
df['Street'] = df['Street'].replace(street_map)
numeric_columns = df.select_dtypes(include='number').drop(columns=['Id', 'SalePrice', 'ln_price']).columns
numeric_columns
target= 'ln_price'
X = df[numeric_columns]
y = df[target]
run_linear_model(X, y)
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import seaborn as sns
%matplotlib inline
X = df[numeric_columns].values
scaled_X = StandardScaler().fit_transform(X)
covar_matrix = PCA(n_components = 30)
covar_matrix.fit(scaled_X)
variance = covar_matrix.explained_variance_ratio_
cumulative_var = np.cumsum(np.round(covar_matrix.explained_variance_ratio_, decimals=3)*100)
#plot it:
y_vals = list(cumulative_var)
x_vals = list(range(1, 31))
fig, ax = plt.subplots(figsize=(10,8))
ax.grid(True)
ax.set_title('Principal Components Cumulative Variance')
ax.set_ylabel('Cumulative Variance % Explained')
ax.set_xlabel('Principal Components')
sns.barplot(x=x_vals, y=y_vals, ax=ax, color='#769ddb')
plt.show()
#make pca df
pca = PCA(20)
principal_components = pca.fit_transform(scaled_X)
pc_df = pd.DataFrame(data = principal_components)
#combine pca and main df
combined = pc_df.join(df[numeric_columns].reset_index(drop=True))
target= 'ln_price'
X = combined
y = df[target]
run_linear_model(X, y)
###Output
Root Mean Squared Error 0.13285651381722052
R^2 Score 0.893553676842148
Intercept -9575639083.312624
0 -1.320419e+08
1 7.324378e+06
2 2.019490e+06
3 -2.794592e+07
4 3.179658e+06
5 3.438620e+06
6 -2.119812e+05
7 5.286031e+06
8 7.426538e+06
9 2.842934e+06
10 3.531286e+06
11 -5.695468e+05
12 -8.596206e+05
13 -7.280779e+06
14 3.411891e+06
15 1.144646e+07
16 -5.282038e+06
17 1.308331e+06
18 2.375875e+06
19 5.763359e+06
MSSubClass -1.504317e+05
LotFrontage 4.101426e+05
LotArea 4.189310e+04
Street -8.138911e+05
LotShape -1.375495e+07
OverallQual 2.608251e+07
OverallCond -6.099336e+06
YearBuilt 1.025518e+06
YearRemodAdd 1.492057e+06
MasVnrArea 1.449708e+05
BsmtFinSF1 5.643382e+04
BsmtFinSF2 -5.111754e+04
BsmtUnfSF 6.015889e+04
TotalBsmtSF 6.547516e+04
1stFlrSF 8.269386e+04
2ndFlrSF 4.547075e+04
LowQualFinSF -1.169257e+05
GrLivArea 1.123891e+04
BsmtFullBath 1.654096e+07
BsmtHalfBath 2.773749e+06
FullBath 4.927667e+07
HalfBath 1.013663e+07
BedroomAbvGr 6.977470e+06
KitchenAbvGr -7.407016e+06
TotRmsAbvGrd 1.251466e+07
Fireplaces 1.809014e+07
FireplaceQu 8.506416e+06
GarageYrBlt 1.140893e+06
GarageFinish 2.675967e+07
GarageCars 4.228953e+07
GarageArea 1.478097e+05
GarageQual 1.083654e+07
GarageCond 1.095174e+07
WoodDeckSF 1.058216e+05
OpenPorchSF 2.755562e+05
EnclosedPorch -2.205511e+05
3SsnPorch -1.158948e+05
ScreenPorch 5.750014e+04
PoolArea 4.296692e+03
PoolQC 4.102867e+06
MiscVal -7.792794e+02
MoSold 8.915228e+05
YrSold 6.816974e+05
quality_squared 2.118285e+06
garage_cars_squared 1.321633e+07
age -1.025518e+06
renovation_age -1.492057e+06
yard_size -4.080075e+04
sale_type 5.219701e+06
|
day4/rl/policy_gradient_example.ipynb | ###Markdown
Training an Agent using Policy Gradient on Open AI Gym.https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
###Code
""" Trains an agent with (stochastic) Policy Gradients on Pong. Uses OpenAI Gym. """
import numpy as np
import pickle
import gym
# hyperparameters
H = 200 # number of hidden layer neurons
batch_size = 10 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
resume = False # resume from previous checkpoint?
render = False
# model initialization
D = 80 * 80 # input dimensionality: 80x80 grid
if resume:
model = pickle.load(open('save.p', 'rb'))
else:
model = {}
model['W1'] = np.random.randn(H,D) / np.sqrt(D) # "Xavier" initialization
model['W2'] = np.random.randn(H) / np.sqrt(H)
grad_buffer = { k : np.zeros_like(v) for k,v in model.items() } # update buffers that add up gradients over a batch
rmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() } # rmsprop memory
# Policy Gradient Implementation
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x)) # sigmoid "squashing" function to interval [0,1]
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector """
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward
(rewards for steps far into the future are deemed less important)
"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t] # gamma: discount factor
# higher degree of discount further back in time
discounted_r[t] = running_add
return discounted_r
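# Worked example (assumed values): with gamma = 0.99, a reward vector [0, 0, 1] becomes
# [0.99**2, 0.99, 1.0] = [0.9801, 0.99, 1.0] -- the final +1 is credited, progressively
# discounted, to the earlier actions of the same rally.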
def policy_forward(x):
h = np.dot(model['W1'], x)
h[h<0] = 0 # ReLU nonlinearity
logp = np.dot(model['W2'], h)
p = sigmoid(logp)
return p, h # return probability of taking action 2, and hidden state
def policy_backward(eph, epdlogp):
""" backward pass. (eph is array of intermediate hidden states)
https://medium.com/@jonathan_hui/rl-policy-gradients-explained-9b13b688b146
"""
# h = relu(W1 * x)
# logp = W2 * h = W2 * relu(W1 * x)
dW2 = np.dot(eph.T, epdlogp).ravel()
dh = np.outer(epdlogp, model['W2'])
dh[eph <= 0] = 0 # backprop relu
dW1 = np.dot(dh.T, epx)
return {'W1':dW1, 'W2':dW2}
# Training using OpenAI Gym's preset Pong environment
render = False
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None # used in computing the difference frame
xs,hs,dlogps,drs = [],[],[],[]
running_reward = None
reward_sum = 0
episode_number = 0
while True:
if render: env.render()
# preprocess the observation, set input to network to be difference image
cur_x = prepro(observation)
x = cur_x - prev_x if prev_x is not None else np.zeros(D)
prev_x = cur_x
# forward the policy network and sample an action from the returned probability
aprob, h = policy_forward(x)
action = 2 if np.random.uniform() < aprob else 3 # roll the dice!
# record various intermediates (needed later for backprop)
xs.append(x) # observation
hs.append(h) # hidden state
y = 1 if action == 2 else 0 # a "fake label"
dlogps.append(y - aprob) # grad that encourages the action that was taken to be taken (see http://cs231n.github.io/neural-networks-2/#losses if confused)
# step the environment and get new measurements
observation, reward, done, info = env.step(action)
reward_sum += reward
drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
if done: # an episode finished
print(f'Episode {episode_number}: input: {x}, reward: {reward}')
episode_number += 1
# stack together all inputs, hidden states, action gradients, and rewards for this episode
epx = np.vstack(xs) # inputs
eph = np.vstack(hs) # hidden states
epdlogp = np.vstack(dlogps) # action gradients
epr = np.vstack(drs) # rewards
xs,hs,dlogps,drs = [],[],[],[] # reset array memory
# compute the discounted reward backwards through time
discounted_epr = discount_rewards(epr)
# standardize the rewards to be unit normal (helps control the gradient estimator variance)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
epdlogp *= discounted_epr # modulate the gradient with advantage (PG magic happens right here.)
grad = policy_backward(eph, epdlogp)
for k in model: grad_buffer[k] += grad[k] # accumulate grad over batch
# perform rmsprop parameter update every batch_size episodes
if episode_number % batch_size == 0:
for k,v in model.items():
g = grad_buffer[k] # gradient
rmsprop_cache[k] = decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g**2
model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
grad_buffer[k] = np.zeros_like(v) # reset batch gradient buffer
# boring book-keeping
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print(f'resetting env. episode reward total was {reward_sum}. running mean: {running_reward}')
if episode_number % 100 == 0: pickle.dump(model, open('save.p', 'wb'))
reward_sum = 0
observation = env.reset() # reset env
prev_x = None
#if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.
# print (f'ep {episode_number}: game finished, reward: {reward}')
###Output
_____no_output_____ |
keras_basics/tf.keras.layers.Attention_test.ipynb | ###Markdown
TensorFlow Attention Layer Referencehttps://www.tensorflow.org/api_docs/python/tf/keras/layers/Attention
###Code
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
import matplotlib.pyplot as plt
inputs = tf.random.normal([32, 10, 8])
lstm = tf.keras.layers.LSTM(4)
output = lstm(inputs)
print(output.shape)
###Output
(32, 4)
###Markdown
Args: query: Query tensor of shape `[batch_size, Tq, dim]`. key: Key tensor of shape `[batch_size, Tv, dim]`.Output shape: Attention outputs of shape `[batch_size, Tq, dim]`.
###Code
batch_size = 6
Tq = Tv = 10
# base_vector = np.arange(0, Tq)
base_vector = np.linspace(1, 100, Tq)
a = np.asarray(batch_size * [base_vector])
b = np.zeros((batch_size, Tv))
for i in range(batch_size):
b[i, :] = np.concatenate([base_vector[i:], base_vector[0:i]])
print(b.shape)
print(b)
dim = 1
query_seq_encoding = tf.constant(np.expand_dims(a, axis=-1), dtype=tf.float32)
value_seq_encoding = tf.constant(np.expand_dims(b, axis=-1), dtype=tf.float32)
query_value_attention_seq = tf.keras.layers.Attention()([query_seq_encoding, value_seq_encoding])
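# With only [query, value] passed, the layer reuses value as the key, i.e. it computes
# softmax(Q @ V^T) @ V (unscaled dot-product / Luong-style attention by default).
# The lines below recompute the score and weight matrices by hand for plotting;
# matmul(weights, value_seq_encoding) should match query_value_attention_seq.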
scores = math_ops.matmul(query_seq_encoding, value_seq_encoding, transpose_b=True)
plt.matshow(scores[0,:])
plt.matshow(scores[1,:])
plt.matshow(scores[2,:])
plt.matshow(scores[3,:])
weights = nn.softmax(scores)
plt.matshow(weights[0,:])
plt.matshow(weights[1,:])
plt.matshow(weights[2,:])
plt.matshow(weights[3,:])
###Output
_____no_output_____ |
head-and-mouth/Head and Mouth Cancer Datasets.ipynb | ###Markdown
Head and Mouth Cancer DatasetsThis Jupyter Notebook builds a dataset relating to head and mouth cancers. This category was chosen because it is easy to define on the Proteomic Data Commons and the Imaging Data Commons, but will need to be manually aggregated on the Genomic Data Commons. Dataset definitionThis dataset is defined as:* On the Proteomic Data Commons: all cases whose [`primary_site` is set to `Head and Neck`](https://pdc.cancer.gov/pdc/browse/filters/primary_site:Head+and+Neck)* On the Imaging Data Commons: all cases whose [Primary Site Location is `Head-Neck`](https://portal.imaging.datacommons.cancer.gov/explore/?filters_for_load=%5B%7B%22filters%22:%5B%7B%22id%22:%22128%22,%22values%22:%5B%22Head-Neck%22%5D%7D%5D%7D%5D) * This isn't currently included, since I don't think the IDC API is publicly accessible yet.* On the Genomics Data Commons: [all cases](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22base%20of%20tongue%22%2C%22floor%20of%20mouth%22%2C%22gum%22%2C%22hypopharynx%22%2C%22larynx%22%2C%22lip%22%2C%22nasal%20cavity%20and%20middle%20ear%22%2C%22nasopharynx%22%2C%22oropharynx%22%2C%22other%20and%20ill-defined%20sites%20in%20lip%2C%20oral%20cavity%20and%20pharynx%22%2C%22other%20and%20unspecified%20major%20salivary%20glands%22%2C%22other%20and%20unspecified%20parts%20of%20mouth%22%2C%22other%20and%20unspecified%20parts%20of%20tongue%22%2C%22palate%22%2C%22tonsil%22%5D%7D%7D%5D%7D) whose `primary_site` is set to one of: * [`Other and unspecified parts of major salivary glands`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20major%20salivary%20glands%22%5D%7D%7D%5D%7D) * [`Other and ill-defined sites in lip, oral cavity and pharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20ill-defined%20sites%20in%20lip%2C%20oral%20cavity%20and%20pharynx%22%5D%7D%7D%5D%7D) * [`Oropharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22oropharynx%22%5D%7D%7D%5D%7D) * [`Larynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22larynx%22%5D%7D%7D%5D%7D) * [`Other and unspecified parts of tongue`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20parts%20of%20tongue%22%5D%7D%7D%5D%7D) * [`Nasopharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22nasopharynx%22%5D%7D%7D%5D%7D) * [`Floor of 
mouth`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22floor%20of%20mouth%22%5D%7D%7D%5D%7D) * [`Tonsil`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22tonsil%22%5D%7D%7D%5D%7D) * [`Other and unspecified parts of mouth`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20parts%20of%20mouth%22%5D%7D%7D%5D%7D) * [`Nasal cavity and middle ear`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22nasal%20cavity%20and%20middle%20ear%22%5D%7D%7D%5D%7D) * [`Hypopharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22hypopharynx%22%5D%7D%7D%5D%7D) * [`Base of tongue`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22base%20of%20tongue%22%5D%7D%7D%5D%7D) * [`Gum`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22gum%22%5D%7D%7D%5D%7D) * [`Lip`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22lip%22%5D%7D%7D%5D%7D) * [`Palate`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22palate%22%5D%7D%7D%5D%7D)Note that we specifically don't include the brain, eye, trachea or esophagus -- please let us know if you think they should be included in this dataset. Setup
###Code
# Load required packages.
import requests
import json
import pandas
import numpy
###Output
_____no_output_____
###Markdown
Download PDC data
###Code
pdc_graphql_endpoint = "https://pdc.cancer.gov/graphql"
# Step 1. Get a list of all the case IDs relevant to us.
primary_site = 'Head and Neck'
primary_site_query = """{ uiCase(primary_site: "%s") { case_id } }""" % (primary_site)
response = requests.get(pdc_graphql_endpoint, params = {
"query": primary_site_query
})
result = json.loads(response.content)
case_ids = list(map(lambda case: case['case_id'], result['data']['uiCase']))
unique_case_ids = numpy.unique(case_ids)
print(f"We have {len(case_ids)} case IDs, of which {len(unique_case_ids)} are unique.")
# TODO: Should be 258 -- why are we off by one?
# Step 2. Get all the cases relevant to us.
# I got this query from https://pdc.cancer.gov/data-dictionary/publicapi-documentation/#!/Case/case
case_query = """{
case (
case_id: "%s"
acceptDUA: true
) {
case_id case_submitter_id project_submitter_id days_to_lost_to_followup disease_type
index_date lost_to_followup primary_site
externalReferences {
external_reference_id
reference_resource_shortname reference_resource_name reference_entity_location
}
demographics {
demographic_id ethnicity gender demographic_submitter_id race cause_of_death days_to_birth
days_to_death vital_status year_of_birth year_of_death
}
samples {
sample_id sample_submitter_id sample_type sample_type_id gdc_sample_id gdc_project_id
biospecimen_anatomic_site composition current_weight days_to_collection days_to_sample_procurement
diagnosis_pathologically_confirmed freezing_method initial_weight intermediate_dimension is_ffpe
longest_dimension method_of_sample_procurement oct_embedded pathology_report_uuid preservation_method
sample_type_id shortest_dimension time_between_clamping_and_freezing time_between_excision_and_freezing
tissue_type tumor_code tumor_code_id tumor_descriptor
aliquots {
aliquot_id aliquot_submitter_id analyte_type
aliquot_run_metadata {
aliquot_run_metadata_id
}
}
}
diagnoses {
diagnosis_id tissue_or_organ_of_origin age_at_diagnosis primary_diagnosis tumor_grade tumor_stage
diagnosis_submitter_id classification_of_tumor days_to_last_follow_up days_to_last_known_disease_status
days_to_recurrence last_known_disease_status morphology progression_or_recurrence
site_of_resection_or_biopsy prior_malignancy ajcc_clinical_m ajcc_clinical_n ajcc_clinical_stage
ajcc_clinical_t ajcc_pathologic_m ajcc_pathologic_n ajcc_pathologic_stage ajcc_pathologic_t
ann_arbor_b_symptoms ann_arbor_clinical_stage ann_arbor_extranodal_involvement ann_arbor_pathologic_stage
best_overall_response burkitt_lymphoma_clinical_variant circumferential_resection_margin
colon_polyps_history days_to_best_overall_response days_to_diagnosis days_to_hiv_diagnosis
days_to_new_event figo_stage hiv_positive hpv_positive_type hpv_status iss_stage laterality
ldh_level_at_diagnosis ldh_normal_range_upper lymph_nodes_positive lymphatic_invasion_present
method_of_diagnosis new_event_anatomic_site new_event_type overall_survival perineural_invasion_present
prior_treatment progression_free_survival progression_free_survival_event residual_disease
vascular_invasion_present year_of_diagnosis icd_10_code synchronous_malignancy
tumor_largest_dimension_diameter
}
}
}"""
cases = []
for num, case_id in enumerate(unique_case_ids):
response = requests.get(pdc_graphql_endpoint, params = {
"query": case_query % (case_id)
})
result = json.loads(response.content)
case = result['data']['case']
if case and len(case) == 1:
cases.append(case[0])
print("Downloaded case %d of %d: %s" % (num, len(unique_case_ids), case_id))
else:
print("Could not download case %d of %d: %s" % (num, len(unique_case_ids), case_id))
len(cases)
# Before we examine it, let's save this as a file.
with open('pdc-head-and-mouth.json', 'w') as file:
json.dump(cases, file, indent=2, sort_keys=True)
cases_df = pandas.DataFrame(cases)
cases_df.describe()
###Output
_____no_output_____
###Markdown
Download GDC data
###Code
# Search by cases.primary_site.
cases_endpt = "https://api.gdc.cancer.gov/cases"
filters = {
"op": "in",
"content": {
"field": "cases.primary_site",
"value": [
"baseoftongue",
"floorofmouth",
"gum",
"hypopharynx",
"larynx",
"lip",
"nasalcavityandmiddleear",
"nasopharynx",
"oropharynx",
"otherandill-definedsitesinlip,oralcavityandpharynx",
"otherandunspecifiedmajorsalivaryglands",
"otherandunspecifiedpartsofmouth",
"otherandunspecifiedpartsoftongue",
"palate",
"tonsil"
]
}
}
params = {
"filters": json.dumps(filters),
"expand": "diagnoses,samples",
"format": "JSON",
"size": 1000
}
response = requests.get(cases_endpt, params = params)
result = json.loads(response.content)
print(f"Warnings: {result['warnings']}")
gdc_entries = result['data']['hits']
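# Note: "size": 1000 caps this request at 1000 cases. If the filter ever matches more,
# the remaining hits would need to be paged through with the API's "from" offset
# parameter; result['data'] should also carry a pagination block with the total count.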
with open('gdc-head-and-mouth.json', 'w') as file:
json.dump(gdc_entries, file, indent=2, sort_keys=True)
pandas.DataFrame(gdc_entries)
df_gdc_entries = pandas.DataFrame(gdc_entries)
df_gdc_entries.describe()
###Output
_____no_output_____
###Markdown
Head and Mouth Cancer DatasetsThis Jupyter Notebook builds a dataset relating to head and mouth cancers. This category was chosen because it is easy to define on the Proteomic Data Commons and the Imaging Data Commons, but will need to be manually aggregated on the Genomic Data Commons. Dataset definitionThis dataset is defined as:* On the Proteomic Data Commons: all cases whose [`primary_site` is set to `Head and Neck`](https://pdc.cancer.gov/pdc/browse/filters/primary_site:Head+and+Neck)* On the Imaging Data Commons: all cases whose [Primary Site Location is `Head-Neck`](https://portal.imaging.datacommons.cancer.gov/explore/?filters_for_load=%5B%7B%22filters%22:%5B%7B%22id%22:%22128%22,%22values%22:%5B%22Head-Neck%22%5D%7D%5D%7D%5D) * This isn't currently included, since I don't think the IDC API is publicly accessible yet.* On the Genomics Data Commons: [all cases](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22base%20of%20tongue%22%2C%22floor%20of%20mouth%22%2C%22gum%22%2C%22hypopharynx%22%2C%22larynx%22%2C%22lip%22%2C%22nasal%20cavity%20and%20middle%20ear%22%2C%22nasopharynx%22%2C%22oropharynx%22%2C%22other%20and%20ill-defined%20sites%20in%20lip%2C%20oral%20cavity%20and%20pharynx%22%2C%22other%20and%20unspecified%20major%20salivary%20glands%22%2C%22other%20and%20unspecified%20parts%20of%20mouth%22%2C%22other%20and%20unspecified%20parts%20of%20tongue%22%2C%22palate%22%2C%22tonsil%22%5D%7D%7D%5D%7D) whose `primary_site` is set to one of: * [`Other and unspecified parts of major salivary glands`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20major%20salivary%20glands%22%5D%7D%7D%5D%7D) * [`Other and ill-defined sites in lip, oral cavity and pharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20ill-defined%20sites%20in%20lip%2C%20oral%20cavity%20and%20pharynx%22%5D%7D%7D%5D%7D) * [`Oropharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22oropharynx%22%5D%7D%7D%5D%7D) * [`Larynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22larynx%22%5D%7D%7D%5D%7D) * [`Other and unspecified parts of tongue`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20parts%20of%20tongue%22%5D%7D%7D%5D%7D) * [`Nasopharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22nasopharynx%22%5D%7D%7D%5D%7D) * [`Floor of 
mouth`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22floor%20of%20mouth%22%5D%7D%7D%5D%7D) * [`Tonsil`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22tonsil%22%5D%7D%7D%5D%7D) * [`Other and unspecified parts of mouth`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22other%20and%20unspecified%20parts%20of%20mouth%22%5D%7D%7D%5D%7D) * [`Nasal cavity and middle ear`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22nasal%20cavity%20and%20middle%20ear%22%5D%7D%7D%5D%7D) * [`Hypopharynx`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22hypopharynx%22%5D%7D%7D%5D%7D) * [`Base of tongue`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22base%20of%20tongue%22%5D%7D%7D%5D%7D) * [`Gum`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22gum%22%5D%7D%7D%5D%7D) * [`Lip`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22lip%22%5D%7D%7D%5D%7D) * [`Palate`](https://portal.gdc.cancer.gov/exploration?filters=%7B%22op%22%3A%22and%22%2C%22content%22%3A%5B%7B%22op%22%3A%22in%22%2C%22content%22%3A%7B%22field%22%3A%22cases.primary_site%22%2C%22value%22%3A%5B%22palate%22%5D%7D%7D%5D%7D)Note that we specifically don't include the brain, eye, trachea or esophagus -- please let us know if you think they should be included in this dataset. Setup
###Code
import sys
# Install pandas
#!{sys.executable} -m pip install pandas
# Load required packages.
import requests
import json
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Download PDC data
###Code
pdc_graphql_endpoint = "https://pdc.cancer.gov/graphql"
# Step 1. Get a list of all the case IDs relevant to us.
primary_site = "Head and Neck"
primary_site_query = """{ uiCase(primary_site: "%s") { case_id } }""" % (primary_site)
response = requests.get(pdc_graphql_endpoint, params={"query": primary_site_query})
result = json.loads(response.content)
case_ids = list(map(lambda case: case["case_id"], result["data"]["uiCase"]))
unique_case_ids = np.unique(case_ids)
print(f"We have {len(case_ids)} case IDs, of which {len(unique_case_ids)} are unique.")
# Step 2. Get all the cases relevant to us.
# I got this query from https://pdc.cancer.gov/data-dictionary/publicapi-documentation/#!/Case/case
case_query = """{
case (
case_id: "%s"
acceptDUA: true
) {
case_id case_submitter_id project_submitter_id days_to_lost_to_followup disease_type
index_date lost_to_followup primary_site
externalReferences {
external_reference_id
reference_resource_shortname reference_resource_name reference_entity_location
}
demographics {
demographic_id ethnicity gender demographic_submitter_id race cause_of_death days_to_birth
days_to_death vital_status year_of_birth year_of_death
}
samples {
sample_id sample_submitter_id sample_type sample_type_id gdc_sample_id gdc_project_id
biospecimen_anatomic_site composition current_weight days_to_collection days_to_sample_procurement
diagnosis_pathologically_confirmed freezing_method initial_weight intermediate_dimension is_ffpe
longest_dimension method_of_sample_procurement oct_embedded pathology_report_uuid preservation_method
sample_type_id shortest_dimension time_between_clamping_and_freezing time_between_excision_and_freezing
tissue_type tumor_code tumor_code_id tumor_descriptor
aliquots {
aliquot_id aliquot_submitter_id analyte_type
aliquot_run_metadata {
aliquot_run_metadata_id
}
}
}
diagnoses {
diagnosis_id tissue_or_organ_of_origin age_at_diagnosis primary_diagnosis tumor_grade tumor_stage
diagnosis_submitter_id classification_of_tumor days_to_last_follow_up days_to_last_known_disease_status
days_to_recurrence last_known_disease_status morphology progression_or_recurrence
site_of_resection_or_biopsy prior_malignancy ajcc_clinical_m ajcc_clinical_n ajcc_clinical_stage
ajcc_clinical_t ajcc_pathologic_m ajcc_pathologic_n ajcc_pathologic_stage ajcc_pathologic_t
ann_arbor_b_symptoms ann_arbor_clinical_stage ann_arbor_extranodal_involvement ann_arbor_pathologic_stage
best_overall_response burkitt_lymphoma_clinical_variant circumferential_resection_margin
colon_polyps_history days_to_best_overall_response days_to_diagnosis days_to_hiv_diagnosis
days_to_new_event figo_stage hiv_positive hpv_positive_type hpv_status iss_stage laterality
ldh_level_at_diagnosis ldh_normal_range_upper lymph_nodes_positive lymphatic_invasion_present
method_of_diagnosis new_event_anatomic_site new_event_type overall_survival perineural_invasion_present
prior_treatment progression_free_survival progression_free_survival_event residual_disease
vascular_invasion_present year_of_diagnosis icd_10_code synchronous_malignancy
tumor_largest_dimension_diameter
}
}
}"""
cases = []
for num, case_id in enumerate(unique_case_ids):
response = requests.get(
pdc_graphql_endpoint, params={"query": case_query % (case_id)}
)
result = json.loads(response.content)
case = result["data"]["case"]
if case and len(case) == 1:
cases.append(case[0])
print("Downloaded case %d of %d: %s" % (num + 1, len(unique_case_ids), case_id))
else:
print(
"Could not download case %d of %d: %s"
% (num + 1, len(unique_case_ids), case_id)
)
len(cases)
# Before we examine it, let's save this as a file.
with open("pdc-head-and-mouth.json", "w") as file:
json.dump(cases, file, indent=2, sort_keys=True)
# df_pdc_cases = pd.DataFrame(cases)
df_pdc_cases = pd.json_normalize(cases)
df_pdc_cases.shape, list(df_pdc_cases.columns)
df_pdc_cases.head(5)
df_pdc_cases.describe()
###Output
_____no_output_____
###Markdown
Download GDC data
###Code
# Search by cases.primary_site.
cases_endpt = "https://api.gdc.cancer.gov/cases"
field_groups = ["diagnoses", "samples", "demographic"]
field_groups = ",".join(field_groups)
filters = {
"op": "in",
"content": {
"field": "cases.primary_site",
"value": [
"baseoftongue",
"floorofmouth",
"gum",
"hypopharynx",
"larynx",
"lip",
"nasalcavityandmiddleear",
"nasopharynx",
"oropharynx",
"otherandill-definedsitesinlip,oralcavityandpharynx",
"otherandunspecifiedmajorsalivaryglands",
"otherandunspecifiedpartsofmouth",
"otherandunspecifiedpartsoftongue",
"palate",
"tonsil",
],
},
}
params = {
"filters": json.dumps(filters),
"expand": field_groups,
"format": "JSON",
"size": 1000,
}
response = requests.get(cases_endpt, params=params)
result = json.loads(response.content)
print(f"Warnings: {result['warnings']}")
gdc_entries = result["data"]["hits"]
with open("gdc-head-and-mouth.json", "w") as file:
json.dump(gdc_entries, file, indent=2, sort_keys=True)
# pd_gdc_entries = pd.DataFrame(gdc_entries)
df_gdc_entries = pd.json_normalize(gdc_entries)
df_gdc_entries.shape, list(df_gdc_entries.columns)
df_gdc_entries.head(5)
df_gdc_entries.describe()
###Output
_____no_output_____ |
Simulations/EthereumPricePrediction/EthereumPricePredictionParametersRBF.ipynb | ###Markdown
Ethereum Price Prediction Based on [Ethereum (ETH) Price Prediction using Machine Learning (SVR) & Python](https://www.youtube.com/watch?v=HiDEAWdAif0) from [Computer Science](https://www.youtube.com/channel/UCbmb5IoBtHZTpYZCDBOC1CA) **Disclaimer:** _Investing in the stock market involves risk and can lead to monetary loss. This material is purely for educational purposes and should not be taken as professional investment advice. Invest at your own discretion._
###Code
import pandas as pd
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
###Output
_____no_output_____
###Markdown
Load the Ethereum data
###Code
df = pd.read_csv("ETH.csv")
###Output
_____no_output_____
###Markdown
Set the date as index
###Code
df = df.set_index(pd.DatetimeIndex(df['Date']))
###Output
_____no_output_____
###Markdown
Show the data
###Code
df
future_days = 5
###Output
_____no_output_____
###Markdown
Create a new column
###Code
df[str(future_days)+"_Day_Price_Forecast"] = df[['Close']].shift(-future_days)
###Output
_____no_output_____
###Markdown
Show the data
###Code
df[['Close', str(future_days)+"_Day_Price_Forecast"]]
X = np.array(df[['Close']])
X = X[:df.shape[0] - future_days]
X
y = np.array(df[str(future_days)+"_Day_Price_Forecast"])
y = y[:-future_days]
y
###Output
_____no_output_____
###Markdown
Split the data
###Code
from sklearn.model_selection import train_test_split, GridSearchCV
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
from sklearn.svm import SVR
params = {
"C": [1e3, 1e4, 1e5, 1e6, 1e7],
"kernel": ['rbf'],
"gamma": [1e-3, 1e-4, 1e-5, 1e-6, 1e-7],
}
grid_search_svr = GridSearchCV(
SVR(), params
)
grid_search_svr.fit(x_train, y_train)
print("best_score: ", grid_search_svr.best_score_)
print("best_params: ", grid_search_svr.best_params_)
svr_rbf = grid_search_svr.best_estimator_
svr_rbf.fit(x_train, y_train)
svr_rbf_confidence = svr_rbf.score(x_test, y_test)
svr_rbf_confidence
svm_prediction = svr_rbf.predict(x_test)
svm_prediction
print(y)
plt.figure(figsize=(12,4))
plt.plot(svm_prediction, label="Prediction", lw=2, alpha=0.7)
plt.plot(y_test, label="Actual", lw=2, alpha=0.7)
plt.title("Prediction vs Actual")
plt.ylabel("Price in USD")
plt.xlabel("Time")
plt.legend()
plt.xticks(rotation=45)
plt.show()
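# Forecast sketch: the last `future_days` closing prices were excluded from training
# (their targets are unknown), so we can feed them to the tuned model to project
# the next few days. Illustrative only -- not investment advice.
latest_closes = np.array(df[['Close']])[-future_days:]
print(svr_rbf.predict(latest_closes))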
###Output
_____no_output_____ |
02. Feature Engineering/Feature-Engineering.ipynb | ###Markdown
Common stuff
###Code
#Common imports
import pandas as pd
from IPython.display import Markdown, display, clear_output
from scipy import stats
from IPython.core.debugger import set_trace
from pathlib import Path
###Output
_____no_output_____
###Markdown
Pickling
###Code
import _pickle as cPickle
from pathlib import Path
def dumpPickle(fileName, content):
pickleFile = open(fileName, 'wb')
cPickle.dump(content, pickleFile, -1)
pickleFile.close()
def loadPickle(fileName):
file = open(fileName, 'rb')
content = cPickle.load(file)
file.close()
return content
def pickleExists(fileName):
file = Path(fileName)
if file.is_file():
return True
return False
###Output
_____no_output_____
###Markdown
Displaying progress
###Code
#Displaying the percentage completed
def printProgress(currentStep, maxStep):
stepSize = maxStep / 100
if (int(currentStep / stepSize) > ((currentStep - 1) / stepSize)):
clear_output()
print('{}%'.format(int(currentStep / stepSize)))
###Output
_____no_output_____
###Markdown
Reading the dataset
###Code
train = pd.read_json('../data/squad-v1/train-v1.1.json', orient='columns')
dev = pd.read_json('../data/squad-v1/dev-v1.1.json', orient='columns')
df = pd.concat([train, dev], ignore_index=True)
df.head()
###Output
_____no_output_____
###Markdown
Extracting words and its features
###Code
import spacy
from spacy import displacy
nlp = spacy.load('en_core_web_md')
#There seems to be a bug with spacy's stop words.
from spacy.lang.en.stop_words import STOP_WORDS
for word in STOP_WORDS:
for w in (word, word[0].capitalize(), word.upper()):
lex = nlp.vocab[w]
lex.is_stop = True
###Output
_____no_output_____
###Markdown
Extracting words from a paragraph
###Code
currText = df['data'][0]['paragraphs'][0]['context']
currQas = df['data'][0]['paragraphs'][0]['qas']
currDoc = nlp(currText)
#Extract answers and the sentence they are in
def extractAnswers(qas, doc):
answers = []
senStart = 0
senId = 0
for sentence in doc.sents:
senLen = len(sentence.text)
for answer in qas:
answerStart = answer['answers'][0]['answer_start']
if (answerStart >= senStart and answerStart < (senStart + senLen)):
answers.append({'sentenceId': senId, 'text': answer['answers'][0]['text']})
senStart += senLen
senId += 1
return answers
currAnswers = extractAnswers(currQas, currDoc)
currAnswers
#TODO - Clean answers from stopwords?
def tokenIsAnswer(token, sentenceId, answers):
for i in range(len(answers)):
if (answers[i]['sentenceId'] == sentenceId):
if (answers[i]['text'] == token):
return True
return False
tokenIsAnswer('the Main Building', 4, currAnswers)
#Save named entities start points
def getNEStartIndexs(doc):
neStarts = {}
for ne in doc.ents:
neStarts[ne.start] = ne
return neStarts
currNeStarts = getNEStartIndexs(currDoc)
if 6 in currNeStarts:
print(currNeStarts[6].label_)
def getSentenceStartIndexes(doc):
senStarts = []
for sentence in doc.sents:
senStarts.append(sentence[0].i)
return senStarts
def getSentenceForWordPosition(wordPos, senStarts):
for i in range(1, len(senStarts)):
if (wordPos < senStarts[i]):
return i - 1
senStarts = getSentenceStartIndexes(currDoc)
senStarts
getSentenceForWordPosition(108, senStarts)
#Creating the dataframe
wordColums = ['text', 'isAnswer', 'titleId', 'paragrapghId', 'sentenceId','wordCount', 'NER', 'POS', 'TAG', 'DEP','shape']
wordDf = pd.DataFrame(columns=wordColums)
#Save to pickle
#load df
#Add new words to array
newWord = ['koala', True, 0, 0, 4, 1, None, None, None, None, 'xxxxx']
newWords = []
#newWords.append(newWord)
#Make array to dataframe
newWordsDf = pd.DataFrame(newWords, columns=wordColums)
newWordsDf
#Merge dataframes
def addWordsForParagrapgh(newWords, titleId, paragraphId):
text = df['data'][titleId]['paragraphs'][paragraphId]['context']
qas = df['data'][titleId]['paragraphs'][paragraphId]['qas']
doc = nlp(text)
answers = extractAnswers(qas, doc)
neStarts = getNEStartIndexs(doc)
senStarts = getSentenceStartIndexes(doc)
#index of word in spacy doc text
i = 0
while (i < len(doc)):
#If the token is a start of a Named Entity, add it and push to index to end of the NE
if (i in neStarts):
word = neStarts[i]
#add word
currentSentence = getSentenceForWordPosition(word.start, senStarts)
wordLen = word.end - word.start
shape = ''
for wordIndex in range(word.start, word.end):
shape += (' ' + doc[wordIndex].shape_)
newWords.append([word.text,
tokenIsAnswer(word.text, currentSentence, answers),
titleId,
paragraphId,
currentSentence,
wordLen,
word.label_,
None,
None,
None,
shape])
i = neStarts[i].end - 1
#If not a NE, add the word if it's not a stopword or a non-alpha (not regular letters)
else:
if (doc[i].is_stop == False and doc[i].is_alpha == True):
word = doc[i]
currentSentence = getSentenceForWordPosition(i, senStarts)
wordLen = 1
newWords.append([word.text,
tokenIsAnswer(word.text, currentSentence, answers),
titleId,
paragraphId,
currentSentence,
wordLen,
None,
word.pos_,
word.tag_,
word.dep_,
word.shape_])
i += 1
newWords
addWordsForParagrapgh(newWords, 0, 0)
newWords[0]
newWordsDf = pd.DataFrame(newWords, columns=wordColums)
newWordsDf.head()
newWordsDf[newWordsDf['isAnswer'] == True].head()
###Output
_____no_output_____
###Markdown
Generating words for 2 titles
###Code
words = []
#titlesCount = len(df['data'])
titlesCount = 2
for titleId in range(titlesCount):
paragraphsCount = len(df['data'][titleId]['paragraphs'])
printProgress(titleId, titlesCount - 1)
for paragraphId in range(paragraphsCount):
addWordsForParagrapgh(words, titleId, paragraphId)
wordsDf = pd.DataFrame(words, columns=wordColums)
wordsDf.head()
print("Total words for 2 articles:", len(wordsDf))
###Output
Total words for 2 titles: 9147
###Markdown
Generating the entire word dataset
###Code
wordPickleName = 'pickles/wordsDf.pkl'
#If the dataframe is already generated, load it.
if (pickleExists(wordPickleName)):
print("Pickle found. Saved some time.")
wordsDf = loadPickle(wordPickleName)
else:
#Extracting words
words = []
titlesCount = len(df['data'])
for titleId in range(titlesCount):
paragraphsCount = len(df['data'][titleId]['paragraphs'])
printProgress(titleId, titlesCount - 1)
for paragraphId in range(paragraphsCount):
addWordsForParagrapgh(words, titleId, paragraphId)
#Create the dataframe
wordColums = ['text', 'isAnswer', 'titleId', 'paragrapghId', 'sentenceId','wordCount', 'NER', 'POS', 'TAG', 'DEP','shape']
wordsDf = pd.DataFrame(words, columns=wordColums)
#Pickle the result
dumpPickle(wordPickleName, wordsDf)
print("Result was not pickled. You had to wait.")
print("Total words for all articles:", len(wordsDf))
totalAnswers = len(wordsDf[wordsDf['isAnswer'] == True])
print(totalAnswers, 'total answers', '{:.2f}%'.format((totalAnswers / len(wordsDf)) * 100), 'of all words are answers.')
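# Quick sketch: which named-entity types dominate the answer words?
# (value_counts drops the None NER tags of non-entity tokens, so only entities are counted.)
print(wordsDf[wordsDf['isAnswer'] == True]['NER'].value_counts().head(10))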
###Output
33548 total answers 2.53% of all words are answers.
|
jupyter/synthetic/synthetic_gradual_mean.ipynb | ###Markdown
ChangeFinder
###Code
smooth1 = 5
smooth2 = 5
for r_cf in [0.003, 0.005, 0.01, 0.03, 0.1]:
for order in [2, 3, 4, 5]:
scores_cf = []
for i in range(N_trial):
X = generate_multiple_changing_mean_gradual(N, sigma=SIGMA, coef=COEF, seed=i)
# ChangeFinder
cf = ChangeFinder(r=r_cf, order1=order, order2=order, smooth1=smooth1, smooth2=smooth2)
scores = []
for x in X:
score, _ = cf.update(x)
scores.append(score)
scores = np.array(scores)
scores_cf.append(scores)
scores_cf = np.array(scores_cf)
auc_list = calc_auc_average(scores_cf)
print('r_cf =', r_cf, 'order =', order, ':', np.mean(auc_list), '+/-', np.std(auc_list))
###Output
r_cf = 0.003 order = 2 : 0.4492062336361773 +/- 0.027416174408318592
r_cf = 0.003 order = 3 : 0.45604155481956055 +/- 0.021666809262538358
r_cf = 0.003 order = 4 : 0.4829613377010413 +/- 0.017168242331332118
r_cf = 0.003 order = 5 : 0.4768801961245641 +/- 0.018109670983715766
r_cf = 0.005 order = 2 : 0.46080019039345566 +/- 0.02630977626974498
r_cf = 0.005 order = 3 : 0.4630936965046272 +/- 0.026587154878157642
r_cf = 0.005 order = 4 : 0.4697871881372088 +/- 0.019321911517344463
r_cf = 0.005 order = 5 : 0.47232447364293384 +/- 0.018064782861951158
r_cf = 0.01 order = 2 : 0.4985402249610888 +/- 0.035127721135277644
r_cf = 0.01 order = 3 : 0.49385323880020754 +/- 0.03289042261446822
r_cf = 0.01 order = 4 : 0.48719251686152437 +/- 0.03259074193369835
r_cf = 0.01 order = 5 : 0.48679292877740377 +/- 0.0327376185133799
r_cf = 0.03 order = 2 : 0.5047395822323573 +/- 0.029162340358474533
r_cf = 0.03 order = 3 : 0.49791978288148064 +/- 0.02686503248839184
r_cf = 0.03 order = 4 : 0.49380022013489216 +/- 0.028473700210129085
r_cf = 0.03 order = 5 : 0.49371969407946337 +/- 0.03315134546069698
r_cf = 0.1 order = 2 : 0.4984791447376359 +/- 0.025129819959425245
r_cf = 0.1 order = 3 : 0.5021146577020066 +/- 0.024638911706435205
r_cf = 0.1 order = 4 : 0.5028799845561709 +/- 0.029558027643632332
r_cf = 0.1 order = 5 : 0.5015935809172187 +/- 0.01721605869036014
###Markdown
BOCPD
###Code
ALPHA = 0.1
BETA = 1.0
KAPPA = 1.0
MU = 0.0
DELAY = 15
for LAMBDA in [100, 600]:
for THRESHOLD in [0.1, 0.3]:
scores_bocpd = []
for i in range(N_trial):
X = generate_multiple_changing_mean_gradual(N, sigma=SIGMA, coef=COEF, seed=i)
# BOCPD
bocd = BOCD(partial(constant_hazard, LAMBDA),
StudentT(ALPHA, BETA, KAPPA, MU), X)
change_points = []
scores = [np.nan] * DELAY
for x in X[:DELAY]:
bocd.update(x)
for x in X[DELAY:]:
bocd.update(x)
if bocd.growth_probs[DELAY] >= THRESHOLD:
change_points.append(bocd.t - DELAY + 1)
score = np.sum(bocd.growth_probs[:bocd.t - DELAY] * 1.0 / (1.0 + np.arange(1, bocd.t - DELAY + 1)))
scores.append(score)
scores_bocpd.append(scores)
scores_bocpd = np.array(scores_bocpd)
auc_list = calc_auc_average(scores_bocpd)
print('LAMBDA =', LAMBDA, 'THRESHOLD =', THRESHOLD, ':', np.mean(auc_list), '+/-', np.std(auc_list))
###Output
LAMBDA = 100 THRESHOLD = 0.1 : 0.4063147273198924 +/- 0.04844171574690272
LAMBDA = 100 THRESHOLD = 0.3 : 0.4063147273198924 +/- 0.04844171574690272
LAMBDA = 600 THRESHOLD = 0.1 : 0.4159691242866278 +/- 0.03845106903470217
LAMBDA = 600 THRESHOLD = 0.3 : 0.4159691242866278 +/- 0.03845106903470217
###Markdown
Adwin2
###Code
M = 5
for delta in [0.1, 0.3, 0.5, 0.7, 0.9]:
scores_ad = []
for i in range(N_trial):
X = generate_multiple_changing_mean_gradual(N, sigma=SIGMA, coef=COEF, seed=i)
# ADWIN2
ad = ADWIN2()
scores = ad.transform(X, delta=delta, M=M)
scores_ad.append(InvRunLen(scores))
scores_ad = np.array(scores_ad)
auc_list = calc_auc_average(scores_ad)
print('delta =', delta, ':', np.mean(auc_list), '+/-', np.std(auc_list))
###Output
delta = 0.1 : 0.5 +/- 0.0
delta = 0.3 : 0.5 +/- 0.0
delta = 0.5 : 0.5 +/- 0.0
delta = 0.7 : 0.5 +/- 0.0
delta = 0.9 : 0.5 +/- 0.0
###Markdown
D-MDL
###Code
h = 100
T = 100
mu_max = 50.0
sigma_min = 0.005
scores_list_0th = []
scores_list_1st = []
scores_list_2nd = []
for i in range(N_trial):
X = generate_multiple_changing_mean_gradual(N, sigma=SIGMA, coef=COEF, seed=i)
len_X = len(X)
norm1d = Norm1D()
smdl = SMDL(norm1d)
scores_0th = np.array([np.nan]*h + [ smdl.calc_change_score(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
for t in range(h, len_X-h)] + [np.nan]*h)
scores_list_0th.append(scores_0th)
scores_1st = np.array([np.nan]*h + [ smdl.calc_change_score_1st(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
for t in range(h, len_X-h)] + [np.nan]*h)
scores_list_1st.append(scores_1st)
scores_2nd = np.array([np.nan]*h + [ smdl.calc_change_score_2nd(X[(t-h):(t+h)], h, mu_max=mu_max, sigma_min=sigma_min) \
for t in range(h, len_X-h)] + [np.nan]*h)
scores_list_2nd.append(scores_2nd)
scores_list_0th = np.array(scores_list_0th)
scores_list_1st = np.array(scores_list_1st)
scores_list_2nd = np.array(scores_list_2nd)
auc_list_0th = calc_auc_average(scores_list_0th, T=T)
auc_list_1st = calc_auc_average(scores_list_1st, T=T)
auc_list_2nd = calc_auc_average(scores_list_2nd, T=T)
print(np.mean(auc_list_0th), '+/-', np.std(auc_list_0th))
print(np.mean(auc_list_1st), '+/-', np.std(auc_list_1st))
print(np.mean(auc_list_2nd), '+/-', np.std(auc_list_2nd))
###Output
0.6201857067964913 +/- 0.0033140487836213293
|
is_lenin.ipynb | ###Markdown
Objective: Look for an algorithm which can tell if a statement was written by V.I. Lenin
###Code
# Import libraries
import json
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
vocab_size = 20000
embedding_dim = 16
max_length = 100
trunc_type = 'post'
padding_type = 'post'
oov_tok = '<OOV>'
# Download the JSON dataset file
!wget http://leninism.masaccio.io/lenin_dataset.json
# Convert JSON to Lists
with open('lenin_dataset.json', 'r') as f:
datastore = json.load(f)
statements = []
labels = []
for item in datastore:
statements.append(item['statement'])
labels.append(item['is_lenin'])
# Split the data between training and testing sets
training_size = round(len(statements) * 0.8)
training_statements = statements[0:training_size]
testing_statements = statements[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Tokenize the words in the set and fit the sentences
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_statements)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_statements)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_statements)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Convert lists to arrays
import numpy as np
training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(24, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(
loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']
)
model.summary()
num_epochs = 30
history = model.fit(training_padded,
training_labels,
epochs=num_epochs,
validation_data=(testing_padded, testing_labels), verbose=2)
# Plot loss and accuracy per iteration
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
# Test accuracy with new statements
statement = ["The transfer of state enterprises to the so-called profit basis is inevitably and inseparably connected with the New Economic Policy; in the near future this is bound to become the predominant, if not the sole, form of state enterprise. In actual fact, this means that with the free market now permitted and developing the state enterprises will to a large extent be put on a commercial basis. In view of the urgent need to increase the productivity of labour and make every state enterprise pay its way and show a profit, and in view of the inevitable rise of narrow departmental interests and excessive departmental zeal, this circumstance is bound; to create a certain conflict of interests in matters concerning labour conditions between the masses of workers and the directors and managers of the state enterprises, or the government departments in charge of them. Therefore, as regards the socialised enterprises, it is undoubtedly the duty of the trade unions to protect the interests of the working people, to facilitate as far as possible the improvement of their standard of living, and constantly to correct the blunders and excesses of business organisations resulting from bureaucratic distortions of the state apparatus."]
sequences = tokenizer.texts_to_sequences(statement)
padded = pad_sequences(sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
print(model.predict(padded))
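# Sketch: turn the sigmoid probability into a yes/no call with an assumed 0.5 threshold
# (a threshold tuned on a validation set would be preferable).
probability = float(model.predict(padded)[0][0])
print("Likely written by Lenin:", probability >= 0.5)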
###Output
[[0.99989736]]
|
Regression/Linear Models/LassoRegression_MinMaxScaler_PowerTransformer.ipynb | ###Markdown
Lasso Regression with MinMax Scaler & Power Transformer This code template is for regression analysis using the Lasso Regressor, where the rescaling method used is MinMaxScaler and feature transformation is done via PowerTransformer. Required Packages
###Code
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler, PowerTransformer
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
InitializationFilepath of CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of features which are required for model training.
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data FetchingPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.We will use the pandas library to read the CSV file from its storage path, and we use the head function to display the initial rows.
###Code
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature SelectionIt is the process of reducing the number of input variables when developing a predictive model, both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.We will assign all the required input features to X and the target/outcome to Y.
###Code
X=df[features]
Y=df[target]
###Output
_____no_output_____
###Markdown
Data PreprocessingSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
###Output
_____no_output_____
###Markdown
Calling preprocessing functions on the feature and target set.
###Code
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
###Output
_____no_output_____
###Markdown
Correlation MapIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Data SplittingThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
###Output
_____no_output_____
###Markdown
ModelLinear Model trained with L1 prior as regularizer (aka the Lasso)The optimization objective for Lasso is:(1 / (2 n_samples)) ||y - Xw||^2_2 + alpha * ||w||_1 Technically the Lasso model is optimizing the same objective function as the Elastic Net with l1_ratio=1.0 (no L2 penalty). Parameters:alpha: float, default=1.0 -> Constant that multiplies the L1 term. Defaults to 1.0. alpha = 0 is equivalent to an ordinary least square, solved by the LinearRegression object. For numerical reasons, using alpha = 0 with the Lasso object is not advised. Given this, you should use the LinearRegression object.fit_intercept: bool, default=True -> Whether to calculate the intercept for this model. If set to False, no intercept will be used in calculations (i.e. data is expected to be centered).normalize: bool, default=False -> This parameter is ignored when fit_intercept is set to False. If True, the regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm. If you wish to standardize, please use StandardScaler before calling fit on an estimator with normalize=False.precompute: bool or array-like of shape (n_features, n_features), default=False -> Whether to use a precomputed Gram matrix to speed up calculations. The Gram matrix can also be passed as argument. For sparse input this option is always False to preserve sparsity.copy_X: bool, default=True -> If True, X will be copied; else, it may be overwritten.max_iter: int, default=1000 -> The maximum number of iterations.tol: float, default=1e-4 -> The tolerance for the optimization: if the updates are smaller than tol, the optimization code checks the dual gap for optimality and continues until it is smaller than tol.warm_start: bool, default=False -> When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. See the Glossary.positive: bool, default=False -> When set to True, forces the coefficients to be positive.random_state: int, RandomState instance, default=None -> The seed of the pseudo random number generator that selects a random feature to update. Used when selection == ‘random’. Pass an int for reproducible output across multiple function calls. See Glossary.selection: {‘cyclic’, ‘random’}, default=’cyclic’ -> If set to ‘random’, a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to ‘random’) often leads to significantly faster convergence especially when tol is higher than 1e-4.MinMax ScalerTransform features by scaling each feature to a given range.This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one. Power TransformerApply a power transform featurewise to make the data more Gaussian-like. Power transforms are a family of parametric, monotonic transformations that help stabilize variance and reduce skewness; by default, PowerTransformer applies the Yeo-Johnson transform, which supports both positive and negative values, and standardizes the output to zero mean and unit variance.
###Code
model=make_pipeline(MinMaxScaler(), PowerTransformer(), Lasso())
model.fit(x_train,y_train)
###Output
_____no_output_____
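###Markdown
The pipeline above keeps Lasso's default alpha=1.0. As a minimal sketch (the alpha and max_iter values below are illustrative, not tuned for your data), the regularization strength can be changed when the pipeline is built.
###Code
# Illustrative only: adjust the L1 penalty through Lasso's alpha parameter.
model_custom = make_pipeline(MinMaxScaler(), PowerTransformer(), Lasso(alpha=0.1, max_iter=5000))
model_custom.fit(x_train, y_train)
print("Custom-alpha accuracy score {:.2f} %".format(model_custom.score(x_test, y_test)*100))
###Output
_____no_output_____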
###Markdown
Model AccuracyWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.score: The score function returns the coefficient of determination R2 of the prediction.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
###Output
Accuracy score 91.89 %
###Markdown
r2_score: The r2_score function computes the proportion of the variance in the target that is explained by our model. mae: The mean absolute error function calculates the average absolute distance between the real data and the predicted data. mse: The mean squared error function averages the squared errors, penalizing the model more heavily for large errors.
###Code
y_pred=model.predict(x_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
###Output
R2 Score: 91.89 %
Mean Absolute Error 82748.49
Mean Squared Error 10795447086.08
###Markdown
Prediction PlotFinally, we plot the first few actual test-set values (y_test) against their record number, and overlay the model's predictions for the same records so the two curves can be compared.
###Code
n=len(x_test) if len(x_test)<20 else 20
plt.figure(figsize=(14,10))
plt.plot(range(n),y_test[0:n], color = "green")
plt.plot(range(n),model.predict(x_test[0:n]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
###Output
_____no_output_____ |
jupyter/transformers/HuggingFace in Spark NLP - XlmRoBertaForTokenClassification.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/transformers/HuggingFace%20in%20Spark%20NLP%20-%20XlmRoBertaForTokenClassification.ipynb) Import XlmRoBertaForTokenClassification models from HuggingFace 🤗 into Spark NLP 🚀 Let's keep in mind a few things before we start 😊 - This feature is only in `Spark NLP 3.3.x` and after. So please make sure you have upgraded to the latest Spark NLP release- You can import XLM-RoBERTa models trained/fine-tuned for token classification via `XLMRobertaForTokenClassification` or `TFXLMRobertaForTokenClassification`. These models are usually under `Token Classification` category and have `xlm-roberta` in their labels- Reference: [TFXLMRobertaForTokenClassification](https://huggingface.co/transformers/model_doc/xlmroberta.htmltfxlmrobertafortokenclassification)- Some [example models](https://huggingface.co/models?filter=xlm-roberta&pipeline_tag=token-classification) Export and Save HuggingFace model - Let's install `HuggingFace` and `TensorFlow`. You don't need `TensorFlow` to be installed for Spark NLP, however, we need it to load and save models from HuggingFace.- We lock TensorFlow on `2.4.1` version and Transformers on `4.10.0`. This doesn't mean it won't work with the future releases, but we wanted you to know which versions have been tested successfully.- XLMRobertaTokenizer requires the `SentencePiece` library, so we install that as well
###Code
!pip install -q transformers==4.10.0 tensorflow==2.4.1 sentencepiece
###Output
[K |████████████████████████████████| 394.3 MB 13 kB/s
[K |████████████████████████████████| 1.2 MB 63.9 MB/s
[K |████████████████████████████████| 636 kB 57.4 MB/s
[K |████████████████████████████████| 895 kB 61.0 MB/s
[K |████████████████████████████████| 3.3 MB 48.6 MB/s
[K |████████████████████████████████| 52 kB 1.8 MB/s
[K |████████████████████████████████| 2.9 MB 41.5 MB/s
[K |████████████████████████████████| 462 kB 41.6 MB/s
[K |████████████████████████████████| 3.8 MB 47.9 MB/s
[?25h
###Markdown
- HuggingFace comes with a native `saved_model` feature inside `save_pretrained` function for TensorFlow based models. We will use that to save it as TF `SavedModel`.- We'll use [wpnbos/xlm-roberta-base-conll2002-dutch](https://huggingface.co/wpnbos/xlm-roberta-base-conll2002-dutch) model from HuggingFace as an example- In addition to `TFXLMRobertaForTokenClassification` we also need to save the `XLMRobertaTokenizer`. This is the same for every model, these are assets needed for tokenization inside Spark NLP.
###Code
from transformers import TFXLMRobertaForTokenClassification, XLMRobertaTokenizer
MODEL_NAME = 'wpnbos/xlm-roberta-base-conll2002-dutch'
tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_NAME)
tokenizer.save_pretrained('./{}_tokenizer/'.format(MODEL_NAME))
# just in case if there is no TF/Keras file provided in the model
# we can just use `from_pt` and convert PyTorch to TensorFlow
try:
print('try downloading TF weights')
model = TFXLMRobertaForTokenClassification.from_pretrained(MODEL_NAME)
except:
print('try downloading PyTorch weights')
model = TFXLMRobertaForTokenClassification.from_pretrained(MODEL_NAME, from_pt=True)
model.save_pretrained("./{}".format(MODEL_NAME), saved_model=True)
###Output
_____no_output_____
###Markdown
Let's have a look inside these two directories and see what we are dealing with:
###Code
!ls -l {MODEL_NAME}
!ls -l {MODEL_NAME}/saved_model/1
!ls -l {MODEL_NAME}_tokenizer
###Output
total 4960
-rw-r--r-- 1 root root 5069051 Sep 20 12:17 sentencepiece.bpe.model
-rw-r--r-- 1 root root 239 Sep 20 12:17 special_tokens_map.json
-rw-r--r-- 1 root root 633 Sep 20 12:17 tokenizer_config.json
###Markdown
- as you can see, we need the SavedModel from the `saved_model/1/` path- we will also need the `sentencepiece.bpe.model` file from the tokenizer- all we need is to copy the `sentencepiece.bpe.model` file into `saved_model/1/assets`, which Spark NLP will look for- in addition to the SentencePiece model, we also need the `labels` and their `ids`, which are saved inside the model's config. We will save these inside `labels.txt`
###Code
asset_path = '{}/saved_model/1/assets'.format(MODEL_NAME)
# let's copy sentencepiece.bpe.model file to saved_model/1/assets
!cp {MODEL_NAME}_tokenizer/sentencepiece.bpe.model {asset_path}
# get label2id dictionary
labels = model.config.label2id
# sort the dictionary based on the id
labels = sorted(labels, key=labels.get)
with open(asset_path+'/labels.txt', 'w') as f:
f.write('\n'.join(labels))
###Output
_____no_output_____
###Markdown
Voila! We have our `sentencepiece.bpe.model` and `labels.txt` inside the assets directory
###Code
!ls -l {asset_path}
###Output
total 4956
-rw-r--r-- 1 root root 71 Sep 20 12:26 labels.txt
-rw-r--r-- 1 root root 5069051 Sep 20 12:26 sentencepiece.bpe.model
###Markdown
Import and Save XlmRoBertaForTokenClassification in Spark NLP - Let's install and setup Spark NLP in Google Colab- This part is pretty easy via our simple script
###Code
! wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
###Output
_____no_output_____
###Markdown
Let's start Spark with Spark NLP included via our simple `start()` function
###Code
import sparknlp
# let's start Spark with Spark NLP
spark = sparknlp.start()
###Output
_____no_output_____
###Markdown
- Let's use the `loadSavedModel` function in `XlmRoBertaForTokenClassification`, which allows us to load the TensorFlow model in SavedModel format- Most params can be set later when you load this model in `XlmRoBertaForTokenClassification` at runtime, like `setMaxSentenceLength`, so don't worry about what you set now- `loadSavedModel` accepts two params: the first is the path to the TF SavedModel, the second is the SparkSession, i.e. the `spark` variable we previously started via `sparknlp.start()`- NOTE: `loadSavedModel` only accepts local paths and not distributed file systems such as `HDFS`, `S3`, `DBFS`, etc. That is why we use `write.save` so we can use `.load()` from any file system
###Code
from sparknlp.annotator import *
tokenClassifier = XlmRoBertaForTokenClassification\
.loadSavedModel('{}/saved_model/1'.format(MODEL_NAME), spark)\
.setInputCols(["sentence",'token'])\
.setOutputCol("ner")\
.setCaseSensitive(True)\
.setMaxSentenceLength(128)
###Output
_____no_output_____
###Markdown
- Let's save it on disk so it is easier to be moved around and also be used later via `.load` function
###Code
tokenClassifier.write().overwrite().save("./{}_spark_nlp".format(MODEL_NAME))
###Output
_____no_output_____
###Markdown
Let's clean up stuff we don't need anymore
###Code
!rm -rf {MODEL_NAME}_tokenizer {MODEL_NAME}
###Output
_____no_output_____
###Markdown
Awesome 😎 !This is your XlmRoBertaForTokenClassification model from HuggingFace 🤗 loaded and saved by Spark NLP 🚀
###Code
! ls -l {MODEL_NAME}_spark_nlp
###Output
total 1096760
drwxr-xr-x 4 root root 4096 Sep 20 12:31 fields
drwxr-xr-x 2 root root 4096 Sep 20 12:31 metadata
-rw-r--r-- 1 root root 1117998527 Sep 20 12:32 xlm_roberta_classification_tensorflow
-rw-r--r-- 1 root root 5069051 Sep 20 12:32 xlmroberta_spp
###Markdown
Now let's see how we can use it on other machines, clusters, or any place you wish to use your new and shiny XlmRoBertaForTokenClassification model 😊
###Code
tokenClassifier_loaded = XlmRoBertaForTokenClassification.load("./{}_spark_nlp".format(MODEL_NAME))\
.setInputCols(["sentence",'token'])\
.setOutputCol("ner")
tokenClassifier_loaded.getCaseSensitive()
###Output
_____no_output_____
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/transformers/HuggingFace%20in%20Spark%20NLP%20-%20XlmRoBertaForTokenClassification.ipynb) Import XlmRoBertaForTokenClassification models from HuggingFace 🤗 into Spark NLP 🚀 Let's keep in mind a few things before we start 😊 - This feature is only in `Spark NLP 3.3.x` and after. So please make sure you have upgraded to the latest Spark NLP release- You can import XLM-RoBERTa models trained/fine-tuned for token classification via `XLMRobertaForTokenClassification` or `TFXLMRobertaForTokenClassification`. These models are usually under `Token Classification` category and have `xlm-roberta` in their labels- Reference: [TFXLMRobertaForTokenClassification](https://huggingface.co/transformers/model_doc/xlmroberta.htmltfxlmrobertafortokenclassification)- Some [example models](https://huggingface.co/models?filter=xlm-roberta&pipeline_tag=token-classification) Export and Save HuggingFace model - Let's install `HuggingFace` and `TensorFlow`. You don't need `TensorFlow` to be installed for Spark NLP, however, we need it to load and save models from HuggingFace.- We lock TensorFlow on `2.4.4` version and Transformers on `4.15.0`. This doesn't mean it won't work with the future releases, but we wanted you to know which versions have been tested successfully.- XLMRobertaTokenizer requires the `SentencePiece` library, so we install that as well
###Code
!pip install -q transformers==4.15.0 tensorflow==2.4.4 sentencepiece
###Output
[K |████████████████████████████████| 3.4 MB 5.1 MB/s
[K |████████████████████████████████| 394.5 MB 33 kB/s
[K |████████████████████████████████| 1.2 MB 62.5 MB/s
[K |████████████████████████████████| 61 kB 347 kB/s
[K |████████████████████████████████| 895 kB 61.6 MB/s
[K |████████████████████████████████| 3.3 MB 41.7 MB/s
[K |████████████████████████████████| 596 kB 48.6 MB/s
[K |████████████████████████████████| 3.8 MB 47.5 MB/s
[K |████████████████████████████████| 2.9 MB 37.3 MB/s
[K |████████████████████████████████| 462 kB 46.9 MB/s
[?25h Building wheel for wrapt (setup.py) ... [?25l[?25hdone
###Markdown
- HuggingFace comes with a native `saved_model` feature inside `save_pretrained` function for TensorFlow based models. We will use that to save it as TF `SavedModel`.- We'll use [xlm-roberta-large-finetuned-conll03-english](https://huggingface.co/xlm-roberta-large-finetuned-conll03-english) model from HuggingFace as an example- In addition to `TFXLMRobertaForTokenClassification` we also need to save the `XLMRobertaTokenizer`. This is the same for every model, these are assets needed for tokenization inside Spark NLP.
###Code
from transformers import TFXLMRobertaForTokenClassification, XLMRobertaTokenizer
MODEL_NAME = 'xlm-roberta-large-finetuned-conll03-english'
tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_NAME)
tokenizer.save_pretrained('./{}_tokenizer/'.format(MODEL_NAME))
# just in case if there is no TF/Keras file provided in the model
# we can just use `from_pt` and convert PyTorch to TensorFlow
try:
print('try downloading TF weights')
model = TFXLMRobertaForTokenClassification.from_pretrained(MODEL_NAME)
except:
print('try downloading PyTorch weights')
model = TFXLMRobertaForTokenClassification.from_pretrained(MODEL_NAME, from_pt=True)
model.save_pretrained("./{}".format(MODEL_NAME), saved_model=True)
###Output
_____no_output_____
###Markdown
Let's have a look inside these two directories and see what we are dealing with:
###Code
!ls -l {MODEL_NAME}
!ls -l {MODEL_NAME}/saved_model/1
!ls -l {MODEL_NAME}_tokenizer
###Output
total 4960
-rw-r--r-- 1 root root 5069051 Dec 28 16:12 sentencepiece.bpe.model
-rw-r--r-- 1 root root 238 Dec 28 16:12 special_tokens_map.json
-rw-r--r-- 1 root root 637 Dec 28 16:12 tokenizer_config.json
###Markdown
- as you can see, we need the SavedModel from the `saved_model/1/` path- we will also need the `sentencepiece.bpe.model` file from the tokenizer- all we need is to copy the `sentencepiece.bpe.model` file into `saved_model/1/assets`, which Spark NLP will look for- in addition to the SentencePiece model, we also need the `labels` and their `ids`, which are saved inside the model's config. We will save these inside `labels.txt`
###Code
asset_path = '{}/saved_model/1/assets'.format(MODEL_NAME)
# let's copy sentencepiece.bpe.model file to saved_model/1/assets
!cp {MODEL_NAME}_tokenizer/sentencepiece.bpe.model {asset_path}
# get label2id dictionary
labels = model.config.label2id
# sort the dictionary based on the id
labels = sorted(labels, key=labels.get)
with open(asset_path+'/labels.txt', 'w') as f:
f.write('\n'.join(labels))
###Output
_____no_output_____
###Markdown
Voila! We have our `sentencepiece.bpe.model` and `labels.txt` inside the assets directory
###Code
! ls -l {asset_path}
###Output
total 4956
-rw-r--r-- 1 root root 45 Dec 28 16:15 labels.txt
-rw-r--r-- 1 root root 5069051 Dec 28 16:15 sentencepiece.bpe.model
###Markdown
Import and Save XlmRoBertaForTokenClassification in Spark NLP - Let's install and setup Spark NLP in Google Colab- This part is pretty easy via our simple script
###Code
! wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
###Output
--2021-12-28 16:26:13-- http://setup.johnsnowlabs.com/colab.sh
Resolving setup.johnsnowlabs.com (setup.johnsnowlabs.com)... 51.158.130.125
Connecting to setup.johnsnowlabs.com (setup.johnsnowlabs.com)|51.158.130.125|:80... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://setup.johnsnowlabs.com/colab.sh [following]
--2021-12-28 16:26:13-- https://setup.johnsnowlabs.com/colab.sh
Connecting to setup.johnsnowlabs.com (setup.johnsnowlabs.com)|51.158.130.125|:443... connected.
HTTP request sent, awaiting response... 302 Moved Temporarily
Location: https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp/master/scripts/colab_setup.sh [following]
--2021-12-28 16:26:13-- https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp/master/scripts/colab_setup.sh
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1275 (1.2K) [text/plain]
Saving to: ‘STDOUT’
- 100%[===================>] 1.25K --.-KB/s in 0s
2021-12-28 16:26:13 (49.9 MB/s) - written to stdout [1275/1275]
setup Colab for PySpark 3.0.3 and Spark NLP 3.3.4
Installing PySpark 3.0.3 and Spark NLP 3.3.4
###Markdown
Let's start Spark with Spark NLP included via our simple `start()` function
###Code
import sparknlp
# let's start Spark with Spark NLP
spark = sparknlp.start()
###Output
_____no_output_____
###Markdown
- Let's use the `loadSavedModel` function in `XlmRoBertaForTokenClassification`, which allows us to load the TensorFlow model in SavedModel format- Most params can be set later when you load this model in `XlmRoBertaForTokenClassification` at runtime, like `setMaxSentenceLength`, so don't worry about what you set now- `loadSavedModel` accepts two params: the first is the path to the TF SavedModel, the second is the SparkSession, i.e. the `spark` variable we previously started via `sparknlp.start()`- NOTE: `loadSavedModel` only accepts local paths and not distributed file systems such as `HDFS`, `S3`, `DBFS`, etc. That is why we use `write.save` so we can use `.load()` from any file system
###Code
from sparknlp.annotator import *
from sparknlp.base import *
tokenClassifier = XlmRoBertaForTokenClassification\
.loadSavedModel('{}/saved_model/1'.format(MODEL_NAME), spark)\
.setInputCols(["document",'token'])\
.setOutputCol("ner")\
.setCaseSensitive(True)\
.setMaxSentenceLength(128)
###Output
_____no_output_____
###Markdown
- Let's save it on disk so it is easier to be moved around and also be used later via `.load` function
###Code
tokenClassifier.write().overwrite().save("./{}_spark_nlp".format(MODEL_NAME))
###Output
_____no_output_____
###Markdown
Let's clean up stuff we don't need anymore
###Code
! rm -rf {MODEL_NAME}_tokenizer {MODEL_NAME}
###Output
_____no_output_____
###Markdown
Awesome 😎 !This is your XlmRoBertaForTokenClassification model from HuggingFace 🤗 loaded and saved by Spark NLP 🚀
###Code
! ls -l {MODEL_NAME}_spark_nlp
###Output
ls: cannot access '{MODEL_NAME}_spark_nlp': No such file or directory
###Markdown
Now let's see how we can use it on other machines, clusters, or any place you wish to use your new and shiny XlmRoBertaForTokenClassification model 😊
###Code
tokenClassifier_loaded = XlmRoBertaForTokenClassification.load("./{}_spark_nlp".format(MODEL_NAME))\
.setInputCols(["document",'token'])\
.setOutputCol("ner")
###Output
_____no_output_____
###Markdown
You can see what labels were used to train this model via `getClasses` function:
###Code
# .getClasses was introduced in spark-nlp==3.4.0
tokenClassifier_loaded.getClasses()
###Output
_____no_output_____
###Markdown
This is how you can use your loaded classifier model in Spark NLP 🚀 pipeline:
###Code
document_assembler = DocumentAssembler() \
.setInputCol('text') \
.setOutputCol('document')
tokenizer = Tokenizer() \
.setInputCols(['document']) \
.setOutputCol('token')
pipeline = Pipeline(stages=[
document_assembler,
tokenizer,
tokenClassifier_loaded
])
# couple of simple examples
example = spark.createDataFrame([["My name is Sarah and I live in London"], ['My name is Clara and I live in Berkeley, California.']]).toDF("text")
result = pipeline.fit(example).transform(example)
# result is a DataFrame
result.select("text", "ner.result").show()
###Output
+--------------------+--------------------+
| text| result|
+--------------------+--------------------+
|My name is Sarah ...|[O, O, O, I-PER, ...|
|My name is Clara ...|[O, O, O, I-PER, ...|
+--------------------+--------------------+
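###Markdown
Optionally, the token-level tags can be grouped into full entity chunks with Spark NLP's `NerConverter`. This is a sketch that simply appends the converter as an extra stage to the same pipeline shown above.
###Code
# Group I-/B- token tags into whole entity chunks (e.g. "Sarah", "Berkeley, California").
ner_converter = NerConverter() \
    .setInputCols(["document", "token", "ner"]) \
    .setOutputCol("ner_chunk")
chunk_pipeline = Pipeline(stages=[
    document_assembler,
    tokenizer,
    tokenClassifier_loaded,
    ner_converter
])
chunk_pipeline.fit(example).transform(example).select("ner_chunk.result").show(truncate=False)
###Output
_____no_output_____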
###Markdown
That's it! You can now go wild and use hundreds of `XlmRoBertaForTokenClassification` models from HuggingFace 🤗 in Spark NLP 🚀
###Code
###Output
_____no_output_____ |
examples/colab/cropnet_on_device.ipynb | ###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection View on TensorFlow.org Run in Google Colab View on GitHub Download notebook See TF Hub models This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset.You will:- Load the TFDS cassava dataset or your own data- Enrich the data with unknown (negative) examples to get a more robust model- Apply image augmentations to the data- Load and fine tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub- Export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly Imports and DependenciesBefore starting, you'll need to install some of the dependencies that will be needed like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!pip install --use-deprecated=legacy-resolver tflite-model-maker
!pip install -U tensorflow-datasets
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
###Markdown
Load a TFDS dataset to fine-tune onLet's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
###Markdown
Or alternatively load your own data to fine-tune onInstead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train split. Let's take a look at some examples from the dataset, including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasets. Add additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected. Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.experimental.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.experimental.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples. "
       "The training dataset now has "
       f"{ds_train_with_unknown.cardinality().numpy()} examples in total."))
###Output
_____no_output_____
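###Markdown
As a quick sanity check of the merge above, you can count how many examples in a small sample of the combined training set carry the new unknown label. This is an illustrative sketch only: the sample size of 200 is an arbitrary choice, and the exact count will vary between runs because the merge samples the two datasets randomly.
###Code
# Illustrative sanity check: count unknown-labelled examples in a small sample
# of the merged training dataset. The sample size (200) is arbitrary.
sample_size = 200
sampled_labels = [
    int(label.numpy())
    for label in ds_train_with_unknown.map(lambda image, label: label).take(sample_size)
]
num_unknown = sum(1 for l in sampled_labels if l == int(unknown_label_value.numpy()))
print(f'{num_unknown} of {sample_size} sampled examples carry the UNKNOWN label.')
###Output
_____no_output_____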
###Markdown
Apply augmentations. For all the images, to make them more diverse, you'll apply some augmentation, like changes in brightness, contrast, saturation, hue, and crop. These types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
###Markdown
To apply the augmentation, it uses the `map` method from the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into a Model Maker friendly format. To use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
###Markdown
Run training. [TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning. Here you can choose one and you can also keep experimenting with other ones to try to get better results. If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after the training of the model, it'll also convert it to TFLite. Model Maker makes this conversion as good as possible, with all the necessary information to easily deploy the model on-device later. The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model during training. This makes the process slower, but the final model has a higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
list(ds_test.map(lambda x, y: y)),
predict_class_label_number(test_data),
num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Evaluate model on unknown test data. In this evaluation we expect the model to have an accuracy of almost 1. All images the model is tested on are not related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
list(ds_unknown_test.map(lambda x, y: y)),
predict_class_label_number(unknown_test_data),
num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModel. Now we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____
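###Markdown
To double-check the exported TFLite file, you can run it directly with the TFLite interpreter. The sketch below is illustrative only: it feeds a single dummy input whose shape and dtype are read from the model's input details (the handles used here expect 224x224 RGB images), so swap in a real, properly preprocessed image to get meaningful predictions.
###Code
import numpy as np # used only for this illustrative sketch

# Load the exported TFLite model and inspect its input/output tensors.
interpreter = tf.lite.Interpreter(model_path=tflite_filename)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Build a dummy input matching the expected shape and dtype.
dummy_input = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy_input)
interpreter.invoke()

scores = interpreter.get_tensor(output_details[0]['index'])[0]
print('Predicted class for the dummy input:', label_names[int(np.argmax(scores))])
###Output
_____no_output_____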
###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection. This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset. You will load the TFDS cassava dataset or your own data, enrich the data with unknown (negative) examples to get a more robust model, apply image augmentations to the data, load and fine-tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub, and export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly. Imports and Dependencies. Before starting, you'll need to install some of the dependencies that will be needed, like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!pip install --use-deprecated=legacy-resolver tflite-model-maker
!pip install -U tensorflow-datasets
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
###Markdown
Load a TFDS dataset to fine-tune on. Let's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
###Markdown
Or alternatively load your own data to fine-tune on. Instead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train split. Let's take a look at some examples from the dataset, including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasets. Add additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected. Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.Dataset.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.Dataset.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples. "
       "The training dataset now has "
       f"{ds_train_with_unknown.cardinality().numpy()} examples in total."))
###Output
_____no_output_____
###Markdown
Apply augmentations. For all the images, to make them more diverse, you'll apply some augmentation, like changes in brightness, contrast, saturation, hue, and crop. These types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
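###Markdown
Before mapping these functions over the datasets, it can help to run a single raw example through both pipelines and confirm the output shapes and dtypes. This is only an illustrative check, and it assumes the datasets have not been mapped yet at this point, so each element is still a raw (image, label) pair.
###Code
# Illustrative check: push one raw example through the train and eval pipelines
# and print the resulting shapes and dtypes.
for image, label in ds_train_with_unknown.take(1):
  train_version = random_crop_and_random_augmentations_fn(image)
  eval_version = resize_and_center_crop_fn(image)
  print('raw image:      ', image.shape, image.dtype)
  print('train augmented:', train_version.shape, train_version.dtype)
  print('eval crop:      ', eval_version.shape, eval_version.dtype)
###Output
_____no_output_____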
###Markdown
To apply the augmentation, it uses the `map` method from the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into a Model Maker friendly format. To use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
###Markdown
Run training. [TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning. Here you can choose one and you can also keep experimenting with other ones to try to get better results. If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after the training of the model, it'll also convert it to TFLite. Model Maker makes this conversion as good as possible, with all the necessary information to easily deploy the model on-device later. The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model during training. This makes the process slower, but the final model has a higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
list(ds_test.map(lambda x, y: y)),
predict_class_label_number(test_data),
num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Evaluate model on unknown test data. In this evaluation we expect the model to have an accuracy of almost 1. All images the model is tested on are not related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
list(ds_unknown_test.map(lambda x, y: y)),
predict_class_label_number(unknown_test_data),
num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModel. Now we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____
###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection. This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset. You will load the TFDS cassava dataset or your own data, enrich the data with unknown (negative) examples to get a more robust model, apply image augmentations to the data, load and fine-tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub, and export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly. Imports and Dependencies. Before starting, you'll need to install some of the dependencies that will be needed, like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!pip install --use-deprecated=legacy-resolver tflite-model-maker
!pip install -U tensorflow-datasets
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
###Markdown
Load a TFDS dataset to fine-tune on. Let's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
###Markdown
Or alternatively load your own data to fine-tune on. Instead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train split. Let's take a look at some examples from the dataset, including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasets. Add additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected. Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.experimental.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.experimental.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples. "
       "The training dataset now has "
       f"{ds_train_with_unknown.cardinality().numpy()} examples in total."))
###Output
_____no_output_____
###Markdown
Apply augmentations. For all the images, to make them more diverse, you'll apply some augmentation, like changes in brightness, contrast, saturation, hue, and crop. These types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
###Markdown
To apply the augmentation, it uses the `map` method from the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into a Model Maker friendly format. To use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
###Markdown
Run training. [TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning. Here you can choose one and you can also keep experimenting with other ones to try to get better results. If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
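###Markdown
If you want to sanity-check the chosen handle before kicking off a full training run, you can load it as a feature extractor with tensorflow_hub and push a dummy batch through it. This is an optional, illustrative step: it assumes the tensorflow_hub package is available in the environment (it ships with Colab) and that the handle expects 224x224 RGB inputs, which is the case for the models listed above.
###Code
import tensorflow_hub as hub # assumed to be available in the environment

# Load the selected handle as a frozen feature extractor and run a dummy batch.
feature_extractor = hub.KerasLayer(model_handle, trainable=False)
features = feature_extractor(tf.zeros([1, 224, 224, 3]))
print('Feature vector shape:', features.shape)
###Output
_____no_output_____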
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after the training of the model, it'll also convert it to TFLite. Model Maker makes this conversion as good as possible, with all the necessary information to easily deploy the model on-device later. The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model during training. This makes the process slower, but the final model has a higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
    list(ds_test.map(lambda x, y: y)),
    predict_class_label_number(test_data),
    num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
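###Markdown
Beyond the heatmap, a compact summary such as per-class recall can make weak classes easier to spot. The sketch below is illustrative and simply post-processes the confusion matrix computed above (true labels on the rows) with NumPy.
###Code
import numpy as np # used only for this illustrative sketch

# Per-class recall: diagonal of the confusion matrix divided by the row sums
# (each row holds the true examples of one class).
cm = confusion_mtx.numpy()
row_sums = np.maximum(cm.sum(axis=1), 1) # guard against empty classes
per_class_recall = np.diag(cm) / row_sums
for name, recall in zip(label_names, per_class_recall):
  print(f'{name:>25s}: {recall:.3f}')
###Output
_____no_output_____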
###Markdown
Evaluate model on unknown test data. In this evaluation we expect the model to have an accuracy of almost 1. All images the model is tested on are not related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
    list(ds_unknown_test.map(lambda x, y: y)),
    predict_class_label_number(unknown_test_data),
    num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModel. Now we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____
###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
# Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection. This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset. You will load the TFDS cassava dataset or your own data, enrich the data with unknown (negative) examples to get a more robust model, apply image augmentations to the data, load and fine-tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub, and export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly. Imports and Dependencies. Before starting, you'll need to install some of the dependencies that will be needed, like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!pip install -q tflite-model-maker
!pip install -q -U tensorflow-datasets
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
###Markdown
Load a TFDS dataset to fine-tune on. Let's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
###Markdown
Or alternatively load your own data to fine-tune on. Instead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train split. Let's take a look at some examples from the dataset, including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasets. Add additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected. Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.experimental.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.experimental.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples. "
       "The training dataset now has "
       f"{ds_train_with_unknown.cardinality().numpy()} examples in total."))
###Output
_____no_output_____
###Markdown
Apply augmentations. For all the images, to make them more diverse, you'll apply some augmentation, like changes in brightness, contrast, saturation, hue, and crop. These types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
###Markdown
To apply the augmentation, it uses the `map` method from the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into a Model Maker friendly format. To use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
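###Markdown
Before starting training, a quick summary of what was just wrapped can be useful: the full class list (including the extra UNKNOWN class) and how many examples each split contains. This illustrative sketch reads the sizes from the underlying tf.data datasets, so it makes no assumptions about the Model Maker loader API.
###Code
# Quick summary of the wrapped data (illustrative).
print('Classes (including UNKNOWN):', label_names)
print('Train examples:     ', ds_train_with_unknown.cardinality().numpy())
print('Validation examples:', ds_validation.cardinality().numpy())
print('Test examples:      ', ds_test.cardinality().numpy())
###Output
_____no_output_____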
###Markdown
Run training. [TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning. Here you can choose one and you can also keep experimenting with other ones to try to get better results. If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after the training of the model, it'll also convert it to TFLite. Model Maker makes this conversion as good as possible, with all the necessary information to easily deploy the model on-device later. The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model during training. This makes the process slower, but the final model has a higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
    list(ds_test.map(lambda x, y: y)),
    predict_class_label_number(test_data),
    num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Evaluate model on unknown test data. In this evaluation we expect the model to have an accuracy of almost 1. All images the model is tested on are not related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
    list(ds_unknown_test.map(lambda x, y: y)),
    predict_class_label_number(unknown_test_data),
    num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModel. Now we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____
###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection. This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset. You will load the TFDS cassava dataset or your own data, enrich the data with unknown (negative) examples to get a more robust model, apply image augmentations to the data, load and fine-tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub, and export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly. Imports and Dependencies. Before starting, you'll need to install some of the dependencies that will be needed, like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!sudo apt install -q libportaudio2
!pip install --use-deprecated=legacy-resolver tflite-model-maker
!pip install -U tensorflow-datasets
## scann library requires tensorflow < 2.9.0
!pip install "tensorflow<2.9.0"
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
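###Markdown
Since this variant of the notebook pins TensorFlow below 2.9.0, a quick version check right after the imports confirms that the environment picked up the intended packages. This is just an illustrative sanity check.
###Code
# Confirm the installed versions (illustrative sanity check).
print('TensorFlow version:', tf.__version__)
print('TensorFlow Datasets version:', tfds.__version__)
###Output
_____no_output_____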
###Markdown
Load a TFDS dataset to fine-tune on. Let's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
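###Markdown
A quick look at the metadata returned by TFDS helps confirm what was loaded: the number of classes, their names, and the split sizes. This is an illustrative check that uses only the ds_info object and the datasets loaded above.
###Code
# Inspect the loaded dataset (illustrative).
print('Number of classes:', ds_info.features['label'].num_classes)
print('Class names:', ds_info.features['label'].names)
print('Train examples:     ', ds_train.cardinality().numpy())
print('Validation examples:', ds_validation.cardinality().numpy())
print('Test examples:      ', ds_test.cardinality().numpy())
###Output
_____no_output_____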
###Markdown
Or alternatively load your own data to fine-tune on. Instead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train split. Let's take a look at some examples from the dataset, including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasets. Add additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected. Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.Dataset.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.Dataset.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples. "
       "The training dataset now has "
       f"{ds_train_with_unknown.cardinality().numpy()} examples in total."))
###Output
_____no_output_____
###Markdown
Apply augmentations For all the images, to make them more diverse, you'll apply some augmentation, like changes in:- Brightness- Contrast- Saturation- Hue- CropThese types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
###Markdown
To apply the augmentations, use the `map` method of the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into Model Maker friendly formatTo use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
###Markdown
Run training[TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning.Here you can choose one and you can also keep experimenting with other ones to try to get better results.If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after training the model, it will also convert it to TFLite.Model Maker makes this conversion as good as possible and includes all the necessary information to easily deploy the model on-device later.The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model itself during training rather than only the classification head. This makes the process slower, but the final model has higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
list(ds_test.map(lambda x, y: y)),
predict_class_label_number(test_data),
num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Evaluate model on unknown test dataIn this evaluation we expect the model to have an accuracy of almost 1. None of the images the model is tested on are related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
list(ds_unknown_test.map(lambda x, y: y)),
predict_class_label_number(unknown_test_data),
num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModelNow we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____
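###Markdown
Optional sanity check (an addition to this notebook, not part of the original tutorial): the exported TFLite file can be loaded back with the standard `tf.lite.Interpreter` API and run on a dummy input. The sketch below assumes the `tflite_filename` variable defined in the export cell above.
###Code
# Minimal sketch: load the exported TFLite model and run one dummy input through it.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path=tflite_filename)  # file produced by model.export above
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# Build a dummy input with the shape and dtype the model expects.
dummy_input = np.random.random_sample(input_details['shape']).astype(input_details['dtype'])
interpreter.set_tensor(input_details['index'], dummy_input)
interpreter.invoke()

scores = interpreter.get_tensor(output_details['index'])
print('Output shape:', scores.shape)  # one score per class, including UNKNOWN
###Output
_____no_output_____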
###Markdown
Copyright 2021 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
###Code
#@title Copyright 2021 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###Output
_____no_output_____
###Markdown
Fine tuning models for plant disease detection View on TensorFlow.org Run in Google Colab View on GitHub Download notebook See TF Hub models This notebook shows you how to **fine-tune CropNet models from TensorFlow Hub** on a dataset from TFDS or your own crop disease detection dataset.You will:- Load the TFDS cassava dataset or your own data- Enrich the data with unknown (negative) examples to get a more robust model- Apply image augmentations to the data- Load and fine tune a [CropNet model](https://tfhub.dev/s?module-type=image-feature-vector&q=cropnet) from TF Hub- Export a TFLite model, ready to be deployed on your app with [Task Library](https://www.tensorflow.org/lite/inference_with_metadata/task_library/image_classifier), [MLKit](https://developers.google.com/ml-kit/vision/image-labeling/custom-models/android) or [TFLite](https://www.tensorflow.org/lite/guide/inference) directly Imports and DependenciesBefore starting, you'll need to install some of the dependencies that will be needed like [Model Maker](https://www.tensorflow.org/lite/guide/model_maker) and the latest version of TensorFlow Datasets.
###Code
!pip install --use-deprecated=legacy-resolver tflite-model-maker
!pip install -U tensorflow-datasets
import matplotlib.pyplot as plt
import os
import seaborn as sns
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import image_preprocessing
from tflite_model_maker import image_classifier
from tflite_model_maker import ImageClassifierDataLoader
from tflite_model_maker.image_classifier import ModelSpec
###Output
_____no_output_____
###Markdown
Load a TFDS dataset to fine-tune onLet's use the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava) from TFDS.
###Code
tfds_name = 'cassava'
(ds_train, ds_validation, ds_test), ds_info = tfds.load(
name=tfds_name,
split=['train', 'validation', 'test'],
with_info=True,
as_supervised=True)
TFLITE_NAME_PREFIX = tfds_name
###Output
_____no_output_____
###Markdown
Or alternatively load your own data to fine-tune onInstead of using a TFDS dataset, you can also train on your own data. This code snippet shows how to load your own custom dataset. See [this](https://www.tensorflow.org/datasets/api_docs/python/tfds/folder_dataset/ImageFolder) link for the supported structure of the data. An example is provided here using the publicly available [Cassava Leaf Disease dataset](https://www.tensorflow.org/datasets/catalog/cassava).
###Code
# data_root_dir = tf.keras.utils.get_file(
# 'cassavaleafdata.zip',
# 'https://storage.googleapis.com/emcassavadata/cassavaleafdata.zip',
# extract=True)
# data_root_dir = os.path.splitext(data_root_dir)[0] # Remove the .zip extension
# builder = tfds.ImageFolder(data_root_dir)
# ds_info = builder.info
# ds_train = builder.as_dataset(split='train', as_supervised=True)
# ds_validation = builder.as_dataset(split='validation', as_supervised=True)
# ds_test = builder.as_dataset(split='test', as_supervised=True)
###Output
_____no_output_____
###Markdown
Visualize samples from train splitLet's take a look at some examples from the dataset including the class id and the class name for the image samples and their labels.
###Code
_ = tfds.show_examples(ds_train, ds_info)
###Output
_____no_output_____
###Markdown
Add images to be used as Unknown examples from TFDS datasetsAdd additional unknown (negative) examples to the training dataset and assign a new unknown class label number to them. The goal is to have a model that, when used in practice (e.g. in the field), has the option of predicting "Unknown" when it sees something unexpected.Below you can see a list of datasets that will be used to sample the additional unknown imagery. It includes 3 completely different datasets to increase diversity. One of them is a beans leaf disease dataset, so that the model has exposure to diseased plants other than cassava.
###Code
UNKNOWN_TFDS_DATASETS = [{
'tfds_name': 'imagenet_v2/matched-frequency',
'train_split': 'test[:80%]',
'test_split': 'test[80%:]',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'oxford_flowers102',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}, {
'tfds_name': 'beans',
'train_split': 'train',
'test_split': 'test',
'num_examples_ratio_to_normal': 1.0,
}]
###Output
_____no_output_____
###Markdown
The UNKNOWN datasets are also loaded from TFDS.
###Code
# Load unknown datasets.
weights = [
spec['num_examples_ratio_to_normal'] for spec in UNKNOWN_TFDS_DATASETS
]
num_unknown_train_examples = sum(
int(w * ds_train.cardinality().numpy()) for w in weights)
ds_unknown_train = tf.data.experimental.sample_from_datasets([
tfds.load(
name=spec['tfds_name'], split=spec['train_split'],
as_supervised=True).repeat(-1) for spec in UNKNOWN_TFDS_DATASETS
], weights).take(num_unknown_train_examples)
ds_unknown_train = ds_unknown_train.apply(
tf.data.experimental.assert_cardinality(num_unknown_train_examples))
ds_unknown_tests = [
tfds.load(
name=spec['tfds_name'], split=spec['test_split'], as_supervised=True)
for spec in UNKNOWN_TFDS_DATASETS
]
ds_unknown_test = ds_unknown_tests[0]
for ds in ds_unknown_tests[1:]:
ds_unknown_test = ds_unknown_test.concatenate(ds)
# All examples from the unknown datasets will get a new class label number.
num_normal_classes = len(ds_info.features['label'].names)
unknown_label_value = tf.convert_to_tensor(num_normal_classes, tf.int64)
ds_unknown_train = ds_unknown_train.map(lambda image, _:
(image, unknown_label_value))
ds_unknown_test = ds_unknown_test.map(lambda image, _:
(image, unknown_label_value))
# Merge the normal train dataset with the unknown train dataset.
weights = [
ds_train.cardinality().numpy(),
ds_unknown_train.cardinality().numpy()
]
ds_train_with_unknown = tf.data.experimental.sample_from_datasets(
[ds_train, ds_unknown_train], [float(w) for w in weights])
ds_train_with_unknown = ds_train_with_unknown.apply(
tf.data.experimental.assert_cardinality(sum(weights)))
print((f"Added {ds_unknown_train.cardinality().numpy()} negative examples."
f"Training dataset has now {ds_train_with_unknown.cardinality().numpy()}"
' examples in total.'))
###Output
_____no_output_____
###Markdown
Apply augmentations For all the images, to make them more diverse, you'll apply some augmentation, like changes in:- Brightness- Contrast- Saturation- Hue- CropThese types of augmentations help make the model more robust to variations in image inputs.
###Code
def random_crop_and_random_augmentations_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
image = tf.image.random_brightness(image, 0.2)
image = tf.image.random_contrast(image, 0.5, 2.0)
image = tf.image.random_saturation(image, 0.75, 1.25)
image = tf.image.random_hue(image, 0.1)
return image
def random_crop_fn(image):
# preprocess_for_train does random crop and resize internally.
image = image_preprocessing.preprocess_for_train(image)
return image
def resize_and_center_crop_fn(image):
image = tf.image.resize(image, (256, 256))
image = image[16:240, 16:240]
return image
no_augment_fn = lambda image: image
train_augment_fn = lambda image, label: (
random_crop_and_random_augmentations_fn(image), label)
eval_augment_fn = lambda image, label: (resize_and_center_crop_fn(image), label)
###Output
_____no_output_____
###Markdown
To apply the augmentations, use the `map` method of the Dataset class.
###Code
ds_train_with_unknown = ds_train_with_unknown.map(train_augment_fn)
ds_validation = ds_validation.map(eval_augment_fn)
ds_test = ds_test.map(eval_augment_fn)
ds_unknown_test = ds_unknown_test.map(eval_augment_fn)
###Output
_____no_output_____
###Markdown
Wrap the data into Model Maker friendly formatTo use these datasets with Model Maker, they need to be wrapped in an ImageClassifierDataLoader class.
###Code
label_names = ds_info.features['label'].names + ['UNKNOWN']
train_data = ImageClassifierDataLoader(ds_train_with_unknown,
ds_train_with_unknown.cardinality(),
label_names)
validation_data = ImageClassifierDataLoader(ds_validation,
ds_validation.cardinality(),
label_names)
test_data = ImageClassifierDataLoader(ds_test, ds_test.cardinality(),
label_names)
unknown_test_data = ImageClassifierDataLoader(ds_unknown_test,
ds_unknown_test.cardinality(),
label_names)
###Output
_____no_output_____
###Markdown
Run training[TensorFlow Hub](https://tfhub.dev) has multiple models available for Transfer Learning.Here you can choose one and you can also keep experimenting with other ones to try to get better results.If you want even more models to try, you can add them from this [collection](https://tfhub.dev/google/collections/image/1).
###Code
#@title Choose a base model
model_name = 'mobilenet_v3_large_100_224' #@param ['cropnet_cassava', 'cropnet_concat', 'cropnet_imagenet', 'mobilenet_v3_large_100_224']
map_model_name = {
'cropnet_cassava':
'https://tfhub.dev/google/cropnet/feature_vector/cassava_disease_V1/1',
'cropnet_concat':
'https://tfhub.dev/google/cropnet/feature_vector/concat/1',
'cropnet_imagenet':
'https://tfhub.dev/google/cropnet/feature_vector/imagenet/1',
'mobilenet_v3_large_100_224':
'https://tfhub.dev/google/imagenet/mobilenet_v3_large_100_224/feature_vector/5',
}
model_handle = map_model_name[model_name]
###Output
_____no_output_____
###Markdown
To fine-tune the model, you will use Model Maker. This makes the overall solution easier since, after training the model, it will also convert it to TFLite.Model Maker makes this conversion as good as possible and includes all the necessary information to easily deploy the model on-device later.The model spec is how you tell Model Maker which base model you'd like to use.
###Code
image_model_spec = ModelSpec(uri=model_handle)
###Output
_____no_output_____
###Markdown
One important detail here is setting `train_whole_model`, which fine-tunes the base model itself during training rather than only the classification head. This makes the process slower, but the final model has higher accuracy. Setting `shuffle` makes sure the model sees the data in a randomly shuffled order, which is a best practice for model learning.
###Code
model = image_classifier.create(
train_data,
model_spec=image_model_spec,
batch_size=128,
learning_rate=0.03,
epochs=5,
shuffle=True,
train_whole_model=True,
validation_data=validation_data)
###Output
_____no_output_____
###Markdown
Evaluate model on test split
###Code
model.evaluate(test_data)
###Output
_____no_output_____
###Markdown
To have an even better understanding of the fine tuned model, it's good to analyse the confusion matrix. This will show how often one class is predicted as another.
###Code
def predict_class_label_number(dataset):
"""Runs inference and returns predictions as class label numbers."""
rev_label_names = {l: i for i, l in enumerate(label_names)}
return [
rev_label_names[o[0][0]]
for o in model.predict_top_k(dataset, batch_size=128)
]
def show_confusion_matrix(cm, labels):
plt.figure(figsize=(10, 8))
sns.heatmap(cm, xticklabels=labels, yticklabels=labels,
annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
confusion_mtx = tf.math.confusion_matrix(
    list(ds_test.map(lambda x, y: y)),
    predict_class_label_number(test_data),
num_classes=len(label_names))
show_confusion_matrix(confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Evaluate model on unknown test dataIn this evaluation we expect the model to have an accuracy of almost 1. None of the images the model is tested on are related to the normal dataset, and hence we expect the model to predict the "Unknown" class label.
###Code
model.evaluate(unknown_test_data)
###Output
_____no_output_____
###Markdown
Print the confusion matrix.
###Code
unknown_confusion_mtx = tf.math.confusion_matrix(
    list(ds_unknown_test.map(lambda x, y: y)),
    predict_class_label_number(unknown_test_data),
num_classes=len(label_names))
show_confusion_matrix(unknown_confusion_mtx, label_names)
###Output
_____no_output_____
###Markdown
Export the model as TFLite and SavedModelNow we can export the trained models in TFLite and SavedModel formats for deploying on-device and using for inference in TensorFlow.
###Code
tflite_filename = f'{TFLITE_NAME_PREFIX}_model_{model_name}.tflite'
model.export(export_dir='.', tflite_filename=tflite_filename)
# Export saved model version.
model.export(export_dir='.', export_format=ExportFormat.SAVED_MODEL)
###Output
_____no_output_____ |
demo/flights_query_template_5.ipynb | ###Markdown
For the Flights dataset with the following query template: SELECT unique_carrier, AF(dep_delay) FROM flights WHERE distance BETWEEN 1000 AND 1200 AND origin_state_abr='LA' GROUP BY unique_carrier
###Code
from dbestclient.executor.executor import SqlExecutor
###Output
_____no_output_____
###Markdown
Parameter Initialization
###Code
sqlExecutor = SqlExecutor()
sqlExecutor.execute("set n_mdn_layer_node_reg=10") #
sqlExecutor.execute("set n_mdn_layer_node_density=15") #
sqlExecutor.execute("set n_jobs=1") #
sqlExecutor.execute("set n_hidden_layer=1") #
sqlExecutor.execute("set n_epoch=20") #
sqlExecutor.execute("set n_gaussians_reg=8") #
sqlExecutor.execute("set n_gaussians_density=10") #
sqlExecutor.execute("set csv_split_char=','")
sqlExecutor.execute("set table_header=" +
"'year_date,unique_carrier,origin,origin_state_abr,dest,dest_state_abr,dep_delay,taxi_out,taxi_in,arr_delay,air_time,distance'")
###Output
Local mode is on, as no slaves are provided.
start loading pre-existing models.
Loaded 4 models. time cost 0.284639 s
OK, n_mdn_layer_node_reg is updated.
OK, n_mdn_layer_node_density is updated.
OK, n_jobs is updated.
OK, n_hidden_layer is updated.
OK, n_epoch is updated.
OK, n_gaussians_reg is updated.
OK, n_gaussians_density is updated.
OK, csv_split_char is updated.
OK, table_header is updated.
###Markdown
Model Creation
###Code
# sqlExecutor.execute("drop table template5") # drop the model
sqlExecutor.execute(
"create table template5(dep_delay real, distance real,origin_state_abr categorical) from '/home/quincy/Documents/workspace/data/flights/flight_1m.csv' GROUP BY unique_carrier method uniform size 0.001 ")
###Output
Start creating model template5
The given table is treated as a uniform sample, and it is obtained with sampling rate 0.001
Reading data file...
get frequency info from data....
fit MdnQueryEngineXCategoricalOneModel...
training density...
finish training embedding.
< Epoch 0
< Epoch 1
< Epoch 2
< Epoch 3
< Epoch 4
< Epoch 5
< Epoch 6
< Epoch 7
< Epoch 8
< Epoch 9
< Epoch 10
< Epoch 11
< Epoch 12
< Epoch 13
< Epoch 14
< Epoch 15
< Epoch 16
< Epoch 17
< Epoch 18
< Epoch 19
finish mdn training...
training regression...
finish training embedding.
embedding inference...
start normalizing data...
transform data from MDN training...
finish transforming data from MDN training...
< Epoch 0
< Epoch 1
< Epoch 2
< Epoch 3
< Epoch 4
< Epoch 5
< Epoch 6
< Epoch 7
< Epoch 8
< Epoch 9
< Epoch 10
< Epoch 11
< Epoch 12
< Epoch 13
< Epoch 14
< Epoch 15
< Epoch 16
< Epoch 17
< Epoch 18
< Epoch 19
Finish regression training.
time cost: 430s.
------------------------
###Markdown
Query Serving
###Code
predictions = sqlExecutor.execute(
"select unique_carrier, avg(dep_delay) from template5 where 1000 <=distance<= 1200 and origin_state_abr='LA' group by unique_carrier"
)
# sqlExecutor.execute("drop table template5")
###Output
OK
0 1
9E,LA 1206.414211
AA,LA 1206.447441
AQ,LA 1206.316881
AS,LA 1206.448448
B6,LA 1206.397555
CO,LA 1206.387241
DH,LA 1206.572376
DL,LA 1206.398092
EV,LA 1206.344478
F9,LA 1206.571358
FL,LA 1206.474904
HA,LA 1206.531414
HP,LA 1206.337871
MQ,LA 1206.488969
NK,LA 1206.678322
NW,LA 1206.411467
OH (1),LA 1206.414877
OO,LA 1206.303849
TW,LA 1206.444148
TZ,LA 1206.627812
UA,LA 1206.383681
US,LA 1206.429333
VX,LA 1206.332781
WN,LA 1206.444382
XE,LA 1206.422698
YV,LA 1206.444927
Time cost: 0.0306s.
------------------------
###Markdown
HIVE querySELECT unique_carrier, COUNT(dep_delay), SUM(dep_delay), AVG(dep_delay) FROM flights WHERE distance BETWEEN 1000 AND 1200 AND origin_state_abr='LA' GROUP BY unique_carrier
###Code
Time Cost: 498s
hive> SELECT unique_carrier, COUNT(dep_delay),SUM(dep_delay), AVG(dep_delay) FROM flights WHERE distance BETWEEN 1000 AND 1200 AND origin_state_abr='LA' GROUP BY unique_carrier;
WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hduser_20210310231215_fd3ca82b-edd9-4606-bddf-6e91da2bea6b
Total jobs = 1
Launching Job 1 out of 1
Number of reduce tasks not specified. Estimated from input data size: 224
In order to change the average load for a reducer (in bytes):
set hive.exec.reducers.bytes.per.reducer=<number>
In order to limit the maximum number of reducers:
set hive.exec.reducers.max=<number>
In order to set a constant number of reducers:
set mapreduce.job.reduces=<number>
Starting Job = job_1586257888433_0314, Tracking URL = http://master:8088/proxy/application_1586257888433_0314/
Kill Command = /opt/hadoop292/bin/hadoop job -kill job_1586257888433_0314
Hadoop job information for Stage-1: number of mappers: 214; number of reducers: 224
2021-03-10 23:12:41,836 Stage-1 map = 0%, reduce = 0%
2021-03-10 23:20:33,745 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 6466.4 sec
MapReduce Total cumulative CPU time: 0 days 1 hours 47 minutes 46 seconds 400 msec
Ended Job = job_1586257888433_0314
MapReduce Jobs Launched:
Stage-Stage-1: Map: 214 Reduce: 224 Cumulative CPU: 6466.4 sec HDFS Read: 57224216019 HDFS Write: 23598 SUCCESS
Total MapReduce CPU Time Spent: 0 days 1 hours 47 minutes 46 seconds 400 msec
OK
DH 3971 4788137.399999998 1205.7762276504652
DL 95975 1.1575895680000003E8 1206.1365647303987
EV 34035 4.1062881099999994E7 1206.4898222418096
F9 5916 7132196.000000003 1205.5774171737664
TW 10982 1.32409268E7 1205.6935712984885
MQ 39003 4.703943819999997E7 1206.0466682050092
FL 14608 1.7622019300000004E7 1206.326622398686
TZ 1237 1490529.2999999993 1204.9549717057391
UA 72344 8.725239720000003E7 1206.076484573704
NK 1478 1782778.2999999993 1206.2099458728007
US 63588 7.669369760000001E7 1206.1033150908977
NW 41632 5.020781880000002E7 1205.9910357417375
9E 7720 9318013.800000008 1206.9966062176177
HA 5108 6161658.099999998 1206.2760571652307
AA 84448 1.0186515010000005E8 1206.247040782494
OO 45830 5.527949410000002E7 1206.185775692778
VX 1763 2126155.8000000026 1205.9874078275682
HP 13056 1.5747752600000003E7 1206.1697763480395
AQ 815 984427.1000000001 1207.8860122699389
AS 19721 2.3785714200000014E7 1206.110957862178
B6 15646 1.8878680100000016E7 1206.613837402532
WN 133469 1.6097666200000003E8 1206.0977605286623
XE 20138 2.4297131199999988E7 1206.531492700367
CO 33622 4.0555577299999975E7 1206.2214413181837
OH (1) 10144 1.2236402000000004E7 1206.2699132492116
YV 9692 1.1692630299999997E7 1206.4207903425502
Time taken: 498.802 seconds, Fetched: 26 row(s)
###Output
_____no_output_____ |
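###Markdown
A quick way to judge the approximation quality is to compare one group's DBEst estimate with the exact Hive result above. The sketch below is an illustration added here (not part of the original demo) and simply hard-codes the AA values printed in the two outputs.
###Code
# Rough accuracy check for one group, using numbers copied from the outputs above.
dbest_avg = 1206.447441          # AVG(dep_delay) estimated by DBEst for AA,LA
hive_avg = 1206.247040782494     # exact AVG(dep_delay) returned by Hive for AA
relative_error = abs(dbest_avg - hive_avg) / hive_avg
print(f"Relative error for AA,LA: {relative_error:.4%}")
###Output
_____no_output_____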
day2/exercises/.ipynb_checkpoints/Exercises_Python-checkpoint.ipynb | ###Markdown
Exercises ***Rules***:* Every variable/function/class name should be meaningful* Variable/function names should be lowercase, class names uppercase* Write a documentation string (even if minimal) for every function. 1) (From [jakevdp](https://github.com/jakevdp/2014_fall_ASTR599/blob/master/notebooks/01_basic_training.ipynb)): Create a program (a .py file) which repeatedly asks the user for a word. The program should append all the words together. When the user types a "!", "?", or a ".", the program should print the resulting sentence and exit. For example, a session might look like this:: $ ./make_sentence.py Enter a word (. ! or ? to end): My Enter a word (. ! or ? to end): name Enter a word (. ! or ? to end): is Enter a word (. ! or ? to end): Walter Enter a word (. ! or ? to end): White Enter a word (. ! or ? to end): ! My name is Walter White!
###Code
#The following line will only work if you create the make_sentence.py in the current directory
import make_sentence  # note: import the module name without the .py extension
###Output
_____no_output_____
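###Markdown
One possible solution sketch for exercise 1 (an illustrative `make_sentence.py`, not the official answer):
###Code
# Possible content of make_sentence.py -- one of many valid solutions.
def build_sentence():
    """Repeatedly ask for words and print the sentence once '.', '!' or '?' is entered."""
    words = []
    while True:
        word = input("Enter a word (. ! or ? to end): ")
        if word in (".", "!", "?"):
            print(" ".join(words) + word)
            break
        words.append(word)

if __name__ == "__main__":
    build_sentence()
###Output
_____no_output_____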
###Markdown
2) (From [jakevdp](https://github.com/jakevdp/2014_fall_ASTR599/blob/master/notebooks/01_basic_training.ipynb)): Write a program that prints the numbers from 1 to 100. But for multiples of three print “Fizz” instead of the number and for the multiples of five print “Buzz”. For numbers which are multiples of both three and five print “FizzBuzz”. If you finish quickly... see how **few** characters you can write this program in (this is known as "code golf": going for the fewest key strokes).
###Code
import numpy as np
x_arr = np.arange(10)
y_arr = np.arange(4)
print(x_arr.shape)
x = x_arr[:, np.newaxis]
print(x.shape)
###Output
_____no_output_____
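###Markdown
One possible solution sketch for exercise 2 (FizzBuzz), not the official answer:
###Code
def fizzbuzz(limit=100):
    """Print 1..limit, replacing multiples of 3, 5 and 15 with Fizz, Buzz and FizzBuzz."""
    for number in range(1, limit + 1):
        if number % 15 == 0:
            print("FizzBuzz")
        elif number % 3 == 0:
            print("Fizz")
        elif number % 5 == 0:
            print("Buzz")
        else:
            print(number)

fizzbuzz()
###Output
_____no_output_____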
###Markdown
3) Write a function called ``sum_digits`` that returns the sum of the digits of an integer argument; that is, ``sum_digits(123)`` should return ``6``. Use this function in another function that prints out the sum of the digits of every integer multiple of the first argument, up to either a second optional argument (if included) or the first argument's square. That is: list_multiple(4) with one argument 4 8 3 7 And I'll let you figure out what it looks like with a second optional argument
###Code
def sum_digits(number):
string_num = str(number)
sum = 0
for digit in string_num:
sum += int(digit)
return sum
#Don't forget the second function
def multiple(number, limit=None):
sum_digits(number)
sum_digits(123)
###Output
_____no_output_____ |
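###Markdown
One possible way to finish the second function from exercise 3 (an illustrative sketch that reuses `sum_digits` from the cell above, not the official answer):
###Code
def list_multiple(number, limit=None):
    """Print the digit sum of every multiple of `number` up to `limit` (default: number squared)."""
    if limit is None:
        limit = number ** 2
    for multiple_value in range(number, limit + 1, number):
        print(sum_digits(multiple_value))

list_multiple(4)  # expected output: 4 8 3 7, one value per line
###Output
_____no_output_____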
Natural Language Processing with Classification and Vector Spaces/Week 1/NLP_C1_W1_lecture_nb_03.ipynb | ###Markdown
Ungraded lab 3: Visualizing word frequencies*Copyrighted material***Objectives:** Visualize and interpret the logistic regression model**Steps:*** Plot tweets in a scatter plot as done in ungraded_lab2. Show the number of the tweet in the plot.* Plot the output of the logistic regression model in the same plot as a solid line Import the required librariesDuring the assignments of weeks 1 and 2 you will do sentiment analysis on tweets. [*NLTK*](http://www.nltk.org/howto/twitter.html) is an open-source Python library to collect and process Twitter data.NLTK allows you to work with 'live' Twitter data. If you are interested, just review the documentation of the project. But for this project, you are going to use the example dataset that comes alongside NLTK, which has been manually annotated and is intended to serve as a quick way to establish baselines for your models. So, to start, let's import the required libraries for this project
###Code
import nltk                         # Twitter toolbox
from os import getcwd
import pandas as pd # Library for Dataframes
from nltk.corpus import twitter_samples
import matplotlib.pyplot as plt # Library for visualization
import numpy as np # Library for math functions
from utils import process_tweet, build_freqs # Our functions for NLP
###Output
_____no_output_____
###Markdown
Load the NLTK sample datasetTo complete this lab, you need the sample dataset from the previous lab. Here, we assume the files are already available and we only need to load them into Python lists.
###Code
# select the set of positive and negative tweets
all_positive_tweets = twitter_samples.strings('positive_tweets.json')
all_negative_tweets = twitter_samples.strings('negative_tweets.json')
tweets = all_positive_tweets + all_negative_tweets ## Concatenate the lists.
labels = np.append(np.ones((len(all_positive_tweets),1)), np.zeros((len(all_negative_tweets),1)), axis = 0)
# split the data into two pieces, one for training and one for testing (validation set)
train_pos = all_positive_tweets[:4000]
train_neg = all_negative_tweets[:4000]
train_x = train_pos + train_neg
print("Number of tweets: ", len(train_x))
###Output
Number of tweets: 8000
###Markdown
Load the extracted featuresLoad the features created for the tweets sample. They have already been precalculated and saved in a CSV file.
###Code
data = pd.read_csv('logistic_features.csv');
X = data[['bias', 'positive', 'negative']].values
Y = data['sentiment'].values;
X.shape
###Output
_____no_output_____
###Markdown
Load a pretrained Logistic Regression modelAs part of this week's assignment, you are going to train a logistic regression model. In the cell below, you can find the model that you will obtain from such a training process. Notice that the whole model is just represented by a list of 3 numeric values, which we have called theta.
###Code
theta = [7e-08, 0.0005239, -0.00055517]
###Output
_____no_output_____
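###Markdown
As a small illustration added here (not part of the original lab), these three weights already define the classifier: for a feature vector [bias, positive, negative], the predicted sentiment is sigmoid(theta . x), and the tweet is classified as positive when theta . x > 0.
###Code
# Illustration only: how the 3 weights in theta turn features into a prediction.
import numpy as np

def predict_sentiment(features, theta):
    """Return sigmoid(theta . x) for a feature vector [bias, positive, negative]."""
    z = np.dot(features, theta)
    return 1.0 / (1.0 + np.exp(-z))

# A hypothetical tweet with many positive and few negative word counts.
example_features = [1.0, 3000.0, 60.0]
print(predict_sentiment(example_features, theta))  # > 0.5 means predicted positive
###Output
_____no_output_____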
###Markdown
Plot the samples in a scatter plotThe vector theta represents a plane that splits our feature space into two parts. Samples located over that plane are considered positive, and samples located under that plane are considered negative. Remember that we have a 3D feature space, i.e., each tweet is represented as a vector comprising 3 values: [bias, positive_sum, negative_sum], always having bias = 1. If we ignore the bias term, we can plot each tweet in a Cartesian plane, using positive_sum and negative_sum. In the cell below, we do exactly this. Additionally, we color each tweet depending on its class. Positive tweets will be green and negative tweets will be red.
###Code
# Plot the samples using columns 1 and 2 of the matrix
fig, ax = plt.subplots(figsize = (8, 8))
colors = ['red', 'green']
# Color based on the sentiment Y
ax.scatter(X[:,1], X[:,2], c=[colors[int(k)] for k in Y], s = 0.1) # Plot a dot for each pair of words
plt.xlabel("Positive")
plt.ylabel("Negative")
###Output
_____no_output_____
###Markdown
As you can see, the features that we have chosen to represent tweets as numerical vectors allow an almost perfect separation between positive and negative tweets. So it is no surprise that the logistic regression model produces an accuracy around 99%. Plot the model alongside the dataThe gray line is just the cutoff between the Positive and Negative regions. In other words the gray line marks the line where $$ z = \theta * x = 0.$$To draw this line, we had to solve the former equation in terms of one of the independent variables$$ z = \theta * x = 0$$$$ x = [1, pos, neg] $$$$ z(\theta, x) = \theta_0+ \theta_1 * pos + \theta_2 * neg = 0 $$$$ neg = (-\theta_0 - \theta_1 * pos) / \theta_2 $$
###Code
# Equation of the separation plane
# f(pos, neg, W) = w0 + w1 * pos + w2 * neg = 0
# s(pos, W) = (w0 - w1 * pos) / w2
def solution(theta, pos):
return (-theta[0] - pos * theta[1]) / theta[2]
# Offset along the direction perpendicular to the separation line
# df(pos, W) = pos * w2 / w1
def solution2(theta, pos):
return pos * theta[2] / theta[1]
###Output
_____no_output_____
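###Markdown
A quick numeric check added here as an illustration (not part of the original lab): plugging the theta values from above into the boundary equation shows, for a given positive count, how many negative counts put a tweet exactly on the gray line.
###Code
# Worked check of the boundary equation neg = (-theta0 - theta1 * pos) / theta2.
pos_count = 5000
neg_on_boundary = (-theta[0] - theta[1] * pos_count) / theta[2]
print(f"With {pos_count} positive counts, the boundary lies at about {neg_on_boundary:.0f} negative counts.")
###Output
_____no_output_____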
###Markdown
The green line in the chart points in the direction where z > 0 and the red line points in the direction where z < 0. The directions of these lines are given by the weights $\theta_1$ and $\theta_2$
###Code
# Plot the samples using columns 1 and 2 of the matrix
fig, ax = plt.subplots(figsize = (8, 8))
colors = ['red', 'green']
# Color based on the sentiment Y
ax.scatter(X[:,1], X[:,2], c=[colors[int(k)] for k in Y], s = 0.1) # Plot a dot for each pair of words
plt.xlabel("Positive")
plt.ylabel("Negative")
# Now let's represent the logistic regression model in this chart.
maxpos = np.max(X[:,1])
offset = 5000
# Plot the gray line that divides the 2 areas.
ax.plot([0, maxpos], [solution(theta, 0), solution(theta, maxpos)], color = 'gray')
# Plot the green arrow pointing in the positive direction
ax.arrow(offset, solution(theta, offset), offset, solution2(theta, offset), head_width=500, head_length=500, fc='g', ec='g')
# Plot the red arrow pointing in the negative direction
ax.arrow(offset, solution(theta, offset), -offset, -solution2(theta, offset), head_width=500, head_length=500, fc='r', ec='r')
plt.show()
###Output
_____no_output_____ |
docs/samples/client/kfserving_sdk_sample.ipynb | ###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, patch and delete KFService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2KFServiceSpec
from kfserving import V1alpha2KFService
from kubernetes.client import V1ResourceRequirements
###Output
_____no_output_____
###Markdown
Define KFService Firstly define the default endpoint spec, and then define the KFService based on it.
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create KFService Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Add Canary to KFService Patch the KFService to add a canary model.
###Code
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec,
canary=canary_endpoint_spec,
canary_traffic_percent=10))
KFServing.patch('flower-sample', kfsvc)
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Promote Canary to Default
###Code
KFServing.promote('flower-sample', namespace='kubeflow')
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Delete the KFService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, patch and delete KFService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2ModelSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2KFServiceSpec
from kfserving import V1alpha2KFService
###Output
_____no_output_____
###Markdown
Define KFService Firstly define the default model spec, and then define the KFService based on it.
###Code
default_model_spec = V1alpha2ModelSpec(tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers'))
kfsvc = V1alpha2KFService(api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_model_spec))
###Output
_____no_output_____
###Markdown
Create KFService Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Patch the KFService Patch the KFService to add a canary model.
###Code
canary_model_spec = V1alpha2ModelSpec(tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers'))
kfsvc = V1alpha2KFService(api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_model_spec,
canary=canary_model_spec,
canary_traffic_percent=10))
KFServing.patch('flower-sample', kfsvc)
###Output
_____no_output_____
###Markdown
Delete the KFService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, rollout_canary, promote and delete KFService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2KFServiceSpec
from kfserving import V1alpha2KFService
from kubernetes.client import V1ResourceRequirements
###Output
_____no_output_____
###Markdown
Define KFService Firstly define the default endpoint spec, and then define the KFService based on it.
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create KFService Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Add Canary to KFService Firstly define the canary endpoint spec, then roll out 10% of the traffic to the canary version and watch the rollout process.
###Code
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
KFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10,
namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Rollout more traffic to canary of the KFService Increase the canary traffic percent to 50%.
###Code
KFServing.rollout_canary('flower-sample', percent=50, namespace='kubeflow',
watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Promote Canary to Default
###Code
KFServing.promote('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Delete the KFService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, patch and delete KFService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha1ModelSpec
from kfserving import V1alpha1TensorflowSpec
from kfserving import V1alpha1KFServiceSpec
from kfserving import V1alpha1KFService
###Output
_____no_output_____
###Markdown
Define KFService Firstly define the default model spec, and then define the KFService based on it.
###Code
default_model_spec = V1alpha1ModelSpec(tensorflow=V1alpha1TensorflowSpec(
model_uri='gs://kfserving-samples/models/tensorflow/flowers'))
kfsvc = V1alpha1KFService(api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(name='flower-sample', namespace='kubeflow'),
spec=V1alpha1KFServiceSpec(default=default_model_spec))
###Output
_____no_output_____
###Markdown
Create KFService Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Patch the KFService Patch the KFService to add a canary model.
###Code
canary_model_spec = V1alpha1ModelSpec(tensorflow=V1alpha1TensorflowSpec(
model_uri='gs://kfserving-samples/models/tensorflow/flowers'))
kfsvc = V1alpha1KFService(api_version=constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(name='flower-sample', namespace='kubeflow'),
spec=V1alpha1KFServiceSpec(default=default_model_spec,
canary=canary_model_spec,
canary_traffic_percent=10))
KFServing.patch('flower-sample', kfsvc)
###Output
_____no_output_____
###Markdown
Delete the KFService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, rollout_canary, promote and delete InferenceService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import utils
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1ResourceRequirements
###Output
_____no_output_____
###Markdown
Define the namespace where the InferenceService needs to be deployed. If not specified, the function below sets the namespace to the current one where the SDK is running in the cluster; otherwise it will deploy to the default namespace.
###Code
namespace = utils.get_default_target_namespace()
###Output
_____no_output_____
###Markdown
Define InferenceService Firstly define the default endpoint spec, and then define the InferenceService based on it.
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace=namespace),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create InferenceService Call KFServingClient to create InferenceService.
###Code
KFServing = KFServingClient()
KFServing.create(isvc)
###Output
_____no_output_____
###Markdown
Check the InferenceService
###Code
KFServing.get('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Add Canary to InferenceService Firstly define the canary endpoint spec, then roll out 10% of the traffic to the canary version and watch the rollout process.
###Code
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
KFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10,
namespace=namespace, watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Rollout more traffic to canary of the InferenceService Increase the canary traffic percent to 50%.
###Code
KFServing.rollout_canary('flower-sample', percent=50, namespace=namespace,
watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Promote Canary to Default
###Code
KFServing.promote('flower-sample', namespace=namespace, watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Delete the InferenceService
###Code
KFServing.delete('flower-sample', namespace=namespace)
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, rollout_canary, promote and delete InferenceService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from kubernetes.client import V1ResourceRequirements
###Output
_____no_output_____
###Markdown
Define InferenceService Firstly define the default endpoint spec, and then define the InferenceService based on it.
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
isvc = V1alpha2InferenceService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2InferenceServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create InferenceService Call KFServingClient to create InferenceService.
###Code
KFServing = KFServingClient()
KFServing.create(isvc)
###Output
_____no_output_____
###Markdown
Check the InferenceService
###Code
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Add Canary to InferenceService Firstly define the canary endpoint spec, then roll out 10% of the traffic to the canary version and watch the rollout process.
###Code
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
KFServing.rollout_canary('flower-sample', canary=canary_endpoint_spec, percent=10,
namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Rollout more traffic to canary of the InferenceService Increase the canary traffic percent to 50%.
###Code
KFServing.rollout_canary('flower-sample', percent=50, namespace='kubeflow',
watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Promote Canary to Default
###Code
KFServing.promote('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Delete the InferenceService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____
###Markdown
Sample for KFServing SDK This is a sample for KFServing SDK. The notebook shows how to use KFServing SDK to create, get, patch and delete KFService.
###Code
from kubernetes import client
from kfserving import KFServingClient
from kfserving import constants
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2KFServiceSpec
from kfserving import V1alpha2KFService
from kubernetes.client import V1ResourceRequirements
###Output
_____no_output_____
###Markdown
Define KFService Firstly define the default endpoint spec, and then define the KFService based on it.
###Code
api_version = constants.KFSERVING_GROUP + '/' + constants.KFSERVING_VERSION
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec))
###Output
_____no_output_____
###Markdown
Create KFService Call KFServingClient to create KFService.
###Code
KFServing = KFServingClient()
KFServing.create(kfsvc)
###Output
_____no_output_____
###Markdown
Check the KFService
###Code
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Add Canary to KFService Patch the KFService to add a canary model.
###Code
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
tensorflow=V1alpha2TensorflowSpec(
storage_uri='gs://kfserving-samples/models/tensorflow/flowers-2',
resources=V1ResourceRequirements(
requests={'cpu':'100m','memory':'1Gi'},
limits={'cpu':'100m', 'memory':'1Gi'}))))
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=default_endpoint_spec,
canary=canary_endpoint_spec,
canary_traffic_percent=10))
KFServing.patch('flower-sample', kfsvc)
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Promote Canary to Default
###Code
kfsvc = V1alpha2KFService(api_version=api_version,
kind=constants.KFSERVING_KIND,
metadata=client.V1ObjectMeta(
name='flower-sample', namespace='kubeflow'),
spec=V1alpha2KFServiceSpec(default=canary_endpoint_spec,
canary=None,
canary_traffic_percent=0))
KFServing.patch('flower-sample', kfsvc)
KFServing.get('flower-sample', namespace='kubeflow', watch=True, timeout_seconds=120)
###Output
_____no_output_____
###Markdown
Delete the KFService
###Code
KFServing.delete('flower-sample', namespace='kubeflow')
###Output
_____no_output_____ |
JoeyNMT_embeddings.ipynb | ###Markdown
JoeyNMT Embeddings Example code for getting the mapping from vocabulary tokens to embeddings for a JoeyNMT model First we install JoeyNMT and train a toy model. The toy model serves only as a technical demonstration and is not very useful for analysis. Please replace it with your own trained model.
###Code
!pip install joeynmt
!git clone https://github.com/joeynmt/joeynmt.git
! cd joeynmt; python -m joeynmt train configs/small.yaml
###Output
2020-07-30 02:50:39,435 Hello! This is Joey-NMT.
2020-07-30 02:50:39.581776: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1
2020-07-30 02:50:41,029 Total params: 66376
2020-07-30 02:50:41,030 Trainable parameters: ['decoder.att_vector_layer.bias', 'decoder.att_vector_layer.weight', 'decoder.attention.energy_layer.weight', 'decoder.attention.key_layer.weight', 'decoder.attention.query_layer.weight', 'decoder.output_layer.weight', 'decoder.rnn.bias_hh_l0', 'decoder.rnn.bias_hh_l1', 'decoder.rnn.bias_ih_l0', 'decoder.rnn.bias_ih_l1', 'decoder.rnn.weight_hh_l0', 'decoder.rnn.weight_hh_l1', 'decoder.rnn.weight_ih_l0', 'decoder.rnn.weight_ih_l1', 'encoder.rnn.bias_hh_l0', 'encoder.rnn.bias_hh_l0_reverse', 'encoder.rnn.bias_hh_l1', 'encoder.rnn.bias_hh_l1_reverse', 'encoder.rnn.bias_hh_l2', 'encoder.rnn.bias_hh_l2_reverse', 'encoder.rnn.bias_ih_l0', 'encoder.rnn.bias_ih_l0_reverse', 'encoder.rnn.bias_ih_l1', 'encoder.rnn.bias_ih_l1_reverse', 'encoder.rnn.bias_ih_l2', 'encoder.rnn.bias_ih_l2_reverse', 'encoder.rnn.weight_hh_l0', 'encoder.rnn.weight_hh_l0_reverse', 'encoder.rnn.weight_hh_l1', 'encoder.rnn.weight_hh_l1_reverse', 'encoder.rnn.weight_hh_l2', 'encoder.rnn.weight_hh_l2_reverse', 'encoder.rnn.weight_ih_l0', 'encoder.rnn.weight_ih_l0_reverse', 'encoder.rnn.weight_ih_l1', 'encoder.rnn.weight_ih_l1_reverse', 'encoder.rnn.weight_ih_l2', 'encoder.rnn.weight_ih_l2_reverse', 'src_embed.lut.weight', 'trg_embed.lut.weight']
2020-07-30 02:50:41,031 cfg.name : my_experiment
2020-07-30 02:50:41,031 cfg.data.src : de
2020-07-30 02:50:41,031 cfg.data.trg : en
2020-07-30 02:50:41,031 cfg.data.train : test/data/toy/train
2020-07-30 02:50:41,031 cfg.data.dev : test/data/toy/dev
2020-07-30 02:50:41,031 cfg.data.test : test/data/toy/test
2020-07-30 02:50:41,031 cfg.data.random_train_subset : -1
2020-07-30 02:50:41,031 cfg.data.level : word
2020-07-30 02:50:41,031 cfg.data.lowercase : True
2020-07-30 02:50:41,031 cfg.data.max_sent_length : 30
2020-07-30 02:50:41,031 cfg.data.src_voc_min_freq : 1
2020-07-30 02:50:41,031 cfg.data.src_voc_limit : 101
2020-07-30 02:50:41,032 cfg.data.trg_voc_min_freq : 1
2020-07-30 02:50:41,032 cfg.data.trg_voc_limit : 102
2020-07-30 02:50:41,032 cfg.testing.beam_size : 5
2020-07-30 02:50:41,032 cfg.testing.alpha : 1.0
2020-07-30 02:50:41,032 cfg.testing.postprocess : True
2020-07-30 02:50:41,032 cfg.training.reset_best_ckpt : False
2020-07-30 02:50:41,032 cfg.training.reset_scheduler : False
2020-07-30 02:50:41,032 cfg.training.reset_optimizer : False
2020-07-30 02:50:41,032 cfg.training.random_seed : 42
2020-07-30 02:50:41,032 cfg.training.optimizer : adam
2020-07-30 02:50:41,032 cfg.training.adam_betas : [0.9, 0.999]
2020-07-30 02:50:41,032 cfg.training.learning_rate : 0.005
2020-07-30 02:50:41,032 cfg.training.learning_rate_min : 0.0001
2020-07-30 02:50:41,032 cfg.training.clip_grad_val : 1.0
2020-07-30 02:50:41,032 cfg.training.weight_decay : 0.0
2020-07-30 02:50:41,033 cfg.training.batch_size : 10
2020-07-30 02:50:41,033 cfg.training.batch_type : sentence
2020-07-30 02:50:41,033 cfg.training.eval_batch_size : 10
2020-07-30 02:50:41,033 cfg.training.eval_batch_type : sentence
2020-07-30 02:50:41,033 cfg.training.batch_multiplier : 1
2020-07-30 02:50:41,033 cfg.training.normalization : batch
2020-07-30 02:50:41,033 cfg.training.scheduling : plateau
2020-07-30 02:50:41,033 cfg.training.patience : 5
2020-07-30 02:50:41,033 cfg.training.decrease_factor : 0.5
2020-07-30 02:50:41,033 cfg.training.epochs : 1
2020-07-30 02:50:41,033 cfg.training.validation_freq : 10
2020-07-30 02:50:41,033 cfg.training.logging_freq : 10
2020-07-30 02:50:41,033 cfg.training.eval_metric : bleu
2020-07-30 02:50:41,033 cfg.training.early_stopping_metric : loss
2020-07-30 02:50:41,033 cfg.training.model_dir : models/small_model
2020-07-30 02:50:41,033 cfg.training.overwrite : True
2020-07-30 02:50:41,034 cfg.training.shuffle : True
2020-07-30 02:50:41,034 cfg.training.use_cuda : False
2020-07-30 02:50:41,034 cfg.training.max_output_length : 31
2020-07-30 02:50:41,034 cfg.training.print_valid_sents : [0, 1, 2]
2020-07-30 02:50:41,034 cfg.training.keep_last_ckpts : 3
2020-07-30 02:50:41,034 cfg.training.label_smoothing : 0.0
2020-07-30 02:50:41,034 cfg.model.initializer : xavier
2020-07-30 02:50:41,034 cfg.model.init_weight : 0.01
2020-07-30 02:50:41,034 cfg.model.init_gain : 1.0
2020-07-30 02:50:41,034 cfg.model.bias_initializer : zeros
2020-07-30 02:50:41,034 cfg.model.embed_initializer : normal
2020-07-30 02:50:41,034 cfg.model.embed_init_weight : 0.1
2020-07-30 02:50:41,034 cfg.model.embed_init_gain : 1.0
2020-07-30 02:50:41,034 cfg.model.init_rnn_orthogonal : False
2020-07-30 02:50:41,034 cfg.model.lstm_forget_gate : 1.0
2020-07-30 02:50:41,034 cfg.model.tied_embeddings : False
2020-07-30 02:50:41,035 cfg.model.tied_softmax : False
2020-07-30 02:50:41,035 cfg.model.encoder.type : recurrent
2020-07-30 02:50:41,035 cfg.model.encoder.rnn_type : gru
2020-07-30 02:50:41,035 cfg.model.encoder.embeddings.embedding_dim : 16
2020-07-30 02:50:41,035 cfg.model.encoder.embeddings.scale : False
2020-07-30 02:50:41,035 cfg.model.encoder.embeddings.freeze : False
2020-07-30 02:50:41,035 cfg.model.encoder.hidden_size : 30
2020-07-30 02:50:41,035 cfg.model.encoder.bidirectional : True
2020-07-30 02:50:41,035 cfg.model.encoder.dropout : 0.2
2020-07-30 02:50:41,035 cfg.model.encoder.num_layers : 3
2020-07-30 02:50:41,035 cfg.model.encoder.freeze : False
2020-07-30 02:50:41,035 cfg.model.decoder.type : recurrent
2020-07-30 02:50:41,035 cfg.model.decoder.rnn_type : gru
2020-07-30 02:50:41,035 cfg.model.decoder.embeddings.embedding_dim : 16
2020-07-30 02:50:41,035 cfg.model.decoder.embeddings.scale : False
2020-07-30 02:50:41,035 cfg.model.decoder.embeddings.freeze : False
2020-07-30 02:50:41,036 cfg.model.decoder.hidden_size : 30
2020-07-30 02:50:41,036 cfg.model.decoder.dropout : 0.2
2020-07-30 02:50:41,036 cfg.model.decoder.hidden_dropout : 0.2
2020-07-30 02:50:41,036 cfg.model.decoder.num_layers : 2
2020-07-30 02:50:41,036 cfg.model.decoder.input_feeding : True
2020-07-30 02:50:41,036 cfg.model.decoder.init_hidden : last
2020-07-30 02:50:41,036 cfg.model.decoder.attention : bahdanau
2020-07-30 02:50:41,036 cfg.model.decoder.freeze : False
2020-07-30 02:50:41,036 Data set sizes:
train 922,
valid 20,
test 20
2020-07-30 02:50:41,036 First training example:
[SRC] david gallo: das ist bill lange. ich bin dave gallo.
[TRG] david gallo: this is bill lange. i'm dave gallo.
2020-07-30 02:50:41,036 First 10 words (src): (0) <unk> (1) <pad> (2) <s> (3) </s> (4) und (5) die (6) wir (7) der (8) sie (9) das
2020-07-30 02:50:41,037 First 10 words (trg): (0) <unk> (1) <pad> (2) <s> (3) </s> (4) the (5) and (6) of (7) to (8) a (9) in
2020-07-30 02:50:41,037 Number of Src words (types): 105
2020-07-30 02:50:41,037 Number of Trg words (types): 106
2020-07-30 02:50:41,037 Model(
encoder=RecurrentEncoder(GRU(16, 30, num_layers=3, batch_first=True, dropout=0.2, bidirectional=True)),
decoder=RecurrentDecoder(rnn=GRU(46, 30, num_layers=2, batch_first=True, dropout=0.2), attention=BahdanauAttention),
src_embed=Embeddings(embedding_dim=16, vocab_size=105),
trg_embed=Embeddings(embedding_dim=16, vocab_size=106))
2020-07-30 02:50:41,038 EPOCH 1
2020-07-30 02:50:41,750 Epoch 1 Step: 10 Batch Loss: 19.871571 Tokens per Sec: 1453, Lr: 0.005000
2020-07-30 02:50:41,919 Hooray! New best validation result [loss]!
2020-07-30 02:50:41,919 Saving new checkpoint.
2020-07-30 02:50:41,925 Example #0
2020-07-30 02:50:41,925 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:41,925 Reference: i’m happy to be here .
2020-07-30 02:50:41,925 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:41,925 Example #1
2020-07-30 02:50:41,925 Source: ja , guten tag .
2020-07-30 02:50:41,925 Reference: yes , hello .
2020-07-30 02:50:41,926 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:41,926 Example #2
2020-07-30 02:50:41,926 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:41,926 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:41,926 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:41,926 Validation result (greedy) at epoch 1, step 10: bleu: 0.00, loss: 1045.0957, ppl: 15.8759, duration: 0.1752s
2020-07-30 02:50:44,099 Epoch 1 Step: 20 Batch Loss: 21.683571 Tokens per Sec: 708, Lr: 0.005000
2020-07-30 02:50:44,270 Hooray! New best validation result [loss]!
2020-07-30 02:50:44,270 Saving new checkpoint.
2020-07-30 02:50:44,275 Example #0
2020-07-30 02:50:44,275 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:44,275 Reference: i’m happy to be here .
2020-07-30 02:50:44,275 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:44,275 Example #1
2020-07-30 02:50:44,275 Source: ja , guten tag .
2020-07-30 02:50:44,275 Reference: yes , hello .
2020-07-30 02:50:44,275 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:44,276 Example #2
2020-07-30 02:50:44,276 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:44,276 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:44,276 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:44,276 Validation result (greedy) at epoch 1, step 20: bleu: 0.00, loss: 999.4313, ppl: 14.0693, duration: 0.1767s
2020-07-30 02:50:46,326 Epoch 1 Step: 30 Batch Loss: 32.438263 Tokens per Sec: 723, Lr: 0.005000
2020-07-30 02:50:46,485 Hooray! New best validation result [loss]!
2020-07-30 02:50:46,485 Saving new checkpoint.
2020-07-30 02:50:46,489 Example #0
2020-07-30 02:50:46,489 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:46,490 Reference: i’m happy to be here .
2020-07-30 02:50:46,490 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:46,490 Example #1
2020-07-30 02:50:46,490 Source: ja , guten tag .
2020-07-30 02:50:46,490 Reference: yes , hello .
2020-07-30 02:50:46,490 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:46,490 Example #2
2020-07-30 02:50:46,490 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:46,490 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:46,490 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:46,490 Validation result (greedy) at epoch 1, step 30: bleu: 0.00, loss: 994.4028, ppl: 13.8834, duration: 0.1643s
2020-07-30 02:50:48,621 Epoch 1 Step: 40 Batch Loss: 39.462688 Tokens per Sec: 699, Lr: 0.005000
2020-07-30 02:50:48,776 Example #0
2020-07-30 02:50:48,777 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:48,777 Reference: i’m happy to be here .
2020-07-30 02:50:48,777 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:48,777 Example #1
2020-07-30 02:50:48,777 Source: ja , guten tag .
2020-07-30 02:50:48,777 Reference: yes , hello .
2020-07-30 02:50:48,777 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:48,777 Example #2
2020-07-30 02:50:48,777 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:48,777 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:48,778 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:48,778 Validation result (greedy) at epoch 1, step 40: bleu: 0.00, loss: 999.0395, ppl: 14.0548, duration: 0.1560s
2020-07-30 02:50:50,701 Epoch 1 Step: 50 Batch Loss: 8.803011 Tokens per Sec: 578, Lr: 0.005000
2020-07-30 02:50:50,854 Example #0
2020-07-30 02:50:50,855 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:50,855 Reference: i’m happy to be here .
2020-07-30 02:50:50,855 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:50,855 Example #1
2020-07-30 02:50:50,855 Source: ja , guten tag .
2020-07-30 02:50:50,855 Reference: yes , hello .
2020-07-30 02:50:50,855 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:50,855 Example #2
2020-07-30 02:50:50,855 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:50,855 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:50,855 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:50,855 Validation result (greedy) at epoch 1, step 50: bleu: 0.00, loss: 1002.5289, ppl: 14.1851, duration: 0.1541s
2020-07-30 02:50:52,798 Epoch 1 Step: 60 Batch Loss: 32.723629 Tokens per Sec: 670, Lr: 0.005000
2020-07-30 02:50:52,951 Hooray! New best validation result [loss]!
2020-07-30 02:50:52,951 Saving new checkpoint.
2020-07-30 02:50:52,956 Example #0
2020-07-30 02:50:52,956 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:52,956 Reference: i’m happy to be here .
2020-07-30 02:50:52,956 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:52,956 Example #1
2020-07-30 02:50:52,956 Source: ja , guten tag .
2020-07-30 02:50:52,956 Reference: yes , hello .
2020-07-30 02:50:52,957 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:52,957 Example #2
2020-07-30 02:50:52,957 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:52,957 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:52,957 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:52,957 Validation result (greedy) at epoch 1, step 60: bleu: 0.00, loss: 992.4550, ppl: 13.8121, duration: 0.1583s
2020-07-30 02:50:55,108 Epoch 1 Step: 70 Batch Loss: 53.196281 Tokens per Sec: 638, Lr: 0.005000
2020-07-30 02:50:55,270 Example #0
2020-07-30 02:50:55,271 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:55,271 Reference: i’m happy to be here .
2020-07-30 02:50:55,271 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:55,271 Example #1
2020-07-30 02:50:55,271 Source: ja , guten tag .
2020-07-30 02:50:55,271 Reference: yes , hello .
2020-07-30 02:50:55,271 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:55,271 Example #2
2020-07-30 02:50:55,271 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:55,271 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:55,271 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:55,271 Validation result (greedy) at epoch 1, step 70: bleu: 0.00, loss: 1002.3389, ppl: 14.1780, duration: 0.1635s
2020-07-30 02:50:57,345 Epoch 1 Step: 80 Batch Loss: 48.362408 Tokens per Sec: 669, Lr: 0.005000
2020-07-30 02:50:57,500 Hooray! New best validation result [loss]!
2020-07-30 02:50:57,500 Saving new checkpoint.
2020-07-30 02:50:57,505 Example #0
2020-07-30 02:50:57,505 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:57,505 Reference: i’m happy to be here .
2020-07-30 02:50:57,505 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:57,505 Example #1
2020-07-30 02:50:57,505 Source: ja , guten tag .
2020-07-30 02:50:57,505 Reference: yes , hello .
2020-07-30 02:50:57,505 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:57,506 Example #2
2020-07-30 02:50:57,506 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:57,506 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:57,506 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:57,506 Validation result (greedy) at epoch 1, step 80: bleu: 0.00, loss: 987.8223, ppl: 13.6438, duration: 0.1608s
2020-07-30 02:50:59,632 Epoch 1 Step: 90 Batch Loss: 41.305519 Tokens per Sec: 684, Lr: 0.005000
2020-07-30 02:50:59,785 Example #0
2020-07-30 02:50:59,786 Source: ich freue mich , dass ich da bin .
2020-07-30 02:50:59,786 Reference: i’m happy to be here .
2020-07-30 02:50:59,786 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:59,786 Example #1
2020-07-30 02:50:59,786 Source: ja , guten tag .
2020-07-30 02:50:59,786 Reference: yes , hello .
2020-07-30 02:50:59,786 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:59,786 Example #2
2020-07-30 02:50:59,786 Source: ja , also , was soll biohacking sein ?
2020-07-30 02:50:59,786 Reference: yes , so , what is biohacking ?
2020-07-30 02:50:59,787 Hypothesis: <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk> <unk>
2020-07-30 02:50:59,787 Validation result (greedy) at epoch 1, step 90: bleu: 0.00, loss: 1000.0751, ppl: 14.0933, duration: 0.1543s
2020-07-30 02:51:01,420 Epoch 1: total training loss 3845.91
2020-07-30 02:51:01,421 Training ended after 1 epochs.
2020-07-30 02:51:01,421 Best validation result (greedy) at step 80: 987.82 loss.
/pytorch/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead.
2020-07-30 02:51:01,672 dev bleu: 0.00 [Beam search decoding with beam size = 5 and alpha = 1.0]
2020-07-30 02:51:01,672 Translations saved to: models/small_model/00000080.hyps.dev
2020-07-30 02:51:01,816 No references given for test -> no evaluation.
2020-07-30 02:51:01,816 Translations saved to: models/small_model/00000080.hyps.test
###Markdown
Everything we need is stored in the model directory: checkpoints, the vocabularies, and the training log, which helps us find the variable names.
###Code
model_dir = 'joeynmt/models/small_model/'
! ls $model_dir
###Output
00000080.hyps.dev 70.hyps att.30.0.pdf att.60.1.pdf att.90.2.pdf
00000080.hyps.test 80.ckpt att.30.1.pdf att.60.2.pdf best.ckpt
10.hyps 80.hyps att.30.2.pdf att.70.0.pdf config.yaml
20.hyps 90.hyps att.40.0.pdf att.70.1.pdf src_vocab.txt
30.ckpt att.10.0.pdf att.40.1.pdf att.70.2.pdf tensorboard
30.hyps att.10.1.pdf att.40.2.pdf att.80.0.pdf train.log
40.hyps att.10.2.pdf att.50.0.pdf att.80.1.pdf trg_vocab.txt
50.hyps att.20.0.pdf att.50.1.pdf att.80.2.pdf validations.txt
60.ckpt att.20.1.pdf att.50.2.pdf att.90.0.pdf
60.hyps att.20.2.pdf att.60.0.pdf att.90.1.pdf
###Markdown
In the log we find the variable names in the "Trainable parameters" list.
###Code
! head $model_dir/train.log
###Output
2020-07-30 02:50:39,435 Hello! This is Joey-NMT.
2020-07-30 02:50:41,029 Total params: 66376
2020-07-30 02:50:41,030 Trainable parameters: ['decoder.att_vector_layer.bias', 'decoder.att_vector_layer.weight', 'decoder.attention.energy_layer.weight', 'decoder.attention.key_layer.weight', 'decoder.attention.query_layer.weight', 'decoder.output_layer.weight', 'decoder.rnn.bias_hh_l0', 'decoder.rnn.bias_hh_l1', 'decoder.rnn.bias_ih_l0', 'decoder.rnn.bias_ih_l1', 'decoder.rnn.weight_hh_l0', 'decoder.rnn.weight_hh_l1', 'decoder.rnn.weight_ih_l0', 'decoder.rnn.weight_ih_l1', 'encoder.rnn.bias_hh_l0', 'encoder.rnn.bias_hh_l0_reverse', 'encoder.rnn.bias_hh_l1', 'encoder.rnn.bias_hh_l1_reverse', 'encoder.rnn.bias_hh_l2', 'encoder.rnn.bias_hh_l2_reverse', 'encoder.rnn.bias_ih_l0', 'encoder.rnn.bias_ih_l0_reverse', 'encoder.rnn.bias_ih_l1', 'encoder.rnn.bias_ih_l1_reverse', 'encoder.rnn.bias_ih_l2', 'encoder.rnn.bias_ih_l2_reverse', 'encoder.rnn.weight_hh_l0', 'encoder.rnn.weight_hh_l0_reverse', 'encoder.rnn.weight_hh_l1', 'encoder.rnn.weight_hh_l1_reverse', 'encoder.rnn.weight_hh_l2', 'encoder.rnn.weight_hh_l2_reverse', 'encoder.rnn.weight_ih_l0', 'encoder.rnn.weight_ih_l0_reverse', 'encoder.rnn.weight_ih_l1', 'encoder.rnn.weight_ih_l1_reverse', 'encoder.rnn.weight_ih_l2', 'encoder.rnn.weight_ih_l2_reverse', 'src_embed.lut.weight', 'trg_embed.lut.weight']
2020-07-30 02:50:41,031 cfg.name : my_experiment
2020-07-30 02:50:41,031 cfg.data.src : de
2020-07-30 02:50:41,031 cfg.data.trg : en
2020-07-30 02:50:41,031 cfg.data.train : test/data/toy/train
2020-07-30 02:50:41,031 cfg.data.dev : test/data/toy/dev
2020-07-30 02:50:41,031 cfg.data.test : test/data/toy/test
2020-07-30 02:50:41,031 cfg.data.random_train_subset : -1
###Markdown
We choose which checkpoint to load.
###Code
src_embed_name = 'src_embed.lut.weight'
trg_embed_name = 'trg_embed.lut.weight'
ckpt_path = model_dir+'80.ckpt'
###Output
_____no_output_____
###Markdown
Now we load the checkpoint.
###Code
import torch
state = torch.load(
ckpt_path,
map_location=(
lambda s, _: torch.serialization.default_restore_location(
s, 'cpu')
),
)
###Output
_____no_output_____
###Markdown
The state is a dictionary that contains all kinds of information, e.g. the state of the learning rate scheduler and the optimizer, but also all model variables.
###Code
state.keys()
###Output
_____no_output_____
###Markdown
The model variables are stored in `model_state`, which in turn contains an ordered dictionary mapping all variable names to their tensors.
###Code
model_params = state['model_state']
model_params.keys()
###Output
_____no_output_____
###Markdown
Let's get those embedding tensors into numpy!
###Code
src_embed_np = model_params[src_embed_name].numpy()
trg_embed_np = model_params[trg_embed_name].numpy()
src_embed_np
###Output
_____no_output_____
###Markdown
Now we just need to map the rows of the embedding matrices to the tokens in the vocabulary.
###Code
print('src embedding shape:', src_embed_np.shape)
print('trg embedding shape:', trg_embed_np.shape)
src_vocab_file = model_dir+'src_vocab.txt'
trg_vocab_file = model_dir+'trg_vocab.txt'
! wc -l $src_vocab_file
! wc -l $trg_vocab_file
###Output
105 joeynmt/models/small_model/src_vocab.txt
106 joeynmt/models/small_model/trg_vocab.txt
###Markdown
Luckily, this mapping is encoded in the vocabulary files stored in the model directory. Lines in those files correspond to rows in the embedding matrices.
###Code
src_tokens = []
with open(src_vocab_file, 'r') as of:
for line in of:
src_tokens.append(line.strip())
trg_tokens = []
with open(trg_vocab_file, 'r') as of:
for line in of:
trg_tokens.append(line.strip())
src_tokens
trg_tokens
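# Quick sanity check (a minimal sketch, using only what was loaded above):
# each vocabulary file should have exactly one line per row of the corresponding embedding matrix.
assert len(src_tokens) == src_embed_np.shape[0], 'source vocab size != number of embedding rows'
assert len(trg_tokens) == trg_embed_np.shape[0], 'target vocab size != number of embedding rows'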
###Output
_____no_output_____
###Markdown
Let's combine them into a dictionary for convenience.
###Code
src_lookup = dict()
for src_token, src_embedding in zip(src_tokens, src_embed_np):
src_lookup[src_token] = src_embedding
trg_lookup = dict()
for trg_token, trg_embedding in zip(trg_tokens, trg_embed_np):
trg_lookup[trg_token] = trg_embedding
###Output
_____no_output_____
###Markdown
Now we can access all the embeddings for the entries in the vocabularies.
###Code
src_lookup['und']
trg_lookup['and']
###Output
_____no_output_____
###Markdown
And we can do arbitrary calculations or visualizations.
###Code
import numpy as np
np.dot(src_lookup['und'], trg_lookup['and'])
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(np.expand_dims(src_lookup['und'], 0))
plt.show()
plt.imshow(np.expand_dims(trg_lookup['and'], 0))
plt.show()
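# A further illustration (a minimal sketch): cosine similarity is often more informative
# than a raw dot product when comparing embeddings. For this toy model the nearest
# neighbours are essentially arbitrary; with a properly trained model they become meaningful.
def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

similarities = {tok: cosine_similarity(src_lookup['und'], emb) for tok, emb in trg_lookup.items()}
print(sorted(similarities.items(), key=lambda kv: kv[1], reverse=True)[:5])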
###Output
_____no_output_____ |
old_versions/1main-v4-MCMC-tai.ipynb | ###Markdown
Network inference of categorical variables: non-sequential data
###Code
import sys
import numpy as np
from scipy import linalg
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
%matplotlib inline
import inference
import fem
# setting parameter:
np.random.seed(1)
n = 20 # number of positions
m = 3 # number of values at each position
l = int(((n*m)**2)) # number of samples
g = 2.
nm = n*m
def itab(n,m):
i1 = np.zeros(n)
i2 = np.zeros(n)
for i in range(n):
i1[i] = i*m
i2[i] = (i+1)*m
return i1.astype(int),i2.astype(int)
# generate coupling matrix w0:
def generate_interactions(n,m,g):
nm = n*m
w = np.random.normal(0.0,g/np.sqrt(nm),size=(nm,nm))
i1tab,i2tab = itab(n,m)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[i1:i2,:] -= w[i1:i2,:].mean(axis=0)
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
w[i1:i2,i1:i2] = 0. # no self-interactions
#for i in range(nm):
# for j in range(nm):
# if j > i: w[i,j] = w[j,i]
return w
i1tab,i2tab = itab(n,m)
w0 = inference.generate_interactions(n,m,g)
#plt.imshow(w0,cmap='rainbow',origin='lower')
#plt.clim(-0.5,0.5)
#plt.colorbar(fraction=0.045, pad=0.05,ticks=[-0.5,0,0.5])
#plt.show()
#print(w0)
def generate_sequences2(w,n,m,l):
i1tab,i2tab = itab(n,m)
# initial s (categorical variables)
s_ini = np.random.randint(0,m,size=(l,n)) # integer values
#print(s_ini)
# onehot encoder
enc = OneHotEncoder(n_values=m)
s = enc.fit_transform(s_ini).toarray()
print(s)
nrepeat = 500
for irepeat in range(nrepeat):
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h = s.dot(w[i1:i2,:].T) # h[t,i1:i2]
h_old = (s[:,i1:i2]*h).sum(axis=1) # h[t,i0]
k = np.random.randint(0,m,size=l)
for t in range(l):
if np.exp(h[t,k[t]] - h_old[t]) > np.random.rand():
s[t,i1:i2] = 0.
s[t,i1+k[t]] = 1.
return s
# 2018.11.07: Tai
def nrgy(s,w):
l = s.shape[0]
n,m = 20,3
i1tab,i2tab = itab(n,m)
p = np.zeros((l,n))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
h = s.dot(w[i1:i2,:].T)
#e = (s[:,i1:i2]*h).sum(axis=1)
#p[:,i] = np.exp(e)
#p_sum = np.sum(np.exp(h),axis=1)
#p[:,i] /= p_sum
p[:,i] = np.exp((s[:,i1:i2]*h).sum(axis=1))/(np.exp(h).sum(axis=1))
#like = p.sum(axis=1)
return np.sum(np.log(p),axis=1)
# Vipul:
def nrgy_vp(onehot,w):
nrgy = onehot*(onehot.dot(w.T))
# print(nrgy - np.log(2*np.cosh(nrgy)))
return np.sum(nrgy - np.log(2*np.cosh(nrgy)),axis=1) #ln prob
def generate_sequences_vp(w,n_positions,n_residues,n_seq):
n_size = n_residues*n_positions
n_trial = 100*(n_size) #monte carlo steps to find the right sequences
b = np.zeros((n_size))
trial_seq = np.tile(np.random.randint(0,n_residues,size=(n_positions)),(n_seq,1))
print(trial_seq[0])
enc = OneHotEncoder(n_values=n_residues)
onehot = enc.fit_transform(trial_seq).toarray()
old_nrgy = nrgy(onehot,w) #+ n_positions*(n_residues-1)*np.log(2)
for trial in range(n_trial):
# print('before',np.mean(old_nrgy))
index_array = np.random.choice(range(n_positions),size=2,replace=False)
index,index1 = index_array[0],index_array[1]
r_trial = np.random.randint(0,n_residues,size=(n_seq))
r_trial1 = np.random.randint(0,n_residues,size=(n_seq))
mod_seq = np.copy(trial_seq)
mod_seq[:,index] = r_trial
mod_seq[:,index1] = r_trial1
mod_nrgy = nrgy(enc.fit_transform(mod_seq).toarray(),w) #+ n_positions*(n_residues-1)*np.log(2)
seq_change = mod_nrgy-old_nrgy > np.log(np.random.rand(n_seq))
#seq_change = mod_nrgy/(old_nrgy+mod_nrgy) > np.random.rand(n_seq)
if trial>n_size:
trial_seq[seq_change,index] = r_trial[seq_change]
trial_seq[seq_change,index1] = r_trial1[seq_change]
old_nrgy[seq_change] = mod_nrgy[seq_change]
else:
best_seq = np.argmax(mod_nrgy-old_nrgy)
trial_seq = np.tile(mod_seq[best_seq],(n_seq,1))
old_nrgy = np.tile(mod_nrgy[best_seq],(n_seq))
if trial%(10*n_size) == 0: print('after',np.mean(old_nrgy))#,trial_seq[0:5])
print(trial_seq[:10,:10])
#return trial_seq
return enc.fit_transform(trial_seq).toarray()
s = generate_sequences_vp(w0,n,m,l)
print(s.shape)
print(s[:10,:10])
## 2018.11.07: for non sequencial data
def fit_additive(s,n,m):
nloop = 10
i1tab,i2tab = itab(n,m)
nm = n*m
nm1 = nm - m
w_infer = np.zeros((nm,nm))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
# remove column i
x = np.hstack([s[:,:i1],s[:,i2:]])
x_av = np.mean(x,axis=0)
dx = x - x_av
c = np.cov(dx,rowvar=False,bias=True)
c_inv = linalg.pinv(c,rcond=1e-15)
#print(c_inv.shape)
h = s[:,i1:i2].copy()
for iloop in range(nloop):
h_av = h.mean(axis=0)
dh = h - h_av
dhdx = dh[:,:,np.newaxis]*dx[:,np.newaxis,:]
dhdx_av = dhdx.mean(axis=0)
w = np.dot(dhdx_av,c_inv)
#w = w - w.mean(axis=0)
h = np.dot(x,w.T)
p = np.exp(h)
p_sum = p.sum(axis=1)
#p /= p_sum[:,np.newaxis]
for k in range(m):
p[:,k] = p[:,k]/p_sum[:]
h += s[:,i1:i2] - p
w_infer[i1:i2,:i1] = w[:,:i1]
w_infer[i1:i2,i2:] = w[:,i1:]
return w_infer
w2 = fit_additive(s,n,m)
plt.plot([-1,1],[-1,1],'r--')
plt.scatter(w0,w2)
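# A quick quantitative check of the recovery shown in the scatter plot above (a minimal sketch,
# not part of the original analysis): correlation and mean squared error between true and inferred couplings.
print('Pearson correlation (w0 vs w2):', np.corrcoef(w0.flatten(), w2.flatten())[0,1])
print('Mean squared error (w0 vs w2):', np.mean((w0 - w2)**2))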
i1tab,i2tab = itab(n,m)
nloop = 20
nm1 = nm - m
w_infer = np.zeros((nm,nm))
wini = np.random.normal(0.0,1./np.sqrt(nm),size=(nm,nm1))
for i in range(n):
i1,i2 = i1tab[i],i2tab[i]
x = np.hstack([s[:,:i1],s[:,i2:]])
y = s.copy()
# covariance[ia,ib]
cab_inv = np.empty((m,m,nm1,nm1))
eps = np.empty((m,m,l))
for ia in range(m):
for ib in range(m):
if ib != ia:
eps[ia,ib,:] = y[:,i1+ia] - y[:,i1+ib]
which_ab = eps[ia,ib,:] !=0.
xab = x[which_ab]
# ----------------------------
xab_av = np.mean(xab,axis=0)
dxab = xab - xab_av
cab = np.cov(dxab,rowvar=False,bias=True)
cab_inv[ia,ib,:,:] = linalg.pinv(cab,rcond=1e-15)
w = wini[i1:i2,:].copy()
for iloop in range(nloop):
h = np.dot(x,w.T)
for ia in range(m):
wa = np.zeros(nm1)
for ib in range(m):
if ib != ia:
which_ab = eps[ia,ib,:] !=0.
eps_ab = eps[ia,ib,which_ab]
xab = x[which_ab]
# ----------------------------
xab_av = np.mean(xab,axis=0)
dxab = xab - xab_av
h_ab = h[which_ab,ia] - h[which_ab,ib]
ha = np.divide(eps_ab*h_ab,np.tanh(h_ab/2.), out=np.zeros_like(h_ab), where=h_ab!=0)
dhdx = (ha - ha.mean())[:,np.newaxis]*dxab
dhdx_av = dhdx.mean(axis=0)
wab = cab_inv[ia,ib,:,:].dot(dhdx_av) # wa - wb
wa += wab
w[ia,:] = wa/m
w_infer[i1:i2,:i1] = w[:,:i1]
w_infer[i1:i2,i2:] = w[:,i1:]
#return w_infer
plt.plot([-1,1],[-1,1],'r--')
plt.scatter(w0,w_infer)
#plt.scatter(w0[0:3,3:],w[0:3,:])
###Output
_____no_output_____ |
site/zh-cn/beta/tutorials/quickstart/advanced.ipynb | ###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TensorFlow 2.0 quickstart for experts View on tensorflow.google.cn Run in Google Colab View source on GitHub Download notebook Note: Our TensorFlow community translated these documents. Since community translations are best-effort, there is no guarantee that they are fully accurate or reflect the latest [official English documentation](https://www.tensorflow.org/?hl=en). If you have suggestions to improve this translation, please submit a pull request to the [tensorflow/docs](https://github.com/tensorflow/docs) GitHub repository. To volunteer to write or review translations, join the [[email protected] Google Group](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-zh-cn). This is a [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) notebook file. Python programs run directly in the browser, which is a great way to learn and use TensorFlow. To follow this tutorial, run the notebook in Google Colab by clicking the button at the top of this page. 1. In Colab, connect to a Python runtime: at the top-right of the menu bar, select *CONNECT*. 2. Run all notebook code cells: select *Runtime* > *Run all*. Download and install the TensorFlow 2.0 Beta package:
###Code
try:
# Colab only
%tensorflow_version 2.x
except Exception:
pass
###Output
_____no_output_____
###Markdown
Import TensorFlow into your program:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
###Output
_____no_output_____
###Markdown
Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).
###Code
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
###Output
_____no_output_____
###Markdown
Use `tf.data` to batch and shuffle the dataset:
###Code
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
###Output
_____no_output_____
###Markdown
Build the `tf.keras` model using the Keras [model subclassing API](https://www.tensorflow.org/guide/kerasmodel_subclassing):
###Code
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
model = MyModel()
###Output
_____no_output_____
###Markdown
Choose an optimizer and a loss function for training:
###Code
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Select metrics to measure the loss and accuracy of the model. These metrics accumulate values over the epochs and then print the overall result.
###Code
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
###Output
_____no_output_____
###Markdown
Use `tf.GradientTape` to train the model:
###Code
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
###Output
_____no_output_____
###Markdown
Test the model:
###Code
@tf.function
def test_step(images, labels):
predictions = model(images)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print (template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
###Output
_____no_output_____
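###Markdown
The metric objects above (`train_loss`, `train_accuracy`, `test_loss`, `test_accuracy`) are stateful, so the printed loss and accuracy accumulate over all epochs. To report per-epoch values instead, the metric states can be reset at the end of each epoch; a minimal sketch of the reset call:
###Code
# Reset the accumulated state of every metric (placed at the end of each epoch in the loop above).
for metric in [train_loss, train_accuracy, test_loss, test_accuracy]:
    metric.reset_states()
###Output
_____no_output_____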
###Markdown
Copyright 2019 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TensorFlow 2.0 quickstart for experts View on tensorflow.google.cn Run in Google Colab View source on GitHub Download notebook Note: Our TensorFlow community translated these documents. Since community translations are best-effort, there is no guarantee that they are fully accurate or reflect the latest [official English documentation](https://www.tensorflow.org/?hl=en). If you have suggestions to improve this translation, please submit a pull request to the [tensorflow/docs](https://github.com/tensorflow/docs) GitHub repository. To volunteer to write or review translations, join the [[email protected] Google Group](https://groups.google.com/a/tensorflow.org/forum/!forum/docs-zh-cn). This is a [Google Colaboratory](https://colab.research.google.com/notebooks/welcome.ipynb) notebook file. Python programs run directly in the browser, which is a great way to learn and use TensorFlow. To follow this tutorial, run the notebook in Google Colab by clicking the button at the top of this page. 1. In Colab, connect to a Python runtime: at the top-right of the menu bar, select *CONNECT*. 2. Run all notebook code cells: select *Runtime* > *Run all*. Download and install the TensorFlow 2.0 Beta package:
###Code
!pip install tensorflow==2.0.0-beta1
###Output
_____no_output_____
###Markdown
Import TensorFlow into your program:
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
###Output
_____no_output_____
###Markdown
Load and prepare the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).
###Code
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
###Output
_____no_output_____
###Markdown
Use `tf.data` to batch and shuffle the dataset:
###Code
train_ds = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
###Output
_____no_output_____
###Markdown
Build the `tf.keras` model using the Keras [model subclassing API](https://www.tensorflow.org/guide/kerasmodel_subclassing):
###Code
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu')
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu')
self.d2 = Dense(10, activation='softmax')
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
model = MyModel()
###Output
_____no_output_____
###Markdown
Choose an optimizer and a loss function for training:
###Code
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
###Output
_____no_output_____
###Markdown
Select metrics to measure the loss and accuracy of the model. These metrics accumulate values over the epochs and then print the overall result.
###Code
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
###Output
_____no_output_____
###Markdown
Use `tf.GradientTape` to train the model:
###Code
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
###Output
_____no_output_____
###Markdown
Test the model:
###Code
@tf.function
def test_step(images, labels):
predictions = model(images)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print (template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
###Output
_____no_output_____ |
FluentPython/.ipynb_checkpoints/Chapter01-checkpoint.ipynb | ###Markdown
* By implementing the two special methods __len__ and __getitem__, our FrenchDeck behaves just like one of Python's own sequence types and benefits from core language features such as iteration and slicing. It cannot be shuffled yet, but that only requires implementing __setitem__ (a shuffle sketch follows the sorting example below).* Special methods are meant to be called by the interpreter; we do not call them explicitly ourselves.
###Code
# Going a step further: how do we sort the cards?
suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)
def spades_high(card):
rank_value = FrenchDeck.ranks.index(card.rank)
return rank_value * len(suit_values) + suit_values[card.suit]
sorted(french_deck, key=spades_high)
###Output
_____no_output_____
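###Markdown
As noted at the top, shuffling only needs `__setitem__`. A minimal sketch, assuming the FrenchDeck class and the french_deck instance referenced above (with the cards held in the `_cards` list):
###Code
import random

def set_card(deck, position, card):
    deck._cards[position] = card

FrenchDeck.__setitem__ = set_card  # provide the missing special method on the class
random.shuffle(french_deck)        # in-place shuffling now works
french_deck[:3]
###Output
_____no_output_____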
###Markdown
Although FrenchDeck implicitly inherits from the object class, most of its functionality is not inherited; it comes from the data model and a bit of composition. By implementing the special methods **__len__** and **__getitem__**, FrenchDeck behaves like one of Python's own sequence types and benefits from core language features such as iteration and slicing. The class can also be used with standard-library functions such as **random.choice**, **reversed**, and **sorted**. In addition, the use of composition means the concrete implementations of **__len__** and **__getitem__** can delegate all the work to self._cards, a Python list object.
###Code
#
from math import hypot
class Vector:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return 'Vector(%r, %r)' % (self.x, self.y)
def __add__(self, other):
return Vector(self.x+other.x, self.y+other.y)
def __mul__(self, num):
return Vector(self.x*num, self.y*num)
def __abs__(self):
return hypot(self.x, self.y)
def __bool__(self):
return bool(self.x or self.y)
# 测试repr
Vector(2,7)
a = Vector(3,6)
b = Vector(5,9)
print(a+b)
print(a*2)
print(abs(a))
print(bool(a))
###Output
Vector(8, 15)
Vector(6, 12)
6.708203932499369
True
|
AUTOEn.ipynb | ###Markdown
###Code
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import zipfile
with zipfile.ZipFile('/content/ml-100k.zip') as zip_ref:
zip_ref.extractall('/content/data')
training_set = pd.read_csv('/content/data/ml-100k/u1.base',sep = '\t')
test_set = pd.read_csv('/content/data/ml-100k/u1.test',sep = '\t')
training_set = np.array(training_set,dtype='int')
test_set = np.array(test_set,dtype='int')
nb_users = max( max(training_set[:,0]),max(test_set[:,0]))
nb_movies = max( max(training_set[:,1]) , max(test_set[:,1]))
nb_users,nb_movies
def convert(data):
new_data = []
for user_id in range(1,nb_users+1):
id_movie = data[:,1][data[:,0] == user_id]
id_ratings = data[:,2][data[:,0] == user_id]
new_zer = np.zeros(nb_movies)
new_zer[id_movie - 1] = id_ratings
id_ratings = new_zer
new_data.append(list(id_ratings))
return new_data
training_set = convert(training_set)
test_set = convert(test_set)
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
class SAE(nn.Module):
def __init__(self, ):
super(SAE, self).__init__()
self.fc1 = nn.Linear(nb_movies, 20)
self.fc2 = nn.Linear(20, 10)
self.fc3 = nn.Linear(10, 20)
self.fc4 = nn.Linear(20, nb_movies)
self.activation = nn.Sigmoid()
def forward(self, x):
x = self.activation(self.fc1(x))
x = self.activation(self.fc2(x))
x = self.activation(self.fc3(x))
x = self.fc4(x)
return x
sae = SAE()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.RMSprop(sae.parameters(),lr = 0.01,weight_decay=0.5)
nb_epoch = 200
for epoch in range(1, nb_epoch + 1):
train_loss = 0
s = 0.
for id_user in range(nb_users):
input = Variable(training_set[id_user]).unsqueeze(0)
target = input.clone()
if torch.sum(target.data > 0) > 0:
      optimizer.zero_grad()  # clear gradients left over from the previous user/epoch
      output = sae(input)
target.require_grad = False
output[target == 0] = 0
loss = criterion(output, target)
mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)
loss.backward()
train_loss += np.sqrt(loss.data*mean_corrector)
s += 1.
optimizer.step()
print('epoch: '+str(epoch)+'\tloss: '+ str(train_loss/s))
test_loss = 0
s = 0.
for id_user in range(nb_users):
input = Variable(training_set[id_user]).unsqueeze(0)
target = Variable(test_set[id_user]).unsqueeze(0)
if torch.sum(target.data > 0) > 0:
output = sae(input)
target.require_grad = False
output[target == 0] = 0
loss = criterion(output, target)
mean_corrector = nb_movies/float(torch.sum(target.data > 0) + 1e-10)
test_loss += np.sqrt(loss.data*mean_corrector)
s += 1.
print('test loss: '+str(test_loss/s))
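# A minimal sketch of using the trained autoencoder for recommendations
# (the user index and the top-10 cut-off are illustrative assumptions, not part of the original script).
user_id = 0
user_ratings = Variable(training_set[user_id]).unsqueeze(0)
predicted = sae(user_ratings).data.numpy().flatten()
unseen = np.where(training_set[user_id].numpy() == 0)[0]       # movies this user has not rated
top_unseen = unseen[np.argsort(predicted[unseen])[::-1][:10]]  # highest predicted ratings first
print('recommended movie ids:', top_unseen + 1)                # +1 because movie ids are 1-based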
###Output
test loss: tensor(0.9548)
|
Module_02/01-FileHandling-and-itsVariousOperations/demo1_file_handling.ipynb | ###Markdown
How to take User input and use eval() function?
###Code
# Example 1
# This will throw error
var1=input('Enter a number')
var2=6
result=var1+var2
print(result)
###Output
Enter a number4
###Markdown
Why does the error present in the above code and What is that error?
###Code
## Whenever we take any input in Python using input(), it always treats the entered value as a string,
## regardless of the type it looks like. So, in the above case, the number we entered was
## treated as a string even though it looks like an integer. We can't concatenate a string and an integer like that.
###Output
_____no_output_____
###Markdown
Do we have any solution for this? Yes. That is using eval()
###Code
# Example 2
## When we apply eval() to the result of input(), the value keeps the actual data type that the user entered.
## Check the example below.
# Python provides a built-in function eval() to retain the original data type of the entered value.
x=input('Enter 1st number')
y=eval(input('Enter 2nd number'))
print(x, type(x))
print(y, type(y))
###Output
Enter 1st number5
Enter 2nd number7
5 <class 'str'>
7 <class 'int'>
###Markdown
File Handling and its different operations You can open a file using Python's built-in open() function: file_object = open(file_name, [access_mode]) - The access mode determines the mode in which the file has to be opened, i.e. read, write, or append|Modes|Description||:-----:|:-----------|| r |This is the default mode and is used for opening a file in read only mode|| rb |opens a file in read only mode in binary format|| r+ |opens a file for both reading and writing|| rb+|opens a file to read and write in binary format|| w |opens a file in write only mode. If the file exists, it overwrites it or else creates a new one.|| wb |opens a file for writing only in binary format. If the file exists, it overwrites it or else creates a new one||a| opens a file to append||ab| opens a file to append in binary format||a+|opens a file to append and read||ab+|opens a file to append and read in binary format||w+|opens a file to read and write||wb+|opens a file to read and write in binary format|
###Code
# Example 3
import os
file_obj=open('File_1.txt','r') # This line of code is used to open the particular file in read only mode.
data=file_obj.read() # This line reads the whole content of the file and stores in a variable.
print(data)
file_obj.close() # This line closes the file which we have opened.
# Example 4 ( Reading each character of the file and printing each character on separate line)
file_obj_1=open('File_1.txt','r')
data_1=file_obj_1.read()
for i in data_1:
print(i)
# Example 5 ( Opening a file which is already open)
file_obj_1=open('File_1.txt','r')
data_1=file_obj_1.read()
print(data_1)
file_obj_2=open('File_1.txt','r')
data_2=file_obj_2.read()
print(data_2)
# Example 6 (Reading some specific count of characters)
file_obj_3=open('File_1.txt','r')
data_3=file_obj_3.read(29) # It is reading only 29 characters
print(data_3)
# Example 7 (Writing content to a file)
file_obj_4=open('File_1.txt','w')
for i in range(5):
file_obj_4.write('Vijay rocks'+str(i)+'\n')
file_obj_5=open('File_1.txt','r')
data_4=file_obj_5.read()
print(data_4)
# re-set the file
# Vijay welcomes you to Python Programming World.
file_obj_5=open('File_1.txt','w')
file_obj_5.write('Vijay welcomes you to Python Programming World.')
###Output
_____no_output_____
###Markdown
As you may have observed, the content of File_1 has been overwritten: the old content was replaced by the new content. What if we want to add new content at the end of the old content? Can we do that? Yes, by opening the file in append mode.
###Code
# Example 8
file_obj_6=open('File_1.txt','a')
for i in range(5):
file_obj_6.write('Python rocks'+str(i)+'\n')
file_obj_6.close()
file_obj_6=open('File_1.txt','r')
data_5=file_obj_6.read()
print(data_5)
# Example 9
file_obj_6=open('File_2.txt','w') # opens the file only for write mode. If the file is not present, it will create a new file
file_obj_7=open('File_3.txt','r+') # opens the file for both reading and writing mode.
# Example 10 (This code is reading 5 characters at a time and printing. When it reads the entire content, it breaks)
file_obj=open('File_1.txt','r')
while True:
line_data=file_obj.read(5)
if line_data:
print(line_data+'\n')
else:
break
file_obj.close()
# Example 11
print(file_obj.closed) # It is checking if the file is closed
print(file_obj.mode) # It checks the mode in which the file was open
print(file_obj.name) # it gives the name of the file
file_obj.close()
file_obj_1.close()
file_obj_2.close()
file_obj_3.close()
file_obj_4.close()
file_obj_5.close()
file_obj_6.close()
file_obj_7.close()
## Why have all these close() calls been made? So that we don't get the error "The file is being used by another process".
# Example 12
os.rename('File_1.txt','File_New.txt') ## It is renaming the file.
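# A cleaner alternative (a minimal sketch): a with-block closes the file automatically,
# even if an exception occurs, so no explicit close() calls are needed.
with open('File_New.txt', 'r') as f:
    print(f.read())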
###Output
_____no_output_____
###Markdown
tell() and seek() in File Handling**tell():** In Python file handling, the tell() function is used to get the current position of the file object. By file object we mean a cursor, and it is the cursor that decides from where data is read or written in a file.__seek():__ In Python file handling, the seek() function is used to move the file object (cursor) to a required position, which determines from where data is read or written in a file.
###Code
#Example of tell
file_obj=open('File_4_tell_seek.txt','r')
# This will tell the position of pointer. When the file has been opened, the pointer is at the beginning
print('The pointer is at the beginning when the file is opened', file_obj.tell())
# This line reads the entire content.
file_obj.read()
# Now after reading the entire content, the pointer is at the end
print('Now after reading the entire content, the pointer is at the end', file_obj.tell())
output=file_obj.read()
# This will print empty. Why? Because the file has been already read. The pointer is at the end.
print('This will print empty', output)
# Open the file in read only mode
file_obj = open('File_4_tell_seek.txt', 'r')
# This will tell the position of pointer. When the file has been opened, the pointer is at the beginning
print('The current position of the pointer.',file_obj.tell())
# This line reads the entire content
file_obj.read()
# Now after reading the entire content, the pointer is at the end
print('The current position of the pointer.',file_obj.tell())
# This will shift the pointer to 4 position right from beginning. 0 means beginning
file_obj.seek(4,0)
# This give the output as 4.
print('The current position of the pointer.',file_obj.tell())
# This will print from the 5th character
output = file_obj.read()
print(output)
# Open the file in the readonly and binary mode
file_obj = open('File_4_tell_seek.txt', 'rb')
# This will tell the position of pointer. When the file has been opened, the pointer is at the beginning
print('THe current position of the pointer.', file_obj.tell())
# This line reads the entire content.
file_obj.read()
# Now after reading the entire content, the pointer is at the end
print('The current position of the pointer.', file_obj.tell())
# This will shift the pointer to 5 position left from end. 2 means end. And 1 means current position
file_obj.seek(-5,1)
#It is supported only when the file is opened in binary mode.
# This give the output as 65.
print('The current position of the pointer.', file_obj.tell())
# This will print last 5 characters
output = file_obj.read()
print(output)
file_obj.close()
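# A small extra illustration: seek(0) moves the cursor back to the beginning, so an
# already-read file can be read again (this is why the earlier second read printed empty).
file_obj = open('File_4_tell_seek.txt', 'r')
file_obj.read()          # cursor is now at the end
file_obj.seek(0)         # rewind to the beginning
print(file_obj.read())   # the full content is available again
file_obj.close()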
###Output
THe current position of the pointer. 0
The current position of the pointer. 70
The current position of the pointer. 65
b'cks4\n'
|
T1_SaintPetersbergParadox.ipynb | ###Markdown
CS 5408 - Game Theory for Computing Topic 1: Decision Theory -- Saint Petersberg Paradox\COPYRIGHTS: © Sid Nadendla, 2021 Choice Experiment* Flip an unbiased coin over multiple iterations. * In the first iteration, you win$$r_1 = \begin{cases} 2, & \text{if "Heads"}, \\ 0, & \text{otherwise} \end{cases}.$$* In the second iteration, you win$$r_2 = \begin{cases} 4, & \text{if "Heads" in all previous iterations}, \\ 0, & \text{otherwise} \end{cases}.$$* In the third iteration, you win$$r_3 = \begin{cases} 8, & \text{if "Heads" in all previous iterations}, \\ 0, & \text{otherwise} \end{cases}.$$* Upon $n$ iterations, you win$$r_n = \begin{cases} 2^n, & \text{if "Heads" in all previous iterations}, \\ 0, & \text{otherwise} \end{cases}.$$The net reward that you obtain after $N$ iterations is given by$$ R_N = \displaystyle \sum_{n = 1}^{N} r_n. $$ If you are asked to choose $N$ before the experiment is conducted, what would you choose?
###Code
!pip install numpy
!pip install matplotlib
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Monte-Carlo Simulation Section 1: Basic Style
###Code
def SaintPetersburg1(iteration,state):
# Initialize instantaneous reward to zero
reward = 0
# COIN TOSS EXPERIMENT:
# Generate a uniformly distributed random variable in [0,1].
# If it is greater than 0.5, then declare HEADS. Otherwise, TAILS.
if np.random.uniform() > 0.5:
toss = 1 # 1 represents HEADS
else:
toss = 0 # 0 represents TAILS
state = 1
# Compute the instantaneous reward
if ((toss == 1) and (state == 0)):
reward = 2**(iteration+1)
return reward, state
###Output
_____no_output_____
###Markdown
**Choice Experiment:**
###Code
# Choice variable, i.e. the number of times you wish to toss the coin.
N = int(input("Enter the number of iterations you want to run the experiment:"))
# Initialize variables
experiment_state = 0
instantaneous_reward = np.zeros(N)
final_reward = 0
# Toss the coin N times, and compute the total reward.
for n in range(N):
instantaneous_reward[n], experiment_state = SaintPetersburg1(n,experiment_state)
    if experiment_state == 1:  # tails has occurred, so the total reward is now fixed
final_reward = np.sum(instantaneous_reward[:n+1])
# Print all instantaneous rewards and total reward from this choice experiment
print(f"Your rewards per iteration are: {instantaneous_reward}")
print(f"Your total reward: {final_reward}")
###Output
Enter the number of iterations you want to run the experiment:20
Your rewards per iteration are: [2. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
Your total reward: 2.0
###Markdown
**Empirical Distribution of Rewards:**
###Code
# Number of Monte-Carlo Runs
K = 1000
# Number of iterations per Monte-Carlo run.
N = 100
# Initialize variables
total_reward = np.zeros(K)
inst_reward = np.zeros((K,N))
prob = np.zeros(N)
# Toss the coin N times, and compute the total reward in each Monte-Carlo run
for k in range(K):
state = 0
for n in range(N):
inst_reward[k,n], state = SaintPetersburg1(n,state)
if state == 1:
total_reward[k] = np.sum(inst_reward[k,:n])
# Plot the histogram (empirical distribution)
plt.hist(total_reward, bins=[0, 1, 3, 7, 15, 31], density=True,color="C1")
# Compute the actual probability of different outcomes
for n in range(N):
prob[n] = 1/2**(n+1)
plt.xlim(0,15)
# Overlay the probability plot onto the histogram
plt.plot(prob,color="C2")
plt.xlabel("Number of Iterations")
plt.ylabel("Frequency of Occurrence")
plt.show()
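# A quick numerical illustration of the paradox using the samples above (a minimal sketch):
# the sample mean of the total reward is very unstable because of the heavy tail,
# whereas the mean of a logarithmic (Bernoulli-style) utility settles down.
print('empirical mean total reward  :', np.mean(total_reward))
print('empirical mean log2(1+reward):', np.mean(np.log2(1 + total_reward)))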
###Output
_____no_output_____
###Markdown
**Computation of Average Rewards:** Consider the $n^{th}$ iteration. * In order to obtain a non-zero reward in this iteration, we need to observe HEADS in this and all previous iterations.* The probability of such an event is $p_n = \left( \frac{1}{2} \times \cdots \times \frac{1}{2} \right)_{n\text{ times}} = \frac{1}{2^n}$.* Given that the instantaneous reward is $r_n = \begin{cases} 2^n, & \text{if "Heads" in all previous iterations}, \\ 0, & \text{otherwise} \end{cases}$, the average reward after $N$ iterations is given by$$\mathbb{E}(R_N) = \displaystyle \sum_{n = 1}^N p_n r_n = \displaystyle \sum_{n = 1}^N \left[ \left( \frac{1}{2^n} \right) \cdot 2^n + \left( 1 - \frac{1}{2^n} \right) \cdot 0 \right] = N.$$* On the other hand, Bernoulli claimed that people work with logarithmic utilities, i.e. $u_n = \log r_n$.* Therefore, the expected utility at the human decision maker is$$\mathbb{E}(U_N) = \displaystyle \sum_{n = 1}^N p_n u_n = \displaystyle \sum_{n = 1}^N p_n \log_2(r_n) = \displaystyle \sum_{n = 1}^N \left[ \left( \frac{1}{2^n} \right) \cdot n + \left( 1 - \frac{1}{2^n} \right) \cdot 0 \right] = 2 - \frac{N+2}{2^N}.$$
###Code
# Initialize variables
reward = np.zeros(N)
log_reward = np.zeros(N)
av_reward = np.zeros(N)
log_utility = np.zeros(N)
# Compute the expected reward (theoretical)
for n in range(N):
reward[n] = 2**(n+1)
log_reward[n] = np.log2(reward[n])
av_reward[n] = np.sum(prob[:n]*reward[:n])
log_utility[n] = np.sum(prob[:n]*log_reward[:n])
# Plot expected reward and Bernoulli's utility (expected log-reward)
# Remark 1: Expected reward linearly increases with the number of iterations.
plt.plot(av_reward,color="C1")
plt.xlabel("Number of Iterations")
plt.ylabel("Average Reward")
plt.show()
# Remark 2: Bernoulli's utility converges to some finite value after a few iterations.
plt.plot(log_utility,color="C2")
plt.xlabel("Number of Iterations")
plt.ylabel("Bernoulli's Utility")
plt.xlim(0,12)
plt.show()
###Output
_____no_output_____
###Markdown
Section 2: Using Classes and Objects
###Code
class Saint_Petersberg(object):
def __init__(self, N):
self.N = N
self.state = 0
self.rewards = np.zeros(N)
self.total_reward = 0
self.iteration = 0
def __repr__(self):
return f"""Instantaneous Rewards : {self.rewards}\
\nTotal Reward : {self.total_reward}\
\n"""
def coin_toss(self):
if np.random.uniform() > 0.5:
toss = 1 # 1 represents HEADS
else:
toss = 0 # 0 represents TAILS
return toss
def inst_reward(self):
reward_ = 0
if ((self.coin_toss() == 1) and (self.state == 0)):
reward_ = 2**(self.iteration+1)
else:
self.state = 1
self.iteration+=1
return reward_
def play_game(self):
for i in range(self.N):
self.rewards[i] = self.inst_reward()
self.total_reward = np.sum(self.rewards)
N = int(input("Enter the number of tosses you wish to play: "))
st_petersberg_exp = Saint_Petersberg(N)
st_petersberg_exp.play_game()
print(st_petersberg_exp)
for _ in range(20):
st_petersberg_exp = Saint_Petersberg(N)
st_petersberg_exp.play_game()
print(st_petersberg_exp)
###Output
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [ 2. 4. 8. 16. 32.]
Total Reward : 62.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [2. 0. 0. 0. 0.]
Total Reward : 2.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
Instantaneous Rewards : [ 2. 4. 8. 16. 0.]
Total Reward : 30.0
Instantaneous Rewards : [0. 0. 0. 0. 0.]
Total Reward : 0.0
|
experiments/.ipynb_checkpoints/CIFAR10-checkpoint.ipynb | ###Markdown
model parameters
###Code
%load_ext autoreload
%autoreload 2
import torch
import torch.nn as nn
from fuzzytools.datascience.grid_search import GDIter, GridSeacher
from baseline_models import MLPClassifier, CNN2DClassifier
mdl_params = {
#'mdl_class':MLPClassifier,
'mdl_class':CNN2DClassifier,
'mdl_kwargs':{
'dropout':0.5,
#'dropout':0.0,
'cnn_features':[16, 32, 64],
#'cnn_features':[16, 32],
'uses_mlp_classifier':True,
#'uses_mlp_classifier':False,
},
}
gs = GridSeacher(mdl_params)
print(gs)
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
(0) - {'mdl_class': <class 'baseline_models.CNN2DClassifier'>, 'mdl_kwargs': {'dropout': 0.5, 'cnn_features': [16, 32, 64], 'uses_mlp_classifier': True}}
────────────────────────────────────────────────────────────────────────────────────────────────────
###Markdown
training
###Code
%load_ext autoreload
%autoreload 2
import os
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # see issue #152
os.environ['CUDA_VISIBLE_DEVICES'] = '' # CPU
### LOSS
from fuzzytorch.losses import XEntropy
loss_kwargs = {
'model_output_is_with_softmax':False,
'target_is_onehot':False,
}
loss = XEntropy('xentropy', **loss_kwargs)
### METRICS
from fuzzytorch.metrics import DummyAccuracy, Accuracy
metrics = [
Accuracy('accuracy', balanced=False, **loss_kwargs),
Accuracy('b-accuracy', balanced=True, **loss_kwargs),
DummyAccuracy('dummy-accuracy', **loss_kwargs),
]
### GET MODEL
model = mdl_params['mdl_class'](**mdl_params['mdl_kwargs'])
### OPTIMIZER
import torch.optim as optims
from fuzzytorch.optimizers import LossOptimizer
optimizer_kwargs = {
'opt_kwargs':{
'lr':1e-3,
},
'decay_kwargs':{
'lr':0.9,
}
}
optimizer = LossOptimizer(model, optims.Adam, **optimizer_kwargs)
### MONITORS
from fuzzytools.prints import print_bar
from fuzzytorch.handlers import ModelTrainHandler
from fuzzytorch.monitors import LossMonitor
from fuzzytorch import C_
monitor_config = {
'val_epoch_counter_duration':0, # every k epochs check
#'val_epoch_counter_duration':2, # every k epochs check
#'earlystop_epoch_duration':1e2,
#'save_mode':C_.SM_NO_SAVE,
#'save_mode':C_.SM_ALL,
#'save_mode':C_.SM_ONLY_ALL,
#'save_mode':C_.SM_ONLY_INF_METRIC,
'save_mode':C_.SM_ONLY_INF_LOSS,
#'save_mode':C_.SM_ONLY_SUP_METRIC,
}
loss_monitors = LossMonitor(loss, optimizer, metrics, **monitor_config)
### TRAIN
mtrain_config = {
'id':0,
'epochs_max':1e3,
'save_rootdir':'../save',
}
model_train_handler = ModelTrainHandler(model, loss_monitors, **mtrain_config)
model_train_handler.build_gpu(gpu_index=None)
print(model_train_handler)
model_train_handler.fit_loader(train_loader_mnist, val_loader_mnist)
%load_ext autoreload
%autoreload 2
# loss_df opt_df loss_df_epoch metrics_df_epoch
loss_monitors.get_time_util_convergence()
# loss_df opt_df loss_df_epoch metrics_df_epoch
loss_monitors.get_save_dict()['opt_df']
# loss_df opt_df loss_df_epoch metrics_df_epoch
loss_monitors.get_save_dict()['loss_df_epoch']
# loss_df opt_df loss_df_epoch metrics_df_epoch
loss_monitors.get_save_dict()['metrics_df_epoch']
%load_ext autoreload
%autoreload 2
from fuzzytools.counters import Counter
d = {
'val_epoch_counter_duration':1,
'earlystop_epoch_duration':5,
}
c = Counter(d)
for _ in range(50):
print(c, c.check('earlystop_epoch_duration'))
c.update()
%load_ext autoreload
%autoreload 2
import flamingChoripan.tinyFlame.plots as tfplots
### training plots
fig, ax = tfplots.plot_trainloss(train_handler)
fig, ax = tfplots.plot_evaluation_loss(train_handler)
fig, ax = tfplots.plot_evaluation_metrics(train_handler)
#fig, ax = tfplots.plot_optimizer(train_handler, save_dir=mtrain_config['images_save_dir'])
plt.show()
###Output
_____no_output_____ |
coursera_ai/week4/a3_m3_yolo2.ipynb | ###Markdown
This example illustrates the YOLO object detection algorithm. This notebook is a stripped-down version of Huynh Ngoc Anh's original: the training part has been removed, so only scoring (inference) is possible here. You can find the original notebook at https://github.com/experiencor/basic-yolo-keras/blob/master/Yolo%20Step-by-Step.ipynb Initialization
###Code
from keras.models import Sequential, Model
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD, Adam, RMSprop
from keras.layers.merge import concatenate
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
import imgaug as ia
from tqdm import tqdm
from imgaug import augmenters as iaa
import numpy as np
import pickle
import os, cv2
from preprocessing import parse_annotation, BatchGenerator
from utils import WeightReader, decode_netout, draw_boxes, normalize
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
%matplotlib inline
LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
IMAGE_H, IMAGE_W = 416, 416
GRID_H, GRID_W = 13 , 13
BOX = 5
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
OBJ_THRESHOLD = 0.3#0.5
NMS_THRESHOLD = 0.3#0.45
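# 5 anchor-box priors, listed as consecutive (width, height) pairs in units of the 13x13 grid cells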
ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
NO_OBJECT_SCALE = 1.0
OBJECT_SCALE = 5.0
COORD_SCALE = 1.0
CLASS_SCALE = 1.0
BATCH_SIZE = 16
WARM_UP_BATCHES = 0
TRUE_BOX_BUFFER = 50
###Output
_____no_output_____
###Markdown
Get the weights for a pre-trained model from the authors of the YOLO paper
###Code
!rm yolo.weights
!wget https://pjreddie.com/media/files/yolo.weights
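# yolo.weights is the Darknet-format binary released by the YOLO authors; WeightReader (from utils) parses it below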
wt_path = 'yolo.weights'
###Output
_____no_output_____
###Markdown
Construct the network
###Code
# the function to implement the organization layer (thanks to github.com/allanzelener/YAD2K)
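# With block_size=2, space_to_depth turns an (N, H, W, C) tensor into (N, H/2, W/2, 4*C), packing each
# 2x2 spatial block into the channel dimension; this is what lets the 26x26 skip connection be
# concatenated with the 13x13 feature map further down.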
def space_to_depth_x2(x):
return tf.space_to_depth(x, block_size=2)
input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4))
# Layer 1
x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2
x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)
x = BatchNormalization(name='norm_2')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 3
x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x)
x = BatchNormalization(name='norm_3')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 4
x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x)
x = BatchNormalization(name='norm_4')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 5
x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x)
x = BatchNormalization(name='norm_5')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
x = BatchNormalization(name='norm_6')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 7
x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x)
x = BatchNormalization(name='norm_7')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x)
x = BatchNormalization(name='norm_8')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 9
x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x)
x = BatchNormalization(name='norm_9')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 10
x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x)
x = BatchNormalization(name='norm_10')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 11
x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x)
x = BatchNormalization(name='norm_11')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 12
x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x)
x = BatchNormalization(name='norm_12')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 13
x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x)
x = BatchNormalization(name='norm_13')(x)
x = LeakyReLU(alpha=0.1)(x)
skip_connection = x
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 14
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x)
x = BatchNormalization(name='norm_14')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 15
x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x)
x = BatchNormalization(name='norm_15')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 16
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x)
x = BatchNormalization(name='norm_16')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 17
x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x)
x = BatchNormalization(name='norm_17')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 18
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x)
x = BatchNormalization(name='norm_18')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 19
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x)
x = BatchNormalization(name='norm_19')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 20
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x)
x = BatchNormalization(name='norm_20')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 21
skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection)
skip_connection = BatchNormalization(name='norm_21')(skip_connection)
skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
skip_connection = Lambda(space_to_depth_x2)(skip_connection)
x = concatenate([skip_connection, x])
# Layer 22
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x)
x = BatchNormalization(name='norm_22')(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 23
x = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x)
output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
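# Each of the 13x13 grid cells predicts BOX=5 anchors, and every anchor carries 4 box coordinates,
# 1 objectness score and CLASS class scores -- hence the (GRID_H, GRID_W, BOX, 4 + 1 + CLASS) shape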
# small hack to allow true_boxes to be registered when Keras build the model
# for more information: https://github.com/fchollet/keras/issues/2790
output = Lambda(lambda args: args[0])([output, true_boxes])
model = Model([input_image, true_boxes], output)
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_3 (InputLayer) (None, 416, 416, 3) 0
__________________________________________________________________________________________________
conv_1 (Conv2D) (None, 416, 416, 32) 864 input_3[0][0]
__________________________________________________________________________________________________
norm_1 (BatchNormalization) (None, 416, 416, 32) 128 conv_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_22 (LeakyReLU) (None, 416, 416, 32) 0 norm_1[0][0]
__________________________________________________________________________________________________
max_pooling2d_6 (MaxPooling2D) (None, 208, 208, 32) 0 leaky_re_lu_22[0][0]
__________________________________________________________________________________________________
conv_2 (Conv2D) (None, 208, 208, 64) 18432 max_pooling2d_6[0][0]
__________________________________________________________________________________________________
norm_2 (BatchNormalization) (None, 208, 208, 64) 256 conv_2[0][0]
__________________________________________________________________________________________________
leaky_re_lu_23 (LeakyReLU) (None, 208, 208, 64) 0 norm_2[0][0]
__________________________________________________________________________________________________
max_pooling2d_7 (MaxPooling2D) (None, 104, 104, 64) 0 leaky_re_lu_23[0][0]
__________________________________________________________________________________________________
conv_3 (Conv2D) (None, 104, 104, 128 73728 max_pooling2d_7[0][0]
__________________________________________________________________________________________________
norm_3 (BatchNormalization) (None, 104, 104, 128 512 conv_3[0][0]
__________________________________________________________________________________________________
leaky_re_lu_24 (LeakyReLU) (None, 104, 104, 128 0 norm_3[0][0]
__________________________________________________________________________________________________
conv_4 (Conv2D) (None, 104, 104, 64) 8192 leaky_re_lu_24[0][0]
__________________________________________________________________________________________________
norm_4 (BatchNormalization) (None, 104, 104, 64) 256 conv_4[0][0]
__________________________________________________________________________________________________
leaky_re_lu_25 (LeakyReLU) (None, 104, 104, 64) 0 norm_4[0][0]
__________________________________________________________________________________________________
conv_5 (Conv2D) (None, 104, 104, 128 73728 leaky_re_lu_25[0][0]
__________________________________________________________________________________________________
norm_5 (BatchNormalization) (None, 104, 104, 128 512 conv_5[0][0]
__________________________________________________________________________________________________
leaky_re_lu_26 (LeakyReLU) (None, 104, 104, 128 0 norm_5[0][0]
__________________________________________________________________________________________________
max_pooling2d_8 (MaxPooling2D) (None, 52, 52, 128) 0 leaky_re_lu_26[0][0]
__________________________________________________________________________________________________
conv_6 (Conv2D) (None, 52, 52, 256) 294912 max_pooling2d_8[0][0]
__________________________________________________________________________________________________
norm_6 (BatchNormalization) (None, 52, 52, 256) 1024 conv_6[0][0]
__________________________________________________________________________________________________
leaky_re_lu_27 (LeakyReLU) (None, 52, 52, 256) 0 norm_6[0][0]
__________________________________________________________________________________________________
conv_7 (Conv2D) (None, 52, 52, 128) 32768 leaky_re_lu_27[0][0]
__________________________________________________________________________________________________
norm_7 (BatchNormalization) (None, 52, 52, 128) 512 conv_7[0][0]
__________________________________________________________________________________________________
leaky_re_lu_28 (LeakyReLU) (None, 52, 52, 128) 0 norm_7[0][0]
__________________________________________________________________________________________________
conv_8 (Conv2D) (None, 52, 52, 256) 294912 leaky_re_lu_28[0][0]
__________________________________________________________________________________________________
norm_8 (BatchNormalization) (None, 52, 52, 256) 1024 conv_8[0][0]
__________________________________________________________________________________________________
leaky_re_lu_29 (LeakyReLU) (None, 52, 52, 256) 0 norm_8[0][0]
__________________________________________________________________________________________________
max_pooling2d_9 (MaxPooling2D) (None, 26, 26, 256) 0 leaky_re_lu_29[0][0]
__________________________________________________________________________________________________
conv_9 (Conv2D) (None, 26, 26, 512) 1179648 max_pooling2d_9[0][0]
__________________________________________________________________________________________________
norm_9 (BatchNormalization) (None, 26, 26, 512) 2048 conv_9[0][0]
__________________________________________________________________________________________________
leaky_re_lu_30 (LeakyReLU) (None, 26, 26, 512) 0 norm_9[0][0]
__________________________________________________________________________________________________
conv_10 (Conv2D) (None, 26, 26, 256) 131072 leaky_re_lu_30[0][0]
__________________________________________________________________________________________________
norm_10 (BatchNormalization) (None, 26, 26, 256) 1024 conv_10[0][0]
__________________________________________________________________________________________________
leaky_re_lu_31 (LeakyReLU) (None, 26, 26, 256) 0 norm_10[0][0]
__________________________________________________________________________________________________
conv_11 (Conv2D) (None, 26, 26, 512) 1179648 leaky_re_lu_31[0][0]
__________________________________________________________________________________________________
norm_11 (BatchNormalization) (None, 26, 26, 512) 2048 conv_11[0][0]
__________________________________________________________________________________________________
leaky_re_lu_32 (LeakyReLU) (None, 26, 26, 512) 0 norm_11[0][0]
__________________________________________________________________________________________________
conv_12 (Conv2D) (None, 26, 26, 256) 131072 leaky_re_lu_32[0][0]
__________________________________________________________________________________________________
norm_12 (BatchNormalization) (None, 26, 26, 256) 1024 conv_12[0][0]
__________________________________________________________________________________________________
leaky_re_lu_33 (LeakyReLU) (None, 26, 26, 256) 0 norm_12[0][0]
__________________________________________________________________________________________________
conv_13 (Conv2D) (None, 26, 26, 512) 1179648 leaky_re_lu_33[0][0]
__________________________________________________________________________________________________
norm_13 (BatchNormalization) (None, 26, 26, 512) 2048 conv_13[0][0]
__________________________________________________________________________________________________
leaky_re_lu_34 (LeakyReLU) (None, 26, 26, 512) 0 norm_13[0][0]
__________________________________________________________________________________________________
max_pooling2d_10 (MaxPooling2D) (None, 13, 13, 512) 0 leaky_re_lu_34[0][0]
__________________________________________________________________________________________________
conv_14 (Conv2D) (None, 13, 13, 1024) 4718592 max_pooling2d_10[0][0]
__________________________________________________________________________________________________
norm_14 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_14[0][0]
__________________________________________________________________________________________________
leaky_re_lu_35 (LeakyReLU) (None, 13, 13, 1024) 0 norm_14[0][0]
__________________________________________________________________________________________________
conv_15 (Conv2D) (None, 13, 13, 512) 524288 leaky_re_lu_35[0][0]
__________________________________________________________________________________________________
norm_15 (BatchNormalization) (None, 13, 13, 512) 2048 conv_15[0][0]
__________________________________________________________________________________________________
leaky_re_lu_36 (LeakyReLU) (None, 13, 13, 512) 0 norm_15[0][0]
__________________________________________________________________________________________________
conv_16 (Conv2D) (None, 13, 13, 1024) 4718592 leaky_re_lu_36[0][0]
__________________________________________________________________________________________________
norm_16 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_16[0][0]
__________________________________________________________________________________________________
leaky_re_lu_37 (LeakyReLU) (None, 13, 13, 1024) 0 norm_16[0][0]
__________________________________________________________________________________________________
conv_17 (Conv2D) (None, 13, 13, 512) 524288 leaky_re_lu_37[0][0]
__________________________________________________________________________________________________
norm_17 (BatchNormalization) (None, 13, 13, 512) 2048 conv_17[0][0]
__________________________________________________________________________________________________
leaky_re_lu_38 (LeakyReLU) (None, 13, 13, 512) 0 norm_17[0][0]
__________________________________________________________________________________________________
conv_18 (Conv2D) (None, 13, 13, 1024) 4718592 leaky_re_lu_38[0][0]
__________________________________________________________________________________________________
norm_18 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_18[0][0]
__________________________________________________________________________________________________
leaky_re_lu_39 (LeakyReLU) (None, 13, 13, 1024) 0 norm_18[0][0]
__________________________________________________________________________________________________
conv_19 (Conv2D) (None, 13, 13, 1024) 9437184 leaky_re_lu_39[0][0]
__________________________________________________________________________________________________
norm_19 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_19[0][0]
__________________________________________________________________________________________________
conv_21 (Conv2D) (None, 26, 26, 64) 32768 leaky_re_lu_34[0][0]
__________________________________________________________________________________________________
leaky_re_lu_40 (LeakyReLU) (None, 13, 13, 1024) 0 norm_19[0][0]
__________________________________________________________________________________________________
norm_21 (BatchNormalization) (None, 26, 26, 64) 256 conv_21[0][0]
__________________________________________________________________________________________________
conv_20 (Conv2D) (None, 13, 13, 1024) 9437184 leaky_re_lu_40[0][0]
__________________________________________________________________________________________________
leaky_re_lu_42 (LeakyReLU) (None, 26, 26, 64) 0 norm_21[0][0]
__________________________________________________________________________________________________
norm_20 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_20[0][0]
__________________________________________________________________________________________________
lambda_1 (Lambda) (None, 13, 13, 256) 0 leaky_re_lu_42[0][0]
__________________________________________________________________________________________________
leaky_re_lu_41 (LeakyReLU) (None, 13, 13, 1024) 0 norm_20[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 13, 13, 1280) 0 lambda_1[0][0]
leaky_re_lu_41[0][0]
__________________________________________________________________________________________________
conv_22 (Conv2D) (None, 13, 13, 1024) 11796480 concatenate_1[0][0]
__________________________________________________________________________________________________
norm_22 (BatchNormalization) (None, 13, 13, 1024) 4096 conv_22[0][0]
__________________________________________________________________________________________________
leaky_re_lu_43 (LeakyReLU) (None, 13, 13, 1024) 0 norm_22[0][0]
__________________________________________________________________________________________________
conv_23 (Conv2D) (None, 13, 13, 425) 435625 leaky_re_lu_43[0][0]
__________________________________________________________________________________________________
reshape_1 (Reshape) (None, 13, 13, 5, 85 0 conv_23[0][0]
__________________________________________________________________________________________________
input_4 (InputLayer) (None, 1, 1, 1, 50, 0
__________________________________________________________________________________________________
lambda_2 (Lambda) (None, 13, 13, 5, 85 0 reshape_1[0][0]
input_4[0][0]
==================================================================================================
Total params: 50,983,561
Trainable params: 50,962,889
Non-trainable params: 20,672
__________________________________________________________________________________________________
###Markdown
Load pretrained weights **Load the weights originally provided by YOLO**
###Code
weight_reader = WeightReader(wt_path)
weight_reader.reset()
nb_conv = 23
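# conv_1 ... conv_22 are each followed by batch normalization; the final conv_23 has a bias instead,
# which is why the loop below handles the last layer differently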
for i in range(1, nb_conv+1):
conv_layer = model.get_layer('conv_' + str(i))
if i < nb_conv:
norm_layer = model.get_layer('norm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = weight_reader.read_bytes(size)
gamma = weight_reader.read_bytes(size)
mean = weight_reader.read_bytes(size)
var = weight_reader.read_bytes(size)
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel, bias])
else:
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2,3,1,0])
conv_layer.set_weights([kernel])
###Output
_____no_output_____
###Markdown
Perform detection on an image
###Code
dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))
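# true_boxes is only needed by the training-time loss; for inference we simply feed zeros of the right shape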
!rm africa-blog-posts-4.jpg
!wget http://www.lukeburrage.com/travelpodcast/africa-blog-posts/africa-blog-posts-4.jpg
image = cv2.imread('africa-blog-posts-4.jpg')
plt.figure(figsize=(10,10))
input_image = cv2.resize(image, (416, 416))    # resize to the network's 416x416 input size
input_image = input_image / 255.               # scale pixel values to [0, 1]
input_image = input_image[:,:,::-1]            # convert OpenCV's BGR channel order to RGB
input_image = np.expand_dims(input_image, 0)   # add a batch dimension
netout = model.predict([input_image, dummy_array])
boxes = decode_netout(netout[0],
obj_threshold=OBJ_THRESHOLD,
nms_threshold=NMS_THRESHOLD,
anchors=ANCHORS,
nb_class=CLASS)
image = draw_boxes(image, boxes, labels=LABELS)
plt.imshow(image[:,:,::-1]); plt.show()
###Output
_____no_output_____ |
深度学习/d2l-zh-1.1/chapter_convolutional-neural-networks/lenet.ipynb | ###Markdown
卷积神经网络(LeNet)在[“多层感知机的从零开始实现”](../chapter_deep-learning-basics/mlp-scratch.ipynb)一节里我们构造了一个含单隐藏层的多层感知机模型来对Fashion-MNIST数据集中的图像进行分类。每张图像高和宽均是28像素。我们将图像中的像素逐行展开,得到长度为784的向量,并输入进全连接层中。然而,这种分类方法有一定的局限性。1. 图像在同一列邻近的像素在这个向量中可能相距较远。它们构成的模式可能难以被模型识别。2. 对于大尺寸的输入图像,使用全连接层容易导致模型过大。假设输入是高和宽均为$1,000$像素的彩色照片(含3个通道)。即使全连接层输出个数仍是256,该层权重参数的形状也是$3,000,000\times 256$:它占用了大约3 GB的内存或显存。这会带来过于复杂的模型和过高的存储开销。卷积层尝试解决这两个问题。一方面,卷积层保留输入形状,使图像的像素在高和宽两个方向上的相关性均可能被有效识别;另一方面,卷积层通过滑动窗口将同一卷积核与不同位置的输入重复计算,从而避免参数尺寸过大。卷积神经网络就是含卷积层的网络。本节里我们将介绍一个早期用来识别手写数字图像的卷积神经网络:LeNet [1]。这个名字来源于LeNet论文的第一作者Yann LeCun。LeNet展示了通过梯度下降训练卷积神经网络可以达到手写数字识别在当时最先进的结果。这个奠基性的工作第一次将卷积神经网络推上舞台,为世人所知。 LeNet模型LeNet分为卷积层块和全连接层块两个部分。下面我们分别介绍这两个模块。卷积层块里的基本单位是卷积层后接最大池化层:卷积层用来识别图像里的空间模式,如线条和物体局部,之后的最大池化层则用来降低卷积层对位置的敏感性。卷积层块由两个这样的基本单位重复堆叠构成。在卷积层块中,每个卷积层都使用$5\times 5$的窗口,并在输出上使用sigmoid激活函数。第一个卷积层输出通道数为6,第二个卷积层输出通道数则增加到16。这是因为第二个卷积层比第一个卷积层的输入的高和宽要小,所以增加输出通道使两个卷积层的参数尺寸类似。卷积层块的两个最大池化层的窗口形状均为$2\times 2$,且步幅为2。由于池化窗口与步幅形状相同,池化窗口在输入上每次滑动所覆盖的区域互不重叠。卷积层块的输出形状为(批量大小, 通道, 高, 宽)。当卷积层块的输出传入全连接层块时,全连接层块会将小批量中每个样本变平(flatten)。也就是说,全连接层的输入形状将变成二维,其中第一维是小批量中的样本,第二维是每个样本变平后的向量表示,且向量长度为通道、高和宽的乘积。全连接层块含3个全连接层。它们的输出个数分别是120、84和10,其中10为输出的类别个数。下面我们通过`Sequential`类来实现LeNet模型。
###Code
import d2lzh as d2l
import mxnet as mx
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import loss as gloss, nn
import time
net = nn.Sequential()
net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'),
nn.MaxPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
nn.MaxPool2D(pool_size=2, strides=2),
# Dense会默认将(批量大小, 通道, 高, 宽)形状的输入转换成
# (批量大小, 通道 * 高 * 宽)形状的输入
nn.Dense(120, activation='sigmoid'),
nn.Dense(84, activation='sigmoid'),
nn.Dense(10))
###Output
_____no_output_____
###Markdown
接下来我们构造一个高和宽均为28的单通道数据样本,并逐层进行前向计算来查看每个层的输出形状。
###Code
X = nd.random.uniform(shape=(1, 1, 28, 28))
net.initialize()
for layer in net:
X = layer(X)
print(layer.name, 'output shape:\t', X.shape)
###Output
conv0 output shape: (1, 6, 24, 24)
pool0 output shape: (1, 6, 12, 12)
conv1 output shape: (1, 16, 8, 8)
pool1 output shape: (1, 16, 4, 4)
dense0 output shape: (1, 120)
dense1 output shape: (1, 84)
dense2 output shape: (1, 10)
###Markdown
可以看到,在卷积层块中输入的高和宽在逐层减小。卷积层由于使用高和宽均为5的卷积核,从而将高和宽分别减小4,而池化层则将高和宽减半,但通道数则从1增加到16。全连接层则逐层减少输出个数,直到变成图像的类别数10。 获取数据和训练模型下面我们来实验LeNet模型。实验中,我们仍然使用Fashion-MNIST作为训练数据集。
###Code
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
###Output
_____no_output_____
###Markdown
因为卷积神经网络计算比多层感知机要复杂,建议使用GPU来加速计算。我们尝试在`gpu(0)`上创建`NDArray`,如果成功则使用`gpu(0)`,否则仍然使用CPU。
###Code
def try_gpu(): # 本函数已保存在d2lzh包中方便以后使用
try:
ctx = mx.gpu()
_ = nd.zeros((1,), ctx=ctx)
except mx.base.MXNetError:
ctx = mx.cpu()
return ctx
ctx = try_gpu()
ctx
###Output
_____no_output_____
###Markdown
相应地,我们对[“softmax回归的从零开始实现”](../chapter_deep-learning-basics/softmax-regression-scratch.ipynb)一节中描述的`evaluate_accuracy`函数略作修改。由于数据刚开始存在CPU使用的内存上,当`ctx`变量代表GPU及相应的显存时,我们通过[“GPU计算”](../chapter_deep-learning-computation/use-gpu.ipynb)一节中介绍的`as_in_context`函数将数据复制到显存上,例如`gpu(0)`。
###Code
# 本函数已保存在d2lzh包中方便以后使用。该函数将被逐步改进:它的完整实现将在“图像增广”一节中
# 描述
def evaluate_accuracy(data_iter, net, ctx):
acc_sum, n = nd.array([0], ctx=ctx), 0
for X, y in data_iter:
# 如果ctx代表GPU及相应的显存,将数据复制到显存上
X, y = X.as_in_context(ctx), y.as_in_context(ctx).astype('float32')
acc_sum += (net(X).argmax(axis=1) == y).sum()
n += y.size
return acc_sum.asscalar() / n
###Output
_____no_output_____
###Markdown
我们同样对[“softmax回归的从零开始实现”](../chapter_deep-learning-basics/softmax-regression-scratch.ipynb)一节中定义的`train_ch3`函数略作修改,确保计算使用的数据和模型同在内存或显存上。
###Code
# 本函数已保存在d2lzh包中方便以后使用
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
num_epochs):
print('training on', ctx)
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
for X, y in train_iter:
X, y = X.as_in_context(ctx), y.as_in_context(ctx)
with autograd.record():
y_hat = net(X)
l = loss(y_hat, y).sum()
l.backward()
trainer.step(batch_size)
y = y.astype('float32')
train_l_sum += l.asscalar()
train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
n += y.size
test_acc = evaluate_accuracy(test_iter, net, ctx)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
'time %.1f sec'
% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc,
time.time() - start))
###Output
_____no_output_____
###Markdown
我们重新将模型参数初始化到设备变量`ctx`之上,并使用Xavier随机初始化。损失函数和训练算法则依然使用交叉熵损失函数和小批量随机梯度下降。
###Code
lr, num_epochs = 0.9, 5
net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
###Output
training on gpu(0)
|
site/ko/r2/tutorials/keras/overfit_and_underfit.ipynb | ###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
과대적합과 과소적합 TensorFlow.org에서 보기 구글 코랩(Colab)에서 실행하기 깃허브(GitHub) 소스 보기 Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.이 번역에 개선할 부분이 있다면[tensorflow/docs](https://github.com/tensorflow/docs) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.문서 번역이나 리뷰에 지원하려면 [이 양식](https://bit.ly/tf-translate)을작성하거나[[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs)로메일을 보내주시기 바랍니다. 지금까지 그랬듯이 이 예제의 코드도 `tf.keras` API를 사용합니다. 텐서플로 [케라스 가이드](https://www.tensorflow.org/guide/keras)에서 `tf.keras` API에 대해 더 많은 정보를 얻을 수 있습니다.앞서 영화 리뷰 분류와 주택 가격 예측의 두 예제에서 일정 에포크 동안 훈련하면 검증 세트에서 모델 성능이 최고점에 도달한 다음 감소하기 시작한 것을 보았습니다.다른 말로 하면, 모델이 훈련 세트에 *과대적합*(overfitting)된 것입니다. 과대적합을 다루는 방법은 꼭 배워야 합니다. *훈련 세트*에서 높은 성능을 얻을 수 있지만 진짜 원하는 것은 *테스트 세트*(또는 이전에 본 적 없는 데이터)에 잘 일반화되는 모델입니다. 과대적합의 반대는 *과소적합*(underfitting)입니다. 과소적합은 테스트 세트의 성능이 향상될 여지가 아직 있을 때 일어납니다. 발생하는 원인은 여러가지입니다. 모델이 너무 단순하거나, 규제가 너무 많거나, 그냥 단순히 충분히 오래 훈련하지 않는 경우입니다. 즉 네트워크가 훈련 세트에서 적절한 패턴을 학습하지 못했다는 뜻입니다.모델을 너무 오래 훈련하면 과대적합되기 시작하고 테스트 세트에서 일반화되지 못하는 패턴을 훈련 세트에서 학습합니다. 과대적합과 과소적합 사이에서 균형을 잡아야 합니다. 이를 위해 적절한 에포크 횟수동안 모델을 훈련하는 방법을 배워보겠습니다.과대적합을 막는 가장 좋은 방법은 더 많은 훈련 데이터를 사용하는 것입니다. 많은 데이터에서 훈련한 모델은 자연적으로 일반화 성능이 더 좋습니다. 데이터를 더 준비할 수 없을 때 그다음으로 가장 좋은 방법은 규제(regularization)와 같은 기법을 사용하는 것입니다. 모델이 저장할 수 있는 정보의 양과 종류에 제약을 부과하는 방법입니다. 네트워크가 소수의 패턴만 기억할 수 있다면 최적화 과정 동안 일반화 가능성이 높은 가장 중요한 패턴에 촛점을 맞출 것입니다.이 노트북에서 널리 사용되는 두 가지 규제 기법인 가중치 규제와 드롭아웃(dropout)을 알아 보겠습니다. 이런 기법을 사용하여 IMDB 영화 리뷰 분류 모델의 성능을 향상시켜 보죠.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
###Output
_____no_output_____
###Markdown
IMDB 데이터셋 다운로드이전 노트북에서처럼 임베딩을 사용하지 않고 여기에서는 문장을 멀티-핫 인코딩(multi-hot encoding)으로 변환하겠습니다. 이 모델은 훈련 세트에 빠르게 과대적합될 것입니다. 과대적합을 발생시키기고 어떻게 해결하는지 보이기 위해 선택했습니다.멀티-핫 인코딩은 정수 시퀀스를 0과 1로 이루어진 벡터로 변환합니다. 정확하게 말하면 시퀀스 `[3, 5]`를 인덱스 3과 5만 1이고 나머지는 모두 0인 10,000 차원 벡터로 변환한다는 의미입니다.
###Code
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
def multi_hot_sequences(sequences, dimension):
# 0으로 채워진 (len(sequences), dimension) 크기의 행렬을 만듭니다
results = np.zeros((len(sequences), dimension))
for i, word_indices in enumerate(sequences):
results[i, word_indices] = 1.0 # results[i]의 특정 인덱스만 1로 설정합니다
return results
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
###Output
_____no_output_____
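###Markdown
A tiny check of the encoding defined above (a minimal sketch; `dimension=10` is used only to keep the output readable): the sequence `[3, 5]` becomes a vector with ones at indices 3 and 5.
###Code
multi_hot_sequences([[3, 5]], dimension=10)
###Output
_____no_output_____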
###Markdown
만들어진 멀티-핫 벡터 중 하나를 살펴 보죠. 단어 인덱스는 빈도 순으로 정렬되어 있습니다. 그래프에서 볼 수 있듯이 인덱스 0에 가까울수록 1이 많이 등장합니다:
###Code
plt.plot(train_data[0])
###Output
_____no_output_____
###Markdown
과대적합 예제과대적합을 막는 가장 간단한 방법은 모델의 규모를 축소하는 것입니다. 즉, 모델에 있는 학습 가능한 파라미터의 수를 줄입니다(모델 파라미터는 층(layer)의 개수와 층의 유닛(unit) 개수에 의해 결정됩니다). 딥러닝에서는 모델의 학습 가능한 파라미터의 수를 종종 모델의 "용량"이라고 말합니다. 직관적으로 생각해 보면 많은 파라미터를 가진 모델이 더 많은 "기억 용량"을 가집니다. 이런 모델은 훈련 샘플과 타깃 사이를 일반화 능력이 없는 딕셔너리와 같은 매핑으로 완벽하게 학습할 수 있습니다. 하지만 이전에 본 적 없는 데이터에서 예측을 할 땐 쓸모가 없을 것입니다.항상 기억해야 할 점은 딥러닝 모델이 훈련 세트에는 학습이 잘 되는 경향이 있지만 진짜 해결할 문제는 학습이 아니라 일반화라는 것입니다.반면에 네트워크의 기억 용량이 부족하다면 이런 매핑을 쉽게 학습할 수 없을 것입니다. 손실을 최소화하기 위해서는 예측 성능이 더 많은 압축된 표현을 학습해야 합니다. 또한 너무 작은 모델을 만들면 훈련 데이터를 학습하기 어렵울 것입니다. "너무 많은 용량"과 "충분하지 않은 용량" 사이의 균형을 잡아야 합니다.안타깝지만 어떤 모델의 (층의 개수나 뉴런 개수에 해당하는) 적절한 크기나 구조를 결정하는 마법같은 공식은 없습니다. 여러 가지 다른 구조를 사용해 실험을 해봐야만 합니다.알맞은 모델의 크기를 찾으려면 비교적 적은 수의 층과 파라미터로 시작해서 검증 손실이 감소할 때까지 새로운 층을 추가하거나 층의 크기를 늘리는 것이 좋습니다. 영화 리뷰 분류 네트워크를 사용해 이를 실험해 보죠.```Dense``` 층만 사용하는 간단한 기준 모델을 만들고 작은 규모의 버전와 큰 버전의 모델을 만들어 비교하겠습니다. 기준 모델 만들기
###Code
baseline_model = keras.Sequential([
# `.summary` 메서드 때문에 `input_shape`가 필요합니다
keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
baseline_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
baseline_history = baseline_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
작은 모델 만들기 앞서 만든 기준 모델과 비교하기 위해 적은 수의 은닉 유닛을 가진 모델을 만들어 보죠:
###Code
smaller_model = keras.Sequential([
keras.layers.Dense(4, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(4, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
smaller_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
smaller_model.summary()
###Output
_____no_output_____
###Markdown
같은 데이터를 사용해 이 모델을 훈련합니다:
###Code
smaller_history = smaller_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
큰 모델 만들기아주 큰 모델을 만들어 얼마나 빠르게 과대적합이 시작되는지 알아 볼 수 있습니다. 이 문제에 필요한 것보다 훨씬 더 큰 용량을 가진 네트워크를 추가해서 비교해 보죠:
###Code
bigger_model = keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(512, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
bigger_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
bigger_model.summary()
###Output
_____no_output_____
###Markdown
역시 같은 데이터를 사용해 모델을 훈련합니다:
###Code
bigger_history = bigger_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
훈련 손실과 검증 손실 그래프 그리기 실선은 훈련 손실이고 점선은 검증 손실입니다(낮은 검증 손실이 더 좋은 모델입니다). 여기서는 작은 네트워크가 기준 모델보다 더 늦게 과대적합이 시작되었습니다(즉 에포크 4가 아니라 6에서 시작됩니다). 또한 과대적합이 시작되고 훨씬 천천히 성능이 감소합니다.
###Code
def plot_history(histories, key='binary_crossentropy'):
plt.figure(figsize=(16,10))
for name, history in histories:
val = plt.plot(history.epoch, history.history['val_'+key],
'--', label=name.title()+' Val')
plt.plot(history.epoch, history.history[key], color=val[0].get_color(),
label=name.title()+' Train')
plt.xlabel('Epochs')
plt.ylabel(key.replace('_',' ').title())
plt.legend()
plt.xlim([0,max(history.epoch)])
plot_history([('baseline', baseline_history),
('smaller', smaller_history),
('bigger', bigger_history)])
###Output
_____no_output_____
###Markdown
큰 네트워크는 거의 바로 첫 번째 에포크 이후에 과대적합이 시작되고 훨씬 더 심각하게 과대적합됩니다. 네트워크의 용량이 많을수록 훈련 세트를 더 빠르게 모델링할 수 있습니다(훈련 손실이 낮아집니다). 하지만 더 쉽게 과대적합됩니다(훈련 손실과 검증 손실 사이에 큰 차이가 발생합니다). 과대적합을 방지하기 위한 전략 가중치를 규제하기 아마도 오캄의 면도날(Occam's Razor) 이론을 들어 보았을 것입니다. 어떤 것을 설명하는 두 가지 방법이 있다면 더 정확한 설명은 최소한의 가정이 필요한 가장 "간단한" 설명일 것입니다. 이는 신경망으로 학습되는 모델에도 적용됩니다. 훈련 데이터와 네트워크 구조가 주어졌을 때 이 데이터를 설명할 수 있는 가중치의 조합(즉, 가능한 모델)은 많습니다. 간단한 모델은 복잡한 것보다 과대적합되는 경향이 작을 것입니다.여기서 "간단한 모델"은 모델 파라미터의 분포를 봤을 때 엔트로피(entropy)가 작은 모델입니다(또는 앞 절에서 보았듯이 적은 파라미터를 가진 모델입니다). 따라서 과대적합을 완화시키는 일반적인 방법은 가중치가 작은 값을 가지도록 네트워크의 복잡도에 제약을 가하는 것입니다. 이는 가중치 값의 분포를 좀 더 균일하게 만들어 줍니다. 이를 "가중치 규제"(weight regularization)라고 부릅니다. 네트워크의 손실 함수에 큰 가중치에 해당하는 비용을 추가합니다. 이 비용은 두 가지 형태가 있습니다:* [L1 규제](https://developers.google.com/machine-learning/glossary/L1_regularization)는 가중치의 절댓값에 비례하는 비용이 추가됩니다(즉, 가중치의 "L1 노름(norm)"을 추가합니다).* [L2 규제](https://developers.google.com/machine-learning/glossary/L2_regularization)는 가중치의 제곱에 비례하는 비용이 추가됩니다(즉, 가중치의 "L2 노름"의 제곱을 추가합니다). 신경망에서는 L2 규제를 가중치 감쇠(weight decay)라고도 부릅니다. 이름이 다르지만 혼돈하지 마세요. 가중치 감쇠는 수학적으로 L2 규제와 동일합니다.L1 규제는 일부 가중치 파라미터를 0으로 만듭니다. L2 규제는 가중치 파라미터를 제한하지만 완전히 0으로 만들지는 않습니다. 이것이 L2 규제를 더 많이 사용하는 이유 중 하나입니다.`tf.keras`에서는 가중치 규제 객체를 층의 키워드 매개변수에 전달하여 가중치에 규제를 추가합니다. L2 가중치 규제를 추가해 보죠.
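As a small illustration of the L2 penalty term (a sketch using standard `tf.keras` APIs; the toy weight values are made up), the regularizer object is callable and returns `0.001 * sum(w**2)`:
###Code
w = tf.constant([[1.0, -2.0], [0.5, 0.0]])
reg = keras.regularizers.l2(0.001)
print(float(reg(w)))                                 # 0.001 * (1 + 4 + 0.25 + 0) = 0.00525
print(0.001 * float(tf.reduce_sum(tf.square(w))))    # same value, computed by hand
###Output
_____no_output_____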
###Code
l2_model = keras.models.Sequential([
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
l2_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
l2_model_history = l2_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
```l2(0.001)```는 네트워크의 전체 손실에 층에 있는 가중치 행렬의 모든 값이 ```0.001 * weight_coefficient_value**2```만큼 더해진다는 의미입니다. 이런 페널티(penalty)는 훈련할 때만 추가됩니다. 따라서 테스트 단계보다 훈련 단계에서 네트워크 손실이 훨씬 더 클 것입니다.L2 규제의 효과를 확인해 보죠:
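One way to inspect the penalty that gets added (a sketch; `model.losses` is the list of regularization terms Keras tracks for the model defined above):
###Code
print(l2_model.losses)                    # one entry per kernel_regularizer
print(float(tf.add_n(l2_model.losses)))   # their sum is what gets added to the training loss
###Output
_____no_output_____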
###Code
plot_history([('baseline', baseline_history),
('l2', l2_model_history)])
###Output
_____no_output_____
###Markdown
결과에서 보듯이 모델 파라미터의 개수는 같지만 L2 규제를 적용한 모델이 기본 모델보다 과대적합에 훨씬 잘 견디고 있습니다. 드롭아웃 추가하기드롭아웃(dropout)은 신경망에서 가장 효과적이고 널리 사용하는 규제 기법 중 하나입니다. 토론토(Toronto) 대학의 힌튼(Hinton)과 그의 제자들이 개발했습니다. 드롭아웃을 층에 적용하면 훈련하는 동안 층의 출력 특성을 랜덤하게 끕니다(즉, 0으로 만듭니다). 훈련하는 동안 어떤 입력 샘플에 대해 [0.2, 0.5, 1.3, 0.8, 1.1] 벡터를 출력하는 층이 있다고 가정해 보죠. 드롭아웃을 적용하면 이 벡터에서 몇 개의 원소가 랜덤하게 0이 됩니다. 예를 들면, [0, 0.5, 1.3, 0, 1.1]가 됩니다. "드롭아웃 비율"은 0이 되는 특성의 비율입니다. 보통 0.2에서 0.5 사이를 사용합니다. 테스트 단계에서는 어떤 유닛도 드롭아웃하지 않습니다. 훈련 단계보다 더 많은 유닛이 활성화되기 때문에 균형을 맞추기 위해 층의 출력 값을 드롭아웃 비율만큼 줄입니다.`tf.keras`에서는 `Dropout` 층을 이용해 네트워크에 드롭아웃을 추가할 수 있습니다. 이 층은 바로 이전 층의 출력에 드롭아웃을 적용합니다.IMDB 네트워크에 두 개의 `Dropout` 층을 추가하여 과대적합이 얼마나 감소하는지 알아 보겠습니다:
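A quick way to see this behaviour (a sketch; note that `tf.keras` uses inverted dropout, so surviving activations are scaled up by 1/(1 - rate) during training and passed through unchanged at inference):
###Code
drop = keras.layers.Dropout(0.5)
x = tf.ones((1, 10))
print(drop(x, training=True))    # roughly half the entries zeroed, the survivors scaled to 2.0
print(drop(x, training=False))   # identity at inference time
###Output
_____no_output_____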
###Code
dpt_model = keras.models.Sequential([
keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dropout(0.5),
keras.layers.Dense(16, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(1, activation='sigmoid')
])
dpt_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
dpt_model_history = dpt_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
plot_history([('baseline', baseline_history),
('dropout', dpt_model_history)])
###Output
_____no_output_____
###Markdown
Copyright 2018 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###Output
_____no_output_____
###Markdown
Overfitting and underfitting  View on TensorFlow.org · Run in Google Colab · View source on GitHub  Note: This document was translated by the TensorFlow community. Because community translations are best-effort, there is no guarantee that they exactly reflect the latest [official English documentation](https://www.tensorflow.org/?hl=en). If you would like to improve this translation, please send a pull request to the [tensorflow/docs](https://github.com/tensorflow/docs) GitHub repository. To volunteer to write or review translations, fill out [this form](https://bit.ly/tf-translate) or email [[email protected]](https://groups.google.com/a/tensorflow.org/forum/!forum/docs). As always, the code in this example uses the `tf.keras` API; you can learn more about it in the TensorFlow [Keras guide](https://www.tensorflow.org/guide/keras). In both of the previous examples (classifying movie reviews and predicting housing prices) we saw that the accuracy of the model on the validation set peaks after training for a number of epochs and then starts to decrease. In other words, the model *overfits* the training data. Learning how to deal with overfitting is essential: although it is often possible to reach high accuracy on the *training set*, what we really want is a model that generalizes well to a *test set* (data it has never seen before). The opposite of overfitting is *underfitting*, which occurs when there is still room for improvement on the test data. It can happen for several reasons: the model is too simple, it is over-regularized, or it simply has not been trained long enough. In short, the network has not learned the relevant patterns in the training data. Train for too long, however, and the model starts to overfit, learning patterns from the training data that do not generalize to the test data. We need to strike a balance between overfitting and underfitting; to do so, we will learn how to train the model for an appropriate number of epochs. The best way to prevent overfitting is to use more training data: a model trained on more data naturally generalizes better. When that is not possible, the next-best solution is to use techniques such as regularization, which constrain the quantity and type of information the model can store. If a network can only memorize a small number of patterns, the optimization process forces it to focus on the most prominent patterns, which have a better chance of generalizing well. In this notebook we explore two common regularization techniques, weight regularization and dropout, and use them to improve our IMDB movie review classification model.
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
!pip install tf-nightly-2.0-preview
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
###Output
_____no_output_____
###Markdown
Download the IMDB dataset Rather than using an embedding as in the previous notebook, here we will multi-hot encode the sentences. This model will quickly overfit the training set; it was chosen to demonstrate when overfitting happens and how to fight it. Multi-hot encoding converts the integer sequences into vectors of 0s and 1s. Concretely, this means turning the sequence `[3, 5]` into a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones.
###Code
NUM_WORDS = 10000
(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)
def multi_hot_sequences(sequences, dimension):
    # Create an all-zeros matrix of shape (len(sequences), dimension)
results = np.zeros((len(sequences), dimension))
for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0  # set only the specified indices of results[i] to 1
return results
train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
###Output
_____no_output_____
###Markdown
Let's look at one of the resulting multi-hot vectors. The word indices are sorted by frequency, so, as the plot shows, there are more 1-values near index zero:
###Code
plt.plot(train_data[0])
###Output
_____no_output_____
###Markdown
Demonstrate overfitting The simplest way to prevent overfitting is to reduce the size of the model, i.e. the number of learnable parameters (which is determined by the number of layers and the number of units per layer). In deep learning, the number of learnable parameters in a model is often referred to as the model's "capacity". Intuitively, a model with more parameters has more "memorization capacity" and can easily learn a perfect, dictionary-like mapping between training samples and their targets, a mapping with no generalization power that is useless for predictions on previously unseen data. Always keep this in mind: deep learning models tend to be good at fitting the training data, but the real challenge is generalization, not fitting. On the other hand, if the network has limited memorization capacity, it cannot learn such a mapping easily. To minimize its loss, it has to learn compressed representations that have more predictive power. At the same time, a model that is too small will have difficulty fitting the training data. There is a balance between "too much capacity" and "not enough capacity". Unfortunately, there is no magical formula to determine the right size or architecture of a model (the number of layers, or the number of units per layer); you will have to experiment with a range of different architectures. To find an appropriate model size, it is best to start with relatively few layers and parameters, then add new layers or increase the size of the layers until you see diminishing returns on the validation loss. Let's try this on our movie review classification network. We will build a simple baseline model using only ```Dense``` layers, then build smaller and bigger versions and compare them. Create a baseline model
###Code
baseline_model = keras.Sequential([
    # `input_shape` is needed here so that the `.summary` method works
keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
baseline_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
baseline_model.summary()
baseline_history = baseline_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
Create a smaller model Let's build a model with fewer hidden units to compare against the baseline model we just created:
###Code
smaller_model = keras.Sequential([
keras.layers.Dense(4, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(4, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
smaller_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
smaller_model.summary()
###Output
_____no_output_____
###Markdown
Train this model using the same data:
###Code
smaller_history = smaller_model.fit(train_data,
train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
Create a bigger model Next, let's build a much larger model and see how quickly it starts to overfit. We add a network with far more capacity than the problem warrants and compare it with the others:
###Code
bigger_model = keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(512, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
bigger_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
bigger_model.summary()
###Output
_____no_output_____
###Markdown
And, again, train this model using the same data:
###Code
bigger_history = bigger_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
###Markdown
Plot the training and validation loss The solid lines show the training loss and the dashed lines show the validation loss (remember: a lower validation loss indicates a better model). Here the smaller network starts overfitting later than the baseline model (after epoch 6 rather than epoch 4), and its performance degrades much more slowly once it does start overfitting.
###Code
def plot_history(histories, key='binary_crossentropy'):
plt.figure(figsize=(16,10))
for name, history in histories:
val = plt.plot(history.epoch, history.history['val_'+key],
'--', label=name.title()+' Val')
plt.plot(history.epoch, history.history[key], color=val[0].get_color(),
label=name.title()+' Train')
plt.xlabel('Epochs')
plt.ylabel(key.replace('_',' ').title())
plt.legend()
plt.xlim([0,max(history.epoch)])
plot_history([('baseline', baseline_history),
('smaller', smaller_history),
('bigger', bigger_history)])
###Output
_____no_output_____
###Markdown
The bigger network starts overfitting almost immediately, after just one epoch, and it overfits much more severely. The more capacity a network has, the more quickly it can model the training data (yielding a low training loss), but the more susceptible it is to overfitting (yielding a large gap between the training and validation loss). Strategies to prevent overfitting Add weight regularization You may be familiar with Occam's Razor: given two explanations for something, the explanation most likely to be correct is the "simplest" one, the one that makes the fewest assumptions. This also applies to the models learned by neural networks: given some training data and a network architecture, there are many sets of weight values (many possible models) that can explain the data, and simpler models are less likely to overfit than complex ones. A "simple model" here is a model whose distribution of parameter values has less entropy (or a model with fewer parameters altogether, as we saw in the section above). A common way to mitigate overfitting is therefore to constrain the complexity of the network by forcing its weights to take only small values, which makes the distribution of weight values more regular. This is called "weight regularization", and it is done by adding to the network's loss function a cost associated with having large weights. This cost comes in two flavors: * [L1 regularization](https://developers.google.com/machine-learning/glossary/L1_regularization), where the added cost is proportional to the absolute value of the weights (i.e. the "L1 norm" of the weights). * [L2 regularization](https://developers.google.com/machine-learning/glossary/L2_regularization), where the added cost is proportional to the square of the weights (i.e. the squared "L2 norm" of the weights). In the context of neural networks, L2 regularization is also called weight decay. Don't let the different name confuse you: weight decay is mathematically identical to L2 regularization. L1 regularization drives some weight parameters to exactly zero, while L2 regularization penalizes the weights without making them sparse, which is one reason L2 is more common. In `tf.keras`, weight regularization is added by passing weight regularizer instances to layers as keyword arguments. Let's add L2 weight regularization now.
###Code
l2_model = keras.models.Sequential([
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
])
l2_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
l2_model_history = l2_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
###Output
_____no_output_____
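###Markdown
The same pattern works for the L1 and combined L1+L2 penalties mentioned above; only the regularizer object changes. The cell below is only an illustrative sketch (the 0.001 coefficients are not tuned for this problem):
###Code
# Illustrative only: the same layers with an L1 and a combined L1+L2 penalty.
l1_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l1(0.001),
                       activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l1_l2(l1=0.001, l2=0.001),
                       activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])
l1_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy', 'binary_crossentropy'])
l1_model.summary()
###Output
_____no_output_____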
###Markdown
```l2(0.001)``` means that every coefficient in the layer's weight matrix adds ```0.001 * weight_coefficient_value**2``` to the total loss of the network. Note that this penalty is only added at training time, so the network's loss will be much higher during training than at test time. Let's look at the impact of L2 regularization:
###Code
plot_history([('baseline', baseline_history),
('l2', l2_model_history)])
###Output
_____no_output_____
###Markdown
As you can see, the L2-regularized model is much more resistant to overfitting than the baseline model, even though both models have the same number of parameters. Add dropout Dropout is one of the most effective and most widely used regularization techniques for neural networks, developed by Hinton and his students at the University of Toronto. Dropout, applied to a layer, randomly "drops out" (i.e. sets to zero) a number of the layer's output features during training. Suppose a layer would normally return the vector [0.2, 0.5, 1.3, 0.8, 1.1] for a given input sample during training; after applying dropout, a few entries of this vector are randomly set to zero, for example [0, 0.5, 1.3, 0, 1.1]. The "dropout rate" is the fraction of the features that are zeroed out; it is usually set between 0.2 and 0.5. At test time no units are dropped out; instead, the layer's output values are scaled down by a factor equal to the dropout rate, to balance the fact that more units are active than at training time. In `tf.keras` you can add dropout to a network via the `Dropout` layer, which is applied to the output of the layer right before it. Let's add two `Dropout` layers to our IMDB network and see how much they reduce overfitting:
###Code
dpt_model = keras.models.Sequential([
keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
keras.layers.Dropout(0.5),
keras.layers.Dense(16, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(1, activation='sigmoid')
])
dpt_model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy','binary_crossentropy'])
dpt_model_history = dpt_model.fit(train_data, train_labels,
epochs=20,
batch_size=512,
validation_data=(test_data, test_labels),
verbose=2)
plot_history([('baseline', baseline_history),
('dropout', dpt_model_history)])
###Output
_____no_output_____ |
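###Markdown
The two techniques above can also be combined in a single model. The cell below is only a sketch that mirrors the layer sizes, penalty, and dropout rate used earlier (not tuned); training it with the same `fit` call as above would let you compare it against the baseline:
###Code
# Sketch: L2 weight regularization and dropout combined in one model (hyperparameters mirror the cells above).
combined_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation='sigmoid')
])
combined_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])
combined_model.summary()
###Output
_____no_output_____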
.ipynb_checkpoints/VAE_FashionMnist_TensorFlow-checkpoint.ipynb | ###Markdown
Latent Space Projection of Variational Autoencoder Trained on Fashion-MNIST
###Code
n_to_show = 5000
figsize = 12
example_idx = np.random.choice(range(len(x_test)), n_to_show)
example_images = x_test[example_idx]
m, v = enc.predict(example_images)
embeddings = final([m,v])
plt.figure(figsize=(figsize, figsize))
plt.scatter(embeddings[:, 0] , embeddings[:, 1], alpha=0.5, s=2)
plt.xlabel("Dimension-1", size=20)
plt.ylabel("Dimension-2", size=20)
plt.xticks(size=20)
plt.yticks(size=20)
plt.title("Projection of 2D Latent-Space (Fashion-MNIST)", size=20)
plt.show()
# Create dictionary of target classes
label_dict = {
0: 'T-shirt/top',
1: 'Trouser',
2: 'Pullover',
3: 'Dress',
4: 'Coat',
5: 'Sandal',
6: 'Shirt',
7: 'Sneaker',
8: 'Bag',
9: 'Ankle boot',
}
###Output
_____no_output_____
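###Markdown
Optionally, the same projection can be colored by class to see how the ten categories separate in the latent space. This sketch reuses `embeddings`, `example_idx` and `label_dict` from the cell above and assumes `y_test` holds the integer labels, as in the reconstruction cell further down:
###Code
# Sketch: latent-space projection colored by Fashion-MNIST class label
plt.figure(figsize=(12, 12))
sc = plt.scatter(embeddings[:, 0], embeddings[:, 1],
                 c=y_test[example_idx], cmap='tab10', alpha=0.5, s=2)
cbar = plt.colorbar(sc, ticks=range(10))
cbar.ax.set_yticklabels([label_dict[i] for i in range(10)])
plt.xlabel("Dimension-1", size=20)
plt.ylabel("Dimension-2", size=20)
plt.title("Latent space colored by class (Fashion-MNIST)", size=20)
plt.show()
###Output
_____no_output_____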
###Markdown
Reconstructing Fashion Images with Latent-Vector Sampled from Normal Distribution
###Code
figsize = 15
x = np.random.normal(size = (10,2))
#x = np.random.uniform(size = (10,200))
reconstruct = dec.predict(x)
fig = plt.figure(figsize=(figsize, 10))
for i in range(10):
ax = fig.add_subplot(5, 5, i+1)
ax.axis('off')
ax.imshow(reconstruct[i, :,:,0]*255, cmap = 'gray')
###Output
_____no_output_____
###Markdown
Reconstructing Test Images
###Code
figsize = 15
m, v = enc.predict(x_test[:25])
latent = final([m,v])
reconst = dec.predict(latent)
fig = plt.figure(figsize=(figsize, 10))
for i in range(25):
ax = fig.add_subplot(5, 5, i+1)
ax.axis('off')
ax.text(0.5, -0.15, str(label_dict[y_test[i]]), fontsize=10, ha='center', transform=ax.transAxes)
ax.imshow(reconst[i, :,:,0]*255, cmap = 'gray')
###Output
_____no_output_____
###Markdown
Reconstructing Fashion Images with Latent-Vector Sampled Uniformly
###Code
figsize = 15
min_x = min(embeddings[:, 0])
max_x = max(embeddings[:, 0])
min_y = min(embeddings[:, 1])
max_y = max(embeddings[:, 1])
x = np.random.uniform(min_x,max_x, size = (10,1))
y = np.random.uniform(min_y,max_y, size = (10,1))
z_grid = np.concatenate((x,y), axis=1)
reconst = dec.predict(z_grid)
fig = plt.figure(figsize=(figsize, 10))
for i in range(10):
ax = fig.add_subplot(5, 5, i+1)
ax.axis('off')
ax.text(0.5, -0.15, str(np.round(z_grid[i],1)), fontsize=10, ha='center', transform=ax.transAxes)
ax.imshow(reconst[i, :,:,0]*255, cmap = 'gray')
###Output
_____no_output_____ |
Buck-boost_Metodologia de projeto.ipynb | ###Markdown
Buck-Boost Converter A dynamic converter; the goal is to produce the schematic of a buck-boost converter. Design steps: 1. Define the design parameters (Vin, Vout, Po, fs, inductor current ripple and capacitor voltage ripple) 2. Compute the duty cycle (D/(1-D) = Vout/Vin) 3. Compute the inductance (L = Vin·D/(fs·ΔIL)) 4. Compute the capacitance (C = Io·D/(fs·ΔVc)) 5. Compute the stresses on the semiconductors (Is_md, Is_ef, Is_max, Id_md, Id_ef, Id_max, Vs_max and Vd_max) Input voltage:
###Code
Vin = 75
print("A tensão de entrada é", Vin, "V")
###Output
A tensão de entrada é 75 V
###Markdown
Output voltage:
###Code
Vout = 50
print("A tensão de saída é", Vout, "V")
###Output
A tensão de saída é 50 V
###Markdown
Maximum output power:
###Code
Po = 40
print("A potência máxima de saída é", Po, "W")
###Output
A potência máxima de saída é 40 W
###Markdown
Switching frequency:
###Code
fs = 20000
print("A frequência de comutação é", fs, "Hz")
###Output
A frequência de comutação é 20000 Hz
###Markdown
Duty cycle: $${D \over (1-D)} = {Vout \over Vin}$$
###Code
x = Vout/Vin
D = (x/(x+1))
print("A razão ciclíca é", D)
###Output
A razão ciclíca é 0.4
###Markdown
Output current: $$Io = {Po \over Vout}$$
###Code
Io = Po/Vout
print("A corrente de saída é", "%.3f" % Io, "A")
###Output
A corrente de saída é 0.800 A
###Markdown
Inductor current ripple, assuming 10% ripple: $$\Delta_{Il} = 0.1\,Io$$
###Code
delta_Il = 0.1*Io
print("A ondulação de corrente no indutor é", "%.3f" % delta_Il, "A")
###Output
A ondulação de corrente no indutor é 0.080 A
###Markdown
Capacitor voltage ripple, assuming 1% ripple: $$\Delta_{Vc} = 0.01\,Vout$$
###Code
delta_Vc = 0.01*Vout
print("A ondulação de tensão no capacitor é", delta_Vc, "V")
###Output
A ondulação de tensão no capacitor é 0.5 V
###Markdown
Load resistance: $$Ro = {Vout \over Io}$$
###Code
Ro = (Vout)/Io
print("A resistência de carga é", Ro, "ohms")
###Output
A resistência de carga é 62.5 ohms
###Markdown
Output inductor: $$Lo = {Vin\cdot D \over fs\cdot\Delta_{Il}}$$
###Code
Lo = (Vin*D)/(fs*delta_Il)
print("O indutor de saída é", "%.4f" % Lo, "H")
###Output
O indutor de saída é 0.0187 H
###Markdown
Output capacitor: $$Co = {{I_{o}\cdot D} \over fs\cdot\Delta_{Vc}}$$
###Code
Co = (Io*D)/(fs*delta_Vc)
print("O capacitor de saída é", Co, "F")
###Output
O capacitor de saída é 3.2000000000000005e-05 F
###Markdown
Inductor stresses: Average inductor current: $$I_{L} = I_{out} + I_{in}$$
###Code
IL = Io+(Po/Vin)
print("O valor médio da corrente no indutor é", "%.3f" % IL, "A")
###Output
O valor médio da corrente no indutor é 1.333 A
###Markdown
RMS inductor current: $$Is_{ef} = \sqrt{D}\,Io$$
###Code
Is_ef = (D**0.5)*Io
print("O valor eficaz da corrente no indutor é", "%.3f" % Is_ef, "A")
###Output
O valor eficaz da corrente no indutor é 0.506 A
###Markdown
Peak inductor current: $$Is_{max} = I_{L} + {\Delta_{Il} \over 2}$$
###Code
Is_max = Io + delta_Il/2
print("O valor máximo da corrente no indutor é", Is_max, "A")
###Output
O valor máximo da corrente no indutor é 0.8400000000000001 A
###Markdown
Maximum inductor voltage: $$Vs_{max} = Vin$$
###Code
Vs_max = Vin
print("O valor máximo da tensão no indutor é", Vs_max, "V")
###Output
O valor máximo da tensão no indutor é 75 V
###Markdown
Diode stresses: Average diode current: $$Id_{md} = (1-D)\cdot Io$$
###Code
Id_md = (1-D)*Io
print("O valor médio da corrente no diodo é", "%.3f" % Id_md, "A")
###Output
O valor médio da corrente no diodo é 0.480 A
###Markdown
RMS diode current: $$Id_{ef} = \sqrt{(1-D)}\,Io$$
###Code
Id_ef = ((1-D)**0.5)*Io
print("O valor eficaz da corrente no diodo é", "%.3f" % Id_ef, "A")
###Output
O valor eficaz da corrente no diodo é 0.620 A
###Markdown
Peak diode current: $$Id_{max} = Io + {\Delta_{Il} \over 2}$$
###Code
Id_max = Io + delta_Il/2
print("O valor máximo da corrente no diodo é", Id_max, "A")
###Output
O valor máximo da corrente no diodo é 0.8400000000000001 A
###Markdown
Maximum diode voltage: $$Vd_{max} = Vin$$
###Code
Vd_max = Vin
print("O valor máximo da tensão no diodo é", Vd_max, "V")
###Output
O valor máximo da tensão no diodo é 75 V
###Markdown
Critical resistance: $$Rcrit = {2\,Lo\,fs \over (1-D)^2}$$
###Code
Rcrit = 2*Lo*fs/(1-D)**2
print("A resistência crítica é", "%.0f" % Rcrit, "ohms")
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/50V-40W/tensão_no_indutor.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(x, y,'-')
plt.title('Tensão no Indutor')
plt.xlabel('tempo (s)')
plt.ylabel('tensão (V)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/50V-40W/corrente_no_indutor.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,6))#altera as dimensões do gráfico
plt.plot(x, y,'-')
plt.title('Corrente no Indutor')
plt.xlabel('tempo (s)')
plt.ylabel('corrente (A)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/50V-40W/Vout.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(x, y,'-')
plt.title('Tensão de saída')
plt.xlabel('tempo (s)')
plt.ylabel('tensão (V)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados//buck-boost/50V-40W/tensão_na_chave.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,6))#altera as dimensões do gráfico
plt.plot(x, y,'-')
plt.title('Forma de onda no comando')
plt.xlabel('tempo (s)')
plt.ylabel('tensão (V)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/50V-40W/pwm.csv', delimiter=',', skiprows=1)
x = data[:, 0]#tempo
y = data[:, 1]#tensão sobre a chave
z = data[:, 2]#Vcon
g = data[:, 3]#Vramp
plt.figure(figsize=(16,8))#altera as dimensões do gráfico
plt.plot(x,y)
plt.plot(x,z)
plt.plot(x,g)
plt.title('PWM')
plt.xlabel('tempo (s)')
plt.ylabel('tensão (V)')
plt.legend(['Comando','Vcon','Vramp'], loc = 'best', fontsize=10)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/50V-40W/correntes.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
z = data[:, 2]
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(x,y)
plt.plot(x,z)
plt.title('Corrente na entrada e na carga')
plt.xlabel('tempo (s)')
plt.ylabel('Corrente (A)')
plt.legend(['Entrada','Carga'], loc = 'best')
plt.show()
###Output
_____no_output_____
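###Markdown
For convenience, the equations used in this section (duty cycle, inductance, capacitance, load and critical resistance) can be wrapped in a small helper. This is only a sketch that restates the formulas above; the call at the end reproduces the 50 V / 40 W design and checks the conduction mode by comparing Ro with Rcrit (DCM when Ro > Rcrit):
###Code
def buck_boost_design(Vin, Vout, Po, fs, ripple_i=0.1, ripple_v=0.01):
    """Sketch of the buck-boost design equations used in this section."""
    x = Vout / Vin
    D = x / (x + 1)                  # from D/(1-D) = Vout/Vin
    Io = Po / Vout
    delta_Il = ripple_i * Io         # inductor current ripple
    delta_Vc = ripple_v * Vout       # capacitor voltage ripple
    Lo = Vin * D / (fs * delta_Il)
    Co = Io * D / (fs * delta_Vc)
    Ro = Vout / Io
    Rcrit = 2 * Lo * fs / (1 - D) ** 2
    return D, Lo, Co, Ro, Rcrit

D_, Lo_, Co_, Ro_, Rcrit_ = buck_boost_design(75, 50, 40, 20000)
print("D =", D_, " Lo =", Lo_, "H  Co =", Co_, "F")
print("Ro =", Ro_, "ohms  Rcrit =", "%.0f" % Rcrit_, "ohms ->", "CCM" if Ro_ < Rcrit_ else "DCM")
###Output
_____no_output_____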
###Markdown
Second design option: - Output voltage changed to 127 V. - Power changed to 50 W.
###Code
Vin2 = 75
print("A tensão de entrada é", Vin, "V")
Vout2 = 127
print("A tensão de saída é", Vout2, "V")
Po2 = 50
print("A potência máxima de saída é", Po2, "W")
fs2 = 20000
print("A frequência de comutação é", fs, "Hz")
x = Vout2/Vin2
D2= (x/(x+1))
print("A razão ciclíca é", D2)
Io2 = Po2/Vout2
print("A corrente de saída é", "%.3f" % Io2, "A")
delta_Il2 = 0.1*Io2
print("A ondulação de corrente no indutor é", "%.3f" % delta_Il2, "A")
delta_Vc2 = 0.01*Vout2
print("A ondulação de tensão no capacitor é", "%.3f" % delta_Vc2, "V")
Ro2 = (Vout2**2)/Po2
print("A resistência de carga é", Ro2, "ohms")
Lo2 = (Vin2*D2)/(fs2*delta_Il2)
print("O indutor de saída é", "%.4f" % Lo2, "H")
Co2 = (Io2*D2)/(fs*delta_Vc2)
print("O capacitor de saída é", Co2, "F")
Il_md2 = IL = Io2+(Po2/Vin2)
print("O valor médio da corrente no indutor é", "%.3f" % Il_md2, "A")
Is_ef2 = (D2**0.5)*Io2
print("O valor eficaz da corrente na chave é", "%.3f" % Is_ef2, "A")
Is_max2 = Io2 + delta_Il2/2
print("O valor máximo da corrente na chave é", "%.3f" % Is_max2, "A")
Vs_max2 = Vin
print("O valor máximo da tensão na chave é", Vs_max2, "V")
Id_md2 = (1-D2)*Io2
print("O valor médio da corrente no diodo é", "%.3f" % Id_md2, "A")
Id_ef2 = ((1-D2)**0.5)*Io2
print("O valor eficaz da corrente no diodo é", "%.3f" % Id_ef2, "A")
Id_max2 = Io2 + delta_Il2/2
print("O valor máximo da corrente no diodo é", "%.3f" % Id_max2, "A")
Vd_max2 = Vin2
print("O valor máximo da tensão no diodo é", Vd_max2, "V")
Rcrit2 = 2*Lo2*fs/(1-D2)**2
print("A resistência crítica é", "%.0f" % Rcrit2, "ohms")
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/127V-50W/Tensão_de_saida.csv', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(x,y)
plt.title('Tensão na carga')
plt.xlabel('tempo (s)')
plt.ylabel('Tensão (V)')
#plt.legend(['Entrada'], loc = 'best')
plt.show()
###Output
_____no_output_____
###Markdown
Third design option: - DCM: R > Rcrit.
###Code
Vin2 = 75
Vout2 = 127
Po2 = 50
fs2 = 20000
Io2 = Po2/Vout2
Il_md2 = IL2 = Io2+(Po2/Vin2)
delta_Il2 = (2*IL2)+0.5
D2= (Vout2*2*Io2)/(Vin2*delta_Il2)
delta_Vc2 = 0.01*Vout2
Ro2 = Vout2**2/Po2  # load resistance R = Vout^2/Po
Lo2 = (Vin2*D2)/(fs2*delta_Il2)
tx=(2*Io2)/(fs2*delta_Il2)
ta=((delta_Il2-Io2)*tx)/(delta_Il2)
Co2 = (ta*(Io2*D2))/(fs2*delta_Vc2)
print("A tensão de entrada é", Vin2, "V")
print("A tensão de saída é", Vout2, "V")
print("A potência máxima de saída é", Po2, "W")
print("A frequência de comutação é", fs2, "Hz")
print("A razão ciclíca é", D2)
print("A corrente de saída é", "%.3f" % Io2, "A")
print("A ondulação de corrente no indutor é", "%.3f" % delta_Il2, "A")
print("A ondulação de tensão no capacitor é", "%.3f" % delta_Vc2, "V")
print("A resistência de carga é", Ro2, "ohms")
print("O indutor de saída é", "%.4f" % Lo2, "H")
print("O capacitor de saída é", Co2, "F")
Il_md2 = IL = Io2+(Po2/Vin2)
print("O valor médio da corrente no indutor é", "%.3f" % Il_md2, "A")
Is_ef2 = (D2**0.5)*Io2
print("O valor eficaz da corrente na chave é", "%.3f" % Is_ef2, "A")
Is_max2 = Io2 + delta_Il2/2
print("O valor máximo da corrente na chave é", "%.3f" % Is_max2, "A")
Vs_max2 = Vin2
print("O valor máximo da tensão na chave é", Vs_max2, "V")
Id_md2 = (1-D2)*Io2
print("O valor médio da corrente no diodo é", "%.3f" % Id_md2, "A")
Id_ef2 = ((1-D2)**0.5)*Io2
print("O valor eficaz da corrente no diodo é", "%.3f" % Id_ef2, "A")
Id_max2 = Io2 + delta_Il2/2
print("O valor máximo da corrente no diodo é", "%.3f" % Id_max2, "A")
Vd_max2 = Vin2
print("O valor máximo da tensão no diodo é", Vd_max2, "V")
import numpy as np
import matplotlib.pyplot as plt
data = np.loadtxt('dados/buck-boost/127V-50W/DCM/', delimiter=',', skiprows=1)
x = data[:, 0]
y = data[:, 1]
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(x,y)
plt.title('corrente no indutor')
plt.xlabel('tempo (s)')
plt.ylabel('Corrente (A)')
#plt.legend(['Entrada'], loc = 'best')
plt.show()
###Output
_____no_output_____
###Markdown
Fourth design option: - Frequency sweep - Design 1
###Code
import numpy as np
import matplotlib.pyplot as plt
f = np.arange(15000,60000,1)
Co_f = (Io*D)/(delta_Vc)*(1/f)  # C = Io*D/(fs*dVc)
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(f, Co_f,'-')
plt.title('Capacitor')
plt.xlabel('Frequência(Hz)')
plt.ylabel('Capacitância (F)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
f = np.arange(15000,60000,1)
Lo_f = (Vin*D)/(f*delta_Il)  # L = Vin*D/(fs*dIl)
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(f, Lo_f,'-')
plt.title('Indutor')
plt.xlabel('Frequência(Hz)')
plt.ylabel('Indutância (H)')
plt.show()
###Output
_____no_output_____
###Markdown
Fifth design option: - Frequency sweep - Design 2 (127 V - 50 W)
###Code
import numpy as np
import matplotlib.pyplot as plt
f = np.arange(15000,60000,1)
Co_f2 = (Io2*D2)/(delta_Vc2)*(1/f)  # C = Io*D/(fs*dVc)
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(f, Co_f2,'-')
plt.title('Capacitor')
plt.xlabel('Frequência(Hz)')
plt.ylabel('Capacitância (F)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt
f = np.arange(15000,60000,1)
Lo_f2 = (Vin2*D2)/(f*delta_Il2)  # L = Vin*D/(fs*dIl)
plt.figure(figsize=(12,4))#altera as dimensões do gráfico
plt.plot(f, Lo_f2,'-')
plt.title('Indutor')
plt.xlabel('Frequência(Hz)')
plt.ylabel('Indutância (H)')
plt.show()
###Output
_____no_output_____
###Markdown
Sixth design option: - Frequency variation - fs = 5 kHz and fs = 50 kHz. fs = 5 kHz:
###Code
Vin = 75
print("A tensão de entrada é", Vin, "V")
Vout = 30
print("A tensão de saída é", Vout, "V")
Po = 20
print("A potência máxima de saída é", Po, "W")
fs1 = 5000
print("A frequência de comutação é", fs1, "Hz")
D = Vout/Vin
print("A razão ciclíca é", D)
Io = Po/Vout
print("A corrente de saída é", "%.3f" % Io, "A")
delta_Il = 0.1*Io
print("A ondulação de corrente no indutor é", "%.3f" % delta_Il, "A")
delta_Vc = 0.01*Vout
print("A ondulação de tensão no capacitor é", "%.3f" % delta_Vc, "V")
Ro = (Vout**2)/Po
print("A resistência de carga é", "%.3f" % Ro, "ohms")
Lo5 = (Vin-Vout)*D/(fs1*delta_Il)
print("O indutor de saída é", "%.4f" % Lo5, "H")
Co5 = delta_Il/(8*fs1*delta_Vc)
print("O capacitor de saída é", Co5, "F")
Is_md = D*Io
print("O valor médio da corrente na chave é", "%.3f" % Is_md, "A")
Is_ef = (D**0.5)*Io
print("O valor eficaz da corrente na chave é", "%.3f" % Is_ef, "A")
Is_max = Io + delta_Il/2
print("O valor máximo da corrente na chave é", "%.3f" % Is_max, "A")
Vs_max = Vin
print("O valor máximo da tensão na chave é", Vs_max, "V")
Id_md = (1-D)*Io
print("O valor médio da corrente no diodo é", "%.3f" % Id_md, "A")
Id_ef = ((1-D)**0.5)*Io
print("O valor eficaz da corrente no diodo é", "%.3f" % Id_ef, "A")
Id_max = Io + delta_Il/2
print("O valor máximo da corrente no diodo é", "%.3f" % Id_max, "A")
Vd_max = Vin
print("O valor máximo da tensão no diodo é", Vd_max, "V")
Rcrit5 = 2*Lo5*fs1/(1-D)
print("A resistência crítica é", "%.0f" % Rcrit5, "ohms")
###Output
_____no_output_____
###Markdown
fs = 50 kHz:
###Code
Vin = 75
print("A tensão de entrada é", Vin, "V")
Vout = 30
print("A tensão de saída é", Vout, "V")
Po = 20
print("A potência máxima de saída é", Po, "W")
fs2 = 50000
print("A frequência de comutação é", fs2, "Hz")
D = Vout/Vin
print("A razão ciclíca é", D)
Io = Po/Vout
print("A corrente de saída é", "%.3f" % Io, "A")
delta_Il = 0.1*Io
print("A ondulação de corrente no indutor é", "%.3f" % delta_Il, "A")
delta_Vc = 0.01*Vout
print("A ondulação de tensão no capacitor é", "%.3f" % delta_Vc, "V")
Ro = (Vout**2)/Po
print("A resistência de carga é", "%.3f" % Ro, "ohms")
Lo6 = (Vin-Vout)*D/(fs2*delta_Il)
print("O indutor de saída é", "%.4f" % Lo6, "H")
Co6 = delta_Il/(8*fs2*delta_Vc)
print("O capacitor de saída é", Co6, "F")
Is_md = D*Io
print("O valor médio da corrente na chave é", "%.3f" % Is_md, "A")
Is_ef = (D**0.5)*Io
print("O valor eficaz da corrente na chave é", "%.3f" % Is_ef, "A")
Is_max = Io + delta_Il/2
print("O valor máximo da corrente na chave é", "%.3f" % Is_max, "A")
Vs_max = Vin
print("O valor máximo da tensão na chave é", Vs_max, "V")
Id_md = (1-D)*Io
print("O valor médio da corrente no diodo é", "%.3f" % Id_md, "A")
Id_ef = ((1-D)**0.5)*Io
print("O valor eficaz da corrente no diodo é", "%.3f" % Id_ef, "A")
Id_max = Io + delta_Il/2
print("O valor máximo da corrente no diodo é", "%.3f" % Id_max, "A")
Vd_max = Vin
print("O valor máximo da tensão no diodo é", Vd_max, "V")
Rcrit6 = 2*Lo6*fs2/(1-D)
print("A resistência crítica é", "%.0f" % Rcrit6, "ohms")
###Output
_____no_output_____
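###Markdown
Before the discussion below, the two designs can be compared directly; this cell only reuses the `Lo5`/`Co5` and `Lo6`/`Co6` values computed above:
###Code
# Side-by-side comparison of the 5 kHz and 50 kHz designs (L and C scale with 1/fs)
print("fs =  5 kHz:  Lo =", "%.4f" % Lo5, "H   Co =", Co5, "F")
print("fs = 50 kHz:  Lo =", "%.4f" % Lo6, "H   Co =", Co6, "F")
print("Lo5/Lo6 =", Lo5/Lo6, "  Co5/Co6 =", Co5/Co6)
###Output
_____no_output_____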
###Markdown
When the switching frequency of the buck converter is changed, only the inductor and capacitor values change, since both of these components depend inversely on the frequency (keeping the other values defined initially: Vout, Vin, D, P). Inductor design: - Select the most suitable magnetic core; - Compute the number of turns; - Compute the air-gap length; - Choose the conductor; - Check that the design can actually be built. Selecting a suitable magnetic core Ferrite:
###Code
B_max = 0.3
print("O valor da densidade de fluxo magnético máximo é", B_max, "T")
Kw = 0.6
print("O valor do fator de utilização da área de enrolamento é", Kw)
J = 450
print("O valor da densidade de corrente no condutor é", J, "A/cm^2")
Il_max = 0.699
Il_min = 0.632
Il_rms = 0.666
print("O valor máximo da corrente no indutor é", Il_max, "A")
print("O valor mínimo da corrente no indutor é", Il_min, "A")
print("O valor RMS da corrente no indutor é", Il_rms, "A")
###Output
_____no_output_____
###Markdown
$$AeAw = {LoI_{lmax}I_{lrms}10^4 \over B_{max}K_{w}J}$$
###Code
A_eA_w = Lo*Il_max*Il_rms*10**4/(B_max*Kw*J)
print("A multiplicação entre a área efetiva e a área da janela do núcleo é", A_eA_w,"cm^4")
###Output
_____no_output_____
###Markdown
Table: EE-geometry ferrite magnetic cores
###Code
A_eA_w = 1.02
A_e = 1.2
A_w = 0.85
le = 6.7
lt = 6.7
print("A multiplicação entre a área efetiva e a área da janela do núcleo será", A_eA_w,"cm^4 - valor mais próximo do valor calculado")
print("A área efetiva será", A_e,"cm^2 - valor escolhido pela tabela")
print("A área da janela do núcleo será", A_w,"cm^2 - valor escolhido pela tabela")
print("O comprimento do caminho magnético será", le,"cm - valor escolhido pela tabela")
print("O comprimento médio de uma espira será", lt,"cm - valor escolhido pela tabela")
###Output
_____no_output_____
###Markdown
Number of turns $$N = {Lo\,I_{lmax}\,10^4 \over B_{max}A_{e}}$$
###Code
N = Lo*Il_max*10**4/(B_max*A_e)
N = int(N)
print("O número de espiras é", N)
###Output
_____no_output_____
###Markdown
Air-gap length $$l_{g} = {N^{2}\mu_{o}A_{e} \over Lo\,10^4}$$
###Code
uo = 4*3.1415926535897931*10**(-7)
lg = N**2*uo*A_e/(Lo*10**4)
print("O tamanho do entreferro é", "%.3f" % (lg*10**3), "mm")
###Output
_____no_output_____
###Markdown
Conductor sizing $$l_{tot} = 1.1\,N\,l_{t}$$ $$S_{fio} = {I_{lrms} \over J}$$
###Code
ltot = 1.1*N*lt
print("O comprimento total do condutor é", "%.3f" % ltot, "m")
Sfio = Il_rms/J
print("O valor mínimo da bitola é", "%.3f" % (Sfio*10**2), "mm^2")
###Output
_____no_output_____
###Markdown
Table: Copper conductors:
###Code
Sfio = 0.1626
print("O valor da bitola escolhido pela tabela acima foi", Sfio, "mm^2 - AWG=25")
###Output
_____no_output_____
###Markdown
$$A_{w min} = {n_{cond}S_{fio}10^{-2}N \over K_{w}}$$
###Code
ncond = 1
A_w_min = ncond*Sfio*10**(-2)*N/Kw
print("A valor mínimo da área da janela do núcleo será", A_w_min,"cm^2")
###Output
_____no_output_____
###Markdown
Since Aw_min is smaller than Aw, the design can be built! Heat-sink design: In the case without a heat sink, heat flows from the junction to the ambient through the package, so the thermal circuit reduces to the resistance Rjc (junction-case) in series with Rca (case-ambient); both parameters are found in the component datasheet. To decide whether the component needs a heat sink, the junction temperature is estimated for the specific application: $$T_{j} = R_{ja}P_{T} + T_{a}$$ MOSFET (BSC100N10NSF):
###Code
P_T = 42.49*10**(-3)
R_ja = 62
Ta = 50
T_j_1 = R_ja*P_T + Ta
print("A temperatura de junção estimada é", "%.3f" % T_j_1, "°C")
###Output
_____no_output_____
###Markdown
Since 52.63 °C is below 150 °C (the transistor's maximum operating temperature), the component does not need a heat sink! DIODE (MBR20100CT):
###Code
P_T2 = 185.55*10**(-3)
R_ja2 = 60
Ta2 = 50
T_j_2 = R_ja2*P_T2 + Ta2
print("A temperatura de junção estimada é", T_j_2, "°C")
###Output
_____no_output_____ |
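###Markdown
Neither component needed a heat sink here. For reference, if a junction temperature had come out above its limit, the usual next step would be to size one. The sketch below is generic and not part of the original design: every numeric value in it is an assumption (illustrative datasheet figures), and it solves Tj = Ta + PT·(Rjc + Rcs + Rsa) for the maximum allowed sink-to-ambient resistance Rsa:
###Code
# Illustrative heat-sink sizing sketch; all values below are assumptions, not taken from the datasheets above.
P_T_hyp = 0.5       # assumed dissipated power, W
T_j_max = 150.0     # assumed maximum junction temperature, degC
T_a_hyp = 50.0      # ambient temperature, degC (same assumption as above)
R_jc_hyp = 2.0      # assumed junction-to-case thermal resistance, degC/W
R_cs_hyp = 0.5      # assumed case-to-sink interface resistance, degC/W

R_sa_max = (T_j_max - T_a_hyp) / P_T_hyp - (R_jc_hyp + R_cs_hyp)
print("Maximum sink-to-ambient thermal resistance:", R_sa_max, "degC/W")
###Output
_____no_output_____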
bootcamp/lesson2/explore_predictions.ipynb | ###Markdown
MIT License Copyright (c) Microsoft Corporation. All rights reserved. This notebook is adapted from Francesca Lazzeri Energy Demand Forecast Workbench workshop. Copyright (c) 2021 PyLadies Amsterdam, Alyona Galyeva Forecast output exploration This notebook generates visual analyses of the generated forecast for a specified model.
###Code
%matplotlib inline
import os
import pickle
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
WORKDIR = os.getcwd()
MODEL_NAME = "linear_regression"
# MODEL_NAME = "ridge"
with open(os.path.join(WORKDIR, MODEL_NAME + '_predictions.pkl'), 'rb') as f:
predictions = pickle.load(f)
for n in range(1, 7):
predictions['error_t+'+str(n)] = predictions['pred_t+'+str(n)] - predictions['demand']
predictions['abs_error_t+'+str(n)] = abs(predictions['error_t+'+str(n)])
predictions['abs_pct_error_t+'+str(n)] = abs(predictions['error_t+'+str(n)]) / predictions['demand']
###Output
_____no_output_____
###Markdown
Inspect forecast for a specified date range Set date range for forecast inspection. The test dataset covers the date range 2016-07-01 to 2017-08-10.
###Code
min_date = '2016-07-01'
max_date = '2016-07-07'
###Output
_____no_output_____
###Markdown
The chart below shows the actual demand and forecasts over the specified period. Multiple forecasts for the same period are shown. For example, forecast $t+3$ represents the prediction made 3 hours before the period being forecasted.
###Code
plot_df = predictions.loc[(predictions['timeStamp']>=min_date) & (predictions['timeStamp']<=max_date), ]
plt.figure(figsize=(15, 5))
plt.plot(plot_df['timeStamp'], plot_df['demand'], linewidth=3, label='Actual')
for n in [1,3,5]:
plt.plot(plot_df['timeStamp'], plot_df['pred_t+'+str(n)], linewidth=3/n, label='t+'+str(n))
plt.xticks(rotation=90)
plt.title('Actual demand vs forecast for period ' + min_date + ' to ' + max_date)
plt.legend()
plt.show()
plt.figure(figsize=(15, 5))
for n in range(1,7,1):
plt.plot(plot_df['timeStamp'], plot_df['abs_pct_error_t+'+str(n)], linewidth=6/n, label='t+'+str(n))
plt.xticks(rotation=90)
plt.title('Absolute percentage error for period ' + min_date + ' to ' + max_date)
plt.legend()
plt.show()
###Output
_____no_output_____
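###Markdown
The error growth can also be summarized numerically. The cell below averages the `abs_pct_error_t+n` columns computed at the top of the notebook, giving a mean absolute percentage error per forecast horizon:
###Code
# Mean absolute percentage error (MAPE) by forecast horizon t+1 ... t+6
mape_by_horizon = pd.Series(
    {'t+' + str(n): predictions['abs_pct_error_t+' + str(n)].mean() for n in range(1, 7)}
)
print(mape_by_horizon)
###Output
_____no_output_____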
###Markdown
Notice how the forecast error gets progressively worse the greater the time difference between the forecasted period and the time the forecast was produced. This is due to the recursive method used to generate the forecasts causing the accumulation of errors. Inspect forecast errors over entire test dataset This should reveal whether forecast error increases as time progresses. This could indicate that the model needs to be retrained more frequently so that the model can learn from more recent patterns in energy demand.
###Code
plt.figure(figsize=(15, 5))
plt.plot(predictions['timeStamp'], predictions['error_t+1'], 'ro', markersize=1)
plt.title('Forecast errors over entire test dataset')
plt.xticks(rotation=45)
plt.show()
###Output
_____no_output_____
###Markdown
Now we can check if there are particular periods of time on which the model performs poorly:
###Code
plt.figure(figsize=(15, 5))
plt.plot(predictions[['hour', 'error_t+1']].groupby('hour').mean())
plt.title('Average errors by hour')
plt.show()
plt.figure(figsize=(15, 5))
plt.plot(predictions[['dayofweek', 'error_t+1']].groupby('dayofweek').mean())
plt.title('Average errors by day of week')
plt.show()
plt.figure(figsize=(15, 5))
plt.plot(predictions[['month', 'error_t+1']].groupby('month').mean())
plt.title('Average errors by month')
plt.show()
###Output
_____no_output_____
Data Pipeline/03 - Post Dedupe.ipynb | ###Markdown
SETUP and Connections
###Code
#pip install dj_database_url
#pip install psycopg2-binary
#pip install unidecode
import csv
import os
import zipfile
import dj_database_url
import psycopg2
import psycopg2.extras
import unidecode
import requests
import re
conn = psycopg2.connect(database="campaign-finance",
user="data",
password="data",
host="postgresdb",
port="5432")
c = conn.cursor()
###Output
_____no_output_____
###Markdown
Apply Dedupe to transactions
###Code
print("updates canon account numbers on transactions where matches occured")
c.execute("UPDATE transactions "
" set canon_account_id = entity_map.canon_id"
" from entity_map "
"where transactions.original_account_id = entity_map.original_id")
print("Done.")
conn.commit()
print("update canon account ids for those without a match")
c.execute("update transactions "
" set canon_account_id = original_account_id "
" where canon_account_id is null " )
print("Done.")
conn.commit()
print("set canon id flag on accounts table")
c.execute("update accounts "
" set is_canon = 1 "
" where account_id in (select distinct canon_account_id from transactions) " )
print("Done.")
conn.commit()
print("set canon id flag to zero on accounts not canon")
c.execute("update accounts "
" set is_canon = 0 "
" where is_canon is null " )
print("Done.")
conn.commit()
conn.commit()
###Output
_____no_output_____
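###Markdown
As an optional sanity check, the cell below counts how many transactions were actually remapped by the dedupe step, i.e. where the canon account now differs from the original one. It only reads the columns updated above:
###Code
# Optional verification: how many transactions were remapped to a different canon account?
c.execute("SELECT count(*) FROM transactions "
          "WHERE canon_account_id <> original_account_id")
print("Transactions remapped by dedupe:", c.fetchone()[0])
###Output
_____no_output_____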
###Markdown
Some committees don't have good IDs; use the committee name instead
###Code
print("fixing bad committee IDs")
c.execute("UPDATE committees "
" set sboe_committee_id = name"
" where sboe_committee_id = '---' " )
print("Done.")
conn.commit()
print("updates canon committee id on transactions ")
c.execute("UPDATE transactions "
" set canon_committee_sboe_id = original_committee_sboe_id"
)
print("Done.")
conn.commit()
###Output
_____no_output_____
###Markdown
User Friendly Views of contributions and expenditures
###Code
print("Creating Contributions View")
c.execute("CREATE VIEW contributions AS "
"(SELECT transactions.transaction_id AS trans_id, "
" transactions.original_committee_sboe_id AS original_receiver_id, "
" transactions.original_account_id AS original_source_id, "
" transactions.transaction_type, "
" transactions.transaction_category, "
" transactions.date_occured, "
" transactions.amount, "
" transactions.report_name, "
" transactions.account_code, "
" transactions.form_of_payment, "
" transactions.purpose, "
" transactions.candidate_referendum_name AS candidate_refereendum_name, "
" transactions.declaration, "
" transactions.canon_account_id AS canon_source_id, "
" transactions.canon_committee_sboe_id AS canon_receiver_id "
"FROM transactions "
" WHERE transactions.transaction_category = 'C' "
")")
conn.commit()
print("Done.")
print("Creating Expenses View")
c.execute("CREATE VIEW expenses AS "
"SELECT transactions.transaction_id AS trans_id, "
" transactions.original_committee_sboe_id AS orginal_source_id, "
" transactions.original_account_id AS original_receiver_id, "
" transactions.transaction_type, "
" transactions.transaction_category, "
" transactions.date_occured, "
" transactions.amount, "
" transactions.report_name, "
" transactions.account_code, "
" transactions.form_of_payment, "
" transactions.purpose, "
" transactions.candidate_referendum_name AS candidate_refereendum_name, "
" transactions.declaration, "
" transactions.canon_committee_sboe_id AS canon_source_id, "
" transactions.canon_account_id AS canon_receiver_id "
"FROM transactions "
"WHERE transactions.transaction_category = 'E' ")
conn.commit()
print("Done.")
print("Creating Contributions with Names View")
c.execute("CREATE VIEW contributions_with_names AS "
"SELECT contributions.trans_id AS transaction_id, "
" accounts.account_id AS source_id, "
" accounts.name AS source_name, "
" committees.comm_id AS receiver_id, "
" committees.name AS receiver_name, "
" contributions.transaction_type, "
" contributions.transaction_category, "
" contributions.date_occured, "
" contributions.amount "
"FROM contributions "
" INNER JOIN committees ON contributions.canon_receiver_id = committees.sboe_committee_id "
" INNER JOIN accounts ON contributions.canon_source_id = accounts.account_id ")
conn.commit()
print("Done.")
print("Creating Expenses with Names View")
c.execute("CREATE VIEW expenses_with_names AS "
"SELECT expenses.trans_id AS transaction_id, "
" committees.comm_id AS source_id, "
" committees.name AS source_name, "
" accounts.account_id AS receiver_id, "
" accounts.name AS receiver_name, "
" expenses.transaction_type, "
" expenses.transaction_category, "
" expenses.date_occured, "
" expenses.amount "
"FROM expenses "
" INNER JOIN committees ON expenses.canon_source_id = committees.sboe_committee_id "
" INNER JOIN accounts ON expenses.canon_receiver_id = accounts.account_id ")
conn.commit()
print("Done.")
print("Additional Info regarding accounts - is vendor")
c.execute("UPDATE accounts "
" SET is_vendor = 1 WHERE account_id in ( "
" SELECT distinct canon_receiver_id "
" FROM public.expenses )")
conn.commit()
print("Done.")
print("Additional Info regarding accounts - is donor")
c.execute("UPDATE accounts "
" SET is_donor = 1 WHERE account_id in ( "
" SELECT distinct canon_source_id "
" FROM public.contributions )")
conn.commit()
print("Done.")
###Output
_____no_output_____
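###Markdown
With the views and flags in place, a simple aggregate query can confirm they work end to end. The example below is illustrative only: it lists the ten largest donors by total contribution amount using the contributions_with_names view created above:
###Code
# Example query against the contributions_with_names view: top 10 donors by total amount
c.execute("SELECT source_name, sum(amount) AS total_amount "
          "FROM contributions_with_names "
          "GROUP BY source_name "
          "ORDER BY total_amount DESC "
          "LIMIT 10")
for row in c.fetchall():
    print(row)
###Output
_____no_output_____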
###Markdown
Process and clean additional Committee Data
###Code
print('creating committee list table...')
c.execute("CREATE TABLE public.committee_list "
"(id SERIAL PRIMARY KEY, "
" sboeid_url VARCHAR(1024), "
" sboeid VARCHAR(1024), "
" status_url VARCHAR(1024), "
" status VARCHAR(1024), "
" candidatename_parententityname_url VARCHAR(1024), "
" candidatename_parententityname VARCHAR(1024), "
" committeename_url VARCHAR(1024), "
" committeename VARCHAR(1024)) ")
print("Done.")
directory = os.fsencode("./data/committee_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY committee_list "
"(committeename_url, committeename, "
" sboeid_url, sboeid, "
" status_url, status, "
" candidatename_parententityname_url, candidatename_parententityname) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
print('creating committee doc list table...')
c.execute("CREATE TABLE public.committee_doc_list "
"(doc_id SERIAL PRIMARY KEY, "
" committee_name VARCHAR(200), "
" year VARCHAR(4), "
" sboe_committee_id VARCHAR(200), "
" doctype VARCHAR(200), "
" docname VARCHAR(200), "
" receivedimage VARCHAR(50), "
" startdate VARCHAR(50), "
" enddate VARCHAR(50), "
" image VARCHAR(200), "
" image_url VARCHAR(2000), "
" data_text VARCHAR(200), "
" data_url VARCHAR(2000))")
print("Done.")
directory = os.fsencode("./data/committee_doc_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
print(full_filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY committee_doc_list "
"(committee_name, year, "
" doctype, docname, "
" receivedimage, startdate, enddate, "
" image_url, image, "
" data_text, data_url) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
print('fixing committee ids ...')
c.execute("update committee_doc_list "
"set committee_name = split_part(committee_name, '[', 1), "
" sboe_committee_id = substring(split_part(committee_name, '[', 2), 1, 16) "
)
print("Done.")
###Output
_____no_output_____
###Markdown
Begin matching committees to candidates
###Code
print('creating active candidate committees list table...')
c.execute("CREATE TABLE public.active_candidate_committees "
"(id SERIAL PRIMARY KEY, "
" sboe_id VARCHAR(200), "
" current_status VARCHAR(200), "
" committee_name VARCHAR(200), "
" committee_type VARCHAR(200), "
" committee_street_1 VARCHAR(1024), "
" committee_street_2 VARCHAR(1024), "
" committee_city VARCHAR(200), "
" committee_state VARCHAR(50), "
" committee_full_zip VARCHAR(50), "
" candidate_first_name VARCHAR(200), "
" candidate_middle_name VARCHAR(200), "
" candidate_last_name VARCHAR(200), "
" treasurer_first_name VARCHAR(200), "
" treasurer_middle_name VARCHAR(200), "
" treasurer_last_name VARCHAR(200), "
" treasurer_email VARCHAR(200), "
" asst_treasurer_first_name VARCHAR(200), "
" asst_treasurer_middle_name VARCHAR(200), "
" asst_treasurer_last_name VARCHAR(200), "
" asst_treasurer_email VARCHAR(200), "
" treasurer_street_1 VARCHAR(200), "
" treasurer_street_2 VARCHAR(200), "
" treasurer_city VARCHAR(200), "
" treasurer_state VARCHAR(200), "
" treasurer_full_zip VARCHAR(200), "
" party VARCHAR(200), "
" office VARCHAR(200), "
" juris VARCHAR(200))")
print("Done.")
conn.commit()
directory = os.fsencode("./data/active_committee_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
print(full_filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY active_candidate_committees "
"( sboe_id, "
"current_status, "
"committee_name, "
"committee_type, "
"committee_street_1, "
"committee_street_2, "
"committee_city, "
"committee_state, "
"committee_full_zip, "
"candidate_first_name, "
"candidate_middle_name, "
"candidate_last_name, "
"treasurer_first_name, "
"treasurer_middle_name, "
"treasurer_last_name, "
"treasurer_email, "
"asst_treasurer_first_name, "
"asst_treasurer_middle_name, "
"asst_treasurer_last_name, "
"asst_treasurer_email, "
"treasurer_street_1, "
"treasurer_street_2, "
"treasurer_city, "
"treasurer_state, "
"treasurer_full_zip, "
"party, "
"office, "
"juris) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
###Output
_____no_output_____
###Markdown
Update the committee table with info from active candidate committees print("applying active candiate info to committees - jurisdiction")c.execute("UPDATE committees " " set juris = acc.juris " " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )print("Done.")conn.commit() print("applying active candiate info to committees - party")c.execute("UPDATE committees " " set party = acc.party " " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )print("Done.")conn.commit() print("applying active candiate info to committees - office")c.execute("UPDATE committees " " set office = acc.office " " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )print("Done.")conn.commit()
###Code
print("applying active candiate info to committees - candidate_id")
c.execute("UPDATE committees "
" set candidate_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candiate info to committees - treasurer_id")
c.execute("UPDATE committees "
" set treasurer_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candiate info to committees - asst_treasurer_id")
c.execute("UPDATE committees "
" set asst_treasurer_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
###Output
_____no_output_____
###Markdown
Pivot the active candidate committees to committee_persons
###Code
conn.commit()
print('creating active committee person list table...')
c.execute("CREATE TABLE public.committee_person "
"(id SERIAL PRIMARY KEY, "
" first_name VARCHAR(200), "
" middle_name VARCHAR(200), "
" last_name VARCHAR(200), "
" street_1 VARCHAR(200), "
" street_2 VARCHAR(1024), "
" city VARCHAR(1024), "
" state VARCHAR(200), "
" full_zip VARCHAR(50), "
" email VARCHAR(50), "
" role INT, "
" committee_id VARCHAR(50))")
conn.commit()
print("Done.")
print("applying active candiate info to committee person table - treasurer")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT treasurer_first_name, treasurer_middle_name, treasurer_last_name, "
" treasurer_street_1, treasurer_street_2, treasurer_city, treasurer_state, "
" treasurer_full_zip, treasurer_email, '2', sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print("applying active candiate info to committee person table - candidate")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT candidate_first_name, candidate_middle_name, candidate_last_name, '', '', '', '', '', '', 1, sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print("applying active candiate info to committee person table - asst treasurer")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT asst_treasurer_first_name, asst_treasurer_middle_name, asst_treasurer_last_name, '', '', '', '', '', '', 3, sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print('creating candidates view...')
c.execute("CREATE VIEW candidates AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM committee_person where role = 1")
conn.commit()
print("Done.")
print('creating treasurers view...')
c.execute("CREATE VIEW treasurers AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM public.committee_person where role = 2")
conn.commit()
print("Done.")
print('creating asst_treasurers view...')
c.execute("CREATE VIEW asst_treasurers AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM public.committee_person where role = 3")
conn.commit()
print("Done.")
###Output
_____no_output_____
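###Markdown
As a final illustrative check, the candidates view can be joined back to the committees table (both created above) to list candidate names next to their committee names:
###Code
# Illustrative join: candidate names with the name of their committee
c.execute("SELECT candidates.first_name, candidates.last_name, committees.name "
          "FROM candidates "
          "JOIN committees ON candidates.committee_id = committees.sboe_committee_id "
          "LIMIT 10")
for row in c.fetchall():
    print(row)
###Output
_____no_output_____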
###Markdown
SETUP and Connections
###Code
%pip install dj_database_url
import csv
import os
import zipfile
import dj_database_url
import psycopg2
import psycopg2.extras
import unidecode
import requests
import re
conn = psycopg2.connect(database="campaign-finance",
user="postgres",
password="",
host="172.16.238.13",
port="5432")
c = conn.cursor()
###Output
_____no_output_____
###Markdown
Apply Dedupe to transactions
###Code
print("updates canon account numbers on transactions where matches occured")
c.execute("UPDATE transactions "
" set canon_account_id = entity_map.canon_id"
" from entity_map "
"where transactions.original_account_id = entity_map.original_id")
print("Done.")
conn.commit()
print("update canon account ids for those without a match")
c.execute("update transactions "
" set canon_account_id = original_account_id "
" where canon_account_id is null " )
print("Done.")
conn.commit()
conn.commit()
###Output
_____no_output_____
###Markdown
Some committees don't have good IDs use committee name instead
###Code
print("fixing bad committee IDs")
c.execute("UPDATE committees "
" set sboe_committee_id = name"
" where sboe_committee_id = '---' " )
print("Done.")
conn.commit()
print("updates canon committee id on transactions ")
c.execute("UPDATE transactions "
" set canon_committee_sboe_id = original_committee_sboe_id"
)
print("Done.")
conn.commit()
###Output
updates canon committee id on transactions
Done.
###Markdown
User Friendly Views of contributions and expenditures
###Code
print("Creating Contributions View")
c.execute("CREATE VIEW contributions AS "
"(SELECT transactions.transaction_id AS trans_id, "
" transactions.original_committee_sboe_id AS original_receiver_id, "
" transactions.original_account_id AS original_source_id, "
" transactions.transaction_type, "
" transactions.transaction_category, "
" transactions.date_occured, "
" transactions.amount, "
" transactions.report_name, "
" transactions.account_code, "
" transactions.form_of_payment, "
" transactions.purpose, "
" transactions.candidate_referendum_name AS candidate_refereendum_name, "
" transactions.declaration, "
" transactions.canon_account_id AS canon_source_id, "
" transactions.canon_committee_sboe_id AS canon_receiver_id "
"FROM transactions "
" WHERE transactions.transaction_category = 'C' "
")")
conn.commit()
print("Done.")
print("Creating Expenses View")
c.execute("CREATE VIEW expenses AS "
"SELECT transactions.transaction_id AS trans_id, "
" transactions.original_committee_sboe_id AS orginal_source_id, "
" transactions.original_account_id AS original_receiver_id, "
" transactions.transaction_type, "
" transactions.transaction_category, "
" transactions.date_occured, "
" transactions.amount, "
" transactions.report_name, "
" transactions.account_code, "
" transactions.form_of_payment, "
" transactions.purpose, "
" transactions.candidate_referendum_name AS candidate_refereendum_name, "
" transactions.declaration, "
" transactions.canon_committee_sboe_id AS canon_source_id, "
" transactions.canon_account_id AS canon_receiver_id "
"FROM transactions "
"WHERE transactions.transaction_category = 'E' ")
conn.commit()
print("Done.")
print("Creating Contributions with Names View")
c.execute("CREATE VIEW contributions_with_names AS "
"SELECT contributions.trans_id AS transaction_id, "
" accounts.account_id AS source_id, "
" accounts.name AS source_name, "
" committees.comm_id AS receiver_id, "
" committees.name AS receiver_name, "
" contributions.transaction_type, "
" contributions.transaction_category, "
" contributions.date_occured, "
" contributions.amount "
"FROM contributions "
" INNER JOIN committees ON contributions.canon_receiver_id = committees.sboe_committee_id "
" INNER JOIN accounts ON contributions.canon_source_id = accounts.account_id ")
conn.commit()
print("Done.")
print("Creating Expenses with Names View")
c.execute("CREATE VIEW expenses_with_names AS "
"SELECT expenses.trans_id AS transaction_id, "
" committees.comm_id AS source_id, "
" committees.name AS source_name, "
" accounts.account_id AS receiver_id, "
" accounts.name AS receiver_name, "
" expenses.transaction_type, "
" expenses.transaction_category, "
" expenses.date_occured, "
" expenses.amount "
"FROM expenses "
" INNER JOIN committees ON expenses.canon_source_id = committees.sboe_committee_id "
" INNER JOIN accounts ON expenses.canon_receiver_id = accounts.account_id ")
conn.commit()
print("Done.")
print("Additional Info regarding accounts - is vendor")
c.execute("UPDATE accounts "
" SET is_vendor = 1 WHERE account_id in ( "
" SELECT distinct canon_receiver_id "
" FROM public.expenses )")
conn.commit()
print("Done.")
print("Additional Info regarding accounts - is donor")
c.execute("UPDATE accounts "
" SET is_donor = 1 WHERE account_id in ( "
" SELECT distinct canon_source_id "
" FROM public.contributions )")
conn.commit()
print("Done.")
###Output
Additional Info regarding accounts - is donor
Done.
###Markdown
Process and clean additional Committee Data
###Code
print('creating committee list table...')
c.execute("CREATE TABLE public.committee_list "
"(id SERIAL PRIMARY KEY, "
" sboeid_url VARCHAR(1024), "
" sboeid VARCHAR(1024), "
" status_url VARCHAR(1024), "
" status VARCHAR(1024), "
" candidatename_parententityname_url VARCHAR(1024), "
" candidatename_parententityname VARCHAR(1024), "
" committeename_url VARCHAR(1024), "
" committeename VARCHAR(1024)) ")
print("Done.")
directory = os.fsencode("./data/committee_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY committee_list "
"(committeename_url, committeename, "
" sboeid_url, sboeid, "
" status_url, status, "
" candidatename_parententityname_url, candidatename_parententityname) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
print('creating committee doc list table...')
c.execute("CREATE TABLE public.committee_doc_list "
"(doc_id SERIAL PRIMARY KEY, "
" committee_name VARCHAR(200), "
" year VARCHAR(4), "
" sboe_committee_id VARCHAR(200), "
" doctype VARCHAR(200), "
" docname VARCHAR(200), "
" receivedimage VARCHAR(50), "
" startdate VARCHAR(50), "
" enddate VARCHAR(50), "
" image VARCHAR(200), "
" image_url VARCHAR(2000), "
" data_text VARCHAR(200), "
" data_url VARCHAR(2000))")
print("Done.")
directory = os.fsencode("./data/committee_doc_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
print(full_filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY committee_doc_list "
"(committee_name, year, "
" doctype, docname, "
" receivedimage, startdate, enddate, "
" image_url, image, "
" data_text, data_url) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
print('fixing committee ids ...')
c.execute("update committee_doc_list "
"set committee_name = split_part(committee_name, '[', 1), "
" sboe_committee_id = substring(split_part(committee_name, '[', 2), 1, 16) "
)
print("Done.")
print('creating active candidate committees list table...')
c.execute("CREATE TABLE public.active_candidate_committees "
"(id SERIAL PRIMARY KEY, "
" sboe_id VARCHAR(200), "
" current_status VARCHAR(200), "
" committee_name VARCHAR(200), "
" committee_type VARCHAR(200), "
" committee_street_1 VARCHAR(1024), "
" committee_street_2 VARCHAR(1024), "
" committee_city VARCHAR(200), "
" committee_state VARCHAR(50), "
" committee_full_zip VARCHAR(50), "
" candidate_first_name VARCHAR(200), "
" candidate_middle_name VARCHAR(200), "
" candidate_last_name VARCHAR(200), "
" treasurer_first_name VARCHAR(200), "
" treasurer_middle_name VARCHAR(200), "
" treasurer_last_name VARCHAR(200), "
" treasurer_email VARCHAR(200), "
" asst_treasurer_first_name VARCHAR(200), "
" asst_treasurer_middle_name VARCHAR(200), "
" asst_treasurer_last_name VARCHAR(200), "
" asst_treasurer_email VARCHAR(200), "
" treasurer_street_1 VARCHAR(200), "
" treasurer_street_2 VARCHAR(200), "
" treasurer_city VARCHAR(200), "
" treasurer_state VARCHAR(200), "
" treasurer_full_zip VARCHAR(200), "
" party VARCHAR(200), "
" office VARCHAR(200), "
" juris VARCHAR(200))")
print("Done.")
conn.commit()
directory = os.fsencode("./data/active_committee_list")
for filename in os.listdir(directory):
full_filename = os.path.join(directory, filename)
print(full_filename)
with open(full_filename, 'rU') as csv_file:
c.copy_expert("COPY active_candidate_committees "
"( sboe_id, "
"current_status, "
"committee_name, "
"committee_type, "
"committee_street_1, "
"committee_street_2, "
"committee_city, "
"committee_state, "
"committee_full_zip, "
"candidate_first_name, "
"candidate_middle_name, "
"candidate_last_name, "
"treasurer_first_name, "
"treasurer_middle_name, "
"treasurer_last_name, "
"treasurer_email, "
"asst_treasurer_first_name, "
"asst_treasurer_middle_name, "
"asst_treasurer_last_name, "
"asst_treasurer_email, "
"treasurer_street_1, "
"treasurer_street_2, "
"treasurer_city, "
"treasurer_state, "
"treasurer_full_zip, "
"party, "
"office, "
"juris) "
"FROM STDIN CSV HEADER", csv_file)
conn.commit()
print("Done.")
###Output
b'./data/active_committee_list/active-candidate-committees-20200327.csv'
Done.
###Markdown
Update the committee table with info from active candidate committees
print("applying active candidate info to committees - jurisdiction")
c.execute("UPDATE committees "
          " set juris = acc.juris "
          " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candidate info to committees - party")
c.execute("UPDATE committees "
          " set party = acc.party "
          " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candidate info to committees - office")
c.execute("UPDATE committees "
          " set office = acc.office "
          " from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
###Code
print("applying active candiate info to committees - candidate_id")
c.execute("UPDATE committees "
" set candidate_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candiate info to committees - treasurer_id")
c.execute("UPDATE committees "
" set treasurer_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
print("applying active candiate info to committees - asst_treasurer_id")
c.execute("UPDATE committees "
" set asst_treasurer_id = acc.id "
" from active_candidate_committees acc where committees.sboe_committee_id = acc.sboe_id " )
print("Done.")
conn.commit()
###Output
applying active candidate info to committees - asst_treasurer_id
Done.
###Markdown
Pivot the active candidate committees to committee_persons
###Code
conn.commit()
print('creating active committee person list table...')
c.execute("CREATE TABLE public.committee_person "
"(id SERIAL PRIMARY KEY, "
" first_name VARCHAR(200), "
" middle_name VARCHAR(200), "
" last_name VARCHAR(200), "
" street_1 VARCHAR(200), "
" street_2 VARCHAR(1024), "
" city VARCHAR(1024), "
" state VARCHAR(200), "
" full_zip VARCHAR(50), "
" email VARCHAR(50), "
" role INT, "
" committee_id VARCHAR(50))")
conn.commit()
print("Done.")
print("applying active candiate info to committee person table - treasurer")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT treasurer_first_name, treasurer_middle_name, treasurer_last_name, "
" treasurer_street_1, treasurer_street_2, treasurer_city, treasurer_state, "
" treasurer_full_zip, treasurer_email, '2', sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print("applying active candiate info to committee person table - candidate")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT candidate_first_name, candidate_middle_name, candidate_last_name, '', '', '', '', '', '', 1, sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print("applying active candiate info to committee person table - asst treasurer")
c.execute("INSERT INTO public.committee_person "
" (first_name, middle_name, last_name, street_1, street_2, city, state, full_zip, email, role, committee_id) "
" SELECT asst_treasurer_first_name, asst_treasurer_middle_name, asst_treasurer_last_name, '', '', '', '', '', '', 3, sboe_id "
"from active_candidate_committees")
print("Done.")
conn.commit()
print('creating candidates view...')
c.execute("CREATE VIEW candidates AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM committee_person where role = 1")
conn.commit()
print("Done.")
print('creating treasurers view...')
c.execute("CREATE VIEW treasurers AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM public.committee_person where role = 2")
conn.commit()
print("Done.")
print('creating asst_treasurers view...')
c.execute("CREATE VIEW asst_treasurers AS "
"SELECT id, first_name, middle_name, last_name, "
" street_1, street_2, city, state, full_zip, "
" email, role, committee_id "
"FROM public.committee_person where role = 3")
conn.commit()
print("Done.")
###Output
creating asst_treasurers view...
Done.
|
notebooks/21.03_parameter_drift.ipynb | ###Markdown
Parameter drift: Identifiability. Purpose: If the mathematical model is not correct, or too little data is available, this may lead to parameter drift, i.e. the parameters in the mathematical model change depending on how the fitted data has been sampled. Methodology: * Sample force data from a higher order model. * Fit a lower order model to a random sample of this data. Setup
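As a minimal, purely illustrative sketch of the effect (all names and numbers below are assumptions, not part of the original study): generate data from a higher order (cubic) model, fit a lower order (linear) model to random subsets, and observe how the fitted slope drifts with the sample.
###Code
# Illustrative example of parameter drift (assumed toy model, independent of the setup below)
import numpy as np
rng = np.random.default_rng(0)
x = np.linspace(-1, 1, 200)
y = 1.0 * x + 2.0 * x**3 + rng.normal(scale=0.05, size=x.size)  # "true" higher order model
slopes = []
for _ in range(5):
    idx = rng.choice(x.size, size=50, replace=False)        # random sample of the data
    slope, _intercept = np.polyfit(x[idx], y[idx], deg=1)   # fitted lower order model
    slopes.append(slope)
print(slopes)  # the identified slope parameter changes with the sampled data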
###Code
# %load imports.py
## Local packages:
%matplotlib inline
%load_ext autoreload
%autoreload 2
%config Completer.use_jedi = False ## (To fix autocomplete)
## External packages:
import pandas as pd
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.set_option("display.max_columns", None)
import numpy as np
np.set_printoptions(linewidth=150)
import numpy as np
import os
import matplotlib.pyplot as plt
#if os.name == 'nt':
# plt.style.use('presentation.mplstyle') # Windows
import plotly.express as px
import plotly.graph_objects as go
import seaborn as sns
import sympy as sp
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame,
Particle, Point)
from sympy.physics.vector.printing import vpprint, vlatex
from IPython.display import display, Math, Latex
from src.substitute_dynamic_symbols import run, lambdify
import pyro
import sklearn
import pykalman
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import statsmodels.api as sm
from scipy.integrate import solve_ivp
## Local packages:
from src.data import mdl
from src.symbols import *
from src.parameters import *
import src.symbols as symbols
from src import prime_system
from src.models.regression import ForceRegression, results_summary_to_dataframe
from src.models.diff_eq_to_matrix import DiffEqToMatrix
from src.visualization.regression import show_pred, show_pred_captive
from src.visualization.plot import track_plot,captive_plot
## Load models:
# (Uncomment these for faster loading):
import src.models.vmm_abkowitz as vmm
import src.models.vmm_martin as vmm_simpler
from src.models.vmm import ModelSimulator
from src.data.wpcc import ship_parameters, df_parameters, ps, ship_parameters_prime, ps_ship, scale_factor
#format the book
import src.visualization.book_format as book_format
book_format.set_style()
###Output
_____no_output_____
###Markdown
Load model
###Code
model = ModelSimulator.load('../models/model_VCT_abkowitz.pkl')
u0_=2
angle_deg = 20
result = model.zigzag(u0=u0_, angle=angle_deg)
result.track_plot();
result.plot(compare=False);
df_result = result.result.copy()
df_result_prime = model.prime_system.prime(df_result, U=df_result['U'])
def variate(df, variation_keys, N=10):
variations = []
for variation_key in variation_keys:
variation = np.linspace(df[variation_key].min(),df[variation_key].max(), N)
variations.append(variation)
matrix = np.meshgrid(*variations)
df_variation = pd.DataFrame()
for variation_key,values in zip(variation_keys,matrix):
df_variation[variation_key] = values.flatten()
return df_variation
variations = {
'Rudder angle' : ['delta'],
'Drift angle' : ['v'],
'Circle' : ['r'],
'resistance' : ['u'],
"Rudder and drift angle" : ['delta','v'],
"Circle + Drift" : ['r','v'],
}
N = 10
V_ = u0_
inputs_base = {}
inputs_base['u'] = df_result_prime['u'].mean()
inputs_base['v'] = 0
inputs_base['r'] = 0
inputs_base['delta'] = 0
df_inputs = pd.DataFrame()
for test_type, variation_keys in variations.items():
inputs = variate(df=df_result_prime, variation_keys=variation_keys, N=N)
for column in list(set(inputs_base.keys())-set(variation_keys)):
inputs[column]=inputs_base[column]
inputs['test type'] = test_type
df_inputs = df_inputs.append(inputs, ignore_index=True)
df_outputs = model.forces(df_inputs)
df_captive = pd.concat([df_inputs,df_outputs], axis=1)
captive_plot(df_captive=df_captive, suffixes=[],
legends = ['VCT'], styles=['.', '-'])
###Output
_____no_output_____
###Markdown
Fit a lower order model to this captive dataset: Regression
###Code
reg = ForceRegression(vmm=vmm, data=df_captive)
display(reg.show_pred_X())
display(reg.show_pred_Y())
display(reg.show_pred_N())
parameters = pd.DataFrame()
parameters['prime'] = model.parameters
model_vct = reg.create_model(df_parameters=parameters, ship_parameters=model.ship_parameters,
ps=model.prime_system, control_keys=['delta'])
outputs = model_vct.forces(inputs = df_inputs)
df_captive_all = pd.merge(left=df_captive, right=outputs,
how='left',
left_index=True,
right_index=True,
suffixes = ('','_model'),
)
captive_plot(df_captive=df_captive_all, suffixes=['_model'],
legends = ['VCT', 'model'], styles=['.', '-'])
#result_vct = model_vct.zigzag(u0=u0_, angle=angle_deg)
result_vct = model_vct.simulate(df_result.loc[0:6])
result_vct.track_plot(compare=True);
result_vct.plot(compare=True);
variation_keys = ['u','v','r','delta']
df_inputs = variate(df=df_result_prime, variation_keys=variation_keys, N=8)
df_outputs = model.forces(df_inputs)
df_captive_all = pd.concat([df_inputs,df_outputs], axis=1)
len(df_captive_all)
reg_all = ForceRegression(vmm=model, data=df_captive_all)
display(reg_all.show_pred_X())
display(reg_all.show_pred_Y())
display(reg_all.show_pred_N())
model_all = reg_all.create_model(df_parameters=parameters, ship_parameters=model.ship_parameters,
ps=model.prime_system, control_keys=['delta'])
#result_all = model_all.simulate(df_result)
result_all = model.simulate(df_result)
result_all.plot_compare();
df_compare_parameters =pd.DataFrame()
df_compare_parameters['model'] = model.parameters
df_compare_parameters['model captive all'] = model_all.parameters
df_compare_parameters['model captive 1'] = model_vct.parameters
df_compare_parameters['model_abs'] = df_compare_parameters['model'].abs()
df_compare_parameters.sort_values(by='model_abs', ascending=False, inplace=True)
df_compare_parameters.drop(columns=['model_abs'], inplace=True)
df_compare_parameters = df_compare_parameters.divide(df_compare_parameters['model'], axis=0)
df_compare_parameters['dof'] = pd.Series(df_compare_parameters.index).apply(lambda x:x[0]).values
for dof, df_ in df_compare_parameters.groupby(by='dof', sort=False):
fig,ax=plt.subplots()
fig.set_size_inches(10,2)
df_.plot(kind='bar', ax=ax)
fig.suptitle(dof)
df_captive = df_VCT_prime.copy()
df_captive['test type'] = df_VCT['test type']
N = len(df_captive)
N_sample = N - 5
df_captive_sample = df_captive.sample(n=N_sample, random_state=42)
N
model_names = [f'{i}' for i in range(20)]
df_captive_all = df_captive.copy()
np.random.seed(42)
models = {}
for model_name in model_names:
df_captive_sample = df_captive.sample(n=N_sample)
reg = ForceRegression(vmm=vmm, data=df_captive_sample)
model_reg = reg.create_model(df_parameters=df_parameters, ship_parameters=ship_parameters, ps=ps)
models[model_name] = model_vct = reg.create_model(df_parameters=df_parameters, ship_parameters=ship_parameters, ps=ps)
outputs = model_reg.forces(inputs = df_captive)
df_captive_all = pd.merge(left=df_captive_all, right=outputs,
how='left',
left_index=True,
right_index=True,
suffixes = ('',f'_{model_name}'),
)
suffixes = [f'_{model_name}' for model_name in model_names]
styles = ['r.'] + ['b-' for model_name in model_names]
legends = ['VCT'] + model_names
captive_plot(df_captive=df_captive_all, suffixes=suffixes,
legends = legends, styles=styles, alpha=0.2, lw=2, add_legend=False)
df_results = pd.DataFrame()
result = model_vct.zigzag(u0=2, angle=30)
for model_name, model in models.items():
result_ = model.simulate(result.result)
df_ = result_.result
df_['t'] = df_.index
df_['model_name'] = model_name
df_results = df_results.append(df_, ignore_index=True)
from src.visualization.plot import track_plot
fig,ax=plt.subplots()
fig.set_size_inches(10,10)
for model_name, df_ in df_results.groupby(by='model_name'):
df_.plot(x='x0', y='y0', ax=ax, alpha=0.2, lw=3, style='b-')
result.result.plot(x='x0', y='y0', style='k-', zorder=10, ax=ax)
ax.set_xlabel('x0 [m]')
ax.set_ylabel('y0 [m]')
ax.set_aspect("equal")
ax.set_title("Track plot")
ax.get_legend().set_visible(False)
ax.grid(True)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-2:],['simulations','model test'])
fig,ax=plt.subplots()
fig.set_size_inches(14,3)
df_results['psi_deg'] = np.rad2deg(df_results['psi'])
df_results_ = result.result.copy()
df_results_['-delta_deg'] =-np.rad2deg(df_results_['delta'])
df_results_['psi_deg'] = np.rad2deg(df_results_['psi'])
for model_name, df_ in df_results.groupby(by='model_name'):
df_.plot(x='t', y='psi_deg', ax=ax, alpha=0.2, lw=3, style='b-')
df_results_.plot(y='psi_deg', ax=ax, style='k-', zorder=10)
df_results_.plot(y='-delta_deg', ax=ax, style='m-', zorder=10)
ax.set_xlabel('time [s]')
ax.set_ylabel('Heading $\psi$ [deg]')
ax.set_title("ZigZag30/30")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-3:],['alternative models','model','rudder angle'])
ax.grid(True)
ax.set_ylim(-60,60)
model_vct.parameters
###Output
_____no_output_____ |
concurrency/python3_solution.ipynb | ###Markdown
1114. Print in Order
###Code
from threading import Event
class Foo:
def __init__(self):
self.event_list = [Event(), Event()]
def first(self, printFirst: 'Callable[[], None]') -> None:
# printFirst() outputs "first". Do not change or remove this line.
printFirst()
self.event_list[0].set()
def second(self, printSecond: 'Callable[[], None]') -> None:
self.event_list[0].wait()
# printSecond() outputs "second". Do not change or remove this line.
printSecond()
self.event_list[1].set()
def third(self, printThird: 'Callable[[], None]') -> None:
self.event_list[1].wait()
# printThird() outputs "third". Do not change or remove this line.
printThird()
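# Assumed usage sketch (not part of the LeetCode template): the judge calls the three
# methods from three separate threads in arbitrary order, roughly equivalent to:
#
#   import threading
#   foo = Foo()
#   threads = [threading.Thread(target=foo.third, args=(lambda: print("third"),)),
#              threading.Thread(target=foo.first, args=(lambda: print("first"),)),
#              threading.Thread(target=foo.second, args=(lambda: print("second"),))]
#   for t in threads: t.start()
#   for t in threads: t.join()   # always prints "first", "second", "third" in order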
###Output
_____no_output_____
###Markdown
1115. Print FooBar Alternately
###Code
## Event solution
from threading import Event
class FooBar:
def __init__(self, n):
self.n = n
self.event_list = [Event(), Event()]
self.event_list[0].set()
def foo(self, printFoo: 'Callable[[], None]') -> None:
for i in range(self.n):
self.event_list[0].wait()
# printFoo() outputs "foo". Do not change or remove this line.
printFoo()
self.event_list[0].clear()
self.event_list[1].set()
def bar(self, printBar: 'Callable[[], None]') -> None:
for i in range(self.n):
self.event_list[1].wait()
# printBar() outputs "bar". Do not change or remove this line.
printBar()
self.event_list[1].clear()
self.event_list[0].set()
## Lock solution
from threading import Lock
class FooBar:
def __init__(self, n):
self.n = n
self.locks = [Lock(), Lock()]
self.locks[1].acquire()
def foo(self, printFoo: 'Callable[[], None]') -> None:
for i in range(self.n):
self.locks[0].acquire()
# printFoo() outputs "foo". Do not change or remove this line.
printFoo()
self.locks[1].release()
def bar(self, printBar: 'Callable[[], None]') -> None:
for i in range(self.n):
self.locks[1].acquire()
# printBar() outputs "bar". Do not change or remove this line.
printBar()
self.locks[0].release()
###Output
_____no_output_____
###Markdown
1116. Print Zero Even Odd
###Code
from threading import Lock
class ZeroEvenOdd:
def __init__(self, n):
self.n = n
self.locks = {"z": Lock(), "e": Lock(), "o": Lock()}
self.locks["o"].acquire()
self.locks["e"].acquire()
# printNumber(x) outputs "x", where x is an integer.
def zero(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n+1):
self.locks["z"].acquire()
printNumber(0)
if i % 2 == 0:
self.locks["e"].release()
else:
self.locks["o"].release()
def even(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(2, self.n+1, 2):
self.locks["e"].acquire()
printNumber(i)
self.locks["z"].release()
def odd(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n+1, 2):
self.locks["o"].acquire()
printNumber(i)
self.locks["z"].release()
###Output
_____no_output_____
###Markdown
1117. Building H2O
###Code
from threading import Lock
class H2O:
def __init__(self):
self.switch = True
self.locks = {"h1": Lock(), "h2": Lock(), "o": Lock()}
self.locks["h2"].acquire()
self.locks["o"].acquire()
def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
if self.switch:
self.locks["h1"].acquire()
else:
self.locks["h2"].acquire()
# releaseHydrogen() outputs "H". Do not change or remove this line.
releaseHydrogen()
if self.switch:
self.locks["h2"].release()
else:
self.locks["o"].release()
self.switch ^= True
def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
self.locks["o"].acquire()
# releaseOxygen() outputs "O". Do not change or remove this line.
releaseOxygen()
self.locks["h1"].release()
###Output
_____no_output_____
###Markdown
1195. Fizz Buzz Multithreaded
###Code
from threading import Semaphore
class FizzBuzz:
def __init__(self, n: int):
self.n = n
self.done = False
self.sems = {
"f": Semaphore(0),
"b": Semaphore(0),
"fb": Semaphore(0),
"num": Semaphore(1),
}
# printFizz() outputs "fizz"
def fizz(self, printFizz: 'Callable[[], None]') -> None:
while True:
self.sems["f"].acquire()
if self.done: break
printFizz()
self.sems["num"].release()
# printBuzz() outputs "buzz"
def buzz(self, printBuzz: 'Callable[[], None]') -> None:
while True:
self.sems["b"].acquire()
if self.done: break
printBuzz()
self.sems["num"].release()
# printFizzBuzz() outputs "fizzbuzz"
def fizzbuzz(self, printFizzBuzz: 'Callable[[], None]') -> None:
while True:
self.sems["fb"].acquire()
if self.done: break
printFizzBuzz()
self.sems["num"].release()
# printNumber(x) outputs "x", where x is an integer.
def number(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n+1):
self.sems["num"].acquire()
if i % 15 == 0:
self.sems["fb"].release()
elif i % 3 == 0:
self.sems["f"].release()
elif i % 5 == 0:
self.sems["b"].release()
else:
printNumber(i)
self.sems["num"].release()
self.sems["num"].acquire()
        # End of the loop: release the semaphores so the waiting threads can exit
self.done = True
self.sems["f"].release()
self.sems["b"].release()
self.sems["fb"].release()
###Output
_____no_output_____
###Markdown
1226. The Dining Philosophers
###Code
from threading import Semaphore
class DiningPhilosophers:
forks = [Semaphore(1) for _ in range(5)]
# call the functions directly to execute, for example, eat()
def wantsToEat(self,
philosopher: int,
pickLeftFork: 'Callable[[], None]',
pickRightFork: 'Callable[[], None]',
eat: 'Callable[[], None]',
putLeftFork: 'Callable[[], None]',
putRightFork: 'Callable[[], None]') -> None:
left = philosopher
right = philosopher - 1
if right % 2 == 0:
self.forks[right].acquire()
self.forks[left].acquire()
else:
self.forks[left].acquire()
self.forks[right].acquire()
pickRightFork()
pickLeftFork()
eat()
putLeftFork()
putRightFork()
if right % 2 == 0:
self.forks[left].release()
self.forks[right].release()
else:
self.forks[right].release()
self.forks[left].release()
###Output
_____no_output_____ |
notebooks/benchmark/crash_predict_rnn.ipynb | ###Markdown
RNN Model for crash prediction Developed by: bpben
###Code
import re
import csv
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import scipy.stats as ss
from glob import glob
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from scipy.stats import describe
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import sys
sys.path.append('/Users/B/Documents')
###Output
_____no_output_____
###Markdown
Data processing: The approach here is to create four time-lag features: (1) crashes in the past week, (2) crashes in the past month, (3) crashes in the past quarter (three months), and (4) average crashes per week up to the target week. All features except (4) are calculated to exclude one another: crashes in the past month do not include the past week's crashes, and crashes in the past quarter do not include the past month.
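A small, purely illustrative sketch (series values assumed) of how such mutually exclusive lag features can be computed for one segment's weekly crash counts:
###Code
# Illustrative only: exclusive time-lag features for a single segment's weekly crash counts
import pandas as pd
crashes = pd.Series([0, 1, 0, 2, 0, 0, 1, 0, 3, 0, 1, 0, 0], name='crash')
w = len(crashes)                            # index of the target week
pre_week = crashes.iloc[w-1]                # the last week only
pre_month = crashes.iloc[w-4:w-1].sum()     # the 3 weeks before that (excludes the past week)
pre_quarter = crashes.iloc[w-13:w-4].sum()  # the 9 weeks before that (excludes the past month)
avg_week = crashes.iloc[:w].mean()          # average crashes per week up to the target week
print(pre_week, pre_month, pre_quarter, avg_week)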
###Code
SEG_CHARS = ['AADT', 'SPEEDLIMIT', 'Struct_Cnd', 'Surface_Tp', 'F_F_Class']
# Read in data
data = pd.read_csv('../data/boston/processed/vz_predict_dataset.csv.gz',
compression='gzip', dtype={'segment_id':'str'})
data.sort_values(['segment_id', 'year','week'], inplace=True)
# get segments with non-zero crashes
data_nonzero = data.set_index('segment_id').loc[data.groupby('segment_id').crash.sum()>0]
data_nonzero.reset_index(inplace=True)
# scaler
scaler = MinMaxScaler(feature_range=(0, 1))
data_nonzero[SEG_CHARS] = scaler.fit_transform(data_nonzero[SEG_CHARS])
data_nonzero['target'] = (data['crash']>0).astype(int)
data_model = data_nonzero[['target']+SEG_CHARS].values
pct_train = .7
#train_segments = data_nonzero.segment_id.unique()[:int(len(data_nonzero.segment_id.unique())*pct_train)]
segment_ids = data_nonzero.segment_id.unique()
inds = np.arange(0.0,len(segment_ids)) / len(segment_ids) < pct_train
train_ids = segment_ids[inds]
test_ids = segment_ids[~inds]
# split into train and test sets
train_size = int(len(data_model) * 0.67)
test_size = len(data_model) - train_size
#train, test = data_model[0:train_size,:], data_model[train_size:len(data_model),:]
train = data_nonzero[data_nonzero.segment_id.isin(train_ids)][['target']+SEG_CHARS].values
test = data_nonzero[data_nonzero.segment_id.isin(test_ids)][['target']+SEG_CHARS].values
print(len(train), len(test))
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 1:]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
look_back = 12
trainX, trainY = create_dataset(train, look_back=look_back)
testX, testY = create_dataset(test, look_back=look_back)
# class_weight
neg, pos = data_nonzero.target.value_counts(normalize=True)
class_weight = {0: 1/neg, 1: 1/pos}
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], look_back, trainX.shape[2]))
testX = np.reshape(testX, (testX.shape[0], look_back, testX.shape[2]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=trainX.shape[1:]))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainY,
validation_data=(testX, testY),
epochs=50, batch_size=1000,
class_weight = class_weight,
verbose=2)
# make predictions
#trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
from sklearn.metrics import roc_auc_score
roc_auc_score(testY, testPredict)
a = testY.reshape(-1, 1) - testPredict
np.sum(testY - np.reshape(testPredict, (a.shape[0],)))
np.sum(testY - np.reshape(testPredict, (a.shape[0],)))
pd.Series(testPredict.flatten()).value_counts()
# make predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# invert predictions
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# calculate root mean squared error
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
#Parameters for model
#Model parameters
params = dict()
#cv parameters
cvp = dict()
cvp['pmetric'] = 'roc_auc'
cvp['iter'] = 5 #number of iterations
cvp['folds'] = 5 #folds for cv (default)
#LR parameters
mp = dict()
mp['LogisticRegression'] = dict()
mp['LogisticRegression']['penalty'] = ['l1','l2']
mp['LogisticRegression']['C'] = ss.beta(a=5,b=2) #beta distribution for selecting reg strength
#RF model parameters
mp['RandomForestClassifier'] = dict()
mp['RandomForestClassifier']['n_estimators'] = [2**8] #number of trees in the forest
mp['RandomForestClassifier']['max_features'] = ss.beta(a=5,b=2) #number of features at split
mp['RandomForestClassifier']['max_leaf_nodes'] = ss.nbinom(n=2,p=0.001,loc=100) #max number of leaves to create
# Features
features = [u'pre_week', u'pre_month', u'pre_quarter', 'avg_week', u'AADT', u'SPEEDLIMIT',
u'Struct_Cnd', u'Surface_Tp', u'F_F_Class']
#Initialize tuner
tune = Tuner(df)
#Base RF model
tune.tune('RF_base', 'RandomForestClassifier', features, cvp, mp['RandomForestClassifier'])
#Base LR model
tune.tune('LR_base', 'LogisticRegression', features, cvp, mp['LogisticRegression'])
#Display results
tune.grid_results
# Run test
test = Tester(df)
test.init_tuned(tune)
test.run_tuned('RF_base', cal=False)
###Output
Fitting RF_base model with 9 features
f1_score: 0.153846153846
brier_score: 0.0316728329575
###Markdown
Lift chart by "risk bin"The classifier problem is difficult because the classes are unbalanced (.05% have crashes at target week). More useful are the probabilities being produced by the model, which give some idea of risk.
###Code
def lift_chart(x_col, y_col, data, ax=None):
p = sns.barplot(x=x_col, y=y_col, data=data,
palette='Reds', ax = None, ci=None)
vals = p.get_yticks()
p.set_yticklabels(['{:3.0f}%'.format(i*100) for i in vals])
xvals = [x.get_text().split(',')[-1].strip(']') for x in p.get_xticklabels()]
xvals = ['{:3.0f}%'.format(float(x)*100) for x in xvals]
p.set_xticklabels(xvals)
p.set_axis_bgcolor('white')
p.set_xlabel('')
p.set_ylabel('')
p.set_title('Predicted probability vs actual percent')
return(p)
def density(data, score, ax=None):
p = sns.kdeplot(risk_df['risk_score'], ax=ax)
p.set_axis_bgcolor('white')
p.legend('')
p.set_xlabel('Predicted probability of crash')
p.set_title('KDE plot predictions')
return(p)
risk_scores = test.rundict['RF_base']['m_fit'].predict_proba(test.data.test_x[features])[:,1]
risk_df = pd.DataFrame({'risk_score':risk_scores, 'crash':test.data.test_y})
print(risk_df.risk_score.describe())
risk_df['categories'] = pd.cut(risk_df['risk_score'], bins=[-1, 0, .01, .02, .05, max(risk_scores)])
risk_mean = risk_df.groupby('categories')['crash'].count()
print(risk_mean)
fig, axes = plt.subplots(1, 2)
lift_chart('categories', 'crash', risk_df,
ax=axes[1])
density(risk_df, 'risk_score', ax=axes[0])
# output predictions
# predict on all segments
data_model['risk_score'] = test.rundict['RF_base']['m_fit'].predict_proba(data_model[features])[:,1]
data_model.to_csv('seg_with_risk_score.csv', index=False)
###Output
_____no_output_____
###Markdown
Check sensitivity to week: I predicted an arbitrary week as the target here, but I'd like to see whether things change significantly if I change that week. A good metric for this is the Brier score loss. It will be low throughout, as the classifier doesn't perform great, but it shouldn't vary by a huge amount.
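For reference, a tiny example (values assumed) of the Brier score, the mean squared difference between predicted probability and observed outcome:
###Code
# Illustrative only: Brier score of some made-up predictions (lower is better, 0 is perfect)
from sklearn.metrics import brier_score_loss
y_true = [0, 1, 0, 0, 1]
y_prob = [0.1, 0.8, 0.2, 0.05, 0.6]
print(brier_score_loss(y_true, y_prob))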
###Code
for w in [20, 30, 40, 50]:
print "week ", w
crash_lags = format_crash_data(data_nonzero.set_index(['segment_id','week']), 'crash', w)
data_model = crash_lags.merge(data_segs, left_on='segment_id', right_on='segment_id')
df = Indata(data_model, 'target')
# create train/test split
df.tr_te_split(.7)
test = Tester(df)
test.init_tuned(tune)
test.run_tuned('RF_base', cal=False)
    print('\n')
###Output
week 20
Train obs: 1360
Test obs: 618
Fitting RF_base model with 9 features
f1_score: 0.1
brier_score: 0.0292948065731
week 30
Train obs: 1374
Test obs: 604
Fitting RF_base model with 9 features
f1_score: 0.0
brier_score: 0.0393970488434
week 40
Train obs: 1396
Test obs: 582
Fitting RF_base model with 9 features
f1_score: 0.0
brier_score: 0.0384830758081
week 50
Train obs: 1410
Test obs: 568
Fitting RF_base model with 9 features
f1_score: 0.0952380952381
brier_score: 0.0312575867277
|
Math.ipynb | ###Markdown
Mathematics: Linear Algebra
###Code
import sys
import numpy as np
print('Python:{}'.format(sys.version))
print('Numpy:{}'.format(np.__version__))
# scalar
x = 6
x
# vector
x = np.array((1, 2, 3))
x
print('vector dimensions: {}'.format(x.shape))
print('vector size: {}'.format(x.size))
# matrix
x = np.matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
x
print('matrix dimensions: {}'.format(x.shape))
print('matrix size: {}'.format(x.size))
x = np.ones((3, 3))
x
# tensor
x = np.ones((3, 3, 3))
x
# indexing
A = np.ones((5, 5), dtype=np.int)
A
A[0,1]=2
A
A[:,0]=3
A
A[:,:]=5
A
A= np.ones((5, 5, 5), dtype=np.int)
A[:,0,0] = 6
A
# matrix operation
A= np.matrix([[1, 2], [3, 4]])
B = np.ones((2, 2), dtype=np.int)
A
B
C = A*B
C
# matrix transpose
A = np.array(range(9))
A = A.reshape(3, 3)
A
B = A.T
B
C = B.T
C
###Output
_____no_output_____
###Markdown
Eigenvalues & Eigenvectors
###Code
A = np.arange(9) - 3
A
B = A.reshape((3, 3))
B
# Euclidean (L2) norm - default
print(np.linalg.norm(A))
print(np.linalg.norm(B))
# the Frobenius norm is the L2 norm for a matrix
print(np.linalg.norm(B, 'fro'))
# the max norm (P = infinity)
print(np.linalg.norm(A, np.inf))
print(np.linalg.norm(B, np.inf))
# vector normalization - normalization to produce a unit vector
norm = np.linalg.norm(A)
A_unit = A / norm
print(A_unit)
# the magnitude of a unit vector is equal to 1
np.linalg.norm(A_unit)
# find the eigenvalues and eigenvectors for a simple square matrix
A = np.diag(np.arange(1, 4))
A
eigenvalues, eigenvectors = np.linalg.eig(A)
eigenvalues
eigenvectors
# the eigenvalue w[i] corresponds to the eigenvector v[:,i]
print('eigenvalue:', eigenvalues[1])
print('eigenvector:', eigenvectors[:, 1])
np.linalg.inv(eigenvectors)
# verify eigendecomposition
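# i.e. reconstruct A as V @ diag(eigenvalues) @ inv(V), where the columns of V are the eigenvectors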
matrix = np.matmul(np.diag(eigenvalues), np.linalg.inv(eigenvectors))
output = np.matmul(eigenvectors, matrix).astype(np.int)
output
###Output
_____no_output_____
###Markdown
###Code
import math
print(math.pi+4)
np.column_stack(([1, 2, 3], [4, 5, 6]))  # Stack 1D arrays as columns into a 2D array.
x=[1,3,3,7,9,2]
x.remove(2)
print(x)
###Output
[1, 3, 3, 7, 9]
|
vp1_TensorFlow_Project_LendingClubData.ipynb | ###Markdown
Data Exploration and Cleanup - how do the features relate to the output?
###Code
# To Visualize the Correlation Between Label and Features
# Modify Target Output from Text to 0 and 1
# Loan Status text to 0/1 Integer
# df['loan_status'].value_counts()
# df['year'] = df['date'].apply(lambda date: date.year)
df['loan_repaid'] = df['loan_status'].apply(lambda status: status=='Fully Paid')
df['loan_repaid'] = df['loan_status'].apply(lambda x : 1 if (x == 'Fully Paid') else 0)
df['loan_repaid'] = df['loan_status'].map({'Fully Paid':1,'Charged Off':0})
df.corr()['loan_repaid'].sort_values().drop('loan_repaid').plot(kind='bar')
# visualize nulls
# need to remove missing data or fill missing data
# remove any unnecessary or repetitive features
# convert categorical string features to dummy variables
df.head()
print('Length of dataframe =', len(df))
# Missing Data Analysis
# Number of missing:
df.isnull().sum()
# Percentage missing:
df.isnull().sum()/len(df)*100
pd.DataFrame([df.isnull().sum(), (df.isnull().sum()/len(df)*100) ], index=['Number of Null','Percent Null (%)']).transpose()
# Missing Data Analysis
# Null heatmap
plt.figure(figsize=(10,5))
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# looks like there are 396030 entries
# and a number of them have null
"""
emp_title 373103 non-null object
emp_length 377729 non-null object
title 394275 non-null object
revol_util 395754 non-null float64
mort_acc 358235 non-null float64
pub_rec_bankruptcies 395495 non-null float64
"""
# are we going to drop it or fill it with something?
# depends if it is interesting or not
df['emp_title'].nunique() # 173000 unique employment titles
df['emp_title'].value_counts()
# future: could map to high income job or low income jobs
# for now: just drop it
# Drop emp_title
df = df.drop('emp_title', axis = 1)
# Next one: emp_length
# get a list of unique values and then plot them by count, plot will be ordered
df['emp_length'].value_counts()
# df['emp_length'].isna().sum() # 18301
"""
10+ years 126041
2 years 35827
< 1 year 31725
3 years 31665
5 years 26495
1 year 25882
4 years 23952
6 years 20841
7 years 20819
8 years 19168
9 years 15314
NaN 18301
"""
# run this to get a list:
sorted(df['emp_length'].dropna().unique())
# ['1 year','10+ years','2 years','3 years','4 years','5 years','6 years','7 years','8 years','9 years','< 1 year']
# we run the above and take above output list and paste it here, and then we re-order it manually:
emp_length_order = ['< 1 year', '1 year',
'2 years',
'3 years',
'4 years',
'5 years',
'6 years',
'7 years',
'8 years',
'9 years',
'10+ years']
# count plot seaborn
plt.figure(figsize=(10,5))
sns.countplot(x='emp_length', data=df, order=emp_length_order) # looks like most borrowers have been working a long time
# redo above chart with hue to differentiate folks who default
plt.figure(figsize=(10,5))
emp_length_order = ['< 1 year', '1 year',
'2 years',
'3 years',
'4 years',
'5 years',
'6 years',
'7 years',
'8 years',
'9 years',
'10+ years']
sns.countplot(x='emp_length', data=df, hue='loan_status', order=emp_length_order)
# chart does not say much - do ratio analysis
# ratio analysis
# want to build percent of people per category
# first filter on loan_status, then groupby emp_length, then count to get each number of the columns (above chart)
df[df['loan_status']=='Charged Off'].groupby('emp_length').count()
df[df['loan_status']=='Fully Paid'].groupby('emp_length').count()
# first filter on loan_status, then groupby emp_length, then count to get each number of the columns (above chart)
# second grab column loan_status
emp_charged_off = df[df['loan_status']=='Charged Off'].groupby('emp_length').count()['loan_status']
emp_fully_paid = df[df['loan_status']=='Fully Paid'].groupby('emp_length').count()['loan_status']
emp_ratio = emp_charged_off/(emp_charged_off+emp_fully_paid) # note this is percent of total rather than A:B it is A / (A+B)
emp_ratio_df = pd.DataFrame([emp_charged_off, emp_fully_paid, emp_ratio],
index=['Charged Off', 'Fully Paid', 'Charged Off Percent']
)
# here is the previous calculation
def emp_length_extraction(term_text):
if term_text == '0':
return 0
else:
temp_num = term_text.split()[0]
if temp_num == '<':
return 6
elif temp_num == '10+':
return 120
else:
return int(temp_num)*12
# df['emp_length'].fillna(value='0', inplace=True)
# df['emp_length_values'] = df['emp_length'].apply(emp_length_extraction)
# df['emp_length_values'].value_counts()
emp_ratio.transpose().plot(kind='bar')
emp_ratio_df.transpose()
# looks like 20% regardless of emp_length, so not useful info, so do not keep the feature
# drop the feature
df.drop(['emp_length'], axis=1, inplace=True)
# looked at title vs purpose (same info)
df = df.drop('title', axis=1)
###Output
_____no_output_____
###Markdown
FILLING MISSING DATA
###Code
# mort_acc has 10% missing data
# so, what do we do with this column?
# if you dropna then you drop 10% of data - is not very good
# if you drop the feature then you have to be careful not to lose good info...
# so if we fill with value, what is a reasonable way to fill in the data? look for other features are highly correlated
df.corr()['mort_acc'].sort_values()
# looks like a positive correlation with "total_acc" feature
# next step:
# use the average of mortgage accounts per total accounts to fill in the missing data
# so first analyze total account information
df.groupby('total_acc').mean()
# take this total account info and focus on number of mortgage accounts
df.groupby('total_acc').mean()['mort_acc']
# this will be the lookup table used to fill in mort_acc:
# fill in mort_acc missing values with a mapping table you created above
missing_data_map_total_acc = df.groupby('total_acc').mean()['mort_acc']
# how to use the mapping table:
missing_data_map_total_acc[2.0] # returns 0.0 like the above
# this is a helper function that checks if NaN, if it is then return the mapping table output, otherwise return mort_acc #
def fill_mort_acc(total_acc, mort_acc):
'''
Accepts the total_acc and mort_acc values for the row.
Checks if the mort_acc is NaN , if so, it returns the avg mort_acc value
for the corresponding total_acc value for that row.
total_acc_avg here should be a Series or dictionary containing the mapping of the
groupby averages of mort_acc per total_acc values.
'''
if np.isnan(mort_acc):
return missing_data_map_total_acc[total_acc] # this maps to the table above
else:
return mort_acc
# this executes the filling function which uses the mapping table function, axis=1 because column
# helpful: https://stackoverflow.com/questions/13331698/how-to-apply-a-function-to-two-columns-of-pandas-dataframe
df['mort_acc'] = df.apply(lambda x: fill_mort_acc(x['total_acc'],x['mort_acc']), axis=1)
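# Note: an equivalent vectorized alternative (assumed, not used here) would be:
# df['mort_acc'] = df['mort_acc'].fillna(df.groupby('total_acc')['mort_acc'].transform('mean'))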
df.isnull().sum()
# 2 more nulls to investigate
# i use fillna=0 but solutions use dropna because small number of rows
"""
df['revol_util'].fillna(value=0,inplace=True)
df['mort_acc'].fillna(value=0,inplace=True)
df['pub_rec_bankruptcies'].fillna(value=0,inplace=True)
"""
# drop empty values for 'revol_util' and 'pub_rec_bankruptcies'
# do not need to drop the feature but just drop the rows that have NaN
df = df.dropna()
# my own model:
# drop all the other columns
# df.drop(['home_ownership','address','emp_title','title','purpose', 'earliest_cr_line','initial_list_status'], axis=1, inplace=True)
# should be all zero now
df.isnull().sum() # df.isna().sum() might work too but isnull seems more comprehensive
###Output
_____no_output_____
###Markdown
Categorical Data and String Data
###Code
# First step: determine which features are not values
df.select_dtypes(['object']).columns
# term
# remove months text and convert to integer 36 and 60
# method 1 = create manual mapping table
# method 2 = grab 2 characters
df['term'].value_counts()
"""
36 months
60 months
"""
def month_extraction(term_text):
return int(term_text.split()[0])
# another way = df['term'] = df['term'].apply(lambda term: int(term[:3]))
df['term'] = df['term'].apply(month_extraction)
# run this to check if did it properly:
# df['term'].value_counts()
# grade
# grade already covered by sub_grade so drop it
df = df.drop('grade', axis=1)
# sub_grade (first run - not used but example code here anyway)
# convert sub_grade to dummy variables using a mapping function
# this might be wrong because not really a continous variable?
# sub grade might be better than grade
df['sub_grade'].value_counts()
"""
B3 26655
B4 25601
C1 23662
C2 22580
B2 22495
B5 22085
C3 21221
C4 20280
B1 19182
A5 18526
C5 18244
F5 1397
G1 1058
G2 754
G3 552
G4 374
G5 316
"""
# my way (not used in second run):
def grade_extraction(term_text):
grade_letter = term_text[0]
if grade_letter == 'A':
return int(term_text[1])
elif grade_letter == 'B':
return 10 + int(term_text[1])
elif grade_letter == 'C':
return 20 + int(term_text[1])
elif grade_letter == 'D':
return 30 + int(term_text[1])
elif grade_letter == 'E':
return 40 + int(term_text[1])
elif grade_letter == 'F':
return 50 + int(term_text[1])
else:
return 60 + int(term_text[1])
df['sub_grade_values'] = df['sub_grade'].apply(grade_extraction)
# drop columns because not used in second run:
df.drop(['sub_grade_values'], axis=1, inplace=True)
# sub_grade (second run)
# convert to dummy variables using one hot encoding
# use pandas dummy variables to add new columns to original dataframe, drop original sub_grade column
# and add drop_first - do not need to duplicate information
# A/B/C --> _/1/0 because A is implicitly encoded
dummies = pd.get_dummies(df['sub_grade'], drop_first=True)
# REMOVE the sub_grade column THEN concat to original dataframe
df = pd.concat([df.drop('sub_grade', axis=1), dummies], axis=1)
# more dummy variables:
# verification_status, appliction_type, initial_list_status, purpose
# mass create dummy variables since low unique values
df['purpose'].value_counts() # only 15 unique values -> 15 unique columns
# use pandas dummy variables to add new columns to original dataframe, drop original column
dummies = pd.get_dummies(df[['verification_status', 'application_type', 'initial_list_status', 'purpose']], drop_first=True)
# REMOVE the sub_grade column THEN concat to original dataframe
df = pd.concat([df.drop(['verification_status', 'application_type', 'initial_list_status', 'purpose'], axis=1),
dummies], axis=1)
# home_ownership
# we want to replace (NONE and ANY) into OTHER
# or custom replace function or mapping function
df['home_ownership'].value_counts()
"""
MORTGAGE 198022
RENT 159395
OWN 37660
OTHER 110
NONE 29
ANY 3
"""
# we want to replace (NONE and ANY) into OTHER
# or custom replace function or mapping function
df['home_ownership'] = df['home_ownership'].replace(['NONE','ANY'], 'OTHER')
df['home_ownership'].value_counts()
# use pandas dummy variables to add new columns to original dataframe, drop original column
dummies = pd.get_dummies(df['home_ownership'], drop_first=True)
# REMOVE the sub_grade column THEN concat to original dataframe
df = pd.concat([df.drop('home_ownership', axis=1), dummies], axis=1)
# See what columns we got
# df.columns
# Address
# Extract the zip code
df['zip_code'] = df['address'].apply(lambda address:address[-5:])
df['zip_code'].value_counts() # luckily we only have 20 here
# use pandas dummy variables to add new columns to original dataframe, drop original column
dummies = pd.get_dummies(df['zip_code'], drop_first=True)
# REMOVE the sub_grade column THEN concat to original dataframe
df = pd.concat([df.drop('zip_code', axis=1), dummies], axis=1)
df = df.drop('address', axis=1)
# issue_d
# does this feature help to show which will default?
# course thinks this is data leakage - but maybe in reality vintage matters
# date conversion (if needed)
df['issue_d'].value_counts()
df['issue_d'] = pd.to_datetime(df['issue_d'])
# date is not useful by itself, so extract year
df['issue_d'] = df['issue_d'].apply(lambda date: date.year) # df['issue_d'].apply(lambda date: date.month)
# will stick to course workbook:
df.drop(['issue_d'], axis=1, inplace=True)
# earliest_cr_line
# does this feature help to show which will default?
# this is a historical time stamp feature - extract the year
# no need to convert to dummy variable because year can be treated as a continuous data type
# date conversion (if needed)
df['earliest_cr_line'].value_counts()
df['earliest_cr_line'] = pd.to_datetime(df['earliest_cr_line'])
# date is not useful by itself, so extract year
# df['earliest_cr_line'] = df['earliest_cr_line'].apply(lambda date: int(date[-4:])) # last 4 letters approach
df['earliest_cr_line'] = df['earliest_cr_line'].apply(lambda date: date.year) # df['issue_d'].apply(lambda date: date.month)
# df['earliest_cr_line'].value_counts()
# len(df['earliest_cr_line']) # 396030
###Output
_____no_output_____
###Markdown
DATA Cleaned - Now DATA PREPROCESSING (TRAIN/TEST/SPLIT)
###Code
df.columns
df.select_dtypes(['object']).columns
# Drop the original loan_status text label since we already have loan_repaid as 0/1
df = df.drop('loan_status', axis=1)
# Train Test Split
# df = df.sample(frac=0.1,random_state=101) # if many entries, can sample 10% of the rows for speed/memory issues
X = df.drop('loan_repaid', axis=1).values
y = df['loan_repaid'].values
# create two numpy arrays from pd dataframes
from sklearn.model_selection import train_test_split
# train_test_split()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101) # 20% will be test set
# normalize the data and fit the scaler
from sklearn.preprocessing import MinMaxScaler
# create instance of scaler
scaler = MinMaxScaler()
# only fit to the training set (to avoid data leakage)
scaler.fit(X_train)
# overwrite the X_train and X_test with the scaled values
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Tensorflow Model for Classification
# THIRD MODEL (callback with dropout) - see other models (Model 1=no callback, Model 2 = with callback no dropout)
# trains a little longer than Model 2 because dropout randomly stops updating some neurons each pass
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout
# Can use this to check: X_train.shape[1] is the number of features, in this case = 78
print('X_train shape: ', X_train.shape) # (316175 rows, 78 columns representing the features)
first_layer_neurons = X_train.shape[1]
# Build Neural Network
model = Sequential()
# Layer 1 - recommended to use number of features
# model.add(Dense(78, activation='relu', input_shape=(78,)))
model.add(Dense(first_layer_neurons, activation='relu', input_shape=(first_layer_neurons,))) # relu = rectified linear unit y = max(0, x)
model.add(Dropout(0.2))
# Dropout rate = fraction of this layer's neurons randomly dropped on each update: 0.2 means 20% of the neurons get no weight/bias update that pass.
# Ex) dropout = 1 drops 100% of neurons; dropout = 0 drops 0% of neurons
# Layer 2 - reduce half
model.add(Dense(39, activation='relu'))
model.add(Dropout(0.2))
# Layer 3 - reduce half
model.add(Dense(19, activation='relu'))
model.add(Dropout(0.2))
# Final Layer - Binary classification model so output must be binary: use sigmoid
model.add(Dense(units=1, activation='sigmoid')) # output is 0 or 1
# Binary classification model
model.compile(loss='binary_crossentropy', optimizer='adam')
# see the model: print the string summary of the network
model.summary()
# another way to visualize the model
# from keras.utils.vis_utils import plot_model
from tensorflow.keras.utils import plot_model
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True, expand_nested=True)
# this graphic requires the first layer to have input_shape: model.add(Dense(19, activation='relu', input_shape=(19,)))
from tensorflow.keras.callbacks import EarlyStopping
# min = stop when quantity monitored has stopped decreasing (minimize validation loss)
# max = stop when quantity monitored has stopped increasing (maximize accuracy)
# patience = number of epochs with no improvement to wait before stopping
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
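# Optional variant (a sketch, not used in the fit below; restore_best_weights may require a newer Keras):
# it rolls the weights back to the epoch with the best val_loss once training stops
early_stop_best = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25, restore_best_weights=True)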
# The commented-out fit below (600 epochs) was used elsewhere to demonstrate overfitting
# Here we fit 50 epochs, include the validation data, and rely on early stopping
# model.fit(x=X_train, y=y_train, epochs=600,validation_data=(X_test,y_test), callbacks=[early_stop])
# course used epochs=30
model.fit(x=X_train, y=y_train, epochs=50,batch_size=256,
validation_data=(X_test,y_test),
callbacks=[early_stop]
)
###Output
Train on 316175 samples, validate on 79044 samples
Epoch 1/50
316175/316175 [==============================] - 18s 56us/sample - loss: 0.2984 - val_loss: 0.2644
Epoch 2/50
316175/316175 [==============================] - 16s 50us/sample - loss: 0.2655 - val_loss: 0.2627
Epoch 3/50
316175/316175 [==============================] - 16s 51us/sample - loss: 0.2626 - val_loss: 0.2624
Epoch 4/50
316175/316175 [==============================] - 16s 51us/sample - loss: 0.2620 - val_loss: 0.2619
Epoch 5/50
316175/316175 [==============================] - 15s 49us/sample - loss: 0.2610 - val_loss: 0.2616
Epoch 6/50
316175/316175 [==============================] - 16s 52us/sample - loss: 0.2602 - val_loss: 0.2615
Epoch 7/50
316175/316175 [==============================] - 14s 43us/sample - loss: 0.2600 - val_loss: 0.2613
Epoch 8/50
316175/316175 [==============================] - 13s 41us/sample - loss: 0.2593 - val_loss: 0.2617
Epoch 9/50
316175/316175 [==============================] - 16s 51us/sample - loss: 0.2591 - val_loss: 0.2617
Epoch 10/50
316175/316175 [==============================] - 13s 41us/sample - loss: 0.2587 - val_loss: 0.2609
Epoch 11/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2589 - val_loss: 0.2616
Epoch 12/50
316175/316175 [==============================] - 16s 49us/sample - loss: 0.2583 - val_loss: 0.2611
Epoch 13/50
316175/316175 [==============================] - 13s 40us/sample - loss: 0.2581 - val_loss: 0.2611
Epoch 14/50
316175/316175 [==============================] - 12s 38us/sample - loss: 0.2579 - val_loss: 0.2624
Epoch 15/50
316175/316175 [==============================] - 14s 43us/sample - loss: 0.2578 - val_loss: 0.2611
Epoch 16/50
316175/316175 [==============================] - 15s 48us/sample - loss: 0.2576 - val_loss: 0.2617
Epoch 17/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2573 - val_loss: 0.2616
Epoch 18/50
316175/316175 [==============================] - 15s 46us/sample - loss: 0.2573 - val_loss: 0.2615
Epoch 19/50
316175/316175 [==============================] - 16s 52us/sample - loss: 0.2570 - val_loss: 0.2616
Epoch 20/50
316175/316175 [==============================] - 15s 47us/sample - loss: 0.2570 - val_loss: 0.2612
Epoch 21/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2567 - val_loss: 0.2617
Epoch 22/50
316175/316175 [==============================] - 12s 38us/sample - loss: 0.2563 - val_loss: 0.2616
Epoch 23/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2563 - val_loss: 0.2612
Epoch 24/50
316175/316175 [==============================] - 15s 47us/sample - loss: 0.2564 - val_loss: 0.2611
Epoch 25/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2561 - val_loss: 0.2612
Epoch 26/50
316175/316175 [==============================] - 13s 40us/sample - loss: 0.2559 - val_loss: 0.2617
Epoch 27/50
316175/316175 [==============================] - 12s 38us/sample - loss: 0.2558 - val_loss: 0.2616
Epoch 28/50
316175/316175 [==============================] - 18s 57us/sample - loss: 0.2557 - val_loss: 0.2624
Epoch 29/50
316175/316175 [==============================] - 16s 51us/sample - loss: 0.2556 - val_loss: 0.2610
Epoch 30/50
316175/316175 [==============================] - 13s 42us/sample - loss: 0.2556 - val_loss: 0.2615
Epoch 31/50
316175/316175 [==============================] - 13s 41us/sample - loss: 0.2552 - val_loss: 0.2611
Epoch 32/50
316175/316175 [==============================] - 13s 42us/sample - loss: 0.2553 - val_loss: 0.2615
Epoch 33/50
316175/316175 [==============================] - 13s 41us/sample - loss: 0.2552 - val_loss: 0.2611
Epoch 34/50
316175/316175 [==============================] - 13s 40us/sample - loss: 0.2550 - val_loss: 0.2608
Epoch 35/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2547 - val_loss: 0.2615
Epoch 36/50
316175/316175 [==============================] - 14s 43us/sample - loss: 0.2548 - val_loss: 0.2616
Epoch 37/50
316175/316175 [==============================] - 13s 40us/sample - loss: 0.2546 - val_loss: 0.2610
Epoch 38/50
316175/316175 [==============================] - 12s 38us/sample - loss: 0.2545 - val_loss: 0.2612
Epoch 39/50
316175/316175 [==============================] - 12s 39us/sample - loss: 0.2545 - val_loss: 0.2622
Epoch 40/50
316175/316175 [==============================] - 16s 52us/sample - loss: 0.2542 - val_loss: 0.2615
Epoch 41/50
316175/316175 [==============================] - 16s 50us/sample - loss: 0.2543 - val_loss: 0.2615
Epoch 42/50
316175/316175 [==============================] - 17s 55us/sample - loss: 0.2540 - val_loss: 0.2616
Epoch 43/50
316175/316175 [==============================] - 18s 56us/sample - loss: 0.2540 - val_loss: 0.2612
Epoch 44/50
316175/316175 [==============================] - 22s 71us/sample - loss: 0.2540 - val_loss: 0.2618
Epoch 45/50
316175/316175 [==============================] - 22s 71us/sample - loss: 0.2537 - val_loss: 0.2616
Epoch 46/50
316175/316175 [==============================] - 18s 58us/sample - loss: 0.2538 - val_loss: 0.2609
Epoch 47/50
316175/316175 [==============================] - 15s 46us/sample - loss: 0.2537 - val_loss: 0.2612
Epoch 48/50
316175/316175 [==============================] - 13s 42us/sample - loss: 0.2536 - val_loss: 0.2613
Epoch 49/50
316175/316175 [==============================] - 13s 41us/sample - loss: 0.2537 - val_loss: 0.2612
Epoch 50/50
316175/316175 [==============================] - 13s 40us/sample - loss: 0.2535 - val_loss: 0.2615
###Markdown
EVALUATE MODEL PERFORMANCE AND SAVE/LOAD MODEL
###Code
# Evaluate Model Performance - Plot Validation Loss vs Training Loss
# look for flattening of the loss curves; here training ran the full 50 epochs, so early stopping (patience=25) never triggered
model_loss = pd.DataFrame(model.history.history)
model_loss.plot()
plt.title('Model Loss - Validation vs Training - with Early Stop')
# performance for classification problems
from sklearn.metrics import classification_report, confusion_matrix
# create predictions based on model
# predictions = model.predict(X_test)
predictions = model.predict_classes(X_test)
# compare predictions to actuals
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test,predictions))
# https://en.wikipedia.org/wiki/Precision_and_recall
# Precision = Accuracy of Positive Values = TP / (TP + FP)
# Recall = TP / (TP + FN) = of all actual positives, the fraction we caught
# Accuracy = (TP + TN) / all = the overall fraction you got right
# Do not forget that most of the data was re-paid (imbalanced data set to begin with)
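# To make the definitions above concrete, recompute precision/recall for the positive class
# by hand from the confusion matrix (a sketch; assumes sklearn's [[TN, FP], [FN, TP]] layout)
tn, fp, fn, tp = confusion_matrix(y_test, predictions).ravel()
print('precision (positive class):', tp / (tp + fp))
print('recall (positive class):', tp / (tp + fn))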
# Imbalanced Dataset assessment
# Re Assess y_test or the true data
df['loan_repaid'].value_counts() # 317696 Repaid and 77523 not repaid
317696 / len(df) # about 80% of the loans in the full dataset were repaid, so ~80% accuracy is the naive baseline
# hence 90% accuracy is not necessarily good because baseline is 80%. We need more context.
# so the real takeaway: use the F1 score (about 0.6 for the minority class here) as the metric to improve
# ways to improve:
# 1) add layers
# 2) add more neurons
# 3) try other algorithms
# 4) add more epochs
# use model to make a prediction on a single person
# select random existing person
import random
random.seed(101)
random_int = random.randint(0, len(df) - 1) # -1 keeps the index in bounds for iloc
new_customer = df.drop('loan_repaid',axis=1).iloc[random_int]
# this is a pandas series
new_customer
# model requires it in a numpy array:
new_customer.values.reshape(1,78)
# recall, the model requires scaled input data
new_customer = scaler.transform(new_customer.values.reshape(1,78))
new_customer
# prediction using model # predictions = model.predict_classes(X_test)
predictions = model.predict_classes(new_customer)
predictions
# so did we get it right?
# pull the new_customer without the drop: new_customer = df.drop('loan_repaid',axis=1).iloc[random_int]
new_customer_orig = df.iloc[random_int]
new_customer_orig['loan_repaid']
# save the model down, after all this work
from tensorflow.keras.models import load_model
model.save('my_TF_lending_club_model1.h5')
# load the model
later_model = load_model('my_TF_lending_club_model1.h5')
# run predict - this returns a probability, not the class label:
later_model.predict(new_customer)
# run predict - the right way:
later_model.predict_classes(new_customer)
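# Note (assumption about newer library versions): predict_classes was removed in later
# TensorFlow/Keras releases; for this binary sigmoid model the equivalent is to threshold predict
(later_model.predict(new_customer) > 0.5).astype("int32")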
###Output
_____no_output_____
###Markdown
TENSORBOARD
###Code
# Tensorboard
# https://www.tensorflow.org/tensorboard/get_started
from tensorflow.keras.callbacks import EarlyStopping,TensorBoard
# Unique log for each time you run the model
from datetime import datetime
timestamp = datetime.now().strftime("%Y-%m-%d--%H%M-%S")
timestamp
# double backslash in the log path? yes - backslashes must be escaped inside Python strings on Windows
pwd
# Log folder
# Windows: use "logs\\fit"
# MacOS/Linux: use "logs/fit"
import os
print('Current Directory: ', os.getcwd())
log_directory = 'logs\\fit'
log_directory = log_directory + '\\' + timestamp
log_directory
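# Cross-platform alternative (a sketch; portable_log_directory is a new name, not used below):
# os.path.join picks the right separator for the current OS
portable_log_directory = os.path.join('logs', 'fit', timestamp)
portable_log_directory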
board = TensorBoard(log_dir=log_directory, # where stuff saved
histogram_freq=1, # freq=1: record weight histograms after every epoch
write_graph=True,
write_images=True,
update_freq='epoch',
profile_batch=2,
embeddings_freq=1)
first_layer_neurons = X_train.shape[1]
# Build Neural Network
model2 = Sequential()
# Layer 1 - recommended to use number of features
# model.add(Dense(78, activation='relu', input_shape=(78,)))
model2.add(Dense(first_layer_neurons, activation='relu', input_shape=(first_layer_neurons,))) # relu = rectified linear unit y = max(0, x)
model2.add(Dropout(0.2))
# Dropout rate = fraction of this layer's neurons randomly dropped on each update: 0.2 means 20% of the neurons get no weight/bias update that pass.
# Ex) dropout = 1 drops 100% of neurons; dropout = 0 drops 0% of neurons
# Layer 2 - reduce half
model2.add(Dense(39, activation='relu'))
model2.add(Dropout(0.2))
# Layer 3 - reduce half
model2.add(Dense(19, activation='relu'))
model2.add(Dropout(0.2))
# Final Layer - Binary classification model so output must be binary: use sigmoid
model2.add(Dense(units=1, activation='sigmoid')) # output is 0 or 1
# Binary classification model
model2.compile(loss='binary_crossentropy', optimizer='adam')
# add "board" to callbacks=[early_stop, board]
early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)
# course used epochs=30
model2.fit(x=X_train, y=y_train, epochs=50,batch_size=256,
validation_data=(X_test,y_test),
callbacks=[early_stop, board]
)
# run tensorboard visualization
# open http://localhost:6006/
# Two important pieces: 1) where did you save the log files and 2) where are you located
print(log_directory)
# then run command line in anaconda prompt
# conda activate [env]
# cd to the folder
# then: "tensorboard --logdir logs\fit"
# C:\A\Projects\Python\Python - matplotlib_VC> tensorboard --logdir logs\fit\2020-04-27--1607-04
# now go back to http://localhost:6006/
###Output
_____no_output_____ |
comp-cientifica-I-2018-2/semana-2/raw_files/.ipynb_checkpoints/Euler erros-checkpoint.ipynb | ###Markdown
How can we tell whether the results are converging correctly? Let's start with a fairly simple but often effective method: we will see how Euler's method behaves in cases where we know the answer, and from there look for some intuition for the general case.
###Code
import numpy as np
import matplotlib.pyplot as plt
from funcionario import euler_npts
###Output
_____no_output_____
###Markdown
Let's look again at an ODE: $y' = 2y$.
###Code
def F(t,y):
return 2*y
def ansF(t,t0,y0): return np.exp(2*(t-t0))*y0
###Output
_____no_output_____
###Markdown
Errors when changing the number of steps. Plots of the solutions
###Code
ns = [10,20,30]
for n in ns:
ts, ys = euler_npts(F, [0,1], y0=1.2, npts=n, retpts=True)
plt.plot(ts, ys, 'x:', label=str(n))
# Try other styles: ., o; --, -., ...
plt.plot(ts, ansF(ts,0,1.2), label='exact')
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Explicit Euler: solution')
plt.show()
###Output
_____no_output_____
###Markdown
Plots of the errors
###Code
ns = [10,20,30]
for n in ns:
ts, ys = euler_npts(F, [0,1], y0=1.2, npts=n, retpts=True)
plt.plot(ts, ys - ansF(ts,0,1.2), 'x:', label=str(n))
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Explicit Euler: error')
plt.show()
###Output
_____no_output_____
###Markdown
Plots of the relative errors. The relative error is always given by $$ \frac{\text{computed value} - \text{exact value}}{\text{exact value}}. $$ Or, when we are in a hurry to write: $$ \frac{\hat x - x}{x}, $$ where it is understood that $x$ is the correct answer, while $\hat x$ is the value we computed on the computer.
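As a minimal sketch (reusing `F`, `ansF` and the `euler_npts` call pattern from the cells above), the relative error of a single run can be computed elementwise:
```python
ts, ys = euler_npts(F, [0, 1], y0=1.2, npts=10, retpts=True)
rel_err = (ys - ansF(ts, 0, 1.2)) / ansF(ts, 0, 1.2)
```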
###Code
ns = [10,20,30]
for n in ns:
### Answer here
plt.xlabel('t')
plt.legend(title='nstep')
plt.title('Explicit Euler: relative error')
plt.show()
###Output
_____no_output_____
###Markdown
Generalizing: increasing the number of steps much further. Now that we have a plot that seems good for seeing errors, let's increase $n$ ;-)
###Code
ns = np.arange(100,500,step=30)
### Answer here
###Output
_____no_output_____ |
(Course-1) Neural Networks and Deep Learning/Planar Data Classifier/Planar_data_classification_with_onehidden_layer_v6c.ipynb | ###Markdown
Updates to Assignment If you were working on the older version:* Please click on the "Coursera" icon in the top right to open up the folder directory. * Navigate to the folder: Week 3/ Planar data classification with one hidden layer. You can see your prior work in version 6b: "Planar data classification with one hidden layer v6b.ipynb" List of bug fixes and enhancements* Clarifies that the classifier will learn to classify regions as either red or blue.* compute_cost function fixes np.squeeze by casting it as a float.* compute_cost instructions clarify the purpose of np.squeeze.* compute_cost clarifies that "parameters" parameter is not needed, but is kept in the function definition until the auto-grader is also updated.* nn_model removes extraction of parameter values, as the entire parameter dictionary is passed to the invoked functions. Planar data classification with one hidden layerWelcome to your week 3 programming assignment. It's time to build your first neural network, which will have a hidden layer. You will see a big difference between this model and the one you implemented using logistic regression. **You will learn how to:**- Implement a 2-class classification neural network with a single hidden layer- Use units with a non-linear activation function, such as tanh - Compute the cross entropy loss - Implement forward and backward propagation 1 - Packages Let's first import all the packages that you will need during this assignment.- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.- [sklearn](http://scikit-learn.org/stable/) provides simple and efficient tools for data mining and data analysis. - [matplotlib](http://matplotlib.org) is a library for plotting graphs in Python.- testCases provides some test examples to assess the correctness of your functions- planar_utils provide various useful functions used in this assignment
###Code
# Package imports
import numpy as np
import matplotlib.pyplot as plt
from testCases_v2 import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
%matplotlib inline
np.random.seed(1) # set a seed so that the results are consistent
###Output
_____no_output_____
###Markdown
2 - Dataset First, let's get the dataset you will work on. The following code will load a "flower" 2-class dataset into variables `X` and `Y`.
###Code
X, Y = load_planar_dataset()
###Output
_____no_output_____
###Markdown
Visualize the dataset using matplotlib. The data looks like a "flower" with some red (label y=0) and some blue (y=1) points. Your goal is to build a model to fit this data. In other words, we want the classifier to define regions as either red or blue.
###Code
# Visualize the data:
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____
###Markdown
You have: - a numpy-array (matrix) X that contains your features (x1, x2) - a numpy-array (vector) Y that contains your labels (red:0, blue:1).Lets first get a better sense of what our data is like. **Exercise**: How many training examples do you have? In addition, what is the `shape` of the variables `X` and `Y`? **Hint**: How do you get the shape of a numpy array? [(help)](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.shape.html)
###Code
### START CODE HERE ### (≈ 3 lines of code)
shape_X = X.shape
shape_Y = Y.shape
m = X.shape[1] # training set size
### END CODE HERE ###
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
print ('I have m = %d training examples!' % (m))
###Output
The shape of X is: (2, 400)
The shape of Y is: (1, 400)
I have m = 400 training examples!
###Markdown
**Expected Output**: **shape of X** (2, 400) **shape of Y** (1, 400) **m** 400 3 - Simple Logistic RegressionBefore building a full neural network, lets first see how logistic regression performs on this problem. You can use sklearn's built-in functions to do that. Run the code below to train a logistic regression classifier on the dataset.
###Code
# Train the logistic regression classifier
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(X.T, Y.T);
###Output
_____no_output_____
###Markdown
You can now plot the decision boundary of these models. Run the code below.
###Code
# Plot the decision boundary for logistic regression
plot_decision_boundary(lambda x: clf.predict(x), X, Y)
plt.title("Logistic Regression")
# Print accuracy
LR_predictions = clf.predict(X.T)
print ('Accuracy of logistic regression: %d ' % float((np.dot(Y,LR_predictions) + np.dot(1-Y,1-LR_predictions))/float(Y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
###Output
Accuracy of logistic regression: 47 % (percentage of correctly labelled datapoints)
###Markdown
**Expected Output**: **Accuracy** 47% **Interpretation**: The dataset is not linearly separable, so logistic regression doesn't perform well. Hopefully a neural network will do better. Let's try this now! 4 - Neural Network modelLogistic regression did not work well on the "flower dataset". You are going to train a Neural Network with a single hidden layer.**Here is our model**:**Mathematically**:For one example $x^{(i)}$:$$z^{[1] (i)} = W^{[1]} x^{(i)} + b^{[1]}\tag{1}$$ $$a^{[1] (i)} = \tanh(z^{[1] (i)})\tag{2}$$$$z^{[2] (i)} = W^{[2]} a^{[1] (i)} + b^{[2]}\tag{3}$$$$\hat{y}^{(i)} = a^{[2] (i)} = \sigma(z^{ [2] (i)})\tag{4}$$$$y^{(i)}_{prediction} = \begin{cases} 1 & \mbox{if } a^{[2](i)} > 0.5 \\ 0 & \mbox{otherwise } \end{cases}\tag{5}$$Given the predictions on all the examples, you can also compute the cost $J$ as follows: $$J = - \frac{1}{m} \sum\limits_{i = 0}^{m} \large\left(\small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large \right) \small \tag{6}$$**Reminder**: The general methodology to build a Neural Network is to: 1. Define the neural network structure ( of input units, of hidden units, etc). 2. Initialize the model's parameters 3. Loop: - Implement forward propagation - Compute loss - Implement backward propagation to get the gradients - Update parameters (gradient descent)You often build helper functions to compute steps 1-3 and then merge them into one function we call `nn_model()`. Once you've built `nn_model()` and learnt the right parameters, you can make predictions on new data. 4.1 - Defining the neural network structure **Exercise**: Define three variables: - n_x: the size of the input layer - n_h: the size of the hidden layer (set this to 4) - n_y: the size of the output layer**Hint**: Use shapes of X and Y to find n_x and n_y. Also, hard code the hidden layer size to be 4.
###Code
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
"""
Arguments:
X -- input dataset of shape (input size, number of examples)
Y -- labels of shape (output size, number of examples)
Returns:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
### START CODE HERE ### (≈ 3 lines of code)
n_x = X.shape[0] # size of input layer
n_h = 4
n_y = Y.shape[0] # size of output layer
### END CODE HERE ###
return (n_x, n_h, n_y)
X_assess, Y_assess = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_assess, Y_assess)
print("The size of the input layer is: n_x = " + str(n_x))
print("The size of the hidden layer is: n_h = " + str(n_h))
print("The size of the output layer is: n_y = " + str(n_y))
###Output
The size of the input layer is: n_x = 5
The size of the hidden layer is: n_h = 4
The size of the output layer is: n_y = 2
###Markdown
**Expected Output** (these are not the sizes you will use for your network, they are just used to assess the function you've just coded). **n_x** 5 **n_h** 4 **n_y** 2 4.2 - Initialize the model's parameters **Exercise**: Implement the function `initialize_parameters()`.**Instructions**:- Make sure your parameters' sizes are right. Refer to the neural network figure above if needed.- You will initialize the weights matrices with random values. - Use: `np.random.randn(a,b) * 0.01` to randomly initialize a matrix of shape (a,b).- You will initialize the bias vectors as zeros. - Use: `np.zeros((a,b))` to initialize a matrix of shape (a,b) with zeros.
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h,n_x)*0.01
b1 = np.zeros((n_h,1))
W2 = np.random.randn(n_y,n_h)*0.01
b2 = np.zeros((n_y,1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[-0.00416758 -0.00056267]
[-0.02136196 0.01640271]
[-0.01793436 -0.00841747]
[ 0.00502881 -0.01245288]]
b1 = [[ 0.]
[ 0.]
[ 0.]
[ 0.]]
W2 = [[-0.01057952 -0.00909008 0.00551454 0.02292208]]
b2 = [[ 0.]]
###Markdown
**Expected Output**: **W1** [[-0.00416758 -0.00056267] [-0.02136196 0.01640271] [-0.01793436 -0.00841747] [ 0.00502881 -0.01245288]] **b1** [[ 0.] [ 0.] [ 0.] [ 0.]] **W2** [[-0.01057952 -0.00909008 0.00551454 0.02292208]] **b2** [[ 0.]] 4.3 - The Loop **Question**: Implement `forward_propagation()`.**Instructions**:- Look above at the mathematical representation of your classifier.- You can use the function `sigmoid()`. It is built-in (imported) in the notebook.- You can use the function `np.tanh()`. It is part of the numpy library.- The steps you have to implement are: 1. Retrieve each parameter from the dictionary "parameters" (which is the output of `initialize_parameters()`) by using `parameters[".."]`. 2. Implement Forward Propagation. Compute $Z^{[1]}, A^{[1]}, Z^{[2]}$ and $A^{[2]}$ (the vector of all your predictions on all the examples in the training set).- Values needed in the backpropagation are stored in "`cache`". The `cache` will be given as an input to the backpropagation function.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Argument:
X -- input data of size (n_x, m)
parameters -- python dictionary containing your parameters (output of initialization function)
Returns:
A2 -- The sigmoid output of the second activation
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
### END CODE HERE ###
# Implement Forward Propagation to calculate A2 (probabilities)
### START CODE HERE ### (≈ 4 lines of code)
Z1 = W1@X+b1
A1 = np.tanh(Z1)
Z2 = W2@A1+b2
A2 = sigmoid(Z2)
### END CODE HERE ###
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
X_assess, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_assess, parameters)
# Note: we use the mean here just to make sure that your output matches ours.
print(np.mean(cache['Z1']) ,np.mean(cache['A1']),np.mean(cache['Z2']),np.mean(cache['A2']))
###Output
0.262818640198 0.091999045227 -1.30766601287 0.212877681719
###Markdown
**Expected Output**: 0.262818640198 0.091999045227 -1.30766601287 0.212877681719 Now that you have computed $A^{[2]}$ (in the Python variable "`A2`"), which contains $a^{[2](i)}$ for every example, you can compute the cost function as follows:$$J = - \frac{1}{m} \sum\limits_{i = 1}^{m} \large{(} \small y^{(i)}\log\left(a^{[2] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[2] (i)}\right) \large{)} \small\tag{13}$$**Exercise**: Implement `compute_cost()` to compute the value of the cost $J$.**Instructions**:- There are many ways to implement the cross-entropy loss. To help you, we give you how we would have implemented$- \sum\limits_{i=0}^{m} y^{(i)}\log(a^{[2](i)})$:```pythonlogprobs = np.multiply(np.log(A2),Y)cost = - np.sum(logprobs) no need to use a for loop!```(you can use either `np.multiply()` and then `np.sum()` or directly `np.dot()`). Note that if you use `np.multiply` followed by `np.sum` the end result will be a type `float`, whereas if you use `np.dot`, the result will be a 2D numpy array. We can use `np.squeeze()` to remove redundant dimensions (in the case of single float, this will be reduced to a zero-dimension array). We can cast the array as a type `float` using `float()`.
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
"""
Computes the cross-entropy cost given in equation (13)
Arguments:
A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
parameters -- python dictionary containing your parameters W1, b1, W2 and b2
[Note that the parameters argument is not used in this function,
but the auto-grader currently expects this parameter.
Future version of this notebook will fix both the notebook
and the auto-grader so that `parameters` is not needed.
For now, please include `parameters` in the function signature,
and also when invoking this function.]
Returns:
cost -- cross-entropy cost given equation (13)
"""
m = Y.shape[1] # number of example
# Compute the cross-entropy cost
### START CODE HERE ### (≈ 2 lines of code)
temp = Y*np.log(A2)+(1-Y)*np.log(1-A2)
cost = (-1/m)*sum(sum(temp)) # the Python built-in sum is applied twice here; np.sum(temp) would give the same answer
### END CODE HERE ###
cost = float(np.squeeze(cost)) # makes sure cost is the dimension we expect.
# E.g., turns [[17]] into 17
assert(isinstance(cost, float))
return cost
A2, Y_assess, parameters = compute_cost_test_case()
print("cost = " + str(compute_cost(A2, Y_assess, parameters)))
###Output
cost = 0.6930587610394646
###Markdown
**Expected Output**: **cost** 0.693058761... Using the cache computed during forward propagation, you can now implement backward propagation.**Question**: Implement the function `backward_propagation()`.**Instructions**:Backpropagation is usually the hardest (most mathematical) part in deep learning. To help you, here again is the slide from the lecture on backpropagation. You'll want to use the six equations on the right of this slide, since you are building a vectorized implementation. <!--$\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } = \frac{1}{m} (a^{[2](i)} - y^{(i)})$$\frac{\partial \mathcal{J} }{ \partial W_2 } = \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } a^{[1] (i) T} $$\frac{\partial \mathcal{J} }{ \partial b_2 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)}}}$$\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } = W_2^T \frac{\partial \mathcal{J} }{ \partial z_{2}^{(i)} } * ( 1 - a^{[1] (i) 2}) $$\frac{\partial \mathcal{J} }{ \partial W_1 } = \frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)} } X^T $$\frac{\partial \mathcal{J} _i }{ \partial b_1 } = \sum_i{\frac{\partial \mathcal{J} }{ \partial z_{1}^{(i)}}}$- Note that $*$ denotes elementwise multiplication.- The notation you will use is common in deep learning coding: - dW1 = $\frac{\partial \mathcal{J} }{ \partial W_1 }$ - db1 = $\frac{\partial \mathcal{J} }{ \partial b_1 }$ - dW2 = $\frac{\partial \mathcal{J} }{ \partial W_2 }$ - db2 = $\frac{\partial \mathcal{J} }{ \partial b_2 }$ !-->- Tips: - To compute dZ1 you'll need to compute $g^{[1]'}(Z^{[1]})$. Since $g^{[1]}(.)$ is the tanh activation function, if $a = g^{[1]}(z)$ then $g^{[1]'}(z) = 1-a^2$. So you can compute $g^{[1]'}(Z^{[1]})$ using `(1 - np.power(A1, 2))`.
###Code
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
"""
Implement the backward propagation using the instructions above.
Arguments:
parameters -- python dictionary containing our parameters
cache -- a dictionary containing "Z1", "A1", "Z2" and "A2".
X -- input data of shape (2, number of examples)
Y -- "true" labels vector of shape (1, number of examples)
Returns:
grads -- python dictionary containing your gradients with respect to different parameters
"""
m = X.shape[1]
# First, retrieve W1 and W2 from the dictionary "parameters".
### START CODE HERE ### (≈ 2 lines of code)
W1 = parameters["W1"]
W2 = parameters["W2"]
### END CODE HERE ###
# Retrieve also A1 and A2 from dictionary "cache".
### START CODE HERE ### (≈ 2 lines of code)
A1 = cache["A1"]
A2 = cache["A2"]
### END CODE HERE ###
# Backward propagation: calculate dW1, db1, dW2, db2.
### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
dZ2 = A2-Y
dW2 = (1/m)*[email protected]
db2 = (1/m)*np.sum(dZ2,axis=1,keepdims=True)
dZ1 = (W2.T@dZ2)*(1-A1**2)
dW1 = (1/m)*[email protected]
db1 = (1/m)*np.sum(dZ1,axis=1,keepdims=True)
### END CODE HERE ###
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
parameters, cache, X_assess, Y_assess = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_assess, Y_assess)
print ("dW1 = "+ str(grads["dW1"]))
print ("db1 = "+ str(grads["db1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("db2 = "+ str(grads["db2"]))
###Output
dW1 = [[ 0.00301023 -0.00747267]
[ 0.00257968 -0.00641288]
[-0.00156892 0.003893 ]
[-0.00652037 0.01618243]]
db1 = [[ 0.00176201]
[ 0.00150995]
[-0.00091736]
[-0.00381422]]
dW2 = [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]]
db2 = [[-0.16655712]]
###Markdown
**Expected output**: **dW1** [[ 0.00301023 -0.00747267] [ 0.00257968 -0.00641288] [-0.00156892 0.003893 ] [-0.00652037 0.01618243]] **db1** [[ 0.00176201] [ 0.00150995] [-0.00091736] [-0.00381422]] **dW2** [[ 0.00078841 0.01765429 -0.00084166 -0.01022527]] **db2** [[-0.16655712]] **Question**: Implement the update rule. Use gradient descent. You have to use (dW1, db1, dW2, db2) in order to update (W1, b1, W2, b2).**General gradient descent rule**: $ \theta = \theta - \alpha \frac{\partial J }{ \partial \theta }$ where $\alpha$ is the learning rate and $\theta$ represents a parameter.**Illustration**: The gradient descent algorithm with a good learning rate (converging) and a bad learning rate (diverging). Images courtesy of Adam Harley.
###Code
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate = 1.2):
"""
Updates parameters using the gradient descent update rule given above
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients
Returns:
parameters -- python dictionary containing your updated parameters
"""
# Retrieve each parameter from the dictionary "parameters"
### START CODE HERE ### (≈ 4 lines of code)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
### END CODE HERE ###
# Retrieve each gradient from the dictionary "grads"
### START CODE HERE ### (≈ 4 lines of code)
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
## END CODE HERE ###
# Update rule for each parameter
### START CODE HERE ### (≈ 4 lines of code)
W1 -= learning_rate*dW1
b1 -= learning_rate*db1
W2 -= learning_rate*dW2
b2 -= learning_rate*db2
### END CODE HERE ###
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
W1 = [[-0.00643025 0.01936718]
[-0.02410458 0.03978052]
[-0.01653973 -0.02096177]
[ 0.01046864 -0.05990141]]
b1 = [[ -1.02420756e-06]
[ 1.27373948e-05]
[ 8.32996807e-07]
[ -3.20136836e-06]]
W2 = [[-0.01041081 -0.04463285 0.01758031 0.04747113]]
b2 = [[ 0.00010457]]
###Markdown
**Expected Output**: **W1** [[-0.00643025 0.01936718] [-0.02410458 0.03978052] [-0.01653973 -0.02096177] [ 0.01046864 -0.05990141]] **b1** [[ -1.02420756e-06] [ 1.27373948e-05] [ 8.32996807e-07] [ -3.20136836e-06]] **W2** [[-0.01041081 -0.04463285 0.01758031 0.04747113]] **b2** [[ 0.00010457]] 4.4 - Integrate parts 4.1, 4.2 and 4.3 in nn_model() **Question**: Build your neural network model in `nn_model()`.**Instructions**: The neural network model has to use the previous functions in the right order.
###Code
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
"""
Arguments:
X -- dataset of shape (2, number of examples)
Y -- labels of shape (1, number of examples)
n_h -- size of the hidden layer
num_iterations -- Number of iterations in gradient descent loop
print_cost -- if True, print the cost every 1000 iterations
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
# Initialize parameters
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x,n_h,n_y)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
### START CODE HERE ### (≈ 4 lines of code)
# Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
A2, cache = forward_propagation(X,parameters)
# Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
cost = compute_cost(A2,Y,parameters)
# Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
grads = backward_propagation(parameters,cache,X,Y)
# Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
parameters = update_parameters(parameters,grads)
### END CODE HERE ###
# Print the cost every 1000 iterations
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
X_assess, Y_assess = nn_model_test_case()
parameters = nn_model(X_assess, Y_assess, 4, num_iterations=10000, print_cost=True)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
###Output
Cost after iteration 0: 0.692739
Cost after iteration 1000: 0.000218
Cost after iteration 2000: 0.000107
Cost after iteration 3000: 0.000071
Cost after iteration 4000: 0.000053
Cost after iteration 5000: 0.000042
Cost after iteration 6000: 0.000035
Cost after iteration 7000: 0.000030
Cost after iteration 8000: 0.000026
Cost after iteration 9000: 0.000023
W1 = [[-0.65848169 1.21866811]
[-0.76204273 1.39377573]
[ 0.5792005 -1.10397703]
[ 0.76773391 -1.41477129]]
b1 = [[ 0.287592 ]
[ 0.3511264 ]
[-0.2431246 ]
[-0.35772805]]
W2 = [[-2.45566237 -3.27042274 2.00784958 3.36773273]]
b2 = [[ 0.20459656]]
###Markdown
**Expected Output**: **cost after iteration 0** 0.692739 $\vdots$ $\vdots$ **W1** [[-0.65848169 1.21866811] [-0.76204273 1.39377573] [ 0.5792005 -1.10397703] [ 0.76773391 -1.41477129]] **b1** [[ 0.287592 ] [ 0.3511264 ] [-0.2431246 ] [-0.35772805]] **W2** [[-2.45566237 -3.27042274 2.00784958 3.36773273]] **b2** [[ 0.20459656]] 4.5 Predictions**Question**: Use your model to predict by building predict().Use forward propagation to predict results.**Reminder**: predictions = $y_{prediction} = \mathbb 1 \text{{activation > 0.5}} = \begin{cases} 1 & \text{if}\ activation > 0.5 \\ 0 & \text{otherwise} \end{cases}$ As an example, if you would like to set the entries of a matrix X to 0 and 1 based on a threshold you would do: ```X_new = (X > threshold)```
###Code
# GRADED FUNCTION: predict
def predict(parameters, X):
"""
Using the learned parameters, predicts a class for each example in X
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (n_x, m)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.
### START CODE HERE ### (≈ 2 lines of code)
A2, cache = forward_propagation(X,parameters)
predictions = A2>0.5
### END CODE HERE ###
return predictions
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("predictions mean = " + str(np.mean(predictions)))
###Output
predictions mean = 0.666666666667
###Markdown
**Expected Output**: **predictions mean** 0.666666666667 It is time to run the model and see how it performs on a planar dataset. Run the following code to test your model with a single hidden layer of $n_h$ hidden units.
###Code
# Build a model with a n_h-dimensional hidden layer
parameters = nn_model(X, Y, n_h = 4, num_iterations = 10000, print_cost=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))
###Output
Cost after iteration 0: 0.693048
Cost after iteration 1000: 0.288083
Cost after iteration 2000: 0.254385
Cost after iteration 3000: 0.233864
Cost after iteration 4000: 0.226792
Cost after iteration 5000: 0.222644
Cost after iteration 6000: 0.219731
Cost after iteration 7000: 0.217504
Cost after iteration 8000: 0.219467
Cost after iteration 9000: 0.218610
###Markdown
**Expected Output**: **Cost after iteration 9000** 0.218607
###Code
# Print accuracy
predictions = predict(parameters, X)
print ('Accuracy: %d' % float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100) + '%')
###Output
Accuracy: 90%
###Markdown
**Expected Output**: **Accuracy** 90% Accuracy is really high compared to Logistic Regression. The model has learnt the leaf patterns of the flower! Neural networks are able to learn even highly non-linear decision boundaries, unlike logistic regression. Now, let's try out several hidden layer sizes. 4.6 - Tuning hidden layer size (optional/ungraded exercise) Run the following code. It may take 1-2 minutes. You will observe different behaviors of the model for various hidden layer sizes.
###Code
# This may take about 2 minutes to run
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50]
for i, n_h in enumerate(hidden_layer_sizes):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer of size %d' % n_h)
parameters = nn_model(X, Y, n_h, num_iterations = 5000)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
predictions = predict(parameters, X)
accuracy = float((np.dot(Y,predictions.T) + np.dot(1-Y,1-predictions.T))/float(Y.size)*100)
print ("Accuracy for {} hidden units: {} %".format(n_h, accuracy))
###Output
Accuracy for 1 hidden units: 67.5 %
Accuracy for 2 hidden units: 67.25 %
Accuracy for 3 hidden units: 90.75 %
Accuracy for 4 hidden units: 90.5 %
Accuracy for 5 hidden units: 91.25 %
Accuracy for 20 hidden units: 90.0 %
Accuracy for 50 hidden units: 90.75 %
###Markdown
**Interpretation**:- The larger models (with more hidden units) are able to fit the training set better, until eventually the largest models overfit the data. - The best hidden layer size seems to be around n_h = 5. Indeed, a value around here seems to fits the data well without also incurring noticeable overfitting.- You will also learn later about regularization, which lets you use very large models (such as n_h = 50) without much overfitting. **Optional questions**:**Note**: Remember to submit the assignment by clicking the blue "Submit Assignment" button at the upper-right. Some optional/ungraded questions that you can explore if you wish: - What happens when you change the tanh activation for a sigmoid activation or a ReLU activation?- Play with the learning_rate. What happens?- What if we change the dataset? (See part 5 below!) **You've learnt to:**- Build a complete neural network with a hidden layer- Make a good use of a non-linear unit- Implemented forward propagation and backpropagation, and trained a neural network- See the impact of varying the hidden layer size, including overfitting. Nice work! 5) Performance on other datasets If you want, you can rerun the whole notebook (minus the dataset part) for each of the following datasets.
###Code
# Datasets
noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
"noisy_moons": noisy_moons,
"blobs": blobs,
"gaussian_quantiles": gaussian_quantiles}
### START CODE HERE ### (choose your dataset)
dataset = "noisy_moons"
### END CODE HERE ###
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])
# make blobs binary
if dataset == "blobs":
Y = Y%2
# Visualize the data
plt.scatter(X[0, :], X[1, :], c=Y, s=40, cmap=plt.cm.Spectral);
###Output
_____no_output_____ |
lesson4-week1/Convolutional Model step by step/Convolution+model+-+Application+-+v1.ipynb | ###Markdown
Convolutional Neural Networks: ApplicationWelcome to Course 4's second assignment! In this notebook, you will:- Implement helper functions that you will use when implementing a TensorFlow model- Implement a fully functioning ConvNet using TensorFlow **After this assignment you will be able to:**- Build and train a ConvNet in TensorFlow for a classification problem We assume here that you are already familiar with TensorFlow. If you are not, please refer the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*"). 1.0 - TensorFlow modelIn the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call. As usual, we will start by loading in the packages.
###Code
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
%matplotlib inline
np.random.seed(1)
###Output
_____no_output_____
###Markdown
Run the next cell to load the "SIGNS" dataset you are going to use.
###Code
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
###Output
_____no_output_____
###Markdown
As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
###Code
# Example of a picture
index = 15
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
###Output
y = 5
###Markdown
In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.To get started, let's examine the shapes of your data.
###Code
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
###Output
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
###Markdown
1.1 - Create placeholdersTensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.**Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size, it will give you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint](https://www.tensorflow.org/api_docs/python/tf/placeholder).
###Code
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape
[None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape
[None, n_y] and dtype "float"
"""
### START CODE HERE ### (≈2 lines)
X = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, n_C0])
Y = tf.placeholder(tf.float32, shape=[None, n_y])
### END CODE HERE ###
return X, Y
X, Y = create_placeholders(64, 64, 3, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
###Output
X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
###Markdown
**Expected Output** X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32) Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32) 1.2 - Initialize parametersYou will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.**Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use:```pythonW = tf.get_variable("W", [1,2,3,4], initializer = ...)```[More Info](https://www.tensorflow.org/api_docs/python/tf/get_variable).
###Code
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with
tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1)
# so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
W1 = tf.get_variable("W1", dtype = tf.float32, shape=[4, 4, 3, 8],
initializer = tf.contrib.layers.xavier_initializer(seed = 0))
W2 = tf.get_variable("W2", dtype = tf.float32, shape=[2, 2, 8, 16],
initializer = tf.contrib.layers.xavier_initializer(seed = 0))
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters
tf.reset_default_graph()
with tf.Session() as sess_test:
parameters = initialize_parameters()
init = tf.global_variables_initializer()
sess_test.run(init)
print("W1 = " + str(parameters["W1"].eval()[1,1,1]))
print("W2 = " + str(parameters["W2"].eval()[1,1,1]))
###Output
W1 = [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394
-0.06847463 0.05245192]
W2 = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058
-0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228
-0.22779644 -0.1601823 -0.16117483 -0.10286498]
###Markdown
** Expected Output:** W1 = [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 -0.06847463 0.05245192] W2 = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 -0.22779644 -0.1601823 -0.16117483 -0.10286498] 1.2 - Forward propagationIn TensorFlow, there are built-in functions that carry out the convolution steps for you.- **tf.nn.conv2d(X,W1, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W1$, this function convolves $W1$'s filters on X. The third input ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d)- **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool)- **tf.nn.relu(Z1):** computes the elementwise ReLU of Z1 (which can be any shape). You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/nn/relu)- **tf.contrib.layers.flatten(P)**: given an input P, this function flattens each example into a 1D vector it while maintaining the batch-size. It returns a flattened tensor with shape [batch_size, k]. You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten)- **tf.contrib.layers.fully_connected(F, num_outputs):** given a the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected)In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters. **Exercise**: Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above. In detail, we will use the following parameters for all the steps: - Conv2D: stride 1, padding is "SAME" - ReLU - Max pool: Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME" - Conv2D: stride 1, padding is "SAME" - ReLU - Max pool: Use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME" - Flatten the previous output. - FULLYCONNECTED (FC) layer: Apply a fully connected layer without an non-linear activation function. Do not call the softmax here. This will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
###Code
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D ->
RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
### START CODE HERE ###
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X, W1, strides = [1, 1, 1, 1], padding = "SAME")
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1, ksize = [1, 8, 8, 1],
strides = [1, 8, 8, 1], padding = "SAME")
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1, W2, strides = [1, 1, 1, 1], padding = "SAME")
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2, ksize = [1, 4, 4, 1],
strides = [1, 4, 4, 1], padding = "SAME")
# FLATTEN
P2 = tf.contrib.layers.flatten(P2)
# FULLY-CONNECTED without non-linear activation function
# (do not call softmax).
# 6 neurons in output layer. Hint: one of the arguments
# should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(P2, num_outputs = 6,
activation_fn = None)
### END CODE HERE ###
return Z3
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(Z3, {X: np.random.randn(2,64,64,3),
Y: np.random.randn(2,6)})
print("Z3 = " + str(a))
###Output
Z3 = [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064]
[-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
###Markdown
**Expected Output**: Z3 = [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]] 1.3 - Compute costImplement the compute cost function below. You might find these two functions helpful: - **tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y):** computes the softmax cross-entropy loss. This function computes both the softmax activation and the resulting loss. You can check the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits)- **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to average the losses over all the examples to get the overall cost. You can check the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/reduce_mean)**Exercise**: Compute the cost below using the function above.
###Code
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit),
of shape (6, number of examples)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
### START CODE HERE ### (1 line of code)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits
(logits = Z3, labels = Y))
### END CODE HERE ###
return cost
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(cost, {X: np.random.randn(4,64,64,3),
Y: np.random.randn(4,6)})
print("cost = " + str(a))
###Output
cost = 2.91034
###Markdown
**Expected Output**: cost = 2.91034 1.4 Model Finally you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset. You have implemented `random_mini_batches()` in the Optimization programming assignment of course 2. Remember that this function returns a list of mini-batches. **Exercise**: Complete the function below. The model below should: create placeholders, initialize parameters, forward propagate, compute the cost, and create an optimizer. Finally, you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
###Code
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 100, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D ->
RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- training set labels, of shape (None, n_y = 6)
X_test -- test set, of shape (None, 64, 64, 3)
Y_test -- test set labels, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph()
# to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1)
# to keep results consistent (tensorflow seed)
seed = 3
# to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = []
# To keep track of the cost
# Create Placeholders of the correct shape
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer.
# Use an AdamOptimizer that minimizes the cost.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size)
# number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train,
minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost,
# the feed_dict should contain a minibatch for (X,Y).
### START CODE HERE ### (1 line)
_ , temp_cost = sess.run([optimizer, cost],
feed_dict = {X:minibatch_X, Y:minibatch_Y})
### END CODE HERE ###
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return train_accuracy, test_accuracy, parameters
###Output
_____no_output_____
###Markdown
Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
###Code
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
###Output
Cost after epoch 0: 1.917929
Cost after epoch 5: 1.506757
Cost after epoch 10: 0.955359
Cost after epoch 15: 0.845802
Cost after epoch 20: 0.701174
Cost after epoch 25: 0.571977
Cost after epoch 30: 0.518435
Cost after epoch 35: 0.495806
Cost after epoch 40: 0.429827
Cost after epoch 45: 0.407291
Cost after epoch 50: 0.366394
Cost after epoch 55: 0.376922
Cost after epoch 60: 0.299491
Cost after epoch 65: 0.338870
Cost after epoch 70: 0.316400
Cost after epoch 75: 0.310413
Cost after epoch 80: 0.249549
Cost after epoch 85: 0.243457
Cost after epoch 90: 0.200031
Cost after epoch 95: 0.175452
###Markdown
**Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease. **Cost after epoch 0 =** 1.917929 **Cost after epoch 5 =** 1.506757 **Train Accuracy =** 0.940741 **Test Accuracy =** 0.783333 Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance). Once again, here's a thumbs up for your work!
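As a concrete illustration of the regularization idea above, here is a minimal sketch (not part of the graded assignment) of how dropout could be added after the flatten step, assuming the same TF1-style graph and `parameters` dictionary used in this notebook; `keep_prob` should be set to 1.0 at evaluation time.
```python
# Sketch only: same architecture as forward_propagation, plus dropout on the
# flattened features to reduce variance. Use keep_prob < 1.0 during training
# and keep_prob = 1.0 when evaluating accuracy.
def forward_propagation_with_dropout(X, parameters, keep_prob=0.8):
    W1 = parameters['W1']
    W2 = parameters['W2']
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding="SAME")
    A1 = tf.nn.relu(Z1)
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding="SAME")
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding="SAME")
    A2 = tf.nn.relu(Z2)
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
    P2 = tf.contrib.layers.flatten(P2)
    P2 = tf.nn.dropout(P2, keep_prob)  # randomly zero out units during training
    Z3 = tf.contrib.layers.fully_connected(P2, num_outputs=6, activation_fn=None)
    return Z3
```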
###Code
fname = "images/thumbs_up.jpg"
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64))
plt.imshow(my_image)
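# Note: `ndimage.imread` and `scipy.misc.imresize` were removed from newer SciPy
# releases, so this cell only runs on the older environment used by the course.
# A hedged alternative (assuming Pillow is installed) would be:
#   from PIL import Image
#   my_image = np.array(Image.open(fname).resize((64, 64)))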
###Output
_____no_output_____ |
als_model.ipynb | ###Markdown
Create a parameter grid and cross-validate to find the best model across different hyperparameters: params_score = {}; params = (ParamGridBuilder() .addGrid(als.regParam, [1, 0.01, 0.001, 0.1]) .addGrid(als.maxIter, [5, 10, 20]) .addGrid(als.rank, [4, 10, 50])).build(); cv = CrossValidator(estimator=als, estimatorParamMaps=params, evaluator=evaluator, parallelism=4); best_model = cv.fit(movie_ratings); als_model = best_model.bestModel. Save model: als_model.save('als_model')
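The grid search described above can be reconstructed as the runnable sketch below; it assumes `als` (a `pyspark.ml.recommendation.ALS` estimator), `evaluator` (a `RegressionEvaluator`), and the `movie_ratings` DataFrame are defined earlier in the notebook, as the rest of this notebook implies.
```python
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator

# Grid of ALS hyperparameters to search over
params = (ParamGridBuilder()
          .addGrid(als.regParam, [1, 0.01, 0.001, 0.1])
          .addGrid(als.maxIter, [5, 10, 20])
          .addGrid(als.rank, [4, 10, 50])
          .build())

# Cross-validate each combination and keep the best-scoring ALS model
cv = CrossValidator(estimator=als,
                    estimatorParamMaps=params,
                    evaluator=evaluator,
                    parallelism=4)

best_model = cv.fit(movie_ratings)   # CrossValidatorModel
als_model = best_model.bestModel     # best ALSModel found by the search

# Persist the winning model for later reuse
als_model.save('als_model')
```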
###Code
# load requests json file into a spark dataframe
requests = spark.read.json("data/requests.json")
# predict requests with als model
#requests_predictions = model.transform(requests)
requests_predictions = model.transform(requests).toPandas()
# predict null predictions with cold start model
for i, row in requests_predictions[requests_predictions['prediction'].isna()].iterrows():
requests_predictions.loc[i, 'prediction'] = get_cold_start_rating(row['user_id'], row['movie_id']
,user_df
,u_clusters
,ratings_df
)
print(requests_predictions['prediction'].isna().any())
# export request predictions dataframe as json file.
cols = ['user_id','movie_id', 'rating', 'timestamp', 'prediction']
requests_predictions = requests_predictions[cols]
# predictions = requests_predictions.to_json(r"data/predictions.json"
# ,orient='records'
# ,lines=True
# )
requests_predictions.to_json(r"data/predictions.json"
,orient='records'
,lines=True
)
!head data/predictions.json
!head data/requests.json
requests_predictions.info()
print("only {} nulls.".format(280260-280134))
requests_predictions.head()
###Output
_____no_output_____ |
pytorch_alternatives/migration_challenge_pytorch_image/Instructions.ipynb | ###Markdown
PyTorch MNIST Lift and Shift ExerciseFor this exercise notebook, you should be able to use the `Python 3 (PyTorch 1.6 Python 3.6 CPU Optimized)` kernel on SageMaker Studio, or `conda_pytorch_p36` on classic SageMaker Notebook Instances.--- IntroductionYour new colleague in the data science team (who isn't very familiar with SageMaker) has written a nice notebook to tackle an image classification problem with PyTorch: [Local Notebook.ipynb](Local%20Notebook.ipynb).It works OK with the simple MNIST data set they were working on before, but now they'd like to take advantage of some of the features of SageMaker to tackle bigger and harder challenges.**Can you help refactor the Local Notebook code, to show them how to use SageMaker effectively?** Getting StartedFirst, check you can **run the [Local Notebook.ipynb](Local%20Notebook.ipynb) notebook through** - reviewing what steps it takes.**This notebook** sets out a structure you can use to migrate code into, and lists out some of the changes you'll need to make at a high level. You can either work directly in here, or duplicate this notebook so you still have an unchanged copy of the original.Try to work through the sections first with an MVP goal in mind (fitting the model to data in S3 via a SageMaker Training Job, and deploying/using the model through a SageMaker Endpoint). At the end, there are extension exercises to bring in more advanced functionality. DependenciesListing all our imports at the start helps to keep the requirements to run any script/file transparent up-front, and is specified by nearly every style guide including Python's official [PEP 8](https://www.python.org/dev/peps/pep-0008/imports)
###Code
!pip install ipywidgets matplotlib
# External Dependencies:
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import numpy as np
# Local Dependencies:
from util.nb import upload_in_background
# TODO: What else will you need?
# Have a look at the documentation: https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html
# to see which libraries need to be imported to use SageMaker and the PyTorch estimator
# TODO: Here might be a good place to init any SDKs you need...
# 1. Setup the SageMaker role
role = ?
# 2. Setup the SageMaker session
sess = ?
# 3. Setup the SageMaker default bucket
bucket_name = ?
# Have a look at the previous examples to find out how to do it
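# One possible way to fill in the TODOs above (a sketch, assuming this notebook
# runs with a SageMaker execution role and the SageMaker Python SDK installed):
#
# import sagemaker
# role = sagemaker.get_execution_role()
# sess = sagemaker.Session()
# bucket_name = sess.default_bucket()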
###Output
_____no_output_____
###Markdown
Data PreparationThe primary data source for a SageMaker training job is (nearly) always S3 - so we should upload our training and test data there.We'd like our training job to be reusable for other image classification projects, so we'll upload in the **folders-of-images format** rather than the straight pre-processed numpy arrays.However, for this particular dataset (tens of thousands of tiny files) it's easy to accidentally write a poor-performing upload that **could take a long time**... So we prepared the below to help you run the upload **in the background** using the [aws s3 sync](https://docs.aws.amazon.com/cli/latest/reference/s3/sync.html) CLI command.**Check you understand** what data it's going to upload from this notebook, and where it's going to store it in S3, then start the upload running while you work on the rest.
###Code
upload_in_background(local_path="data", s3_uri=f"s3://{bucket_name}/mnist")
###Output
_____no_output_____
###Markdown
You can carry on working on the other sections while your data uploads! Data Input ("Channels") ConfigurationThe draft code has **2 data sets**: One for training, and one for test/validation. (For classification, the folder location of each image is sufficient as a label).In SageMaker terminology, each input data set is a "channel" and we can name them however we like... Just make sure you're consistent about what you call each one!For a simple input configuration, a channel spec might just be the S3 URI of the folder. For configuring more advanced options, there's the [s3_input](https://sagemaker.readthedocs.io/en/stable/inputs.html) class in the SageMaker SDK.
###Code
# TODO: Define your 2 data channels
# The data can be found in: "s3://{bucket_name}/mnist/train" and "s3://{bucket_name}/mnist/test"
inputs = # Look at the previous example to see how the inputs were defined
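# A possible definition (sketch) using the S3 prefixes mentioned above:
#
# inputs = {
#     "train": f"s3://{bucket_name}/mnist/train",
#     "test": f"s3://{bucket_name}/mnist/test",
# }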
###Output
_____no_output_____
###Markdown
Algorithm ("Estimator") Configuration and RunInstead of loading and fitting this data here in the notebook, we'll be creating a [PyTorch Estimator](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/sagemaker.pytorch.htmlpytorch-estimator) through the SageMaker SDK, to run the code on a separate container that can be scaled as required.The ["Using PyTorch with the SageMaker Python SDK"](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html) docs give a good overview of this process. You should run your estimator in **Python 3**.**Use the [src/main.py](src/main.py) file** as your entry point to port code into - which has already been created for you with some basic hints.
###Code
# TODO: Create your PyTorch estimator
# Note the PyTorch class inherits from some cross-framework base classes with additional
# constructor options:
# https://sagemaker.readthedocs.io/en/stable/estimators.html
# https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#create-an-estimator
# We are using PyTorch 1.6 and python 3
# You can reuse the metrics definition from the previous example
# (Optional) Look at the Pytorch script and try to pass new hyperparameters
estimator = ?
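# A sketch of one possible configuration (SageMaker Python SDK v2 syntax; the
# instance type and hyperparameter values below are illustrative assumptions):
#
# from sagemaker.pytorch import PyTorch
# estimator = PyTorch(
#     entry_point="main.py",
#     source_dir="src",
#     role=role,
#     framework_version="1.6",
#     py_version="py3",
#     instance_count=1,
#     instance_type="ml.g4dn.xlarge",
#     hyperparameters={"epochs": 5, "batch-size": 128},
# )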
###Output
_____no_output_____
###Markdown
Before running the actual training on SageMaker TrainingJob, it can be good to run it locally first using the code below. If there is any error, you can fix them first before running using SageMaker TrainingJob.
###Code
#!python3 src/main.py --train data/train --test data/test --output-data-dir data/local-output --model-dir data/local-model --epochs=2 --batch-size=128
# TODO: Call estimator.fit
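# Once `estimator` and the `inputs` channel dict are defined, the call is simply
# (sketch):
#
# estimator.fit(inputs)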
###Output
_____no_output_____
###Markdown
Deploy and Use Your Model (Real-Time Inference)If your training job has completed; and saved the model in the correct PyTorch model format; it should now be pretty simple to deploy the model to a real-time endpoint.You can achieve this with the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html).
###Code
# TODO: Deploy a real-time endpoint
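# A deployment sketch (the instance type is an illustrative assumption):
#
# predictor = estimator.deploy(initial_instance_count=1, instance_type="ml.m5.large")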
###Output
_____no_output_____
###Markdown
Reviewing the architecture from the example notebook, we set up the model to accept **batches** of **28x28** image tensors with **normalized 0-1 pixel values** and a **color channel dimension**Assuming you haven't added any custom pre-processing to our model source code (to accept e.g. encoded JPEGs/PNGs, or arbitrary shapes), we'll need to replicate that same format when we use our endpoint.We've provided a nice **interactive widget** below (which doesn't work in JupyterLab, unfortunately - only plain Jupyter!) and some skeleton code to help you use your model... But you'll need to fill in some details! WARNING: The next next cells for visualization only works with the classic Jupyter notebooks, skip to the next section if you are using JupyterLab and SageMaker Studio
###Code
# Display interactive widget:
# This widget updates variable "data" here in the Jupyter kernel when drawn on
HTML(open("util/input.html").read())
# Run a prediction:
print(f"Raw data shape {np.array(data).shape}")
img = np.squeeze(np.array(data)).astype(np.float32)
img = np.expand_dims(np.expand_dims(img, 0), 0)
print(f"Request data shape {img.shape}")
# TODO: Call the predictor with reqdata
# TODO: What structure is the response? How do we interpret it?
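# A sketch of invoking the endpoint, assuming the network returns one score per
# digit class (shape [1, 10]); np.argmax then gives the predicted digit:
#
# result = predictor.predict(img)
# print("Predicted digit:", np.argmax(result, axis=-1))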
###Output
_____no_output_____
###Markdown
If you are on JupyterLab or SageMaker Studio (or just struggle to get the interactive widget working)...don't worry: Try adapting the "Exploring Results" section from the Local Notebook to send in one of the test set images instead!
###Code
# TODO: import libraries
# TODO: Choose an image
# TODO: Load the image.
# Note: Can feed numpy array to predictor. There is no need to build tensor.
img =
# Send to the model:
# Plot the result:
plt.figure(figsize=(3, 3))
fig = plt.subplot(1, 1, 1)
ax = plt.imshow(img, cmap="gray")
fig.set_title(f"Predicted Number {np.argmax(result)}")
plt.show()
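# One possible way to fill in the TODOs above (sketch; the file path below is
# hypothetical -- pick any image under the folders-of-images layout in data/test/):
#
# from PIL import Image
# img = np.array(Image.open("data/test/3/some_image.jpg")).astype(np.float32) / 255.0
# reqdata = np.expand_dims(np.expand_dims(img, 0), 0)  # add batch and channel dims
# result = predictor.predict(reqdata)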
###Output
_____no_output_____
###Markdown
Further ImprovementsIf you've got the basic train/deploy/call cycle working, congratulations! This core pattern of experimenting in the notebook but executing jobs on scalable hardware is at the heart of the SageMaker data science workflow.There are still plenty of ways we can use the tools better though: Read on for the next challenges! 1. Cut training costs easily with SageMaker Managed Spot ModeAWS Spot Instances let you take advantage of unused capacity in the AWS cloud, at up to a 90% discount versus standard on-demand pricing! For small jobs like this, taking advantage of this discount is as easy as adding a couple of parameters to the Estimator constructor:https://sagemaker.readthedocs.io/en/stable/estimators.htmlNote that in general, spot capacity is offered at a discounted rate because it's interruptible based on instantaneous demand... Longer-running training jobs should implement checkpoint saving and loading, so that they can efficiently resume if interrupted part way through. More information can be found on the [Managed Spot Training in Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html) page of the [SageMaker Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/). 2. Parameterize your algorithmBeing able to change the parameters of your algorithm at run-time (without modifying the `main.py` script each time) is helpful for making your code more re-usable... But even more so because it's a pre-requisite for automatic hyperparameter tuning!Job parameter parsing should ideally be factored into a separate function, and as a best practice should accept setting values through **both** command line flags (as demonstrated in the [official MXNet MNIST example](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/mxnet_mnist/mnist.py)) **and** the [SageMaker Hyperparameter environment variable(s)](https://docs.aws.amazon.com/sagemaker/latest/dg/docker-container-environmental-variables-user-scripts.html). Perhaps the official MXNet example could be improved by setting environment-variable-driven defaults to the algorithm hyperparameters, the same as it already does for channels?Refactor your job to accept **epochs** and **batch size** as optional parameters, and show how you can set these before each training run through the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html). 3. Tune your network hyperparametersRe-use the same approach as before to parameterize some features in the structure of your network: Perhaps the sizes of the `Conv2D` kernels? The number, type, node count, or activation function of layers in the network? No need to stray too far away from the sample architecture!Instead of manually (or programmatically) calling `estimator.fit()` with different hyperparameters each time, we can use SageMaker's Bayesian Hyperparameter Tuning functionality to explore the space more efficiently!The SageMaker SDK Docs give a great [overview](https://sagemaker.readthedocs.io/en/stable/overview.htmlsagemaker-automatic-model-tuning) of using the HyperparameterTuner, which you can refer to if you get stuck.First, we'll need to define a specific **metric** to optimize for, which is really a specification of how to scrape metric values from the algorithm's console logs. 
Next, use the [\*Parameter](https://sagemaker.readthedocs.io/en/stable/tuner.html) classes (`ContinuousParameter`, `IntegerParameter` and `CategoricalParameter`) to define appropriate ranges for the hyperparameters whose combination you want to optimize.With the original estimator, target metric and parameter ranges defined, you'll be able to create a [HyperparameterTuner](https://sagemaker.readthedocs.io/en/stable/tuner.html) and use that to start a hyperparameter tuning job instead of a single model training job.Pay attention to likely run time and resource consumption when selecting the maximum total number of training jobs and maximum parallel jobs of your hyperparameter tuning run... You can always view and cancel ongoing hyperparameter tuning jobs through the SageMaker Console. Additional ChallengesIf you have time, the following challenges are trickier, and might stretch your SageMaker knowledge even further!**Batch Transform / Additional Inference Formats**: As discussed in this notebook, the deployed endpoint expects a particular tensor data format for requests... This complicates the usually-simple task of re-purposing the same model for batch inference (since our data in S3 is in JPEG format). The SageMaker TensorFlow SDK docs provide guidance on accepting custom formats in the ["Create Python Scripts for Custom Input and Output Formats"](https://sagemaker.readthedocs.io/en/stable/using_tf.htmlcreate-python-scripts-for-custom-input-and-output-formats) section. If you can refactor your algorithm to accept JPEG requests when deployed as a real-time endpoint, you'll be able to run it as a batch [Transformer](https://sagemaker.readthedocs.io/en/stable/transformer.html) against images in S3 with a simple `estimator.transformer()` call.**Optimized Training Formats**: A dataset like this (containing many tiny objects) may take much less time to load in to the algorithm if we either converted it to the standard Numpy format that Keras distributes it in (just 4 files X_train, Y_train, X_test, Y_test); or *streaming* the data with [SageMaker Pipe Mode](https://aws.amazon.com/blogs/machine-learning/using-pipe-input-mode-for-amazon-sagemaker-algorithms/), instead of downloading it up-front.**Experiment Tracking**: The [SageMaker Experiments](https://docs.aws.amazon.com/sagemaker/latest/dg/experiments.html) feature gives a more structured way to track trials across multiple related experiments (for example, different HPO runs, or between HPO and regular model training jobs). You can use the [official SageMaker Experiments Example](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-experiments) for guidance on how to track the experiments in this notebook... and should note that the [SageMaker Experiments SDK Docs](https://sagemaker-experiments.readthedocs.io/en/latest/) are maintained separately, since it's a different Python module. Clean-UpRemember to clean up any persistent resources that aren't needed anymore to save costs: The most significant of these are real-time prediction endpoints, and this SageMaker Notebook Instance.The SageMaker SDK [Predictor](https://sagemaker.readthedocs.io/en/stable/predictors.html) class provides an interface to clean up real-time prediction endpoints; and SageMaker Notebook Instances can be stopped through the SageMaker Console when you're finished.You might also like to clean up any S3 buckets / content we created, to prevent ongoing storage costs.
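Putting the tuning pieces just described together, a hedged sketch might look like the block below (to run before the clean-up cell that follows); the metric regex must match whatever `src/main.py` actually logs, and the hyperparameter names and ranges are illustrative assumptions rather than required values.
```python
from sagemaker.tuner import (
    CategoricalParameter,
    IntegerParameter,
    HyperparameterTuner,
)

# Ranges for the hyperparameters exposed by the training script (illustrative)
hyperparameter_ranges = {
    "epochs": IntegerParameter(2, 8),
    "batch-size": CategoricalParameter([64, 128, 256]),
}

# The objective metric is scraped from the training job's console logs, so the
# regex has to match the accuracy line printed by src/main.py
tuner = HyperparameterTuner(
    estimator=estimator,
    objective_metric_name="test:accuracy",
    metric_definitions=[{"Name": "test:accuracy", "Regex": "Test accuracy: ([0-9\\.]+)"}],
    hyperparameter_ranges=hyperparameter_ranges,
    objective_type="Maximize",
    max_jobs=6,
    max_parallel_jobs=2,
)

tuner.fit(inputs)  # launches the tuning job with the same data channels
```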
###Code
# TODO: Clean up any endpoints/etc to release resources
###Output
_____no_output_____
###Markdown
REFERENCE SOLUTION: PyTorch MNIST Lift and Shift Exercise IntroductionYour new colleague in the data science team (who isn't very familiar with SageMaker) has written a nice notebook to tackle an image classification problem with PyTorch: [Local Notebook.ipynb](Local%20Notebook.ipynb).It works OK with the simple MNIST data set they were working on before, but now they'd like to take advantage of some of the features of SageMaker to tackle bigger and harder challenges.**Can you help refactor the Local Notebook code, to show them how to use SageMaker effectively?** Getting StartedFirst, check you can **run the [Local Notebook.ipynb](Local%20Notebook.ipynb) notebook through** - reviewing what steps it takes.**This notebook** sets out a structure you can use to migrate code into, and lists out some of the changes you'll need to make at a high level. You can either work directly in here, or duplicate this notebook so you still have an unchanged copy of the original.Try to work through the sections first with an MVP goal in mind (fitting the model to data in S3 via a SageMaker Training Job, and deploying/using the model through a SageMaker Endpoint). At the end, there are extension exercises to bring in more advanced functionality. DependenciesListing all our imports at the start helps to keep the requirements to run any script/file transparent up-front, and is specified by nearly every style guide including Python's official [PEP 8](https://www.python.org/dev/peps/pep-0008/imports)
###Code
# External Dependencies:
from IPython.display import display, HTML
import numpy as np
# Local Dependencies:
from util.nb import upload_in_background
# TODO: What else will you need?
# TODO: Here might be a good place to init any SDKs you need...
###Output
_____no_output_____
###Markdown
Data PreparationThe primary data source for a SageMaker training job is (nearly) always S3 - so we should upload our training and test data there.We'd like our training job to be reusable for other image classification projects, so we'll upload in the **folders-of-images format** rather than the straight pre-processed numpy arrays.However, for this particular dataset (tens of thousands of tiny files) it's easy to accidentally write a poor-performing upload that **could take a long time**... So we prepared the below to help you run the upload **in the background** using the [aws s3 sync](https://docs.aws.amazon.com/cli/latest/reference/s3/sync.html) CLI command.**Check you understand** what data it's going to upload from this notebook, and where it's going to store it in S3, then start the upload running while you work on the rest.
###Code
upload_in_background(local_path="data", s3_uri="s3://MYBUCKET/MYFOLDERS")
###Output
_____no_output_____
###Markdown
You can carry on working on the other sections while your data uploads! Data Input ("Channels") ConfigurationThe draft code has **2 data sets**: One for training, and one for test/validation. (For classification, the folder location of each image is sufficient as a label).In SageMaker terminology, each input data set is a "channel" and we can name them however we like... Just make sure you're consistent about what you call each one!For a simple input configuration, a channel spec might just be the S3 URI of the folder. For configuring more advanced options, there's the [s3_input](https://sagemaker.readthedocs.io/en/stable/inputs.html) class in the SageMaker SDK.
###Code
# TODO: Define your 2 data channels
###Output
_____no_output_____
###Markdown
Algorithm ("Estimator") Configuration and RunInstead of loading and fitting this data here in the notebook, we'll be creating a [PyTorch Estimator](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/sagemaker.pytorch.htmlpytorch-estimator) through the SageMaker SDK, to run the code on a separate container that can be scaled as required.The ["Using PyTorch with the SageMaker Python SDK"](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html) docs give a good overview of this process. You should run your estimator in **Python 3**.**Use the [src/main.py](src/main.py) file** as your entry point to port code into - which has already been created for you with some basic hints.
###Code
# TODO: Create your PyTorch estimator
# Note the PyTorch class inherits from some cross-framework base classes with additional
# constructor options:
# https://sagemaker.readthedocs.io/en/stable/estimators.html
# TODO: Call estimator.fit
# Note: As configured, this job took about 12 clock minutes (but only 3 billable minutes) to run, reaching a
# test accuracy of ~82%. The majority of the time is the download of images to the container - which could be
# significantly optimized as discussed later in the "Further Improvements" section
# Top Tip: Check out "SageMaker Local Mode" for rapid debugging iterations while you flesh out your code!
# - Set instance_type="local" (or "local_gpu") in the Estimator constructor above
# - Spot mode doesn't make any sense on local, so you'll also need to comment out the arguments
# `use_spot_instances` and `max_wait`
# - Replace your `inputs` with local references to avoid downloading data from S3:
# e.g. { "train": "file:///home/ec2-user/SageMaker/.../data/train", "test": "..." }
###Output
_____no_output_____
###Markdown
Deploy and Use Your Model (Real-Time Inference)If your training job has completed; and saved the model in the correct PyTorch model format; it should now be pretty simple to deploy the model to a real-time endpoint.You can achieve this with the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html).
###Code
# TODO: Deploy a real-time endpoint
###Output
_____no_output_____
###Markdown
Reviewing the architecture from the example notebook, we set up the model to accept **batches** of **28x28** image tensors with **normalized 0-1 pixel values** and a **color channel dimension** (which either came in front or behind the image dimensions, depending on the value of `K.image_data_format()`)Assuming you haven't added any custom pre-processing to our model source code (to accept e.g. encoded JPEGs/PNGs, or arbitrary shapes), we'll need to replicate that same format when we use our endpoint.We've provided a nice **interactive widget** below (which doesn't work in JupyterLab, unfortunately - only plain Jupyter!) and some skeleton code to help you use your model... But you'll need to fill in some details!
###Code
# Display interactive widget:
# This widget updates variable "data" here in the Jupyter kernel when drawn on
HTML(open("util/input.html").read())
# Run a prediction:
# Squeeze out any unneeded dimensions from "data", then put back the batch and channel dimensions
# we want (assuming batch dim is first and channel dim is last):
print(f"Raw data shape {np.array(data).shape}")
img = np.squeeze(np.array(data)).astype(np.float32)
img = np.expand_dims(np.expand_dims(img, 0), 0)
print(f"Request data shape {img.shape}")
# TODO: Call the predictor with reqdata
# TODO: What structure is the response? How do we interpret it?
###Output
_____no_output_____
###Markdown
If you love JupyterLab (or just struggle to get the interactive widget working)...don't worry: Try adapting the "Exploring Results" section from the Local Notebook to send in one of the test set images instead! Further ImprovementsIf you've got the basic train/deploy/call cycle working, congratulations! This core pattern of experimenting in the notebook but executing jobs on scalable hardware is at the heart of the SageMaker data science workflow.There are still plenty of ways we can use the tools better though: Read on for the next challenges! 1. Cut training costs easily with SageMaker Managed Spot ModeAWS Spot Instances let you take advantage of unused capacity in the AWS cloud, at up to a 90% discount versus standard on-demand pricing! For small jobs like this, taking advantage of this discount is as easy as adding a couple of parameters to the Estimator constructor:https://sagemaker.readthedocs.io/en/stable/estimators.htmlNote that in general, spot capacity is offered at a discounted rate because it's interruptible based on instantaneous demand... Longer-running training jobs should implement checkpoint saving and loading, so that they can efficiently resume if interrupted part way through. More information can be found on the [Managed Spot Training in Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html) page of the [SageMaker Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/). 2. Parameterize your algorithmBeing able to change the parameters of your algorithm at run-time (without modifying the `main.py` script each time) is helpful for making your code more re-usable... But even more so because it's a pre-requisite for automatic hyperparameter tuning!Job parameter parsing should ideally be factored into a separate function, and as a best practice should accept setting values through **both** command line flags (as demonstrated in the [official MXNet MNIST example](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/mxnet_mnist/mnist.py)) **and** the [SageMaker Hyperparameter environment variable(s)](https://docs.aws.amazon.com/sagemaker/latest/dg/docker-container-environmental-variables-user-scripts.html). Perhaps the official MXNet example could be improved by setting environment-variable-driven defaults to the algorithm hyperparameters, the same as it already does for channels?Refactor your job to accept **epochs** and **batch size** as optional parameters, and show how you can set these before each training run through the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html). 3. Tune your network hyperparametersRe-use the same approach as before to parameterize some features in the structure of your network: Perhaps the sizes of the `Conv2D` kernels? The number, type, node count, or activation function of layers in the network? 
No need to stray too far away from the sample architecture!Instead of manually (or programmatically) calling `estimator.fit()` with different hyperparameters each time, we can use SageMaker's Bayesian Hyperparameter Tuning functionality to explore the space more efficiently!The SageMaker SDK Docs give a great [overview](https://sagemaker.readthedocs.io/en/stable/overview.htmlsagemaker-automatic-model-tuning) of using the HyperparameterTuner, which you can refer to if you get stuck.First, we'll need to define a specific **metric** to optimize for, which is really a specification of how to scrape metric values from the algorithm's console logs. Next, use the [\*Parameter](https://sagemaker.readthedocs.io/en/stable/tuner.html) classes (`ContinuousParameter`, `IntegerParameter` and `CategoricalParameter`) to define appropriate ranges for the hyperparameters whose combination you want to optimize.With the original estimator, target metric and parameter ranges defined, you'll be able to create a [HyperparameterTuner](https://sagemaker.readthedocs.io/en/stable/tuner.html) and use that to start a hyperparameter tuning job instead of a single model training job.Pay attention to likely run time and resource consumption when selecting the maximum total number of training jobs and maximum parallel jobs of your hyperparameter tuning run... You can always view and cancel ongoing hyperparameter tuning jobs through the SageMaker Console. Additional ChallengesIf you have time, the following challenges are trickier, and might stretch your SageMaker knowledge even further!**Batch Transform / Additional Inference Formats**: As discussed in this notebook, the deployed endpoint expects a particular tensor data format for requests... This complicates the usually-simple task of re-purposing the same model for batch inference (since our data in S3 is in JPEG format). The SageMaker TensorFlow SDK docs provide guidance on accepting custom formats in the ["Create Python Scripts for Custom Input and Output Formats"](https://sagemaker.readthedocs.io/en/stable/using_tf.htmlcreate-python-scripts-for-custom-input-and-output-formats) section. If you can refactor your algorithm to accept JPEG requests when deployed as a real-time endpoint, you'll be able to run it as a batch [Transformer](https://sagemaker.readthedocs.io/en/stable/transformer.html) against images in S3 with a simple `estimator.transformer()` call.**Optimized Training Formats**: A dataset like this (containing many tiny objects) may take much less time to load in to the algorithm if we either converted it to the standard Numpy format that Keras distributes it in (just 4 files X_train, Y_train, X_test, Y_test); or *streaming* the data with [SageMaker Pipe Mode](https://aws.amazon.com/blogs/machine-learning/using-pipe-input-mode-for-amazon-sagemaker-algorithms/), instead of downloading it up-front.**Experiment Tracking**: The new (December 2019) [SageMaker Experiments](https://docs.aws.amazon.com/sagemaker/latest/dg/experiments.html) feature gives a more structured way to track trials across multiple related experiments (for example, different HPO runs, or between HPO and regular model training jobs). You can use the [official SageMaker Experiments Example](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-experiments) for guidance on how to track the experiments in this notebook... 
and should note that the [SageMaker Experiments SDK Docs](https://sagemaker-experiments.readthedocs.io/en/latest/) are maintained separately, since it's a different Python module. Clean-UpRemember to clean up any persistent resources that aren't needed anymore to save costs: The most significant of these are real-time prediction endpoints, and this SageMaker Notebook Instance.The SageMaker SDK [Predictor](https://sagemaker.readthedocs.io/en/stable/predictors.html) class provides an interface to clean up real-time prediction endpoints; and SageMaker Notebook Instances can be stopped through the SageMaker Console when you're finished.You might also like to clean up any S3 buckets / content we created, to prevent ongoing storage costs.
###Code
# TODO: Clean up any endpoints/etc to release resources
###Output
_____no_output_____
###Markdown
PyTorch MNIST Lift and Shift ExerciseFor this exercise notebook, you should be able to use the `Python 3 (PyTorch 1.4 Python 3.6 CPU Optimized)` kernel on SageMaker Studio, or `conda_pytorch_p36` on classic SageMaker Notebook Instances.--- IntroductionYour new colleague in the data science team (who isn't very familiar with SageMaker) has written a nice notebook to tackle an image classification problem with PyTorch: [Local Notebook.ipynb](Local%20Notebook.ipynb).It works OK with the simple MNIST data set they were working on before, but now they'd like to take advantage of some of the features of SageMaker to tackle bigger and harder challenges.**Can you help refactor the Local Notebook code, to show them how to use SageMaker effectively?** Getting StartedFirst, check you can **run the [Local Notebook.ipynb](Local%20Notebook.ipynb) notebook through** - reviewing what steps it takes.**This notebook** sets out a structure you can use to migrate code into, and lists out some of the changes you'll need to make at a high level. You can either work directly in here, or duplicate this notebook so you still have an unchanged copy of the original.Try to work through the sections first with an MVP goal in mind (fitting the model to data in S3 via a SageMaker Training Job, and deploying/using the model through a SageMaker Endpoint). At the end, there are extension exercises to bring in more advanced functionality. DependenciesListing all our imports at the start helps to keep the requirements to run any script/file transparent up-front, and is specified by nearly every style guide including Python's official [PEP 8](https://www.python.org/dev/peps/pep-0008/imports)
###Code
!pip install matplotlib
!pip install ipywidgets
# External Dependencies:
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import numpy as np
# Local Dependencies:
from util.nb import upload_in_background
# TODO: What else will you need?
# Have a look at the documentation: https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html
# to see which libraries need to be imported to use SageMaker and the PyTorch estimator
# TODO: Here might be a good place to init any SDKs you need...
# 1. Setup the SageMaker role
role = ?
# 2. Setup the SageMaker session
sess = ?
# 3. Setup the SageMaker default bucket
bucket_name = ?
# Have a look at the previous examples to find out how to do it
###Output
_____no_output_____
###Markdown
Data PreparationThe primary data source for a SageMaker training job is (nearly) always S3 - so we should upload our training and test data there.We'd like our training job to be reusable for other image classification projects, so we'll upload in the **folders-of-images format** rather than the straight pre-processed numpy arrays.However, for this particular dataset (tens of thousands of tiny files) it's easy to accidentally write a poor-performing upload that **could take a long time**... So we prepared the below to help you run the upload **in the background** using the [aws s3 sync](https://docs.aws.amazon.com/cli/latest/reference/s3/sync.html) CLI command.**Check you understand** what data it's going to upload from this notebook, and where it's going to store it in S3, then start the upload running while you work on the rest.
###Code
upload_in_background(local_path="data", s3_uri=f"s3://{bucket_name}/mnist")
###Output
_____no_output_____
###Markdown
You can carry on working on the other sections while your data uploads! Data Input ("Channels") ConfigurationThe draft code has **2 data sets**: One for training, and one for test/validation. (For classification, the folder location of each image is sufficient as a label).In SageMaker terminology, each input data set is a "channel" and we can name them however we like... Just make sure you're consistent about what you call each one!For a simple input configuration, a channel spec might just be the S3 URI of the folder. For configuring more advanced options, there's the [s3_input](https://sagemaker.readthedocs.io/en/stable/inputs.html) class in the SageMaker SDK.
###Code
# TODO: Define your 2 data channels
# The data can be found in: "s3://{bucket_name}/mnist/train" and "s3://{bucket_name}/mnist/test"
inputs = # Look at the previous example to see how the inputs were defined
###Output
_____no_output_____
###Markdown
Algorithm ("Estimator") Configuration and RunInstead of loading and fitting this data here in the notebook, we'll be creating a [PyTorch Estimator](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/sagemaker.pytorch.htmlpytorch-estimator) through the SageMaker SDK, to run the code on a separate container that can be scaled as required.The ["Using PyTorch with the SageMaker Python SDK"](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html) docs give a good overview of this process. You should run your estimator in **Python 3**.**Use the [src/main.py](src/main.py) file** as your entry point to port code into - which has already been created for you with some basic hints.
###Code
# TODO: Create your PyTorch estimator
# Note the PyTorch class inherits from some cross-framework base classes with additional
# constructor options:
# https://sagemaker.readthedocs.io/en/stable/estimators.html
# https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#create-an-estimator
# We are using pytorch 1.4 and python 3
# You can reuse the metrics definition from the previous example
# (Optional) Look at the Pytorch script and try to pass new hyperparameters
estimator = ?
###Output
_____no_output_____
###Markdown
Before running the actual training on SageMaker TrainingJob, it can be good to run it locally first using the code below. If there is any error, you can fix them first before running using SageMaker TrainingJob.
###Code
#!python3 src/main.py --train data/train --test data/test --output-data-dir data/local-output --model-dir data/local-model --epochs=2 --batch-size=128
# TODO: Call estimator.fit
###Output
_____no_output_____
###Markdown
Deploy and Use Your Model (Real-Time Inference)If your training job has completed; and saved the model in the correct PyTorch model format; it should now be pretty simple to deploy the model to a real-time endpoint.You can achieve this with the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html).
###Code
# TODO: Deploy a real-time endpoint
###Output
_____no_output_____
###Markdown
Reviewing the architecture from the example notebook, we set up the model to accept **batches** of **28x28** image tensors with **normalized 0-1 pixel values** and a **color channel dimension**Assuming you haven't added any custom pre-processing to our model source code (to accept e.g. encoded JPEGs/PNGs, or arbitrary shapes), we'll need to replicate that same format when we use our endpoint.We've provided a nice **interactive widget** below (which doesn't work in JupyterLab, unfortunately - only plain Jupyter!) and some skeleton code to help you use your model... But you'll need to fill in some details! WARNING: The next next cells for visualization only works with the classic Jupyter notebooks, skip to the next section if you are using JupyterLab and SageMaker Studio
###Code
# Display interactive widget:
# This widget updates variable "data" here in the Jupyter kernel when drawn on
HTML(open("util/input.html").read())
# Run a prediction:
print(f"Raw data shape {np.array(data).shape}")
img = np.squeeze(np.array(data)).astype(np.float32)
img = np.expand_dims(np.expand_dims(img, 0), 0)
print(f"Request data shape {img.shape}")
# TODO: Call the predictor with reqdata
# TODO: What structure is the response? How do we interpret it?
###Output
_____no_output_____
###Markdown
If you are on JupyterLab or SageMaker Studio (or just struggle to get the interactive widget working)...don't worry: Try adapting the "Exploring Results" section from the Local Notebook to send in one of the test set images instead!
###Code
# TODO: import libraries
# TODO: Choose an image
# TODO: Load the image.
# Note: Can feed numpy array to predictor. There is no need to build tensor.
img =
# Send to the model:
# Plot the result:
plt.figure(figsize=(3, 3))
fig = plt.subplot(1, 1, 1)
ax = plt.imshow(img, cmap="gray")
fig.set_title(f"Predicted Number {np.argmax(result)}")
plt.show()
###Output
_____no_output_____
###Markdown
Further ImprovementsIf you've got the basic train/deploy/call cycle working, congratulations! This core pattern of experimenting in the notebook but executing jobs on scalable hardware is at the heart of the SageMaker data science workflow.There are still plenty of ways we can use the tools better though: Read on for the next challenges! 1. Cut training costs easily with SageMaker Managed Spot ModeAWS Spot Instances let you take advantage of unused capacity in the AWS cloud, at up to a 90% discount versus standard on-demand pricing! For small jobs like this, taking advantage of this discount is as easy as adding a couple of parameters to the Estimator constructor:https://sagemaker.readthedocs.io/en/stable/estimators.htmlNote that in general, spot capacity is offered at a discounted rate because it's interruptible based on instantaneous demand... Longer-running training jobs should implement checkpoint saving and loading, so that they can efficiently resume if interrupted part way through. More information can be found on the [Managed Spot Training in Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html) page of the [SageMaker Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/). 2. Parameterize your algorithmBeing able to change the parameters of your algorithm at run-time (without modifying the `main.py` script each time) is helpful for making your code more re-usable... But even more so because it's a pre-requisite for automatic hyperparameter tuning!Job parameter parsing should ideally be factored into a separate function, and as a best practice should accept setting values through **both** command line flags (as demonstrated in the [official MXNet MNIST example](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/mxnet_mnist/mnist.py)) **and** the [SageMaker Hyperparameter environment variable(s)](https://docs.aws.amazon.com/sagemaker/latest/dg/docker-container-environmental-variables-user-scripts.html). Perhaps the official MXNet example could be improved by setting environment-variable-driven defaults to the algorithm hyperparameters, the same as it already does for channels?Refactor your job to accept **epochs** and **batch size** as optional parameters, and show how you can set these before each training run through the [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html). 3. Tune your network hyperparametersRe-use the same approach as before to parameterize some features in the structure of your network: Perhaps the sizes of the `Conv2D` kernels? The number, type, node count, or activation function of layers in the network? No need to stray too far away from the sample architecture!Instead of manually (or programmatically) calling `estimator.fit()` with different hyperparameters each time, we can use SageMaker's Bayesian Hyperparameter Tuning functionality to explore the space more efficiently!The SageMaker SDK Docs give a great [overview](https://sagemaker.readthedocs.io/en/stable/overview.htmlsagemaker-automatic-model-tuning) of using the HyperparameterTuner, which you can refer to if you get stuck.First, we'll need to define a specific **metric** to optimize for, which is really a specification of how to scrape metric values from the algorithm's console logs. 
Next, use the [\*Parameter](https://sagemaker.readthedocs.io/en/stable/tuner.html) classes (`ContinuousParameter`, `IntegerParameter` and `CategoricalParameter`) to define appropriate ranges for the hyperparameters whose combination you want to optimize.With the original estimator, target metric and parameter ranges defined, you'll be able to create a [HyperparameterTuner](https://sagemaker.readthedocs.io/en/stable/tuner.html) and use that to start a hyperparameter tuning job instead of a single model training job.Pay attention to likely run time and resource consumption when selecting the maximum total number of training jobs and maximum parallel jobs of your hyperparameter tuning run... You can always view and cancel ongoing hyperparameter tuning jobs through the SageMaker Console. Additional ChallengesIf you have time, the following challenges are trickier, and might stretch your SageMaker knowledge even further!**Batch Transform / Additional Inference Formats**: As discussed in this notebook, the deployed endpoint expects a particular tensor data format for requests... This complicates the usually-simple task of re-purposing the same model for batch inference (since our data in S3 is in JPEG format). The SageMaker TensorFlow SDK docs provide guidance on accepting custom formats in the ["Create Python Scripts for Custom Input and Output Formats"](https://sagemaker.readthedocs.io/en/stable/using_tf.htmlcreate-python-scripts-for-custom-input-and-output-formats) section. If you can refactor your algorithm to accept JPEG requests when deployed as a real-time endpoint, you'll be able to run it as a batch [Transformer](https://sagemaker.readthedocs.io/en/stable/transformer.html) against images in S3 with a simple `estimator.transformer()` call.**Optimized Training Formats**: A dataset like this (containing many tiny objects) may take much less time to load in to the algorithm if we either converted it to the standard Numpy format that Keras distributes it in (just 4 files X_train, Y_train, X_test, Y_test); or *streaming* the data with [SageMaker Pipe Mode](https://aws.amazon.com/blogs/machine-learning/using-pipe-input-mode-for-amazon-sagemaker-algorithms/), instead of downloading it up-front.**Experiment Tracking**: The new (December 2019) [SageMaker Experiments](https://docs.aws.amazon.com/sagemaker/latest/dg/experiments.html) feature gives a more structured way to track trials across multiple related experiments (for example, different HPO runs, or between HPO and regular model training jobs). You can use the [official SageMaker Experiments Example](https://github.com/awslabs/amazon-sagemaker-examples/tree/master/sagemaker-experiments) for guidance on how to track the experiments in this notebook... and should note that the [SageMaker Experiments SDK Docs](https://sagemaker-experiments.readthedocs.io/en/latest/) are maintained separately, since it's a different Python module. Clean-UpRemember to clean up any persistent resources that aren't needed anymore to save costs: The most significant of these are real-time prediction endpoints, and this SageMaker Notebook Instance.The SageMaker SDK [Predictor](https://sagemaker.readthedocs.io/en/stable/predictors.html) class provides an interface to clean up real-time prediction endpoints; and SageMaker Notebook Instances can be stopped through the SageMaker Console when you're finished.You might also like to clean up any S3 buckets / content we created, to prevent ongoing storage costs.
###Code
# TODO: Clean up any endpoints/etc to release resources
###Output
_____no_output_____ |
Lucid_Sonic_Dreams_Tutorial_Notebook.ipynb | ###Markdown
A. Set-Up A.1. Set-up GPU: Navigate to **Runtime -> Change runtime type** and make sure **Hardware accelerator** is set to GPU. A.2. Download Sample Audio Preview Files
###Code
## CHEMICAL LOVE - BASICALLY SATURDAY NIGHT ##
! gdown --id 1aTWrzCvJyYcQ82PS6av3YJrtsON2_CUK
## PANCAKE FEET - TENNYSSON ##
! gdown --id 14MqCkuREr1TmuWaxZd8bnuhVlL_vCE9s
## RASPBERRY - SAJE ##
! gdown --id 1GqRi4VFEbw46e9RRvuGPtNc7TuOKFbjl
## LUCID SONIC DREAMS DEMO TRACK ##
# Main File
! gdown --id 1Vc2yC2F5iO0ScC5F0CzF_YB1YPGI2uUP
# Pulse File
! gdown --id 1FY5MO6XqVu9abbdNQQY6C99RHxFGm36o
# Class File
! gdown --id 1-qwcs8_Va58YqkHMdXDm9uef-RcH01gh
## SEA OF VOICES - PORTER ROBINSON ##
# Instrumental (Main Audio)
! gdown --id 13-kS5-3Tw2x9kEVfE3ZMkUN955nw73mN
# Original (Output Audio)
! gdown --id 1r0Mo-vtUIf2njqJ0h3hPJuQELcJ8K2Gu
## UNFAITH - EKALI ##
! gdown --id 1rgwrhtnVwK2Dom9pJ7p2CBF0j7F2vdkM
###Output
Downloading...
From: https://drive.google.com/uc?id=1aTWrzCvJyYcQ82PS6av3YJrtsON2_CUK
To: /content/chemical_love.wav
15.9MB [00:00, 50.6MB/s]
Downloading...
From: https://drive.google.com/uc?id=14MqCkuREr1TmuWaxZd8bnuhVlL_vCE9s
To: /content/pancake_feet.mp3
100% 961k/961k [00:00<00:00, 64.1MB/s]
Downloading...
From: https://drive.google.com/uc?id=1GqRi4VFEbw46e9RRvuGPtNc7TuOKFbjl
To: /content/raspberry.mp3
2.16MB [00:00, 138MB/s]
Downloading...
From: https://drive.google.com/uc?id=1Vc2yC2F5iO0ScC5F0CzF_YB1YPGI2uUP
To: /content/lucidsonicdreams_main.mp3
5.97MB [00:00, 93.6MB/s]
Downloading...
From: https://drive.google.com/uc?id=1FY5MO6XqVu9abbdNQQY6C99RHxFGm36o
To: /content/lucidsonicdreams_pulse.mp3
5.97MB [00:00, 52.3MB/s]
Downloading...
From: https://drive.google.com/uc?id=1-qwcs8_Va58YqkHMdXDm9uef-RcH01gh
To: /content/lucidsonicdreams_class.mp3
5.97MB [00:00, 93.3MB/s]
Downloading...
From: https://drive.google.com/uc?id=13-kS5-3Tw2x9kEVfE3ZMkUN955nw73mN
To: /content/sea_of_voices_inst.mp3
100% 961k/961k [00:00<00:00, 56.2MB/s]
Downloading...
From: https://drive.google.com/uc?id=1r0Mo-vtUIf2njqJ0h3hPJuQELcJ8K2Gu
To: /content/sea_of_voices.mp3
100% 961k/961k [00:00<00:00, 61.9MB/s]
Downloading...
From: https://drive.google.com/uc?id=1rgwrhtnVwK2Dom9pJ7p2CBF0j7F2vdkM
To: /content/unfaith.mp3
100% 1.44M/1.44M [00:00<00:00, 92.7MB/s]
###Markdown
A.3. Install Lucid Sonic Dreams
###Code
! pip install lucidsonicdreams
###Output
Collecting lucidsonicdreams
Downloading https://files.pythonhosted.org/packages/4a/01/91ff8de2866a78435231966bf006eca06d7624c3f7cecce5b8c9b351d97d/lucidsonicdreams-0.4.tar.gz
Collecting tensorflow==1.15
[?25l Downloading https://files.pythonhosted.org/packages/92/2b/e3af15221da9ff323521565fa3324b0d7c7c5b1d7a8ca66984c8d59cb0ce/tensorflow-1.15.0-cp37-cp37m-manylinux2010_x86_64.whl (412.3MB)
[K |████████████████████████████████| 412.3MB 42kB/s
[?25hRequirement already satisfied: librosa in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.8.0)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.19.5)
Requirement already satisfied: moviepy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.2.3.5)
Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (7.1.2)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (4.41.1)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.4.1)
Requirement already satisfied: scikit-image in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.16.2)
Collecting pygit2
[?25l Downloading https://files.pythonhosted.org/packages/98/ed/41a09ca93374015958a18e37280d02c833238272ce3f2e28b10c6253477d/pygit2-1.5.0-cp37-cp37m-manylinux2014_x86_64.whl (3.1MB)
[K |████████████████████████████████| 3.1MB 39.4MB/s
[?25hRequirement already satisfied: gdown in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (3.6.4)
Collecting mega.py
Downloading https://files.pythonhosted.org/packages/a3/51/44a1085a091c27ade09e122d5abdafb4b6400265081879a7c4e32973a175/mega.py-1.0.8-py2.py3-none-any.whl
Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (2.23.0)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.1.5)
Requirement already satisfied: SoundFile in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.10.3.post1)
Collecting tensorboard<1.16.0,>=1.15.0
[?25l Downloading https://files.pythonhosted.org/packages/1e/e9/d3d747a97f7188f48aa5eda486907f3b345cd409f0a0850468ba867db246/tensorboard-1.15.0-py3-none-any.whl (3.8MB)
[K |████████████████████████████████| 3.8MB 47.7MB/s
[?25hRequirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.36.2)
Collecting tensorflow-estimator==1.15.1
[?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)
[K |████████████████████████████████| 512kB 55.1MB/s
[?25hRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.12.1)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.32.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.1.0)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.12.0)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.2.0)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.1.2)
Requirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.8.1)
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.15.0)
Collecting keras-applications>=1.0.8
[?25l Downloading https://files.pythonhosted.org/packages/71/e3/19762fdfc62877ae9102edf6342d71b28fbfd9dea3d2f96a882ce099b03f/Keras_Applications-1.0.8-py3-none-any.whl (50kB)
[K |████████████████████████████████| 51kB 8.9MB/s
[?25hCollecting gast==0.2.2
Downloading https://files.pythonhosted.org/packages/4e/35/11749bf99b2d4e3cceb4d55ca22590b0d7c2c62b9de38ac4a4a7f4687421/gast-0.2.2.tar.gz
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (3.12.4)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (3.3.0)
Requirement already satisfied: pooch>=1.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (1.3.0)
Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (1.0.1)
Requirement already satisfied: resampy>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.2.2)
Requirement already satisfied: decorator>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (4.4.2)
Requirement already satisfied: audioread>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (2.1.9)
Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.22.2.post1)
Requirement already satisfied: numba>=0.43.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.51.2)
Requirement already satisfied: imageio<3.0,>=2.1.2 in /usr/local/lib/python3.7/dist-packages (from moviepy->lucidsonicdreams) (2.4.1)
Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (3.2.2)
Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (2.5.1)
Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (1.1.1)
Requirement already satisfied: cffi>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from pygit2->lucidsonicdreams) (1.14.5)
Collecting cached-property
Downloading https://files.pythonhosted.org/packages/48/19/f2090f7dad41e225c7f2326e4cfe6fff49e57dedb5b53636c9551f86b069/cached_property-1.5.2-py2.py3-none-any.whl
Requirement already satisfied: pathlib==1.0.1 in /usr/local/lib/python3.7/dist-packages (from mega.py->lucidsonicdreams) (1.0.1)
Collecting tenacity<6.0.0,>=5.1.5
Downloading https://files.pythonhosted.org/packages/45/67/67bb1db087678bc5c6f20766cf18914dfe37b0b9d4e4c5bb87408460b75f/tenacity-5.1.5-py2.py3-none-any.whl
Collecting pycryptodome<4.0.0,>=3.9.6
[?25l Downloading https://files.pythonhosted.org/packages/ad/16/9627ab0493894a11c68e46000dbcc82f578c8ff06bc2980dcd016aea9bd3/pycryptodome-3.10.1-cp35-abi3-manylinux2010_x86_64.whl (1.9MB)
[K |████████████████████████████████| 1.9MB 42.7MB/s
[?25hRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (2.10)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (2020.12.5)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (1.24.3)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->lucidsonicdreams) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->lucidsonicdreams) (2.8.1)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (56.0.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (1.0.1)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.3.4)
Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.8->tensorflow==1.15->lucidsonicdreams) (2.10.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from pooch>=1.0->librosa->lucidsonicdreams) (20.9)
Requirement already satisfied: appdirs in /usr/local/lib/python3.7/dist-packages (from pooch>=1.0->librosa->lucidsonicdreams) (1.4.4)
Requirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba>=0.43.0->librosa->lucidsonicdreams) (0.34.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->lucidsonicdreams) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->lucidsonicdreams) (1.3.1)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->lucidsonicdreams) (2.4.7)
Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.4.0->pygit2->lucidsonicdreams) (2.20)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.10.1)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.4.1)
Requirement already satisfied: typing-extensions>=3.6.4; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < "3.8"->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.7.4.3)
Building wheels for collected packages: lucidsonicdreams, gast
Building wheel for lucidsonicdreams (setup.py) ... [?25l[?25hdone
Created wheel for lucidsonicdreams: filename=lucidsonicdreams-0.4-cp37-none-any.whl size=11500 sha256=34da3eb00a3274b72611e0a50c2310c51d1bbda3c93c39ab7a1c902efbd44023
Stored in directory: /root/.cache/pip/wheels/9c/97/52/70c4ed96ba0b0a21b938fb984b51f3c910f4f7af0afbb14400
Building wheel for gast (setup.py) ... [?25l[?25hdone
Created wheel for gast: filename=gast-0.2.2-cp37-none-any.whl size=7540 sha256=38a21ef188ad539ad226f23a4dd937e801c57c63283722538cd02a2ba2ef330e
Stored in directory: /root/.cache/pip/wheels/5c/2e/7e/a1d4d4fcebe6c381f378ce7743a3ced3699feb89bcfbdadadd
Successfully built lucidsonicdreams gast
[31mERROR: tensorflow-probability 0.12.1 has requirement gast>=0.3.2, but you'll have gast 0.2.2 which is incompatible.[0m
Installing collected packages: tensorboard, tensorflow-estimator, keras-applications, gast, tensorflow, cached-property, pygit2, tenacity, pycryptodome, mega.py, lucidsonicdreams
Found existing installation: tensorboard 2.4.1
Uninstalling tensorboard-2.4.1:
Successfully uninstalled tensorboard-2.4.1
Found existing installation: tensorflow-estimator 2.4.0
Uninstalling tensorflow-estimator-2.4.0:
Successfully uninstalled tensorflow-estimator-2.4.0
Found existing installation: gast 0.3.3
Uninstalling gast-0.3.3:
Successfully uninstalled gast-0.3.3
Found existing installation: tensorflow 2.4.1
Uninstalling tensorflow-2.4.1:
Successfully uninstalled tensorflow-2.4.1
Successfully installed cached-property-1.5.2 gast-0.2.2 keras-applications-1.0.8 lucidsonicdreams-0.4 mega.py-1.0.8 pycryptodome-3.10.1 pygit2-1.5.0 tenacity-5.1.5 tensorboard-1.15.0 tensorflow-1.15.0 tensorflow-estimator-1.15.1
###Markdown
B. Generate Sample Videos B.1. Choosing a StyleStyles can be selected using the **style** parameter, which takes in any of the following:* A valid default style name provided by the package. Run **show_styles()** to print valid values. *Note: These styles are loaded from [this repository](https://github.com/justinpinkney/awesome-pretrained-stylegan2) by Justin Pinkney.** A path to a .pkl file that contains pre-trained StyleGAN weights* A custom function that takes noise_batch and class_batch parameters and outputs a list of Pillow Images (see example in **B.5**)
###Code
from lucidsonicdreams import show_styles
# Show valid default style names.
show_styles()
###Output
Imageio: 'ffmpeg-linux64-v3.3.1' was not found on your computer; downloading it now.
Try 1. Download from https://github.com/imageio/imageio-binaries/raw/master/ffmpeg/ffmpeg-linux64-v3.3.1 (43.8 MB)
Downloading: 45929032/45929032 bytes (100.0%)
Done
File saved as /root/.imageio/ffmpeg/ffmpeg-linux64-v3.3.1.
obama
painting faces
trypophobia
lsun cars
butterflies
fursona
cakes
faces (ffhq slim 256x256)
ukiyoe faces
wikiart faces
floor plans
abstract art
cat
microscope images
ukiyo-e faces
more abstract art
abstract photos
wikiart
my little pony
figure drawings
car (config-e)
faces (ffhq config-f)
anime portraits
pokemon
cifar 100
imagenet
modern art
cifar 10
wildlife
doors
textures
fireworks
faces (ffhq config-e 256x256)
faces (ffhq config-f 512x512)
lsun cats
ffhq faces
beetles
panda
grumpy cat
anime faces
flowers
faces (ffhq config-e)
celeba hq faces
maps
horse
church
car (config-f)
vases
lsun bedrooms
###Markdown
B.2. Using Default Settings This package is set up so that the only arguments required are the **file path to your audio track** and the **file name of the video output**. This code snippet outputs a 45-second, low-resolution preview of a video using the "abstract photos" style, and all the other default settings. The song used here is **Chemical Love by Basically Saturday Night**. You can watch the official music video [here](https://youtu.be/Gi7oQrtyjKI), or listen to them on [Spotify](https://open.spotify.com/artist/46tGdhXAQbTvxVOGgy0Fqu?si=E8mUjbWbR2uiiMR2MUc_4w)! Click [here](https://youtu.be/oGXfOmqFYTg) to view a full-length sample video without having to run the code.
###Code
from lucidsonicdreams import LucidSonicDream
from google.colab import files
L = LucidSonicDream(song = 'chemical_love.wav',
style = 'abstract photos')
L.hallucinate(file_name = 'chemical_love.mp4',
resolution = 360,
start = 30,
duration = 45)
files.download("chemical_love.mp4")
###Output
Preparing style...
Downloading abstract photos weights (This may take a while)...
###Markdown
B.3. Tuning Parameters - How It Works There are **over 30 parameters** you can tune, offering tons of flexibility as to how you want your music to be visualized. This may seem like an overwhelming number, but things are easier to digest once you have a basic understanding of how the visualizer works. So, how does it work? 1. First, a batch of input vectors corresponding to output images is initialized. Linear interpolations between these vectors are produced, serving as the "base" vectors.2. Three components react to the audio: **Pulse**, **Motion**, and **Class**. These modify the "base" vectors accordingly. * **Pulse**, quite literally, refers to how the visuals "pulse" to the beat of the music. It is set to react to the audio's percussive elements by default. * **Motion** refers to how the visuals are "pushed forward" or "sped up" by the music, and is set to react to the audio's harmonic elements by default. * Finally, **Class** refers to the labels of objects shown in the generated images (e.g. in the case of the WikiArt style, classes can refer to Van Gogh, Andy Warhol, Da Vinci, etc). This is set to react to the audio's pitch, where each note controls the prominence of a class. *Note:* Among the default styles available, only WikiArt uses classes thus far.3. Finally, additional effects - such as contrast and flash - are added to the video. These are set to react to the audio's percussive elements by default. The ParametersNow, the parameters can be easily understood by separating them into 7 categories: Initialization, Pulse, Motion, Class, Effects, Video, and Other. If this is still overwhelming, it's recommended that you start off by tuning **speed_fpm**, **pulse_react**, **motion_react** and **class_pitch_react**, and build from there. These parameters make the biggest difference. **Initialization*** **speed_fpm** (*Default: 12*) - FPM stands for "Frames per Minute". This determines how many images are initialized - the more there are, the faster the visuals morph. If **speed_fpm = 0**, then only one image is initialized, and that single image reacts to the audio. In this case, there will be no motion during silent parts of the audio. **Pulse Parameters*** **pulse_react** (*Default: 0.5*) - The "strength" of the pulse. It is recommended to keep this between 0 and 2.* **pulse_percussive** (*Default: True*) - If True while *pulse_harmonic* is False, pulse reacts to the audio's percussive elements.* **pulse_harmonic** (*Default: False*) - If True while *pulse_percussive* is False, pulse reacts to the audio's harmonic elements. *Note*: If both parameters are True or both parameters are False, pulse reacts to the "entire" unaltered audio.* **pulse_audio** - Path to a separate audio file to be used to control pulse. This is recommended if you have access to an isolated drum/percussion track. If passed, *pulse_percussive* and *pulse_harmonic* are ignored. *Note:* this parameter is passed when defining the LucidSonicDream object. **Motion Parameters*** **motion_react** (*0.5*), **motion_percussive** (*False*), **motion_harmonic** (*True*), and **motion_audio** - Simply the "motion" equivalents of the pulse parameters above. * **motion_randomness** (*Default: 0.5*)- Degree of randomness of motion. Higher values will typically prevent the video from cycling through the same visuals repeatedly. Must range from 0 to 1.* **truncation** (*Default: 1*) - Controls the variety of visuals generated. Lower values lead to lower variety. *Note*: A very low value will usually lead to "jittery" visuals. 
Must range from 0 to 1. **Class Parameters** *(Note: Most of these parameters were heavily inspired by the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman)** **classes** - List of at most 12 numerical object labels. If none, 12 labels are selected at random. * **dominant_classes_first** (*Default: False*)- If True, the list passed to "classes" is sorted by prominence in descending order.* **class_pitch_react** (*Default: 0.5*)- Class equivalent of pulse_react and motion_react. It is recommended to keep this between 0 and 2.* **class_smooth_seconds** (*Default: 1*) - Number of seconds spent smoothly interpolating between each class vector. The higher the value, the less "sudden" the change of class.* **class_complexity** (*Default: 1*) - Controls the "complexity" of images generated. Lower values tend to generate more simple and mundane images, while higher values tend to generate more intricate and bizarre objects. It is recommended to keep this between 0 and 1.* **class_shuffle_seconds** (*Default: None*) - Controls the timestamps wherein the mapping of label to note is re-shuffled. This is recommended when the audio used has a limited range of pitches, but you wish for more classes to be shown. If the value passed is a number *n*, classes are shuffled every *n* seconds. If the value passed is a list of numbers, these numbers are used as timestamps (in seconds) wherein classes are shuffled.* **class_shuffle_strength** (*Default: 0.5*) - Controls how drastically classes are re-shuffled. Only applies when class_shuffle_seconds is passed. It is recommended to keep this between 0 and 1.* **class_audio** - Class equivalent of pulse_audio and motion_audio. Passed when defining the LucidSonicDream object. **Effects Parameters*** **contrast_strength** (*Default: 0.5*) - Strength of default contrast effect. It is recommended to keep this between 0 and 1.* **contrast_percussive** (*Default: True*) - If True, contrast reacts to the audio's percussive elements.* **contrast_audio** - Equivalent of previous "audio" arguments. Passed when defining the LucidSonicDream object. *Note*: If none of these arguments are passed, the contrast effect will not be applied. * **flash_strength** (*0.5*), **flash_percussive** (*True*), and **flash_audio** - Equivalent of the previous three parameters, but for a "flash" effect. It is recommended to keep these between 0 and 1. If none of these arguments are passed, the flash effect will not be applied. * **custom_effects** - List of custom, user-defined effects to apply (See **B.4**) **Video Parameters*** **resolution** - Self-explanatory. Low resolutions are recommended for "trial" renders. If none is passed, unaltered high-resolution images will be used.* **start** (*Default: 0*) - Starting timestamp in seconds.* **duration** - Video duration in seconds. If none is passed, full duration of audio will be used.* **output_audio** - Final output audio of the video. Overwrites audio from "song" parameter if provided (See **B.5**)* **fps** (*Default: 43*) - Video Frames Per Second. * **save_frames** (*Default: False*) - If True, saves all individual video frames to disk. **Other*** **batch_size** (*Default: 1*) - Determines how many vectors are simultaneously fed to the model. Typically, larger batch sizes will output less clearly-defined images. Example 1 This is a simple example whose appeal lies mostly in how it utilizes Motion. The song used here is **Pancake Feet by Tennysson**.
As usual, you can watch the official music video [here](https://youtu.be/_ODm4UZGh7g), or listen to them on [Spotify](https://open.spotify.com/artist/3Nb8N20WChM0swo5qWTvm8?si=oUZ2uV7eQH2ieMucvL_vgA)!Click [here](https://youtu.be/ztWCMm9cExY) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream('pancake_feet.mp3',
style = 'modern art')
L.hallucinate('pancake_feet.mp4',
resolution = 360,
duration = 45,
speed_fpm = 0,
motion_percussive = True,
motion_react = 0.8,
contrast_strength = 0.5,
flash_strength = 0.7)
files.download("pancake_feet.mp4")
###Output
_____no_output_____
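###Markdown
As a further illustration of the parameters above, here is a hedged variation on Example 1 that exercises **motion_randomness** and **truncation**, which the sample videos don't otherwise use. The values below are illustrative assumptions rather than tuned settings.
###Code
# Illustrative sketch only -- parameter values are assumptions, not tuned settings.
# Reuses LucidSonicDream and `files`, imported in the cells above.
L = LucidSonicDream('pancake_feet.mp3',
                    style = 'modern art')

L.hallucinate('pancake_feet_variation.mp4',
              resolution = 360,
              duration = 45,
              speed_fpm = 12,              # non-zero FPM: visuals keep morphing even in silence
              motion_react = 0.6,
              motion_randomness = 0.8,     # discourage cycling through the same visuals
              truncation = 0.8,            # slightly lower variety, smoother frames
              contrast_strength = 0.5)

files.download("pancake_feet_variation.mp4")
###Output
_____no_output_____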
###Markdown
Example 2This is another simple example that combines subtle Pulse, Motion, Contrast, and Flash reactions to complement the overall trippy style. The style weights used here are from a model trained by **Jeremy Torman**. You can check out his artworks on [Twitter](https://twitter.com/tormanjeremy), or see details on his [original Reddit post](https://www.reddit.com/r/deepdream/comments/leqwxs/stylegan2ada_pickle_file_in_comments_with_colab/) if you're interested!The song, meanwhile, is **Raspberry by Saje**. You can listen to the full track on [YouTube](https://www.youtube.com/watch?v=fOLxvL0_aMU) or [Spotify](https://open.spotify.com/artist/3I2596dGk4K3e4qKjwpzQb?si=TbyjmQuAQRWmrE--lNTRMg). Click [here](https://youtu.be/iEFqcMrszH0) to view a full-length sample video without having to run the code.
###Code
# Download Style Weights
! gdown --id 19hNptJSXji_9h7DMJBVlEMe-izWXvkYQ
L = LucidSonicDream(song = 'ecstacy abao jimi darkness 2020 no vox no hum 5 broken head.m4a',
style = 'VisionaryArt.pkl')
L.hallucinate(file_name = 'raspberry.mp4',
resolution = 1024,
#duration = 60,
pulse_react = 1.2,
motion_react = 0.7,
contrast_strength = 0.5,
flash_strength = 0.5)
files.download("raspberry2.mp4")
###Output
Downloading...
From: https://drive.google.com/uc?id=19hNptJSXji_9h7DMJBVlEMe-izWXvkYQ
To: /content/VisionaryArt.pkl
369MB [00:04, 91.9MB/s]
Preparing style...
Preparing audio...
Loading effects...
Doing math...
###Markdown
Example 3This is a much more complex example that utilizes multiple audio tracks and more fine-tuned parameters. It takes advantage of isolated audio tracks for cleaner Pulse, Class, and Contrast reactions.Note: Numerical labels for classes using the WikiArt style can be found [here](https://colab.research.google.com/github/Norod/my-colab-experiments/blob/master/WikiArt_Example_Generation_By_Peter_Baylies.ipynb). Click [here](https://youtu.be/l-nGC-ve7sI) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream(song = 'lucidsonicdreams_main.mp3',
pulse_audio = 'lucidsonicdreams_pulse.mp3',
class_audio = 'lucidsonicdreams_class.mp3',
contrast_audio = 'lucidsonicdreams_pulse.mp3',
style = 'wikiart')
L.hallucinate('lucidsonicdreams.mp4',
resolution = 360,
start = 32,
duration = 60,
pulse_react = 0.25,
motion_react = 0,
classes = [1,5,9,16,23,27,28,30,50,68,71,89],
dominant_classes_first = True,
class_shuffle_seconds = 8,
class_smooth_seconds = 4,
class_pitch_react = 0.2,
contrast_strength = 0.3,
flash_strength = 0.1)
files.download("lucidsonicdreams.mp4")
###Output
_____no_output_____
###Markdown
B.4. Using Custom Effects You can apply your own reactive custom effects to the video by defining an effects function and passing it to an EffectsGenerator object, as seen below. The effects function must accept the following parameters:* **array** - Refers to the image array that the effect is applied on.* **strength** - Reactivity parameter, similar to pulse_react, contrast_strength, etc.* **amplitude** - Refers to the volume of the audio at a given point in time. Simply multiply this by the parameter that controls the "intensity" of the effect. The function must output a NumPy array representing the output image. The function is then passed to an EffectsGenerator object, which in turn has the following parameters: * **func** - The effects function* **audio** - Audio controlling the effect* **strength** - Strength of the effect* **percussive** - If True, effect reacts to the audio's percussive elements. The song used in the example below is **Unfaith by Ekali**. You can listen to the full track on [YouTube](https://youtu.be/8C4wgzP1KOI) or [Spotify](https://open.spotify.com/track/5UC6HF9VVgYMHQ7PcwcZNZ?si=hCIA2JMTQTC98zzPZfA3yQ). Click [here](https://youtu.be/V7jo281HSwM) to view a sample video without having to run the code.
###Code
import numpy as np
from skimage.transform import swirl
from lucidsonicdreams import EffectsGenerator
def swirl_func(array, strength, amplitude):
swirled_image = swirl(array,
rotation = 0,
strength = 100 * strength * amplitude,
radius=650)
return (swirled_image*255).astype(np.uint8)
swirl_effect = EffectsGenerator(swirl_func,
audio = 'unfaith.mp3',
strength = 0.2,
percussive = False)
L = LucidSonicDream('unfaith.mp3',
style = 'textures')
L.hallucinate('unfaith.mp4',
resolution = 360,
duration = 60,
motion_react = 0.15,
speed_fpm = 2,
pulse_react = 1.5,
contrast_strength = 1,
flash_strength = 1,
custom_effects = [swirl_effect])
files.download("unfaith.mp4")
###Output
_____no_output_____
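###Markdown
Here is another minimal effects-function sketch that follows the same contract as `swirl_func` above (image array in, NumPy uint8 array out): a brightness boost whose intensity scales with strength and amplitude. The scaling constant is an arbitrary assumption.
###Code
import numpy as np

from lucidsonicdreams import EffectsGenerator

def brighten_func(array, strength, amplitude):
    # Scale pixel values with the audio amplitude; the factor 0.6 is an arbitrary choice.
    factor = 1 + 0.6 * strength * amplitude
    brightened = np.clip(array.astype(np.float32) * factor, 0, 255)
    return brightened.astype(np.uint8)

brighten_effect = EffectsGenerator(brighten_func,
                                   audio = 'unfaith.mp3',
                                   strength = 0.3,
                                   percussive = True)

# Example usage: pass it alongside (or instead of) swirl_effect, e.g.
# L.hallucinate('unfaith_bright.mp4', resolution = 360, duration = 30,
#               custom_effects = [swirl_effect, brighten_effect])
###Output
_____no_output_____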
###Markdown
B.5. Using Custom Visualization Functions Finally, you can choose not to use StyleGAN, and instead define any custom function that takes in batches of vectors and outputs a list of Pillow images. The function must take in **noise_batch** and **class_batch** parameters. Moreover, when defining the LucidSonicDream object, **num_possible_classes** and **input_shape** must be passed. The example below defines a custom function using a pre-trained PyTorch implementation of the BigGAN, similar to the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman. Numerical labels for each class can be found [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). The song used is **Sea of Voices by Porter Robinson**. You can listen to the track on [YouTube](https://www.youtube.com/watch?v=lSooYPG-5Rg) or [Spotify](https://open.spotify.com/track/2lNFWUrxuNaQsf5I1pDTPr?si=MsD7GJUsRma4mkyfjbEhJg). Note that an [instrumental version](https://youtu.be/2Bo0JqTmVwg) was used as input in order to prevent vocals from influencing motion. Click [here](https://youtu.be/_TJCql7O9kU?t=180) to view a full-length sample video without having to run the code!
###Code
! pip install pytorch_pretrained_biggan
from pytorch_pretrained_biggan import BigGAN, convert_to_images
import torch
biggan = BigGAN.from_pretrained('biggan-deep-512')
biggan.to('cuda:0')
def biggan_func(noise_batch, class_batch):
noise_tensor = torch.from_numpy(noise_batch).cuda()
class_tensor = torch.from_numpy(class_batch).cuda()
with torch.no_grad():
output_tensor = biggan(noise_tensor.float(), class_tensor.float(), truncation = 1)
return convert_to_images(output_tensor.cpu())
L = LucidSonicDream('sea_of_voices_inst.mp3',
style = biggan_func,
input_shape = 128,
num_possible_classes = 1000)
L.hallucinate('sea_of_voices.mp4',
output_audio = 'sea_of_voices.mp3',
resolution = 360,
duration = 60,
speed_fpm = 3,
classes = [13, 14, 22, 24, 301, 84, 99, 100, 134, 143, 393, 394],
class_shuffle_seconds = 10,
class_shuffle_strength = 0.1,
class_complexity = 0.5,
class_smooth_seconds = 4,
motion_react = 0.35,
flash_strength = 1,
contrast_strength = 1)
###Output
_____no_output_____
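###Markdown
To show the bare minimum a custom visualization function needs, the toy sketch below skips the GAN entirely and maps each noise vector to a flat colour image. The **input_shape** and **num_possible_classes** values are illustrative assumptions, and the class batch is simply ignored.
###Code
import numpy as np
from PIL import Image

def toy_style_func(noise_batch, class_batch):
    # noise_batch: (batch, input_shape); class_batch: (batch, num_possible_classes).
    # The class batch is ignored here -- the point is only to satisfy the contract:
    # return one Pillow image per noise vector.
    images = []
    for noise in noise_batch:
        # Map the first three noise dimensions to an RGB colour in [0, 255].
        rgb = ((np.tanh(noise[:3]) + 1) * 127.5).astype(np.uint8)
        images.append(Image.new("RGB", (360, 360), tuple(int(c) for c in rgb)))
    return images

L = LucidSonicDream('sea_of_voices_inst.mp3',
                    style = toy_style_func,
                    input_shape = 128,            # assumed latent size for the toy function
                    num_possible_classes = 1000)  # assumed; classes are ignored above

# L.hallucinate('toy_style.mp4', resolution = 360, duration = 15)
###Output
_____no_output_____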
###Markdown
A. Set-Up A.1. Set-up GPUNavigate to **Runtime -> Change runtime type** and make sure **Hardware accelerator** is set to GPU. A.2. Download Sample Audio Preview Files
###Code
## CHEMICAL LOVE - BASICALLY SATURDAY NIGHT ##
! gdown --id 1aTWrzCvJyYcQ82PS6av3YJrtsON2_CUK
## PANCAKE FEET - TENNYSSON ##
! gdown --id 14MqCkuREr1TmuWaxZd8bnuhVlL_vCE9s
## RASPBERRY - SAJE ##
! gdown --id 1GqRi4VFEbw46e9RRvuGPtNc7TuOKFbjl
## LUCID SONIC DREAMS DEMO TRACK ##
# Main File
! gdown --id 1Vc2yC2F5iO0ScC5F0CzF_YB1YPGI2uUP
# Pulse File
! gdown --id 1FY5MO6XqVu9abbdNQQY6C99RHxFGm36o
# Class File
! gdown --id 1-qwcs8_Va58YqkHMdXDm9uef-RcH01gh
## SEA OF VOICES - PORTER ROBINSON ##
# Instrumental (Main Audio)
! gdown --id 13-kS5-3Tw2x9kEVfE3ZMkUN955nw73mN
# Original (Output Audio)
! gdown --id 1r0Mo-vtUIf2njqJ0h3hPJuQELcJ8K2Gu
## UNFAITH - EKALI ##
! gdown --id 1rgwrhtnVwK2Dom9pJ7p2CBF0j7F2vdkM
###Output
_____no_output_____
###Markdown
A.3. Install Lucid Sonic Dreams
###Code
! pip install lucidsonicdreams
###Output
_____no_output_____
###Markdown
B. Generate Sample Videos B.1. Choosing a StyleStyles can be selected using the **style** parameter, which takes in any of the following:* A valid default style name provided by the package. Run **show_styles()** to print valid values. *Note: These styles are loaded from [this repository](https://github.com/justinpinkney/awesome-pretrained-stylegan2) by Justin Pinkney.** A path to a .pkl file that contains pre-trained StyleGAN weights* A custom function that takes noise_batch and class_batch parameters and outputs a list of Pillow Images (see example in **B.5**)
###Code
from lucidsonicdreams import show_styles
# Show valid default style names.
show_styles()
###Output
_____no_output_____
###Markdown
B.2. Using Default Settings This package is set up so that the only arguments required are the **file path to your audio track** and the **file name of the video output**. This code snippet outputs a 45-second, low-resolution preview of a video using the "abstract photos" style, and all the other default settings. The song used here is **Chemical Love by Basically Saturday Night**. You can watch the official music video [here](https://youtu.be/Gi7oQrtyjKI), or listen to them on [Spotify](https://open.spotify.com/artist/46tGdhXAQbTvxVOGgy0Fqu?si=E8mUjbWbR2uiiMR2MUc_4w)! Click [here](https://youtu.be/oGXfOmqFYTg) to view a full-length sample video without having to run the code.
###Code
from lucidsonicdreams import LucidSonicDream
from google.colab import files
L = LucidSonicDream(song = 'chemical_love.wav',
style = 'abstract photos')
L.hallucinate(file_name = 'chemical_love.mp4',
resolution = 360,
start = 30,
duration = 45)
files.download("chemical_love.mp4")
###Output
_____no_output_____
###Markdown
B.3. Tuning Parameters - How It Works There are **over 30 parameters** you can tune, offering tons of flexibility as to how you want your music to be visualized. This may seem like an overwhelming number, but things are easier to digest once you have a basic understanding of how the visualizer works. So, how does it work? 1. First, a batch of input vectors corresponding to output images is initialized. Linear interpolations between these vectors are produced, serving as the "base" vectors.2. Three components react to the audio: **Pulse**, **Motion**, and **Class**. These modify the "base" vectors accordingly. * **Pulse**, quite literally, refers to how the visuals "pulse" to the beat of the music. It is set to react to the audio's percussive elements by default. * **Motion** refers to how the visuals are "pushed forward" or "sped up" by the music, and is set to react to the audio's harmonic elements by default. * Finally, **Class** refers to the labels of objects shown in the generated images (e.g. in the case of the WikiArt style, classes can refer to Van Gogh, Andy Warhol, Da Vinci, etc). This is set to react to the audio's pitch, where each note controls the prominence of a class. *Note:* Among the default styles available, only WikiArt uses classes thus far.3. Finally, additional effects - such as contrast and flash - are added to the video. These are set to react to the audio's percussive elements by default. The ParametersNow, the parameters can be easily understood by separating them into 7 categories: Initialization, Pulse, Motion, Class, Effects, Video, and Other. If this is still overwhelming, it's recommended that you start off by tuning **speed_fpm**, **pulse_react**, **motion_react** and **class_pitch_react**, and build from there. These parameters make the biggest difference. **Initialization*** **speed_fpm** (*Default: 12*) - FPM stands for "Frames per Minute". This determines how many images are initialized - the more there are, the faster the visuals morph. If **speed_fpm = 0**, then only one image is initialized, and that single image reacts to the audio. In this case, there will be no motion during silent parts of the audio. **Pulse Parameters*** **pulse_react** (*Default: 0.5*) - The "strength" of the pulse. It is recommended to keep this between 0 and 2.* **pulse_percussive** (*Default: True*) - If True while *pulse_harmonic* is False, pulse reacts to the audio's percussive elements.* **pulse_harmonic** (*Default: False*) - If True while *pulse_percussive* is False, pulse reacts to the audio's harmonic elements. *Note*: If both parameters are True or both parameters are False, pulse reacts to the "entire" unaltered audio.* **pulse_audio** - Path to a separate audio file to be used to control pulse. This is recommended if you have access to an isolated drum/percussion track. If passed, *pulse_percussive* and *pulse_harmonic* are ignored. *Note:* this parameter is passed when defining the LucidSonicDream object. **Motion Parameters*** **motion_react** (*0.5*), **motion_percussive** (*False*), **motion_harmonic** (*True*), and **motion_audio** - Simply the "motion" equivalents of the pulse parameters above. * **motion_randomness** (*Default: 0.5*)- Degree of randomness of motion. Higher values will typically prevent the video from cycling through the same visuals repeatedly. Must range from 0 to 1.* **truncation** (*Default: 1*) - Controls the variety of visuals generated. Lower values lead to lower variety. *Note*: A very low value will usually lead to "jittery" visuals. 
Must range from 0 to 1. **Class Parameters** *(Note: Most of these parameters were heavily inspired by the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman)** **classes** - List of at most 12 numerical object labels. If none, 12 labels are selected at random. * **dominant_classes_first** (*Default: False*)- If True, the list passed to "classes" is sorted by prominence in descending order.* **class_pitch_react** (*Default: 0.5*)- Class equivalent of pulse_react and motion_react. It is recommended to keep this between 0 and 2.* **class_smooth_seconds** (*Default: 1*) - Number of seconds spent smoothly interpolating between each class vector. The higher the value, the less "sudden" the change of class.* **class_complexity** (*Default: 1*) - Controls the "complexity" of images generated. Lower values tend to generate more simple and mundane images, while higher values tend to generate more intricate and bizarre objects. It is recommended to keep this between 0 and 1.* **class_shuffle_seconds** (*Default: None*) - Controls the timestamps wherein the mapping of label to note is re-shuffled. This is recommended when the audio used has a limited range of pitches, but you wish for more classes to be shown. If the value passed is a number *n*, classes are shuffled every *n* seconds. If the value passed is a list of numbers, these numbers are used as timestamps (in seconds) wherein classes are shuffled.* **class_shuffle_strength** (*Default: 0.5*) - Controls how drastically classes are re-shuffled. Only applies when class_shuffle_seconds is passed. It is recommended to keep this between 0 and 1.* **class_audio** - Class equivalent of pulse_audio and motion_audio. Passed when defining the LucidSonicDream object. **Effects Parameters*** **contrast_strength** (*Default: 0.5*) - Strength of default contrast effect. It is recommended to keep this between 0 and 1.* **contrast_percussive** (*Default: True*) - If True, contrast reacts to the audio's percussive elements.* **contrast_audio** - Equivalent of previous "audio" arguments. Passed when defining the LucidSonicDream object. *Note*: If none of these arguments are passed, the contrast effect will not be applied. * **flash_strength** (*0.5*), **flash_percussive** (*True*), and **flash_audio** - Equivalent of the previous three parameters, but for a "flash" effect. It is recommended to keep these between 0 and 1. If none of these arguments are passed, the flash effect will not be applied. * **custom_effects** - List of custom, user-defined effects to apply (See **B.4**) **Video Parameters*** **resolution** - Self-explanatory. Low resolutions are recommended for "trial" renders. If none is passed, unaltered high-resolution images will be used.* **start** (*Default: 0*) - Starting timestamp in seconds.* **duration** - Video duration in seconds. If none is passed, full duration of audio will be used.* **output_audio** - Final output audio of the video. Overwrites audio from "song" parameter if provided (See **B.5**)* **fps** (*Default: 43*) - Video Frames Per Second. * **save_frames** (*Default: False*) - If True, saves all individual video frames to disk. **Other*** **batch_size** (*Default: 1*) - Determines how many vectors are simultaneously fed to the model. Typically, larger batch sizes will output less clearly-defined images. Example 1 This is a simple example whose appeal lies mostly in how it utilizes Motion. The song used here is **Pancake Feet by Tennysson**.
As usual, you can watch the official music video [here](https://youtu.be/_ODm4UZGh7g), or listen to them on [Spotify](https://open.spotify.com/artist/3Nb8N20WChM0swo5qWTvm8?si=oUZ2uV7eQH2ieMucvL_vgA)!Click [here](https://youtu.be/ztWCMm9cExY) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream('pancake_feet.mp3',
style = 'modern art')
L.hallucinate('pancake_feet.mp4',
resolution = 360,
duration = 45,
speed_fpm = 0,
motion_percussive = True,
motion_react = 0.8,
contrast_strength = 0.5,
flash_strength = 0.7)
files.download("pancake_feet.mp4")
###Output
_____no_output_____
###Markdown
Example 2This is another simple example that combines subtle Pulse, Motion, Contrast, and Flash reactions to complement the overall trippy style. The style weights used here are from a model trained by **Jeremy Torman**. You can check out his artworks on [Twitter](https://twitter.com/tormanjeremy), or see details on his [original Reddit post](https://www.reddit.com/r/deepdream/comments/leqwxs/stylegan2ada_pickle_file_in_comments_with_colab/) if you're interested!The song, meanwhile, is **Raspberry by Saje**. You can listen to the full track on [YouTube](https://www.youtube.com/watch?v=fOLxvL0_aMU) or [Spotify](https://open.spotify.com/artist/3I2596dGk4K3e4qKjwpzQb?si=TbyjmQuAQRWmrE--lNTRMg). Click [here](https://youtu.be/iEFqcMrszH0) to view a full-length sample video without having to run the code.
###Code
# Download Style Weights
! gdown --id 19hNptJSXji_9h7DMJBVlEMe-izWXvkYQ
L = LucidSonicDream(song = 'raspberry.mp3',
style = 'VisionaryArt.pkl')
L.hallucinate(file_name = 'raspberry.mp4',
resolution = 360,
duration = 60,
pulse_react = 1.2,
motion_react = 0.7,
contrast_strength = 0.5,
flash_strength = 0.5)
files.download("raspberry.mp4")
###Output
_____no_output_____
###Markdown
Example 3This is a much more complex example that utilizes multiple audio tracks and more fine-tuned parameters. It takes advantage of isolated audio tracks for cleaner Pulse, Class, and Contrast reactions.Note: Numerical labels for classes using the WikiArt style can be found [here](https://colab.research.google.com/github/Norod/my-colab-experiments/blob/master/WikiArt_Example_Generation_By_Peter_Baylies.ipynb). Click [here](https://youtu.be/l-nGC-ve7sI) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream(song = 'lucidsonicdreams_main.mp3',
pulse_audio = 'lucidsonicdreams_pulse.mp3',
class_audio = 'lucidsonicdreams_class.mp3',
contrast_audio = 'lucidsonicdreams_pulse.mp3',
style = 'wikiart')
L.hallucinate('lucidsonicdreams.mp4',
resolution = 360,
start = 32,
duration = 60,
pulse_react = 0.25,
motion_react = 0,
classes = [1,5,9,16,23,27,28,30,50,68,71,89],
dominant_classes_first = True,
class_shuffle_seconds = 8,
class_smooth_seconds = 4,
class_pitch_react = 0.2,
contrast_strength = 0.3,
flash_strength = 0.1)
files.download("lucidsonicdreams.mp4")
###Output
_____no_output_____
###Markdown
B.4. Using Custom Effects You can apply your own reactive custom effects to the video by defining an effects function and passing it to an EffectsGenerator object, as seen below. The effects function must accept the following parameters:* **array** - Refers to the image array that the effect is applied on.* **strength** - Reactivity parameter, similar to pulse_react, contrast_strength, etc.* **amplitude** - Refers to the volume of the audio at a given point in time. Simply multiply this by the parameter that controls the "intensity" of the effect. The function must output a NumPy array representing the output image. The function is then passed to an EffectsGenerator object, which in turn has the following parameters: * **func** - The effects function* **audio** - Audio controlling the effect* **strength** - Strength of the effect* **percussive** - If True, effect reacts to the audio's percussive elements. The song used in the example below is **Unfaith by Ekali**. You can listen to the full track on [YouTube](https://youtu.be/8C4wgzP1KOI) or [Spotify](https://open.spotify.com/track/5UC6HF9VVgYMHQ7PcwcZNZ?si=hCIA2JMTQTC98zzPZfA3yQ). Click [here](https://youtu.be/V7jo281HSwM) to view a sample video without having to run the code.
###Code
import numpy as np
from skimage.transform import swirl
from lucidsonicdreams import EffectsGenerator
def swirl_func(array, strength, amplitude):
swirled_image = swirl(array,
rotation = 0,
strength = 100 * strength * amplitude,
radius=650)
return (swirled_image*255).astype(np.uint8)
swirl_effect = EffectsGenerator(swirl_func,
audio = 'unfaith.mp3',
strength = 0.2,
percussive = False)
L = LucidSonicDream('unfaith.mp3',
style = 'textures')
L.hallucinate('unfaith.mp4',
resolution = 360,
duration = 60,
motion_react = 0.15,
speed_fpm = 2,
pulse_react = 1.5,
contrast_strength = 1,
flash_strength = 1,
custom_effects = [swirl_effect])
files.download("unfaith.mp4")
###Output
_____no_output_____
###Markdown
B.5. Using Custom Visualization Functions Finally, you can choose not to use StyleGAN, and instead define any custom function that takes in batches of vectors and outputs a list of Pillow images. The function must take in **noise_batch** and **class_batch** parameters. Moreover, when defining the LucidSonicDream object, **num_possible_classes** and **input_shape** must be passed. The example below defines a custom function using a pre-trained PyTorch implementation of the BigGAN, similar to the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman. Numerical labels for each class can be found [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). The song used is **Sea of Voices by Porter Robinson**. You can listen to the track on [YouTube](https://www.youtube.com/watch?v=lSooYPG-5Rg) or [Spotify](https://open.spotify.com/track/2lNFWUrxuNaQsf5I1pDTPr?si=MsD7GJUsRma4mkyfjbEhJg). Note that an [instrumental version](https://youtu.be/2Bo0JqTmVwg) was used as input in order to prevent vocals from influencing motion. Click [here](https://youtu.be/_TJCql7O9kU?t=180) to view a full-length sample video without having to run the code!
###Code
! pip install pytorch_pretrained_biggan
from pytorch_pretrained_biggan import BigGAN, convert_to_images
import torch
biggan = BigGAN.from_pretrained('biggan-deep-512')
biggan.to('cuda:0')
def biggan_func(noise_batch, class_batch):
noise_tensor = torch.from_numpy(noise_batch).cuda()
class_tensor = torch.from_numpy(class_batch).cuda()
with torch.no_grad():
output_tensor = biggan(noise_tensor.float(), class_tensor.float(), truncation = 1)
return convert_to_images(output_tensor.cpu())
L = LucidSonicDream('sea_of_voices_inst.mp3',
style = biggan_func,
input_shape = 128,
num_possible_classes = 1000)
L.hallucinate('sea_of_voices.mp4',
output_audio = 'sea_of_voices.mp3',
resolution = 360,
duration = 60,
speed_fpm = 3,
classes = [13, 14, 22, 24, 301, 84, 99, 100, 134, 143, 393, 394],
class_shuffle_seconds = 10,
class_shuffle_strength = 0.1,
class_complexity = 0.5,
class_smooth_seconds = 4,
motion_react = 0.35,
flash_strength = 1,
contrast_strength = 1)
###Output
_____no_output_____
###Markdown
A. Set-Up A.1. Set-up GPUNavigate to **Runtime -> Change runtime type** and make sure **Hardware accelerator** is set to GPU. A.2. Download Sample Audio Preview Files
###Code
from google.colab import drive
drive.mount('/content/drive')
## CHEMICAL LOVE - BASICALLY SATURDAY NIGHT ##
! gdown --id 1aTWrzCvJyYcQ82PS6av3YJrtsON2_CUK
## PANCAKE FEET - TENNYSSON ##
! gdown --id 14MqCkuREr1TmuWaxZd8bnuhVlL_vCE9s
## RASPBERRY - SAJE ##
! gdown --id 1GqRi4VFEbw46e9RRvuGPtNc7TuOKFbjl
## LUCID SONIC DREAMS DEMO TRACK ##
# Main File
! gdown --id 1Vc2yC2F5iO0ScC5F0CzF_YB1YPGI2uUP
# Pulse File
! gdown --id 1FY5MO6XqVu9abbdNQQY6C99RHxFGm36o
# Class File
! gdown --id 1-qwcs8_Va58YqkHMdXDm9uef-RcH01gh
## SEA OF VOICES - PORTER ROBINSON ##
# Instrumental (Main Audio)
! gdown --id 13-kS5-3Tw2x9kEVfE3ZMkUN955nw73mN
# Original (Output Audio)
! gdown --id 1r0Mo-vtUIf2njqJ0h3hPJuQELcJ8K2Gu
## UNFAITH - EKALI ##
! gdown --id 1rgwrhtnVwK2Dom9pJ7p2CBF0j7F2vdkM
###Output
Downloading...
From: https://drive.google.com/uc?id=1aTWrzCvJyYcQ82PS6av3YJrtsON2_CUK
To: /content/chemical_love.wav
100% 15.9M/15.9M [00:00<00:00, 96.6MB/s]
Downloading...
From: https://drive.google.com/uc?id=14MqCkuREr1TmuWaxZd8bnuhVlL_vCE9s
To: /content/pancake_feet.mp3
100% 961k/961k [00:00<00:00, 64.7MB/s]
Downloading...
From: https://drive.google.com/uc?id=1GqRi4VFEbw46e9RRvuGPtNc7TuOKFbjl
To: /content/raspberry.mp3
100% 2.16M/2.16M [00:00<00:00, 69.0MB/s]
Downloading...
From: https://drive.google.com/uc?id=1Vc2yC2F5iO0ScC5F0CzF_YB1YPGI2uUP
To: /content/lucidsonicdreams_main.mp3
100% 5.97M/5.97M [00:00<00:00, 93.3MB/s]
Downloading...
From: https://drive.google.com/uc?id=1FY5MO6XqVu9abbdNQQY6C99RHxFGm36o
To: /content/lucidsonicdreams_pulse.mp3
100% 5.97M/5.97M [00:00<00:00, 94.3MB/s]
Downloading...
From: https://drive.google.com/uc?id=1-qwcs8_Va58YqkHMdXDm9uef-RcH01gh
To: /content/lucidsonicdreams_class.mp3
100% 5.97M/5.97M [00:00<00:00, 91.1MB/s]
Downloading...
From: https://drive.google.com/uc?id=13-kS5-3Tw2x9kEVfE3ZMkUN955nw73mN
To: /content/sea_of_voices_inst.mp3
100% 961k/961k [00:00<00:00, 63.1MB/s]
Downloading...
From: https://drive.google.com/uc?id=1r0Mo-vtUIf2njqJ0h3hPJuQELcJ8K2Gu
To: /content/sea_of_voices.mp3
100% 961k/961k [00:00<00:00, 47.8MB/s]
Downloading...
From: https://drive.google.com/uc?id=1rgwrhtnVwK2Dom9pJ7p2CBF0j7F2vdkM
To: /content/unfaith.mp3
100% 1.44M/1.44M [00:00<00:00, 95.1MB/s]
###Markdown
A.3. Install Lucid Sonic Dreams
###Code
! pip install lucidsonicdreams
###Output
Collecting lucidsonicdreams
Downloading lucidsonicdreams-0.4.tar.gz (11 kB)
Collecting tensorflow==1.15
Downloading tensorflow-1.15.0-cp37-cp37m-manylinux2010_x86_64.whl (412.3 MB)
[K |████████████████████████████████| 412.3 MB 15 kB/s
[?25hRequirement already satisfied: librosa in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.8.1)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.19.5)
Requirement already satisfied: moviepy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.2.3.5)
Requirement already satisfied: Pillow in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (7.1.2)
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (4.62.3)
Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.4.1)
Requirement already satisfied: scikit-image in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.16.2)
Collecting pygit2
Downloading pygit2-1.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.6 MB)
[K |████████████████████████████████| 4.6 MB 26.5 MB/s
[?25hRequirement already satisfied: gdown in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (3.6.4)
Collecting mega.py
Downloading mega.py-1.0.8-py2.py3-none-any.whl (19 kB)
Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (2.23.0)
Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (1.1.5)
Requirement already satisfied: SoundFile in /usr/local/lib/python3.7/dist-packages (from lucidsonicdreams) (0.10.3.post1)
Collecting keras-applications>=1.0.8
Downloading Keras_Applications-1.0.8-py3-none-any.whl (50 kB)
[K |████████████████████████████████| 50 kB 6.2 MB/s
[?25hRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.13.3)
Requirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.41.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (3.3.0)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.1.2)
Requirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.15.0)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.12.0)
Collecting tensorboard<1.16.0,>=1.15.0
Downloading tensorboard-1.15.0-py3-none-any.whl (3.8 MB)
[K |████████████████████████████████| 3.8 MB 51.7 MB/s
[?25hCollecting tensorflow-estimator==1.15.1
Downloading tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503 kB)
[K |████████████████████████████████| 503 kB 53.5 MB/s
[?25hRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.8.1)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (1.1.0)
Requirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (3.17.3)
Requirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.2.0)
Collecting gast==0.2.2
Downloading gast-0.2.2.tar.gz (10 kB)
Requirement already satisfied: wheel>=0.26 in /usr/local/lib/python3.7/dist-packages (from tensorflow==1.15->lucidsonicdreams) (0.37.0)
Requirement already satisfied: h5py in /usr/local/lib/python3.7/dist-packages (from keras-applications>=1.0.8->tensorflow==1.15->lucidsonicdreams) (3.1.0)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.3.4)
Requirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (57.4.0)
Requirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (1.0.1)
Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (4.8.1)
Requirement already satisfied: cached-property in /usr/local/lib/python3.7/dist-packages (from h5py->keras-applications>=1.0.8->tensorflow==1.15->lucidsonicdreams) (1.5.2)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.6.0)
Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->markdown>=2.6.8->tensorboard<1.16.0,>=1.15.0->tensorflow==1.15->lucidsonicdreams) (3.10.0.2)
Requirement already satisfied: joblib>=0.14 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (1.1.0)
Requirement already satisfied: decorator>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (4.4.2)
Requirement already satisfied: scikit-learn!=0.19.0,>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.22.2.post1)
Requirement already satisfied: audioread>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (2.1.9)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (21.2)
Requirement already satisfied: numba>=0.43.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.51.2)
Requirement already satisfied: resampy>=0.2.2 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (0.2.2)
Requirement already satisfied: pooch>=1.0 in /usr/local/lib/python3.7/dist-packages (from librosa->lucidsonicdreams) (1.5.2)
Requirement already satisfied: llvmlite<0.35,>=0.34.0.dev0 in /usr/local/lib/python3.7/dist-packages (from numba>=0.43.0->librosa->lucidsonicdreams) (0.34.0)
Requirement already satisfied: pyparsing<3,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging>=20.0->librosa->lucidsonicdreams) (2.4.7)
Requirement already satisfied: appdirs in /usr/local/lib/python3.7/dist-packages (from pooch>=1.0->librosa->lucidsonicdreams) (1.4.4)
Requirement already satisfied: cffi>=1.0 in /usr/local/lib/python3.7/dist-packages (from SoundFile->lucidsonicdreams) (1.15.0)
Requirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.0->SoundFile->lucidsonicdreams) (2.20)
Collecting tenacity<6.0.0,>=5.1.5
Downloading tenacity-5.1.5-py2.py3-none-any.whl (34 kB)
Collecting pycryptodome<4.0.0,>=3.9.6
Downloading pycryptodome-3.11.0-cp35-abi3-manylinux2010_x86_64.whl (1.9 MB)
     |████████████████████████████████| 1.9 MB 40.7 MB/s
Requirement already satisfied: pathlib==1.0.1 in /usr/local/lib/python3.7/dist-packages (from mega.py->lucidsonicdreams) (1.0.1)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (2.10)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (3.0.4)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (1.24.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->lucidsonicdreams) (2021.10.8)
Requirement already satisfied: imageio<3.0,>=2.1.2 in /usr/local/lib/python3.7/dist-packages (from moviepy->lucidsonicdreams) (2.4.1)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->lucidsonicdreams) (2018.9)
Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->lucidsonicdreams) (2.8.2)
Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (2.6.3)
Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (1.1.1)
Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image->lucidsonicdreams) (3.2.2)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->lucidsonicdreams) (0.11.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image->lucidsonicdreams) (1.3.2)
Building wheels for collected packages: lucidsonicdreams, gast
Building wheel for lucidsonicdreams (setup.py) ... done
Created wheel for lucidsonicdreams: filename=lucidsonicdreams-0.4-py3-none-any.whl size=11498 sha256=2f64bc6e7c82e363d999c00d8a342977f735da1c1a384aade24dfc20062606ac
Stored in directory: /root/.cache/pip/wheels/18/23/3e/f6f4265bde5ac9993ce077083c570dd06032867ae0aadd3481
Building wheel for gast (setup.py) ... done
Created wheel for gast: filename=gast-0.2.2-py3-none-any.whl size=7554 sha256=45ff379097154c89d3206d6874cdc2e7bfb1545db898e96ebcaed15af9906f0c
Stored in directory: /root/.cache/pip/wheels/21/7f/02/420f32a803f7d0967b48dd823da3f558c5166991bfd204eef3
Successfully built lucidsonicdreams gast
Installing collected packages: tensorflow-estimator, tensorboard, tenacity, pycryptodome, keras-applications, gast, tensorflow, pygit2, mega.py, lucidsonicdreams
Attempting uninstall: tensorflow-estimator
Found existing installation: tensorflow-estimator 2.7.0
Uninstalling tensorflow-estimator-2.7.0:
Successfully uninstalled tensorflow-estimator-2.7.0
Attempting uninstall: tensorboard
Found existing installation: tensorboard 2.7.0
Uninstalling tensorboard-2.7.0:
Successfully uninstalled tensorboard-2.7.0
Attempting uninstall: gast
Found existing installation: gast 0.4.0
Uninstalling gast-0.4.0:
Successfully uninstalled gast-0.4.0
Attempting uninstall: tensorflow
Found existing installation: tensorflow 2.7.0
Uninstalling tensorflow-2.7.0:
Successfully uninstalled tensorflow-2.7.0
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
tensorflow-probability 0.14.1 requires gast>=0.3.2, but you have gast 0.2.2 which is incompatible.
kapre 0.3.5 requires tensorflow>=2.0.0, but you have tensorflow 1.15.0 which is incompatible.
Successfully installed gast-0.2.2 keras-applications-1.0.8 lucidsonicdreams-0.4 mega.py-1.0.8 pycryptodome-3.11.0 pygit2-1.7.0 tenacity-5.1.5 tensorboard-1.15.0 tensorflow-1.15.0 tensorflow-estimator-1.15.1
###Markdown
B. Generate Sample Videos

B.1. Choosing a Style

Styles can be selected using the **style** parameter, which takes in any of the following:

* A valid default style name provided by the package. Run **show_styles()** to print valid values. *Note: These styles are loaded from [this repository](https://github.com/justinpinkney/awesome-pretrained-stylegan2) by Justin Pinkney.*
* A path to a .pkl file that contains pre-trained StyleGAN weights
* A custom function that takes noise_batch and class_batch parameters and outputs a list of Pillow Images (see example in **B.5**)
###Code
from lucidsonicdreams import show_styles
# Show valid default style names.
show_styles()
###Output
Imageio: 'ffmpeg-linux64-v3.3.1' was not found on your computer; downloading it now.
Try 1. Download from https://github.com/imageio/imageio-binaries/raw/master/ffmpeg/ffmpeg-linux64-v3.3.1 (43.8 MB)
Downloading: 45929032/45929032 bytes (100.0%)
Done
File saved as /root/.imageio/ffmpeg/ffmpeg-linux64-v3.3.1.
maps
faces (ffhq slim 256x256)
panda
cifar 10
obama
ukiyo-e faces
faces (ffhq config-f)
more abstract art
floor plans
fursona
ukiyoe faces
lsun cats
church
car (config-e)
lsun bedrooms
modern art
faces (ffhq config-f 512x512)
painting faces
microscope images
wikiart faces
figure drawings
butterflies
trypophobia
textures
horse
abstract photos
cifar 100
car (config-f)
wikiart
faces (ffhq config-e 256x256)
anime faces
celeba hq faces
fireworks
faces (ffhq config-e)
wildlife
cakes
anime portraits
flowers
pokemon
grumpy cat
my little pony
abstract art
imagenet
doors
cat
lsun cars
ffhq faces
beetles
vases
###Markdown
B.2. Using Default Settings

This package is set up so that the only arguments required are the **file path to your audio track** and the **file name of the video output**. This code snippet outputs a 45-second, low-resolution preview of a video using the "modern art" style, and all the other default settings.

The song used here is **Chemical Love by Basically Saturday Night**. You can watch the official music video [here](https://youtu.be/Gi7oQrtyjKI), or listen to them on [Spotify](https://open.spotify.com/artist/46tGdhXAQbTvxVOGgy0Fqu?si=E8mUjbWbR2uiiMR2MUc_4w)! Click [here](https://youtu.be/oGXfOmqFYTg) to view a full-length sample video without having to run the code.
###Code
files.download("leger.mp4")
from lucidsonicdreams import LucidSonicDream
from google.colab import files
L = LucidSonicDream(song = 'leger.mp3',
style = 'flowers')
L.hallucinate(file_name = 'leger.mp4',
resolution = 100,
start = 173,
duration = 30,
pulse_react = 0.5,
motion_react = 0.7,
flash_strength = 0.5)
files.download("leger.mp4")
###Output
_____no_output_____
###Markdown
B.3. Tuning Parameters - How It Works

There are **over 30 parameters** you can tune, offering tons of flexibility as to how you want your music to be visualized. This may seem like an overwhelming number, but things are easier to digest once you have a basic understanding of how the visualizer works. So, how does it work?

1. First, a batch of input vectors corresponding to output images is initialized. Linear interpolations between these vectors are produced, serving as the "base" vectors.
2. Three components react to the audio: **Pulse**, **Motion**, and **Class**. These modify the "base" vectors accordingly.
   * **Pulse**, quite literally, refers to how the visuals "pulse" to the beat of the music. It is set to react to the audio's percussive elements by default.
   * **Motion** refers to how the visuals are "pushed forward" or "sped up" by the music, and is set to react to the audio's harmonic elements by default.
   * Finally, **Class** refers to the labels of objects shown in the generated images (e.g. in the case of the WikiArt style, classes can refer to Van Gogh, Andy Warhol, Da Vinci, etc). This is set to react to the audio's pitch, where each note controls the prominence of a class. *Note:* Among the default styles available, only WikiArt uses classes thus far.
3. Finally, additional effects - such as contrast and flash - are added to the video. These are set to react to the audio's percussive elements by default.

The Parameters

Now, the parameters can be easily understood by separating them into 7 categories: Initialization, Pulse, Motion, Class, Effects, Video, and Other. If this is still overwhelming, it's recommended that you start off by tuning **speed_fpm**, **pulse_react**, **motion_react** and **class_pitch_react**, and build from there. These parameters make the biggest difference.

**Initialization**

* **speed_fpm** (*Default: 12*) - FPM stands for "Frames per Minute". This determines how many images are initialized - the more there are, the faster the visuals morph. If **speed_fpm = 0**, then only one image is initialized, and that single image reacts to the audio. In this case, there will be no motion during silent parts of the audio.

**Pulse Parameters**

* **pulse_react** (*Default: 0.5*) - The "strength" of the pulse. It is recommended to keep this between 0 and 2.
* **pulse_percussive** (*Default: True*) - If True while *pulse_harmonic* is False, pulse reacts to the audio's percussive elements.
* **pulse_harmonic** (*Default: False*) - If True while *pulse_percussive* is False, pulse reacts to the audio's harmonic elements. *Note*: If both parameters are True or both parameters are False, pulse reacts to the "entire" unaltered audio.
* **pulse_audio** - Path to a separate audio file to be used to control pulse. This is recommended if you have access to an isolated drum/percussion track. If passed, *pulse_percussive* and *pulse_harmonic* are ignored. *Note:* this parameter is passed when defining the LucidSonicDream object.

**Motion Parameters**

* **motion_react** (*0.5*), **motion_percussive** (*False*), **motion_harmonic** (*True*), and **motion_audio** - Simply the "motion" equivalents of the pulse parameters above.
* **motion_randomness** (*Default: 0.5*) - Degree of randomness of motion. Higher values will typically prevent the video from cycling through the same visuals repeatedly. Must range from 0 to 1.
* **truncation** (*Default: 1*) - Controls the variety of visuals generated. Lower values lead to lower variety. *Note*: A very low value will usually lead to "jittery" visuals. Must range from 0 to 1.

**Class Parameters** *(Note: Most of these parameters were heavily inspired by the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman)*

* **classes** - List of at most 12 numerical object labels. If none, 12 labels are selected at random.
* **dominant_classes_first** (*Default: False*) - If True, the list passed to "classes" is sorted by prominence in descending order.
* **class_pitch_react** (*Default: 0.5*) - Class equivalent of pulse_react and motion_react. It is recommended to keep this between 0 and 2.
* **class_smooth_seconds** (*Default: 1*) - Number of seconds spent smoothly interpolating between each class vector. The higher the value, the less "sudden" the change of class.
* **class_complexity** (*Default: 1*) - Controls the "complexity" of images generated. Lower values tend to generate more simple and mundane images, while higher values tend to generate more intricate and bizarre objects. It is recommended to keep this between 0 and 1.
* **class_shuffle_seconds** (*Default: None*) - Controls the timestamps wherein the mapping of label to note is re-shuffled. This is recommended when the audio used has a limited range of pitches, but you wish for more classes to be shown. If the value passed is a number *n*, classes are shuffled every *n* seconds. If the value passed is a list of numbers, these numbers are used as timestamps (in seconds) wherein classes are shuffled.
* **class_shuffle_strength** (*Default: 0.5*) - Controls how drastically classes are re-shuffled. Only applies when class_shuffle_seconds is passed. It is recommended to keep this between 0 and 1.
* **class_audio** - Class equivalent of pulse_audio and motion_audio. Passed when defining the LucidSonicDream object.

**Effects Parameters**

* **contrast_strength** (*Default: 0.5*) - Strength of default contrast effect. It is recommended to keep this between 0 and 1.
* **contrast_percussive** (*Default: True*) - If true, contrast reacts to the audio's percussive elements. Must range from 0 to 1.
* **contrast_audio** - Equivalent of previous "audio" arguments. Passed when defining the LucidSonicDream object. *Note*: If none of these arguments are passed, the contrast effect will not be applied.
* **flash_strength** (*0.5*), **flash_percussive** (*True*), and **flash_audio** - Equivalent of the previous three parameters, but for a "flash" effect. It is recommended to keep these between 0 and 1. If none of these arguments are passed, the flash effect will not be applied.
* **custom_effects** - List of custom, user-defined effects to apply (See **B.4**)

**Video Parameters**

* **resolution** - Self-explanatory. Low resolutions are recommended for "trial" renders. If none is passed, unaltered high-resolution images will be used.
* **start** (*Default: 0*) - Starting timestamp in seconds.
* **duration** - Video duration in seconds. If none is passed, full duration of audio will be used.
* **output_audio** - Final output audio of the video. Overwrites audio from "song" parameter if provided (See **B.5**)
* **fps** (*Default: 43*) - Video Frames Per Second.
* **save_frames** (*Default: False*) - If true, saves all individual video frames on disk.

**Other**

* **batch_size** (*Default: 1*) - Determines how many vectors are simultaneously fed to the model. Typically, larger batch sizes will output less clearly-defined images.

Example 1

This is a simple example whose appeal lies mostly in how it utilizes Motion. The song used here is **Pancake Feet by Tennysson**. As usual, you can watch the official music video [here](https://youtu.be/_ODm4UZGh7g), or listen to them on [Spotify](https://open.spotify.com/artist/3Nb8N20WChM0swo5qWTvm8?si=oUZ2uV7eQH2ieMucvL_vgA)! Click [here](https://youtu.be/ztWCMm9cExY) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream('pancake_feet.mp3',
style = 'modern art')
L.hallucinate('pancake_feet.mp4',
resolution = 360,
duration = 45,
speed_fpm = 0,
motion_percussive = True,
motion_react = 0.8,
contrast_strength = 0.5,
flash_strength = 0.7)
files.download("pancake_feet.mp4")
###Output
_____no_output_____
###Markdown
Example 2

This is another simple example that combines subtle Pulse, Motion, Contrast, and Flash reactions to complement the overall trippy style. The style weights used here are from a model trained by **Jeremy Torman**. You can check out his artworks on [Twitter](https://twitter.com/tormanjeremy), or see details on his [original Reddit post](https://www.reddit.com/r/deepdream/comments/leqwxs/stylegan2ada_pickle_file_in_comments_with_colab/) if you're interested!

The song, meanwhile, is **Raspberry by Saje**. You can listen to the full track on [YouTube](https://www.youtube.com/watch?v=fOLxvL0_aMU) or [Spotify](https://open.spotify.com/artist/3I2596dGk4K3e4qKjwpzQb?si=TbyjmQuAQRWmrE--lNTRMg). Click [here](https://youtu.be/iEFqcMrszH0) to view a full-length sample video without having to run the code.
###Code
# Download Style Weights
! gdown --id 19hNptJSXji_9h7DMJBVlEMe-izWXvkYQ
L = LucidSonicDream(song = 'raspberry.mp3',
style = 'VisionaryArt.pkl')
L.hallucinate(file_name = 'raspberry.mp4',
resolution = 360,
duration = 60,
pulse_react = 1.2,
motion_react = 0.7,
contrast_strength = 0.5,
flash_strength = 0.5)
files.download("raspberry.mp4")
###Output
_____no_output_____
###Markdown
Example 3

This is a much more complex example that utilizes multiple audio tracks and more fine-tuned parameters. It takes advantage of isolated audio tracks for cleaner Pulse, Class, and Contrast reactions.

Note: Numerical labels for classes using the WikiArt style can be found [here](https://colab.research.google.com/github/Norod/my-colab-experiments/blob/master/WikiArt_Example_Generation_By_Peter_Baylies.ipynb). Click [here](https://youtu.be/l-nGC-ve7sI) to view a full-length sample video without having to run the code.
###Code
L = LucidSonicDream(song = 'lucidsonicdreams_main.mp3',
pulse_audio = 'lucidsonicdreams_pulse.mp3',
class_audio = 'lucidsonicdreams_class.mp3',
contrast_audio = 'lucidsonicdreams_pulse.mp3',
style = 'wikiart')
L.hallucinate('lucidsonicdreams.mp4',
resolution = 360,
start = 32,
duration = 60,
pulse_react = 0.25,
motion_react = 0,
classes = [1,5,9,16,23,27,28,30,50,68,71,89],
dominant_classes_first = True,
class_shuffle_seconds = 8,
class_smooth_seconds = 4,
class_pitch_react = 0.2,
contrast_strength = 0.3,
flash_strength = 0.1)
files.download("lucidsonicdreams.mp4")
###Output
_____no_output_____
###Markdown
B.4. Using Custom Effects

You can apply your own reactive custom effects to the video by defining an effects function and passing it to an EffectsGenerator object, as seen below. The effects function must contain the following parameters:

* **array** - Refers to the image array that the effect is applied on.
* **strength** - Reactivity parameter, similar to pulse_react, contrast_strength, etc.
* **amplitude** - Refers to the volume of the audio at a given point in time. Simply multiply this by the parameter that controls the "intensity" of the effect.

The function must output a NumPy array representing the output image. The function is then passed to an EffectsGenerator object, which in turn has the following parameters:

* **func** - The effects function
* **audio** - Audio controlling the effect
* **strength** - Strength of the effect
* **percussive** - If True, effect reacts to the audio's percussive elements.

The song used in the example below is **Unfaith by Ekali**. You can listen to the full track on [YouTube](https://youtu.be/8C4wgzP1KOI) or [Spotify](https://open.spotify.com/track/5UC6HF9VVgYMHQ7PcwcZNZ?si=hCIA2JMTQTC98zzPZfA3yQ). Click [here](https://youtu.be/V7jo281HSwM) to view a sample video without having to run the code.
###Code
import numpy as np
from skimage.transform import swirl
from lucidsonicdreams import EffectsGenerator
def swirl_func(array, strength, amplitude):
swirled_image = swirl(array,
rotation = 0,
strength = 100 * strength * amplitude,
radius=650)
return (swirled_image*255).astype(np.uint8)
swirl_effect = EffectsGenerator(swirl_func,
audio = 'unfaith.mp3',
strength = 0.2,
percussive = False)
L = LucidSonicDream('unfaith.mp3',
style = 'textures')
L.hallucinate('unfaith.mp4',
resolution = 360,
duration = 60,
motion_react = 0.15,
speed_fpm = 2,
pulse_react = 1.5,
contrast_strength = 1,
flash_strength = 1,
custom_effects = [swirl_effect])
files.download("unfaith.mp4")
###Output
_____no_output_____
###Markdown
B.5. Using Custom Visualization Functions

Finally, you can choose not to use StyleGAN, and instead define any custom function that takes in batches of vectors and outputs a Pillow image. The function must take in **noise_batch** and **class_batch** parameters. Moreover, when defining the LucidSonicDream object, **num_possible_classes** and **input_size** must be passed.

The example below defines a custom function using a pre-trained PyTorch implementation of the BigGAN, similarly to the [Deep Music Visualizer](https://github.com/msieg/deep-music-visualizer) project by Matt Siegelman. Numerical labels for each class can be found [here](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a). The song used is **Sea of Voices by Porter Robinson**. You can listen to the track on [YouTube](https://www.youtube.com/watch?v=lSooYPG-5Rg) or [Spotify](https://open.spotify.com/track/2lNFWUrxuNaQsf5I1pDTPr?si=MsD7GJUsRma4mkyfjbEhJg). Note that an [instrumental version](https://youtu.be/2Bo0JqTmVwg) was used as input in order to prevent vocals from influencing motion. Click [here](https://youtu.be/_TJCql7O9kU?t=180) to view a full-length sample video without having to run the code!
###Code
! pip install pytorch_pretrained_biggan
from pytorch_pretrained_biggan import BigGAN, convert_to_images
import torch
biggan = BigGAN.from_pretrained('biggan-deep-512')
biggan.to('cuda:0')
def biggan_func(noise_batch, class_batch):
noise_tensor = torch.from_numpy(noise_batch).cuda()
class_tensor = torch.from_numpy(class_batch).cuda()
with torch.no_grad():
output_tensor = biggan(noise_tensor.float(), class_tensor.float(), truncation = 1)
return convert_to_images(output_tensor.cpu())
L = LucidSonicDream('sea_of_voices_inst.mp3',
style = biggan_func,
input_shape = 128,
num_possible_classes = 1000)
L.hallucinate('sea_of_voices.mp4',
output_audio = 'sea_of_voices.mp3',
resolution = 360,
duration = 60,
speed_fpm = 3,
classes = [13, 14, 22, 24, 301, 84, 99, 100, 134, 143, 393, 394],
class_shuffle_seconds = 10,
class_shuffle_strength = 0.1,
class_complexity = 0.5,
class_smooth_seconds = 4,
motion_react = 0.35,
flash_strength = 1,
contrast_strength = 1)
###Output
_____no_output_____ |
notebooks/rendered/python/tut_3.ipynb | ###Markdown
Booleans

Python has a type `bool` which can take on one of two values: `True` and `False`.
###Code
x = True
print(x)
print(type(x))
###Output
_____no_output_____
###Markdown
Rather than putting `True` or `False` directly in our code, we usually get boolean values from **boolean operators**. These are operators that answer yes/no questions. We'll go through some of these operators below.

Comparison Operations

| Operation     | Description                       || Operation     | Description                          |
|---------------|-----------------------------------||---------------|--------------------------------------|
| ``a == b``    | ``a`` equal to ``b``              || ``a != b``    | ``a`` not equal to ``b``             |
| ``a < b``     | ``a`` less than ``b``             || ``a > b``     | ``a`` greater than ``b``             |
| ``a <= b``    | ``a`` less than or equal to ``b`` || ``a >= b``    | ``a`` greater than or equal to ``b`` |
###Code
def can_run_for_president(age):
"""Can someone of the given age run for president in the US?"""
# The US Constitution says you must "have attained to the Age of thirty-five Years"
return age >= 35
print("Can a 19-year-old run for president?", can_run_for_president(19))
print("Can a 45-year-old run for president?", can_run_for_president(45))
###Output
_____no_output_____
###Markdown
Comparisons are a little bit clever...
###Code
3.0 == 3
###Output
_____no_output_____
###Markdown
But not too clever...
###Code
'3' == 3
###Output
_____no_output_____
###Markdown
Comparison operators can be combined with the arithmetic operators we've already seen to express a virtually limitless range of mathematical tests. For example, we can check if a number is odd by checking that the modulus with 2 returns 1:
###Code
def is_odd(n):
return (n % 2) == 1
print("Is 100 odd?", is_odd(100))
print("Is -1 odd?", is_odd(-1))
###Output
_____no_output_____
###Markdown
Remember to use `==` instead of `=` when making comparisons. If you write `n == 2` you are asking about the value of n. When you write `n = 2` you are changing the value of n.

Combining Boolean Values

Python provides operators to combine boolean values using the standard concepts of "and", "or", and "not". And in fact, the corresponding Python operators use just those words: ``and``, ``or``, and ``not``. With these, we can make our `can_run_for_president` function more accurate.
###Code
def can_run_for_president(age, is_natural_born_citizen):
"""Can someone of the given age and citizenship status run for president in the US?"""
# The US Constitution says you must be a natural born citizen *and* at least 35 years old
return is_natural_born_citizen and (age >= 35)
print(can_run_for_president(19, True))
print(can_run_for_president(55, False))
print(can_run_for_president(55, True))
###Output
_____no_output_____
###Markdown
Quick, can you guess the value of this expression?
###Code
True or True and False
###Output
_____no_output_____
###Markdown
(Click the "output" button to see the answer)Python has precedence rules that determine the order in which operations get evaluated in expressions like above. For example, `and` has a higher precedence than `or`, which is why the first expression above is `True`. If we had evaluated it from left to right, we would have calculated `True or True` first (which is `True`), and then taken the `and` of that result with `False`, giving a final value of `False`.You could try to [memorize the order of precedence](https://docs.python.org/3/reference/expressions.htmloperator-precedence), but a safer bet is to just use liberal parentheses. Not only does this help prevent bugs, it makes your intentions clearer to anyone who reads your code. For example, consider the following expression:```pythonprepared_for_weather = have_umbrella or rain_level 0 and is_workday```I'm trying to say that I'm safe from today's weather....- if I have an umbrella...- or if the rain isn't too heavy and I have a hood...- otherwise, I'm still fine unless it's raining *and* it's a workdayBut not only is my Python code hard to read, it has a bug. We can address both problems by adding some parentheses:```pythonprepared_for_weather = have_umbrella or (rain_level 0 and is_workday)```You can add even more parentheses if you think it helps readability:```pythonprepared_for_weather = have_umbrella or ((rain_level 0 and is_workday))```We can also split it over multiple lines to emphasize the 3-part structure described above:```pythonprepared_for_weather = ( have_umbrella or ((rain_level < 5) and have_hood) or (not (rain_level > 0 and is_workday)))``` ConditionalsWhile useful enough in their own right, booleans really start to shine when combined with *conditional statements*, using the keywords ``if``, ``elif``, and ``else``.Conditional statements, often referred to as *if-then* statements, allow the programmer to execute certain pieces of code depending on some Boolean condition.A basic example of a Python conditional statement is this:
###Code
def inspect(x):
if x == 0:
print(x, "is zero")
elif x > 0:
print(x, "is positive")
elif x < 0:
print(x, "is negative")
else:
print(x, "is unlike anything I've ever seen...")
inspect(0)
inspect(-15)
###Output
_____no_output_____
###Markdown
Python adopts the ``if`` and ``else`` often used in other languages; its more unique keyword is ``elif``, a contraction of "else if". In these conditional clauses, ``elif`` and ``else`` blocks are optional; additionally, you can include as many ``elif`` statements as you would like.

Note especially the use of colons (``:``) and whitespace to denote separate blocks of code. This is similar to what happens when we define a function - the function header ends with `:`, and the following line is indented with 4 spaces. All subsequent indented lines belong to the body of the function, until we encounter an unindented line, ending the function definition.
###Code
def f(x):
if x > 0:
print("Only printed when x is positive; x =", x)
print("Also only printed when x is positive; x =", x)
print("Always printed, regardless of x's value; x =", x)
f(1)
f(0)
###Output
_____no_output_____
###Markdown
Boolean conversion

We've seen `int()`, which turns things into ints, and `float()`, which turns things into floats, so you might not be surprised to hear that Python has a `bool()` function which turns things into bools.
###Code
print(bool(1)) # all numbers are treated as true, except 0
print(bool(0))
print(bool("asf")) # all strings are treated as true, except the empty string ""
print(bool(""))
# Generally empty sequences (strings, lists, and other types we've yet to see, like tuples)
# are "falsey" and the rest are "truthy"
###Output
_____no_output_____
###Markdown
We can use non-boolean objects in `if` conditions and other places where a boolean would be expected. Python will implicitly treat them as their corresponding boolean value:
###Code
if 0:
print(0)
elif "spam":
print("spam")
###Output
_____no_output_____
###Markdown
Conditional expressions (aka 'ternary')

Setting a variable to either of two values depending on some condition is a pretty common pattern.
###Code
def quiz_message(grade):
if grade < 50:
outcome = 'failed'
else:
outcome = 'passed'
print('You', outcome, 'the quiz with a grade of', grade)
quiz_message(80)
###Output
_____no_output_____
###Markdown
Python has a handy single-line 'conditional expression' syntax to simplify these cases:
###Code
def quiz_message(grade):
outcome = 'failed' if grade < 50 else 'passed'
print('You', outcome, 'the quiz with a grade of', grade)
quiz_message(45)
###Output
_____no_output_____ |
stocks_forecasting.ipynb | ###Markdown
Immediate Day Prediction
###Code
# Use the most recent 90 days of prices as the input window for the next-day forecast
next_day = data[-90:].values
# Scale with the previously fitted scaler so the input matches the training distribution
next_day_scaled = scaler.transform(next_day)

next_day_pred = []
next_day_pred.append(next_day_scaled)
next_day_pred = np.array(next_day_pred)
# Reshape to (samples, timesteps, features) as expected by the trained model
next_day_pred = np.reshape(next_day_pred, (next_day_pred.shape[0], next_day_pred.shape[1], 1))

# Predict the scaled price and convert it back to the original price scale
predicted_price = model.predict(next_day_pred)
predicted_price = scaler.inverse_transform(predicted_price)
print(predicted_price)
###Output
[[1983.3309]]
|
appyters/RNAseq_Data_Metadata_Analysis/RNAseq_Data_Metadata_Analysis.ipynb | ###Markdown
RNA-seq Data and Metadata Analysis Appyter

This notebook template provides a pipeline for the visualization and analysis of RNA-seq gene read counts.

Analysis Overview

The RNA-seq data first undergoes normalization and dimensionality reduction via Principal Component Analysis (PCA) and Uniform Manifold Approximation and Projection (UMAP). Samples are then clustered based on their most-associated highly-variable genes and metadata features. The number of clusters is determined based on a modified silhouette score which prioritizes having more clusters over having larger clusters. Clusters are visualized using the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package. The most up-regulated and down-regulated genes are also identified for each cluster. These genes are used to perform enrichment analysis via the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The enrichment results are visualized with the [React-GSEA](https://github.com/MaayanLab/react-GSEA/tree/simplified) package. Finally, similar and opposite drug/small molecule signatures are queried using the [L1000FWD](https://maayanlab.cloud/L1000FWD/) API.

*Note: If using GTEx data or other healthy tissue sample data for which querying drug signatures is not relevant, please use the GTEx Tissue-Specific RNA-seq Analysis Appyter instead. If using GEO data, please use the [Bulk RNA-seq Analysis Appyter](https://appyters.maayanlab.cloud/Bulk_RNA_seq/).*

0. Notebook Setup

Import packages and set appropriate file names.
###Code
import os
import numpy as np
import pandas as pd
import requests
import time
from matplotlib import pyplot as plt
import seaborn as sns
from umap import UMAP
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, silhouette_samples, silhouette_score, plot_roc_curve
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.cm as cm
from maayanlab_bioinformatics.dge import characteristic_direction
from maayanlab_bioinformatics.normalization import log2_normalize, filter_by_var, zscore_normalize
from maayanlab_bioinformatics.utils import merge
import math
from collections import OrderedDict
import json
from react_scatter_board.jupyter_compat import ScatterBoard
from IPython.display import display, IFrame, Markdown, HTML
from textwrap import wrap
from react_gsea import ReactGSEA, dataFromResult
from react_gsea.jupyter_compat import ReactGSEA
# Notebook display util functions
def download_button(content, label, filename):
# Add download button
outname = filename.split('.')[0]
display(HTML('<textarea id="textbox_{outname}" style="display: none;">{content}</textarea> <button style="margin:10px 0;" id="create_{outname}">{label}</button> <a download="{filename}" id="downloadlink_{outname}" style="display: none">Download</a>'.format(**locals())))
display(HTML('<script type="text/javascript">!function(){{var e=null,t=document.getElementById("create_{outname}"),n=document.getElementById("textbox_{outname}");t.addEventListener("click",function(){{var t,l,c=document.getElementById("downloadlink_{outname}");c.href=(t=n.value,l=new Blob([t],{{type:"text/plain"}}),null!==e&&window.URL.revokeObjectURL(e),e=window.URL.createObjectURL(l)),c.click()}},!1)}}();</script>'.format(**locals())))
def make_clickable(link):
return f'<a target="_blank" href="{link}">{link}</a>'
def figure_header(label,title):
display(HTML(f"<div style='font-size:2rem; padding:1rem 0;'><b>{label}</b>: {title}</div>"))
def figure_legend(label,title,content=""):
display(HTML(f"<div style='font-size:1.5rem;'><b>{label}</b>: <i>{title}</i>. {content} </div>"))
%%appyter hide
{% do SectionField(
name = 'DATASETS',
title = 'Dataset Selection',
subtitle = 'Upload datasets for visualization and analysis. Both file uploads are required to run the analysis.'
) %}
{% do SectionField(
name = 'PARAMETERS',
title = 'Analysis Parameters',
subtitle = 'Set parameters for analysis.'
) %}
{% do SectionField(
name = "ENRICHR_LIBS",
title = "Enrichment Analysis Library Selection",
subtitle = "Choose Enrichr geneset libraries for comparison against input genes. Multiple libraries can be selected from each section. If nothing is selected, default libraries will be used."
) %}
{% set data_filename = FileField(
name='data_filename',
label='RNA-seq data file',
description='TSV or CSV file containing RNA-seq read counts. Index should be Entrez gene symbols, and columns should be individual samples.',
default='',
examples = {
'GSE159266 Data': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
},
section='DATASETS'
) %}
{% set metadata_filename = FileField(
name='metadata_filename',
label='Sample metadata file',
description='TSV or CSV file containing sample metadata. Index should be sample IDs corresponding to columns of RNA-seq data file, and columns should be different sample attributes.',
default='',
examples = {
'GSE159266 Metadata': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
},
section='DATASETS'
) %}
{% set n_neighbors = IntField(
name = 'n_neighbors',
label = 'Number of neighbors to use for UMAP calculations',
description = 'Smaller values preserve local structure, while larger values emphasize global structure.',
default = 40,
min = 2,
max = 200,
section = 'PARAMETERS'
) %}
{% set min_cluster_dist = FloatField(
name = 'min_cluster_dist',
label = 'Minimum distance between UMAP-projected points',
description = 'Determines how close/distant points belonging to different clusters are from each other.',
default = 0.3,
min = 0.1,
max = 1,
section = 'PARAMETERS'
) %}
{% set top_n_genes = IntField(
name = 'top_n_genes',
label = 'Number of genes to analyze',
description = 'Number of top variable genes to use in analysis.',
default = 2500,
section = 'PARAMETERS'
) %}
{% set top_n_genes_enrichment = IntField(
name = 'top_n_genes_enrichment',
label = 'Number of genes to use for enrichment analysis',
description = 'Number of top variable genes to use for enrichment analysis; must be less than top_n_genes.',
default = 250,
section = 'PARAMETERS'
) %}
{% set do_l1000 = BoolField(
name = 'do_l1000',
label = 'Query L1000 signatures?',
description = 'Option to query opposite and similar L1000 signatures to input data using L1000FWD.',
default = True,
section = 'PARAMETERS'
) %}
{% set transcription_libraries = MultiChoiceField(
name = 'transcription_libraries',
label = 'Transcription Libraries',
description = 'Default library is ENCODE_TF_ChIP-seq_2015',
choices = [
'ARCHS4_TFs_Coexp',
'ChEA_2016',
'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',
'ENCODE_Histone_Modifications_2015',
'ENCODE_TF_ChIP-seq_2015',
'Epigenomics_Roadmap_HM_ChIP-seq',
'Enrichr_Submissions_TF-Gene_Coocurrence',
'Genome_Browser_PWMs',
'lncHUB_lncRNA_Co-Expression',
'miRTarBase_2017',
'TargetScan_microRNA_2017',
'TF-LOF_Expression_from_GEO',
'TF_Perturbations_Followed_by_Expression',
'Transcription_Factor_PPIs',
'TRANSFAC_and_JASPAR_PWMs',
'TRRUST_Transcription_Factors_2019'
],
default = [
'ENCODE_TF_ChIP-seq_2015'
],
section = 'ENRICHR_LIBS'
) %}
{% set pathway_libraries = MultiChoiceField(
name = "pathway_libraries",
label = "Pathway Libraries",
description = 'Default libraries are KEGG_2019_Human and KEGG_2019_Mouse',
choices = [
'ARCHS4_Kinases_Coexp',
'BioCarta_2016',
'BioPlanet_2019',
'BioPlex_2017',
'CORUM',
'Elsevier_Pathway_Collection',
'HMS_LINCS_KinomeScan',
'HumanCyc_2016',
'huMAP',
'KEA_2015',
'KEGG_2019_Human',
'KEGG_2019_Mouse',
'Kinase_Perturbations_from_GEO_down',
'Kinase_Perturbations_from_GEO_up',
'L1000_Kinase_and_GPCR_Perturbations_down',
'L1000_Kinase_and_GPCR_Perturbations_up',
'NCI-Nature_2016',
'NURSA_Human_Endogenous_Complexome',
],
default = [
'KEGG_2019_Human',
'KEGG_2019_Mouse'
],
section = 'ENRICHR_LIBS'
) %}
{% set ontology_libraries = MultiChoiceField(
name = 'ontology_libraries',
label = 'Ontology Libraries',
description = 'Default libraries are GO_Biological_Process_2018 and MGI_Mammalian_Phenotype_Level_4_2019',
choices = [
'GO_Biological_Process_2018',
'GO_Cellular_Component_2018',
'GO_Molecular_Function_2018',
'Human_Phenotype_Ontology',
'Jensen_COMPARTMENTS',
'Jensen_DISEASES',
'Jensen_TISSUES',
'MGI_Mammalian_Phenotype_Level_4_2019'
],
default = [
'GO_Biological_Process_2018',
'MGI_Mammalian_Phenotype_Level_4_2019'],
section = 'ENRICHR_LIBS'
) %}
{% set disease_drug_libraries = MultiChoiceField(
name = 'disease_drug_libraries',
label = 'Disease Drug Libraries',
description = 'Default library is GWAS_Catalog_2019',
choices = [
'Achilles_fitness_decrease',
'Achilles_fitness_increase',
'ARCHS4_IDG_Coexp',
'ClinVar_2019',
'dbGaP',
'DepMap_WG_CRISPR_Screens_Broad_CellLines_2019',
'DepMap_WG_CRISPR_Screens_Sanger_CellLines_2019',
'DisGeNET',
'DrugMatrix',
'DSigDB',
'GeneSigDB',
'GWAS_Catalog_2019',
'LINCS_L1000_Chem_Pert_down',
'LINCS_L1000_Chem_Pert_up',
'LINCS_L1000_Ligand_Perturbations_down',
'LINCS_L1000_Ligand_Perturbations_up',
'MSigDB_Computational',
'MSigDB_Oncogenic_Signatures',
'Old_CMAP_down',
'Old_CMAP_up',
'OMIM_Disease',
'OMIM_Expanded',
'PheWeb_2019',
'Rare_Diseases_AutoRIF_ARCHS4_Predictions',
'Rare_Diseases_AutoRIF_Gene_Lists',
'Rare_Diseases_GeneRIF_ARCHS4_Predictions',
'Rare_Diseases_GeneRIF_Gene_Lists',
'UK_Biobank_GWAS_v1',
'Virus_Perturbations_from_GEO_down',
'Virus_Perturbations_from_GEO_up',
'VirusMINT'
],
default = [
'GWAS_Catalog_2019'
],
section = 'ENRICHR_LIBS'
) %}
{% set cell_type_libraries = MultiChoiceField(
name = 'cell_type_libraries',
label = 'Cell Type Libraries',
description = 'No libraries selected by default',
choices = [
'Allen_Brain_Atlas_down',
'Allen_Brain_Atlas_up',
'ARCHS4_Cell-lines',
'ARCHS4_Tissues',
'Cancer_Cell_Line_Encyclopedia',
'CCLE_Proteomics_2020',
'ESCAPE',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_down',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_up',
'Human_Gene_Atlas',
'Mouse_Gene_Atlas',
'NCI-60_Cancer_Cell_Lines',
'ProteomicsDB_2020',
'Tissue_Protein_Expression_from_Human_Proteome_Map'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set misc_libraries = MultiChoiceField(
name = 'misc_libraries',
label = 'Miscellaneous Libraries',
description = 'No libraries selected by default',
choices = [
'Chromosome_Location_hg19',
'Data_Acquisition_Method_Most_Popular_Genes',
'Enrichr_Libraries_Most_Popular_Genes',
'Genes_Associated_with_NIH_Grants',
'HMDB_Metabolites',
'HomoloGene',
'InterPro_Domains_2019',
'NIH_Funded_PIs_2017_AutoRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_GeneRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_Human_AutoRIF',
'NIH_Funded_PIs_2017_Human_GeneRIF',
'Pfam_Domains_2019',
'Pfam_InterPro_Domains',
'Table_Mining_of_CRISPR_Studies'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set legacy_libraries = MultiChoiceField(
name = 'legacy_libraries',
label = 'Legacy Libraries',
description = 'No libraries selected by default',
choices = [
'BioCarta_2013',
'BioCarta_2015',
'ChEA_2013',
'ChEA_2015',
'Chromosome_Location',
'Disease_Signatures_from_GEO_down_2014',
'Disease_Signatures_from_GEO_up_2014',
'Drug_Perturbations_from_GEO_2014',
'ENCODE_Histone_Modifications_2013',
'ENCODE_TF_ChIP-seq_2014',
'GO_Biological_Process_2013',
'GO_Biological_Process_2015',
'GO_Biological_Process_2017',
'GO_Biological_Process_2017b',
'GO_Cellular_Component_2013',
'GO_Cellular_Component_2015',
'GO_Cellular_Component_2017',
'GO_Cellular_Component_2017b',
'GO_Molecular_Function_2013',
'GO_Molecular_Function_2015',
'GO_Molecular_Function_2017',
'GO_Molecular_Function_2017b',
'HumanCyc_2015',
'KEA_2013',
'KEGG_2013',
'KEGG_2015',
'KEGG_2016',
'MGI_Mammalian_Phenotype_2013',
'MGI_Mammalian_Phenotype_2017',
'MGI_Mammalian_Phenotype_Level_3',
'MGI_Mammalian_Phenotype_Level_4',
'NCI-Nature_2015',
'Panther_2015',
'Reactome_2013',
'Reactome_2015',
'TargetScan_microRNA',
'Tissue_Protein_Expression_from_ProteomicsDB',
'WikiPathways_2013',
'WikiPathways_2015',
'WikiPathways_2016'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set crowd_libraries = MultiChoiceField(
name = 'crowd_libraries',
label = 'Crowd Libraries',
description = 'No libraries selected by default',
choices = [
'Aging_Perturbations_from_GEO_down',
'Aging_Perturbations_from_GEO_up',
'Disease_Perturbations_from_GEO_down',
'Disease_Perturbations_from_GEO_up',
'Drug_Perturbations_from_GEO_down',
'Drug_Perturbations_from_GEO_up',
'Gene_Perturbations_from_GEO_down',
'Gene_Perturbations_from_GEO_up',
'Ligand_Perturbations_from_GEO_down',
'Ligand_Perturbations_from_GEO_up',
'MCF7_Perturbations_from_GEO_down',
'MCF7_Perturbations_from_GEO_up',
'Microbe_Perturbations_from_GEO_down',
'Microbe_Perturbations_from_GEO_up',
'RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO',
'SysMyo_Muscle_Gene_Sets'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
%%appyter code_exec
data_filename = {{ data_filename }}
metadata_filename = {{ metadata_filename }}
n_neighbors = {{ n_neighbors }}
min_cluster_dist = {{ min_cluster_dist }}
top_n_genes = {{ top_n_genes }}
top_n_genes_enrichment = {{ top_n_genes_enrichment }}
do_l1000 = {{ do_l1000 }}
transcription_libraries = {{ transcription_libraries }}
pathway_libraries = {{ pathway_libraries }}
ontology_libraries = {{ ontology_libraries }}
disease_drug_libraries = {{ disease_drug_libraries }}
cell_type_libraries = {{ cell_type_libraries }}
misc_libraries = {{ misc_libraries }}
legacy_libraries = {{ legacy_libraries }}
crowd_libraries = {{ crowd_libraries }}
if data_filename == '' or metadata_filename == '':
print("One or both user-uploaded files missing, use example GEO data.")
data_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
metadata_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
print(data_filename + '\n' + metadata_filename)
###Output
_____no_output_____
###Markdown
1. Import Datasets

Load RNA-seq gene read counts and associated sample metadata into dataframes.
###Code
def load_dataframe(file):
''' Load a file by downloading it or reading it if already downloaded.
'''
ext = os.path.splitext(file)[1]
if ext in {'.tsv', '.txt'}:
df = pd.read_csv(file, sep='\t', index_col=0)
elif ext == '.csv':
df = pd.read_csv(file, index_col=0)
else:
raise Exception('Unrecognized file format', ext)
# Fix any type coersion on identifiers
df.index = df.index.astype(str)
df.columns = df.columns.astype(str)
return df
data_index = "symbol"
metadata_index = "sample_id"
print(f"Loading user-uploaded data...")
df_data = load_dataframe(data_filename).sort_index()
df_metadata = load_dataframe(metadata_filename).sort_index()
df_data.index.name = "symbol"
df_metadata.index.name = "sample_id"
print("Data loaded!")
###Output
_____no_output_____
###Markdown
1a. RNA-seq Data
###Code
figure_legend("Table 1", "RNA-seq data", "The RNA-seq data contains a row per gene and a column per sample.")
display(df_data.head())
###Output
_____no_output_____
###Markdown
1b. Metadata
###Code
figure_legend("Table 2","Metadata", "The column indices are sample metadata attributes, while the row indices are sample IDs corresponding to the columns of the RNA-seq data.")
display(df_metadata.head())
###Output
_____no_output_____
###Markdown
Listed below are all the metadata categories. These categories will be used to cluster samples later in the analysis.
###Code
features = df_metadata.columns.values
print(features)
###Output
_____no_output_____
###Markdown
2. Normalize Data

Given the highly variable nature of expression levels between different genes, it is necessary to normalize the read counts before proceeding.
###Code
# create dataframe to display sample stats
df_library_size = pd.DataFrame(
{
'n_reads': df_data[df_data > 0].count(),
'log_n_reads': np.log2(df_data[df_data > 0].count() + 1),
'n_expressed_genes': df_data.sum(),
}).sort_values('n_reads', ascending=False)
df_library_size.index.name = "sample_id"
figure_legend("Table 3","Library size", "By default, the first five entries are shown. A gene read is counted toward n_reads for a single sample if its value is greater than 0.")
display(df_library_size.head())
###Output
_____no_output_____
###Markdown
Below, the overall library distribution is shown.
###Code
sns.distplot(df_library_size["n_reads"]); plt.show()
figure_legend("Figure 1","Library size distribution")
###Output
_____no_output_____
###Markdown
Two versions of the dataset are normalized: one with just the `top_n_genes` most variable genes and one with all genes. The former will be used to compute clusters after dimensionality reduction, and the latter to compute the characteristic direction (up or down) of each gene in a cluster.
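For intuition, the `maayanlab_bioinformatics` helpers used in the next cell roughly correspond to the pandas operations sketched below. This is a simplified illustration only; the library's exact implementations (including which axis the z-scores are computed over) may differ slightly.

```python
import numpy as np
import pandas as pd

def filter_by_var_sketch(df: pd.DataFrame, top_n: int = 2500) -> pd.DataFrame:
    # keep the top_n rows (genes) with the largest variance across samples
    return df.loc[df.var(axis=1).sort_values(ascending=False).index[:top_n]]

def log2_normalize_sketch(df: pd.DataFrame) -> pd.DataFrame:
    # log2(x + 1) compresses the large dynamic range of raw read counts
    return np.log2(df + 1)

def zscore_normalize_sketch(df: pd.DataFrame) -> pd.DataFrame:
    # convert each gene (row) to z-scores: mean 0, standard deviation 1
    return df.sub(df.mean(axis=1), axis=0).div(df.std(axis=1), axis=0)
```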
###Code
# copy full dataset for computing characteristic directions later
df_data_norm_all_genes = df_data.copy()
# take top_n_genes most variable rows
df_data_norm = filter_by_var(df_data,top_n = top_n_genes)
# compute log normalization of matrix
df_data_norm = log2_normalize(df_data_norm)
df_data_norm_all_genes = log2_normalize(df_data_norm_all_genes)
# convert to zscores
df_data_norm = zscore_normalize(df_data_norm)
df_data_norm_all_genes = zscore_normalize(df_data_norm_all_genes)
figure_legend("Table 4","Normalized RNA-seq data for most variable genes", "Counts are filtered for the most variable genes. The resulting dataset is log transformed and normalized, then converted to z-scores.")
display(df_data_norm.head())
# plot the first gene distribution
gene = df_data_norm.index.values[0]
sns.distplot(df_data_norm.iloc[0, :]); plt.show()
figure_legend("Figure 2",f"Sample gene expression distibution for {gene}", f"In this dataset, {gene} is the most variably expressed across all samples.")
# plot the last gene distribution
gene = df_data_norm.index.values[-1]
sns.distplot(df_data_norm.iloc[-1, :]); plt.show()
figure_legend("Figure 3",f"Sample gene expression distibution for {gene}", f"In this dataset, {gene} is the least variably expressed across all samples among the most variably expressed genes.")
# plot a single RNA-seq sample distribution
sns.distplot(df_data_norm.iloc[:, 0]); plt.show()
figure_legend("Figure 4",f"RNA-seq profile distribution for sample {df_data_norm.columns[0]}")
###Output
_____no_output_____
###Markdown
3. Reduce Data Dimensionality

Now that the data has been loaded and normalized, the most variable genes across the dataset can be identified and visualized with hierarchical clustering and heatmaps. Dimensionality reduction makes it easier to differentiate the samples by reducing the number of attributes that need to be considered.

3a. Principal Component Analysis

PCA is used first to reduce the dimensionality of the dataset, while still maintaining most of the variability. In PCA, a large number of dimensions -- in this case, the expression levels of the most variable genes -- can be reduced to a few new dimensions that capture most of the relevant information in the original attributes. First, all data values are scaled to (0, 1).
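For reference, min-max scaling (what `MinMaxScaler` applies, independently to each column, which in this matrix means to each sample) maps every value x of a column to x' = (x - x_min) / (x_max - x_min), so each column's values end up in the range [0, 1] before PCA is applied.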
###Code
pca_scaler = MinMaxScaler()
df_data_norm[df_data_norm.columns.tolist()] = pca_scaler.fit_transform(df_data_norm[df_data_norm.columns.tolist()])
df_data_norm.head()
###Output
_____no_output_____
###Markdown
Instead of manually setting the number of PCA components, the number of components is chosen automatically as the smallest number that explains more than 95% of the variance.
###Code
# PCA
data_norm_pca = PCA(
random_state=42,
n_components=0.95
)
data_norm_pca.fit(df_data_norm.values.T)
df_data_norm_pca = pd.DataFrame(
data_norm_pca.transform(df_data_norm.values.T),
index=df_data_norm.T.index
)
df_data_norm_pca.columns = [
f'PCA-{c}' # ({r:.3f})'
for c, r in zip(df_data_norm_pca.columns, data_norm_pca.explained_variance_ratio_)
]
df_data_norm_pca.index.name = "sample_id"
figure_legend("Table 5","Principle components of RNA-seq data", "The top principle components are the projections of each datapoint onto the axes along which there is the most variation in the dataset.")
display(df_data_norm_pca.head())
###Output
_____no_output_____
###Markdown
The data can now be plotted with the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package. The points can be shaped and colored by various metadata categories, with the default being the first two metadata columns. They can also be individually searched by sample_id.
###Code
# combine metadata with RNA-seq data; note this will fail if sample_ids are
# not exactly matched between both datasets
pca_data = merge(
df_data_norm_pca[["PCA-0", "PCA-1"]],
df_library_size,
df_metadata
)
# name columns for plotting purposes
pca_data = pca_data.rename(columns={'PCA-0': 'x', 'PCA-1': 'y'})
pca_data['sample_id'] = pca_data.index
# normalize dimensions to -10, 10
pca_min, pca_max = -10, 10
pca_x_min, pca_x_max = pca_data['x'].min(), pca_data['x'].max()
pca_y_min, pca_y_max = pca_data['y'].min(), pca_data['y'].max()
pca_data['x'] = (pca_data['x'] - pca_x_min) / (pca_x_max - pca_x_min) * (pca_max - pca_min) + pca_min
pca_data['y'] = (pca_data['y'] - pca_y_min) / (pca_y_max - pca_y_min) * (pca_max - pca_min) + pca_min
pca_scatter_data = pca_data.to_dict('records')
color_def = df_metadata.columns.values[0]
shape_def = df_metadata.columns.values[1]
ScatterBoard(
id='pca-scatterboard',
is3d=False,
data=pca_scatter_data,
shapeKey=shape_def,
colorKey=color_def,
labelKeys=['sample_id'],
searchKeys=['sample_id'],
width=600,
height=600
)
###Output
_____no_output_____
###Markdown
**Figure 5:** *First two PCA components of RNA-seq data.* Points are labeled by Sample ID and can be color- or shape-coded by any of the metadata categories using the dropdown menus. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.

3b. Uniform Manifold Approximation and Projection

The dimensionality of the dataset is further reduced by performing UMAP on the PCA components. Parameters such as `n_neighbors` and `min_dist` are set according to defaults used by the Seurat R Package for single cell genomics analysis.
###Code
data_norm_umap = UMAP(
random_state=42,
n_components=2,
n_neighbors=n_neighbors,
metric='cosine',
min_dist=min_cluster_dist,
)
# use top 10 components of PCA
n_pca_components = min(10,df_data_norm_pca.shape[1])
data_norm_umap.fit(df_data_norm_pca.iloc[:, :n_pca_components].values)
# keep only first two UMAP components
df_data_norm_umap = pd.DataFrame(
data_norm_umap.transform(df_data_norm_pca.iloc[:, :n_pca_components].values),
columns=['UMAP-0', 'UMAP-1'],
index=df_data_norm_pca.index,
)
# project data onto first two UMAP components for visualization
umap_data = merge(
df_data_norm_umap[["UMAP-0", "UMAP-1"]],
df_library_size,
df_metadata
)
umap_data = umap_data.rename(columns={'UMAP-0': 'x', 'UMAP-1': 'y'})
umap_data['sample_id'] = umap_data.index
# normalize to (-10, 10)
umap_min, umap_max = -10, 10
umap_x_min, umap_x_max = umap_data['x'].min(), umap_data['x'].max()
umap_y_min, umap_y_max = umap_data['y'].min(), umap_data['y'].max()
umap_data['x'] = (umap_data['x'] - umap_x_min) / (umap_x_max - umap_x_min) * (umap_max - umap_min) + umap_min
umap_data['y'] = (umap_data['y'] - umap_y_min) / (umap_y_max - umap_y_min) * (umap_max - umap_min) + umap_min
umap_scatter_data = umap_data.to_dict('records')
color_def = df_metadata.columns.values[0]
shape_def = df_metadata.columns.values[1]
ScatterBoard(
id='umap-scatterboard',
is3d=False,
data=umap_scatter_data,
shapeKey=shape_def,
colorKey=color_def,
labelKeys=['sample_id'],
searchKeys=['sample_id'],
width=600,
height=600
)
###Output
_____no_output_____
###Markdown
**Figure 6:** *First two UMAP components of RNA-seq data.* The datapoints are again labeled by sample ID, and can be color- or shape-coded by any of the metadata categories using the dropdown menu. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.

4. Clustering

The first two UMAP components will be used from here on out. To compute sample clusters, the k-means method is used. The total number of clusters is determined by first testing a range of candidate cluster counts and then computing silhouette scores, which measure how similar an entry is to its own cluster versus other clusters. The goal is to maximize both the similarity within a cluster and the differences between clusters, so the ideal number of clusters is the one that produces the highest silhouette score.
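For reference, the silhouette value of a single sample i is s(i) = (b(i) - a(i)) / max(a(i), b(i)), where a(i) is the average distance (cosine distance in the code below) from i to the other members of its own cluster and b(i) is the average distance from i to the members of the nearest other cluster. s(i) ranges from -1 to +1, and the score reported by `silhouette_score` is the mean of s(i) over all samples, so higher values indicate tighter, better-separated clusters.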
###Code
silhouette_scores = []
# Set max clusters as a function of the sample size and the user-selected option
max_clusters = math.ceil(df_data_norm_umap.shape[0]**0.5)
max_clusters = int(math.ceil(max_clusters/2))
cluster_range = range(2, (max(max_clusters, 3)))
for n in cluster_range:
# apply k-means clustering for each possible k
X = df_data_norm_umap.values
clusterer = KMeans(n_clusters=n, random_state=42).fit(X)
y_pred = clusterer.predict(X)
# The silhouette_score gives the average value for all the samples
silhouette_avg = silhouette_score(X, y_pred, metric='cosine')
# Compute a weighted score that rewards higher numbers of clusters
# weighted_score = calc_weighted_score(silhouette_avg, n, max_clusters)
silhouette_scores.append({
"N Clusters": n,
"Silhouette Score": silhouette_avg
# "Weighted Score": weighted_score
})
# Labeling the clusters
centers = clusterer.cluster_centers_
    # track the silhouette score for each candidate number of clusters
points = {}
threshold = 0.3
for s in silhouette_scores:
points[s["N Clusters"]] = s["Silhouette Score"]
silhouette_scores = pd.DataFrame(silhouette_scores)
figure_legend("Table 6", "Silhouette scores by number of clusters", "Values are sorted by the highest weighted score.")
display(silhouette_scores.head().sort_values(["Silhouette Score"], ascending=False).reset_index())
k = int(silhouette_scores.sort_values(["Silhouette Score"], ascending=False)['N Clusters'].iloc[0])
print(f"Ideal k: {k} clusters")
# plot the silhouette score as a function of # of clusters
plt.plot(silhouette_scores['N Clusters'], silhouette_scores['Silhouette Score'], label='Silhouette Score', color='#7C88FB')
plt.scatter(silhouette_scores['N Clusters'], silhouette_scores['Silhouette Score'], color='#7C88FB')
plt.axvline(k, label = f"Ideal k: {k} clusters", color ="#EF553B", alpha=0.8,dashes=(3,3))
plt.legend()
plt.ylabel('Score')
plt.xlabel('Number of Clusters')
plt.show()
figure_legend("Figure 7", "Cluster size selection", "The dotted line indicates the value of the 'ideal' <i>k</i>. This value will be used in subsequent clustering.")
# Compute the k-means dataframe using the ideal number of clusters
km = KMeans(n_clusters=k, random_state=42)
km_clusters = km.fit_predict(df_data_norm_umap.values)
df_data_norm_km = pd.DataFrame({
'Cluster': [
str(c)
for c in km_clusters
]}, index=df_data_norm_umap.index)
print(f'Computed {len(df_data_norm_km["Cluster"].unique())} clusters')
# Map each cluster to a color for later plots
clusters = df_data_norm_km["Cluster"].unique()
plotly_colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
cluster_colors = {}
i = 0
for c in clusters:
cluster_colors[c] = plotly_colors[i % len(plotly_colors)]
i += 1
def cluster_heading(cluster):
display(HTML(f'''
<center>
<div style='background-color:{cluster_colors[cluster] + '98'};
width:100%;height:3rem;display:flex;align-items:center;
justify-content:center;color:white;font-size:2rem'>
<center>Cluster {cluster}</center>
</div>
</center>'''))
###Output
_____no_output_____
###Markdown
5. Differential Expression
Next, the differential expression for each cluster is computed. The Characteristic Direction method is used for identifying differentially expressed genes among the different clusters.
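In brief, for each cluster the Characteristic Direction approach fits a regularized linear classifier that separates the samples in that cluster from all remaining samples; the unit normal vector of the resulting separating hyperplane assigns one coefficient per gene, where the magnitude of a gene's coefficient reflects how strongly it contributes to the separation and the sign indicates whether the gene is up- or down-regulated within the cluster. The `CD-coefficient` values computed below are the components of this vector.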
###Code
# Get differential expression for each cluster, using the dataset containing all genes
diff_expr = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
diff_expr[f"Cluster {cluster} CD"] = characteristic_direction(
# expression outside of this cluster
df_data_norm_all_genes.loc[:, df_data_norm_all_genes.columns.difference(samples.index)],
# expression in this cluster
df_data_norm_all_genes.loc[:, samples.index],
)['CD-coefficient']
df_diff_expr = pd.DataFrame(diff_expr)
df_diff_expr = df_diff_expr.sort_values(by='Cluster 0 CD',ascending=True)
df_diff_expr['Symbol'] = df_diff_expr.index.values
figure_legend("Table 7", "Differential expression of genes by cluster", "By default, the top 5 most differentially expressed genes are shown, along with the corresponding characteristic directions for each cluster.")
display(df_diff_expr.head())
###Output
_____no_output_____
###Markdown
Logistic regression is performed for each metadata category to determine which categories most accurately predict cluster designations for each data point. ROC curves are also plotted for categories with the top two highest AUC scores.
###Code
# LR
aucs = {}
rocs = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
aucs[cluster] = {}
rocs[cluster] = []
for feature in features:
lr = LogisticRegression()
X = df_metadata.copy()
X = X[feature]
X = pd.merge(X, df_data_norm_km, left_index = True, right_index = True)
# drop NAs, and move on if dataset is empty
        X = X.replace("not reported", None)
X = X.dropna()
if (X.shape[0] == 0): continue
cluster_data = X["Cluster"]
X = X.drop(columns= ["Cluster"])
# one-hot encode non numerical data
if (not isinstance(X[feature][0], (int, float, complex))):
X = pd.get_dummies(X[feature], prefix=feature)
y_true = (cluster_data == cluster)
if (len(y_true.unique()) < 2): # if there is only one class in the dataset
print(f"Not enough data to classify cluster {cluster} based on category {feature}")
aucs[cluster][feature] = np.nan
continue
lr.fit(X, y_true)
y_score = lr.predict_proba(X)[:, 1]
auc_score = roc_auc_score(y_true, y_score)
aucs[cluster][feature] = auc_score
# save the ROCs
rocs[cluster].append({"auc":auc_score, "lr": lr, "X": X, "y_true":y_true, "title": f'Predictions of cluster {cluster} by category {feature}'})
df_cluster_aucs = pd.DataFrame(aucs)
df_cluster_aucs.index.name="Category"
# sort features by avg AUC across all clusters
df_cluster_aucs["avg"] = [ np.mean(df_cluster_aucs.T[f]) for f in df_cluster_aucs.index.values ]
df_cluster_aucs = df_cluster_aucs.sort_values(by = "avg", ascending=False)
df_cluster_aucs = df_cluster_aucs.drop(columns = "avg")
cols = [('Cluster', col) for col in df_cluster_aucs.columns ]
df_cluster_aucs.columns = pd.MultiIndex.from_tuples(cols)
figure_legend("Table 8", "Average AUC scores for top predictive metadata categories, by cluster", "Scores for the top 5 metadata categories for predicting clusters, as determined by the average AUC score across all clusters, are shown. Higher AUC scores correspond to better classifiers for distinguishing whether or not a datapoint belongs to a certain cluster.")
display(df_cluster_aucs.head(5))
# plot top 2 ROCs for each cluster
plt.rc('font', size=16)
for cluster, plots in rocs.items():
plots.sort(reverse=True, key=lambda x: x["auc"])
cluster_heading(cluster)
if len(plots) < 2:
best_rocs = plots
else:
best_rocs = plots[:2]
num_plots = len(best_rocs)
figure,axes = plt.subplots(int(math.ceil(num_plots / 2.)), 2, figsize=(15,(len(best_rocs)*3.5)))
axes = axes.flatten()
for i in range(len(axes)):
if i >= len(best_rocs):
axes[i].remove()
else:
plot = best_rocs[i]
fig = plot_roc_curve(plot["lr"], plot["X"], plot["y_true"], ax=axes[i])
axes[i].set_title('\n'.join(wrap(plot["title"], 40)))
figure.tight_layout(pad=2)
plt.show()
figure_legend("Figure 8", "ROCs for top cluster-predicting metadata categories")
plt.rcdefaults()
###Output
_____no_output_____
###Markdown
6. Identify Up- and Down-Regulated Genes
Find the most up- and down-regulated genes for each cluster, for visualization in a heatmap and for enrichment analysis.
###Code
# Merge data
df_clustered_umap = pd.merge(left=df_data_norm_km, left_on="sample_id", right=df_data_norm_umap, right_on="sample_id")
# Get top Genes for each cluster
top_genes = {}
all_top_genes = []
heatmap_top_n = 100
for cluster in df_clustered_umap['Cluster'].unique():
cd_col = f'Cluster {cluster} CD'
if cd_col in df_diff_expr.columns:
# top up genes
up_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=False).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
# top down genes
dn_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=True).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
else:
        raise Exception(f'Cannot find CD column for cluster {cluster}')
all_top_genes.append(up_genes[:heatmap_top_n])
all_top_genes.append(dn_genes[:heatmap_top_n])
# save results
top_genes[cluster] = (up_genes, dn_genes)
all_top_genes = [item for sublist in all_top_genes for item in sublist] # flatten all genes to one list
###Output
_____no_output_____
###Markdown
Data corresponding to only the top 100 up- and down-regulated genes for each cluster is selected for visualization in a heatmap, with log-transformation and normalization proceeding as before.
###Code
df_data_norm_heatmap_f = df_data.loc[all_top_genes, :]
# compute log normalization of matrix
df_data_norm_heatmap_f = log2_normalize(df_data_norm_heatmap_f)
# convert to zscores
df_data_norm_heatmap_f = zscore_normalize(df_data_norm_heatmap_f)
# Plot heatmap
cases = df_data_norm_heatmap_f.columns
heatmap_cluster_colors = [ cluster_colors[x] for x in df_clustered_umap.loc[cases, :]["Cluster"] ]
sns.clustermap(df_data_norm_heatmap_f,xticklabels=False,col_colors = heatmap_cluster_colors); plt.show()
figure_legend("Figure 9", "Heatmap of most differentially expressed genes", "Color coding along the top edge indicates cluster designation of the corresponding sample.")
###Output
_____no_output_____
###Markdown
7. Enrichment Analysis with Enrichr
Perform enrichment analysis for each cluster by querying the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The background libraries are the user-selected Enrichr gene-set libraries, or the Enrichr defaults if none were selected. A link is provided to download the results.
###Code
# enrichment analysis libraries
enrichr_libraries = OrderedDict([
('Diseases/Drugs', disease_drug_libraries),
('Ontologies', ontology_libraries),
('Cell Type', cell_type_libraries),
('Pathways', pathway_libraries),
('Transcription', transcription_libraries),
('Legacy', legacy_libraries),
('Crowd', crowd_libraries)
])
# handle no selected libraries
all_empty = True
for key, libs in enrichr_libraries.items():
if len(libs) > 0:
all_empty = False
break
if all_empty:
enrichr_libraries = OrderedDict([
('Diseases/Drugs', ['GWAS_Catalog_2019']),
('Ontologies', ['GO_Biological_Process_2018', 'MGI_Mammalian_Phenotype_Level_4_2019']),
('Pathways', ['KEGG_2019_Human', 'KEGG_2019_Mouse']),
('Transcription', ['ENCODE_TF_ChIP-seq_2015'])
])
# Util functions
def enrichr_link_from_genes(genes, description='', enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
''' Functional access to Enrichr API
'''
time.sleep(1)
resp = requests.post(enrichr_link + '/addList', files={
'list': (None, '\n'.join(genes)),
'description': (None, description),
})
if resp.status_code != 200:
raise Exception('Enrichr failed with status {}: {}'.format(
resp.status_code,
resp.text,
))
# wait a tinybit before returning link (backoff)
time.sleep(3)
result = resp.json()
return dict(result, link=enrichr_link + '/enrich?dataset=' + resp.json()['shortId'])
def enrichr_get_top_results(userListId, bg, enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
time.sleep(1)
resp = requests.get(enrichr_link + '/enrich?userListId={}&backgroundType={}'.format(userListId, bg))
if resp.status_code != 200:
raise Exception('Enrichr failed with status {}: {}'.format(
resp.status_code,
resp.text,
))
time.sleep(3)
return pd.DataFrame(resp.json()[bg], columns=['rank', 'term', 'pvalue', 'zscore', 'combinedscore', 'overlapping_genes', 'adjusted_pvalue', '', ''])
# Get Enrichr links for each cluster
enrichr_links = {}
for cluster, (up_genes, dn_genes) in top_genes.items():
up_link, dn_link = None, None
if up_genes.size:
try:
up_link = enrichr_link_from_genes(up_genes, f'cluster {cluster} up')
except:
print(f'Enrichr failed for cluster {cluster} up genes')
else:
print(f'cluster {cluster} up: empty')
if dn_genes.size:
try:
dn_link = enrichr_link_from_genes(dn_genes, f'cluster {cluster} down')
except:
print(f'Enrichr failed for cluster {cluster} down genes')
else:
print(f'cluster {cluster} down: empty')
enrichr_links[cluster] = (up_link, dn_link)
# Grab top results for each cluster
all_enrichr_results = []
for cluster, (up_link, dn_link) in enrichr_links.items():
for link_type, link in [('up', up_link), ('down', dn_link)]:
if link is None:
continue
for category, libraries in enrichr_libraries.items():
for library in libraries:
try:
results = enrichr_get_top_results(link['userListId'], library).sort_values('pvalue').iloc[:5]
results['link'] = link['link']
results['library'] = library
results['category'] = category
results['direction'] = link_type
results['cluster'] = cluster
all_enrichr_results.append(results)
except:
print('{}: {} {} {} cluster {} failed, continuing'.format(link, library, category, link_type, cluster))
df_enrichr_results = pd.concat(all_enrichr_results).reset_index()
# Display a dataframe with clickable enrichr links
figure_legend("Table 10","Enrichment analysis results from Enrichr", "Results are grouped by expression direction (up/down) and gene set library. Within groups, results are sorted by lowest p-value (highest rank) first.")
df_clickable = df_enrichr_results.copy()
df_clickable['link'] = df_clickable["link"].apply(make_clickable)
table_html = df_clickable.to_html(escape=False)
display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
download_button(df_enrichr_results.to_csv(), 'Download Enrichr results', 'Enrichr results.csv')
###Output
_____no_output_____
###Markdown
7a. Barplots
Horizontal barplots are used to display the top Enrichr results for each cluster, by library and characteristic expression direction.
###Code
# Make horizontal barplots to visualize top Enrichr results
clusters = df_enrichr_results["cluster"].unique()
for cluster in clusters:
cluster_results = df_enrichr_results.loc[df_enrichr_results["cluster"] == cluster, :]
libraries = cluster_results["library"].unique()
num_rows = len(libraries)
count = 1 # keep track of which subplot we're on
fig = plt.figure(figsize=(15,5*num_rows))
for library in cluster_results["library"].unique():
library_results = cluster_results.loc[cluster_results["library"] == library, :]
for direction in library_results["direction"].unique():
plot_results = library_results.loc[cluster_results["direction"] == direction, :]
plot_results = plot_results.sort_values("pvalue",ascending=False)
labels = plot_results["term"]
labels = [ '\n'.join(wrap(l, 20)) for l in labels ]
values = plot_results["pvalue"]
values = -np.log(values)
# normalize values to map from 0-1 -> color, with opacity also based on normalized pvalue
cmap = plt.get_cmap('cool')
norm_values = [ 0.3 + (x - min(values))/(max(values) - min(values))*0.7 for x in values]
colors = [ [*cmap(val)[:3], 0.4 + 0.2*val] for val in norm_values]
# plot result
ax = fig.add_subplot(num_rows,2,count)
ax.barh(labels,values,color = colors)
ax.set_title(f'{library}\n{direction} genes')
            ax.set_xlabel('-log(p-value)')
count += 1
cluster_heading(cluster)
fig.tight_layout(pad=3, w_pad=2, h_pad=6)
plt.show()
display(HTML("<br><br>"))
figure_legend("Figure 11", "Enrichment results by cluster", "Bar plots indicate the negative log of the p-value for the specified term. One plot is presented per cluster, per gene-set library, per expression direction (up/down).")
###Output
_____no_output_____
###Markdown
7b. Running Sum Visualizations
While the above barplots display the top enriched terms for each cluster in each direction, individual enriched terms can also be compared to the expression data using a random-walk [GSEA running sum visualization](https://github.com/MaayanLab/react-GSEA/tree/master).
First, each of the selected background libraries from Enrichr is queried and saved as a JSON object that maps each term to its complete geneset.
###Code
libresp = {}
for lib in df_enrichr_results['library'].unique():
resp = requests.get('https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=json&libraryName=' + lib)
if resp.status_code == 200:
libresp[lib] = resp.json()[lib]['terms']
else:
print(f"Failed to access library {lib}, continuing")
###Output
_____no_output_____
###Markdown
For each cluster, the most enriched term for that cluster from each library can then be compared against the most up-regulated genes in the cluster. Below, GSEA plots display the overlap between the genes from each cluster and their most enriched genesets.
###Code
# iterate through each cluster
for cluster in clusters:
cluster_heading(cluster)
# iterate through each library for each cluster
for lib in libresp.keys():
# obtain the most enriched library term for the cluster in the up direction
up_df = df_enrichr_results[df_enrichr_results.direction.isin(['up'])
& df_enrichr_results.cluster.isin([cluster])
& df_enrichr_results.library.isin([lib])]
top_up_term = up_df[up_df['rank'] == 1]['term'].iloc[0]
# store the geneset for the most enriched term
top_up_set = list(libresp[lib][top_up_term])
display(HTML(f"<div style='font-size:1.25rem;'><b>Comparison of up-regulated genes in Cluster {cluster} to most enriched {lib} term</b> </div>"))
print(f"Most enriched {lib} geneset for up-regulated genes:", top_up_term)
# display the GSEA plot comparing the enriched genes and the top up-regulated cluster genes
display(ReactGSEA(
data=dataFromResult(
input_set=top_up_set,
ranked_entities=df_diff_expr['Cluster ' + cluster + ' CD'].sort_values(ascending=False).iloc[:math.ceil((df_diff_expr.shape[0]/2))].index.tolist()
)
))
###Output
_____no_output_____
###Markdown
8. L1000 Analysis
If selected during user input, the most up- and down-regulated genes from each cluster, as identified above, can be submitted to the [L1000FWD](https://amp.pharm.mssm.edu/L1000FWD/) API, which will then return the most similar and opposite gene expression signatures from the L1000 database. Links are provided to the interactive L1000FWD projections for each set of results.
###Code
def l1000fwd_results_from_genes(up_genes, down_genes, description='', l100fwd_link='http://amp.pharm.mssm.edu/L1000FWD/'):
''' Functional access to L1000FWD API
'''
import time
time.sleep(1)
response = requests.post(l100fwd_link + 'sig_search', json={
'up_genes': list(up_genes),
'down_genes': list(down_genes),
})
l1000fwd_results = {}
if response.status_code != 200:
raise Exception('L1000FWD failed with status {}: {}'.format(
response.status_code,
response.text,
))
if 'KeyError' in response.text:
l1000fwd_results['result_url'] = None
else:
# Get ID and URL
result_id = response.json()['result_id']
l1000fwd_results['result_url'] = 'https://amp.pharm.mssm.edu/l1000fwd/vanilla/result/'+result_id
l1000fwd_results['result_id'] = result_id
# Get Top
l1000fwd_results['signatures'] = requests.get(l100fwd_link + 'result/topn/' + result_id).json()
# wait a tinybit before returning link (backoff)
time.sleep(1)
return l1000fwd_results
def l1000fwd_sig_link(sig_id):
return 'https://amp.pharm.mssm.edu/dmoa/sig/' + sig_id
def get_signature_by_id(sig_id):
response = requests.get("http://amp.pharm.mssm.edu/L1000FWD/sig/" + sig_id)
if response.status_code != 200:
raise Exception('L1000FWD signature query failed with status {}: {}'.format(
response.status_code,
response.text,
))
return response.json()
def display_l1000fwd_results(l1000fwd_results, plot_counter,cluster_id,nr_drugs=7, height=300):
# Check if results
if l1000fwd_results['result_url']:
# Display cluster title
display(HTML('<br><br>'))
cluster_heading(cluster)
        # Display link to the full L1000FWD results
display(HTML(f"<a href='{l1000fwd_results['result_url']}' target='_blank'> View L1000FWD for cluster {cluster_id}</a>"))
# Display tables
for direction, signature_list in l1000fwd_results['signatures'].items():
# Fix dataframe
rename_dict = {'sig_id': 'Signature ID', 'pvals': 'P-value', 'qvals': 'FDR', 'zscores': 'Z-score', 'combined_scores': 'Combined Score'}
signature_dataframe = pd.DataFrame(signature_list)[list(rename_dict.keys())].rename(columns=rename_dict).sort_values('P-value').rename_axis('Rank')
signature_dataframe.index = [x + 1 for x in range(len(signature_dataframe.index))]
signature_csv = signature_dataframe.to_csv(sep=",")
# Display table
pd.set_option('max.colwidth', None)
signature_dataframe['Signature ID'] = [f'<a href={l1000fwd_sig_link(x)} target="_blank">{x}</a>' for x in signature_dataframe['Signature ID']]
table_html = signature_dataframe.to_html(escape=False, classes='w-100')
display(HTML(f'<h3>{direction.title()} Signatures: </h3>'))
display(HTML(f'<style>.w-100{{width: 100% !important;}}</style><div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
# Display download button
download_button(signature_csv, f'Download {direction.title()} Signatures', f'Cluster {cluster_id} L1000FWD {direction.title()} signatures.csv')
# Link
display(HTML('Full results available at: <a href="{result_url}" target="_blank">{result_url}</a>.'.format(**l1000fwd_results)))
# Display error
else:
display(Markdown('### No results were found.\n This is likely due to the fact that the gene identifiers were not recognized by L1000FWD. Please note that L1000FWD currently only supports HGNC gene symbols (https://www.genenames.org/). If your dataset uses other gene identifier systems, such as Ensembl IDs or Entrez IDs, consider converting them to HGNC. Automated gene identifier conversion is currently under development.'))
if do_l1000:
plot_counter = 0
all_l1000fwd_results = {}
figure_header("Figure 14", "Most similar and opposite L1000 signatures, by cluster")
for cluster, (up_genes, dn_genes) in top_genes.items():
try:
results = l1000fwd_results_from_genes(up_genes,dn_genes)
all_l1000fwd_results[cluster] = results
display_l1000fwd_results(results,plot_counter,cluster)
plot_counter += 1
except:
print(f'L1000FWD API failed for cluster {cluster}, continuing')
figure_legend("Figure 14", "Most similar and opposite L1000 signatures, by cluster", "Results are sorted by smallest p-value.")
###Output
_____no_output_____
###Markdown
In the case of disease state RNA-seq data, the reverse signatures provide a potential set of drugs that could perturb the cells/tissues towards a "healthy" direction. These may present novel treatments for patients whose samples belong to a certain cluster.
###Code
if do_l1000:
df_drugs = pd.read_csv("https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv")
# Load top drug suggestions for each cluster based on the drugs used to produce the top five opposite signatures
drug_results = {}
for cluster, results in all_l1000fwd_results.items():
opposite_sigs = results["signatures"]["opposite"][:5]
sig_ids = [sig["sig_id"] for sig in opposite_sigs]
pert_ids = []
for sig_id in sig_ids:
try:
signature = get_signature_by_id(sig_id)
pert_ids.append(signature["pert_id"])
except:
print(f'L1000FWD API failed for cluster {cluster}, sig_id {sig_id}, continuing')
df_cluster_drugs = df_drugs[df_drugs["pert_id"].isin(pert_ids)].copy()
df_cluster_drugs["cluster"] = cluster
df_cluster_drugs = df_cluster_drugs[["cluster", *list(filter(lambda x: x!="cluster", df_cluster_drugs.columns))]]
drug_results[cluster] = df_cluster_drugs
df_all_drugs = pd.concat(drug_results).reset_index()
if do_l1000:
figure_header("Table 13", "Drugs used to produce most opposite signatures for each cluster")
df_clickable = df_all_drugs.copy()
df_clickable['pert_url'] = df_clickable["pert_url"].apply(make_clickable)
table_html = df_clickable.to_html(escape=False)
display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
download_button(df_all_drugs.to_csv(), 'Download L1000FWD drug results', 'L1000FWD drugs.csv')
figure_legend("Table 13", "Drugs used to produce most opposite signatures for each cluster", "Each entry is a drug/chemical used for perturbation in the L1000 experiments that resulted in a gene-expression signature most opposite to that of the specified cluster.")
###Output
_____no_output_____
###Markdown
General RNA-seq Data and Metadata Viewer
This notebook template provides a flexible and generalized pipeline for the visualization and analysis of RNA-seq profiles from any source.
Analysis Overview
The RNA-seq data first undergoes normalization and dimensionality reduction via Principal Component Analysis (PCA) and Uniform Manifold Approximation and Projection (UMAP). Samples are then clustered based on their most-associated highly-variable genes and metadata features. The number of clusters is determined based on a modified silhouette score which prioritizes having more clusters over having larger clusters. Clusters are visualized using the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package. The most up-regulated and down-regulated genes are also identified for each cluster. These genes are used to perform enrichment analysis via the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The enrichment results are visualized with the [React-GSEA](https://github.com/MaayanLab/react-GSEA/tree/simplified) package. Finally, similar and opposite drug/small molecule signatures are queried using the [L1000FWD](https://maayanlab.cloud/L1000FWD/) API.
*Note: If using GTEx data or other healthy tissue sample data for which querying drug signatures is not relevant, the [GTEx Tissue-Specific RNA-seq Analysis Appyter](https://appyters.maayanlab.cloud//GTEx_Tissue_RNA_Analysis) may be more useful instead. If using data from The Cancer Genome Atlas (TCGA), please use the more specific [TCGA Patient Cohorts Viewer Appyter](https://appyters.maayanlab.cloud//Patient_Cohorts_RNASeq_Viewer).*
0. Notebook Setup
Import packages and set appropriate file names.
###Code
import os
import numpy as np
import pandas as pd
import requests
import time
from matplotlib import pyplot as plt
import fastcluster
import seaborn as sns
from umap import UMAP
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, silhouette_samples, silhouette_score, plot_roc_curve
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.cm as cm
from maayanlab_bioinformatics.dge import characteristic_direction
from maayanlab_bioinformatics.normalization import log2_normalize, filter_by_var
import qnorm
from scipy.stats import zscore
from maayanlab_bioinformatics.utils import merge
import math
from collections import OrderedDict
import json
from react_scatter_board.jupyter_compat import ScatterBoard
from IPython.display import display, IFrame, Markdown, HTML
from textwrap import wrap
from react_gsea import dataFromResult
from react_gsea.jupyter_compat import ReactGSEA
# Notebook display util functions
def download_button(content, label, filename):
# Add download button
outname = filename.split('.')[0]
display(HTML('<textarea id="textbox_{outname}" style="display: none;">{content}</textarea> <button style="margin:10px 0;" id="create_{outname}">{label}</button> <a download="{filename}" id="downloadlink_{outname}" style="display: none">Download</a>'.format(**locals())))
display(HTML('<script type="text/javascript">!function(){{var e=null,t=document.getElementById("create_{outname}"),n=document.getElementById("textbox_{outname}");t.addEventListener("click",function(){{var t,l,c=document.getElementById("downloadlink_{outname}");c.href=(t=n.value,l=new Blob([t],{{type:"text/plain"}}),null!==e&&window.URL.revokeObjectURL(e),e=window.URL.createObjectURL(l)),c.click()}},!1)}}();</script>'.format(**locals())))
def make_clickable(link):
return f'<a target="_blank" href="{link}">{link}</a>'
def figure_header(label,title):
display(HTML(f"<div style='font-size:1rem; padding:1rem 0;'><b>{label}</b>: {title}</div>"))
def figure_legend(label,title,content=""):
display(HTML(f"<div style='font-size:1rem;'><b>{label}</b>: <i>{title}</i>. {content} </div>"))
%%appyter hide
{% do SectionField(
name = 'DATASETS',
title = 'Dataset Selection',
subtitle = 'Upload datasets for visualization and analysis. Both file uploads are required to run the analysis.',
img = 'rna.png'
) %}
{% do SectionField(
name = 'PARAMETERS',
title = 'Analysis Parameters',
subtitle = 'Set parameters for analysis.',
img = 'analysis.png'
) %}
{% do SectionField(
name = "ENRICHR_LIBS",
title = "Enrichment Analysis Library Selection",
subtitle = "Choose Enrichr geneset libraries for comparison against input genes. Multiple libraries can be selected from each section. If nothing is selected, default libraries will be used.",
img = 'enrichr-logo.png'
) %}
{% set data_filename = FileField(
name='data_filename',
label='RNA-seq data file',
description='TSV or CSV file containing RNA-seq read counts. Index should be Entrez gene symbols, and columns should be individual samples.',
default='',
examples = {
'GSE159266 Data': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
},
section='DATASETS'
) %}
{% set metadata_filename = FileField(
name='metadata_filename',
label='Sample metadata file',
description='TSV or CSV file containing sample metadata. Index should be sample IDs corresponding to columns of RNA-seq data file, and columns should be different sample attributes.',
default='',
examples = {
'GSE159266 Metadata': 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
},
section='DATASETS'
) %}
{% set n_neighbors = IntField(
name = 'n_neighbors',
label = 'Number of neighbors to use for UMAP calculations',
description = 'Smaller values preserve local structure, while larger values emphasize global structure.',
default = 40,
min = 2,
max = 200,
section = 'PARAMETERS'
) %}
{% set min_cluster_dist = FloatField(
name = 'min_cluster_dist',
label = 'Minimum distance between UMAP-projected points',
description = 'Determines how close/distant points belonging to different clusters are from each other.',
default = 0.3,
min = 0.1,
max = 1,
section = 'PARAMETERS'
) %}
{% set top_n_genes = IntField(
name = 'top_n_genes',
label = 'Number of genes to analyze',
description = 'Number of top variable genes to use in analysis.',
default = 2500,
section = 'PARAMETERS'
) %}
{% set top_n_genes_enrichment = IntField(
name = 'top_n_genes_enrichment',
label = 'Number of genes to use for enrichment analysis',
description = 'Number of top variable genes to use for enrichment analysis; must be less than top_n_genes.',
default = 250,
section = 'PARAMETERS'
) %}
{% set do_l1000 = BoolField(
name = 'do_l1000',
label = 'Query L1000 signatures?',
description = 'Option to query opposite and similar L1000 signatures to input data using L1000FWD.',
default = True,
section = 'PARAMETERS'
) %}
{% set use_weighted_score = BoolField(
name = 'use_weighted_score',
label = 'Use weighted silhouette score?',
description = 'Option to prioritize more clusters over fewer.',
default = True,
section = 'PARAMETERS'
) %}
{% set transcription_libraries = MultiChoiceField(
name = 'transcription_libraries',
label = 'Transcription Libraries',
description = 'Default library is ENCODE_TF_ChIP-seq_2015',
choices = [
'ARCHS4_TFs_Coexp',
'ChEA_2016',
'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',
'ENCODE_Histone_Modifications_2015',
'ENCODE_TF_ChIP-seq_2015',
'Epigenomics_Roadmap_HM_ChIP-seq',
'Enrichr_Submissions_TF-Gene_Coocurrence',
'Genome_Browser_PWMs',
'lncHUB_lncRNA_Co-Expression',
'miRTarBase_2017',
'TargetScan_microRNA_2017',
'TF-LOF_Expression_from_GEO',
'TF_Perturbations_Followed_by_Expression',
'Transcription_Factor_PPIs',
'TRANSFAC_and_JASPAR_PWMs',
'TRRUST_Transcription_Factors_2019'
],
default = [
'ENCODE_TF_ChIP-seq_2015'
],
section = 'ENRICHR_LIBS'
) %}
{% set pathway_libraries = MultiChoiceField(
name = "pathway_libraries",
label = "Pathway Libraries",
description = 'Default libraries are KEGG_2019_Human and KEGG_2019_Mouse',
choices = [
'ARCHS4_Kinases_Coexp',
'BioCarta_2016',
'BioPlanet_2019',
'BioPlex_2017',
'CORUM',
'Elsevier_Pathway_Collection',
'HMS_LINCS_KinomeScan',
'HumanCyc_2016',
'huMAP',
'KEA_2015',
'KEGG_2019_Human',
'KEGG_2019_Mouse',
'Kinase_Perturbations_from_GEO_down',
'Kinase_Perturbations_from_GEO_up',
'L1000_Kinase_and_GPCR_Perturbations_down',
'L1000_Kinase_and_GPCR_Perturbations_up',
'NCI-Nature_2016',
'NURSA_Human_Endogenous_Complexome',
],
default = [
'KEGG_2019_Human',
'KEGG_2019_Mouse'
],
section = 'ENRICHR_LIBS'
) %}
{% set ontology_libraries = MultiChoiceField(
name = 'ontology_libraries',
label = 'Ontology Libraries',
description = 'Default libraries are GO_Biological_Process_2018 and MGI_Mammalian_Phenotype_Level_4_2019',
choices = [
'GO_Biological_Process_2018',
'GO_Cellular_Component_2018',
'GO_Molecular_Function_2018',
'Human_Phenotype_Ontology',
'Jensen_COMPARTMENTS',
'Jensen_DISEASES',
'Jensen_TISSUES',
'MGI_Mammalian_Phenotype_Level_4_2019'
],
default = [
'GO_Biological_Process_2018',
'MGI_Mammalian_Phenotype_Level_4_2019'],
section = 'ENRICHR_LIBS'
) %}
{% set disease_drug_libraries = MultiChoiceField(
name = 'disease_drug_libraries',
label = 'Disease Drug Libraries',
description = 'Default library is GWAS_Catalog_2019',
choices = [
'Achilles_fitness_decrease',
'Achilles_fitness_increase',
'ARCHS4_IDG_Coexp',
'ClinVar_2019',
'dbGaP',
'DepMap_WG_CRISPR_Screens_Broad_CellLines_2019',
'DepMap_WG_CRISPR_Screens_Sanger_CellLines_2019',
'DisGeNET',
'DrugMatrix',
'DSigDB',
'GeneSigDB',
'GWAS_Catalog_2019',
'LINCS_L1000_Chem_Pert_down',
'LINCS_L1000_Chem_Pert_up',
'LINCS_L1000_Ligand_Perturbations_down',
'LINCS_L1000_Ligand_Perturbations_up',
'MSigDB_Computational',
'MSigDB_Oncogenic_Signatures',
'Old_CMAP_down',
'Old_CMAP_up',
'OMIM_Disease',
'OMIM_Expanded',
'PheWeb_2019',
'Rare_Diseases_AutoRIF_ARCHS4_Predictions',
'Rare_Diseases_AutoRIF_Gene_Lists',
'Rare_Diseases_GeneRIF_ARCHS4_Predictions',
'Rare_Diseases_GeneRIF_Gene_Lists',
'UK_Biobank_GWAS_v1',
'Virus_Perturbations_from_GEO_down',
'Virus_Perturbations_from_GEO_up',
'VirusMINT'
],
default = [
'GWAS_Catalog_2019'
],
section = 'ENRICHR_LIBS'
) %}
{% set cell_type_libraries = MultiChoiceField(
name = 'cell_type_libraries',
label = 'Cell Type Libraries',
description = 'No libraries selected by default',
choices = [
'Allen_Brain_Atlas_down',
'Allen_Brain_Atlas_up',
'ARCHS4_Cell-lines',
'ARCHS4_Tissues',
'Cancer_Cell_Line_Encyclopedia',
'CCLE_Proteomics_2020',
'ESCAPE',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_down',
'GTEx_Tissue_Sample_Gene_Expression_Profiles_up',
'Human_Gene_Atlas',
'Mouse_Gene_Atlas',
'NCI-60_Cancer_Cell_Lines',
'ProteomicsDB_2020',
'Tissue_Protein_Expression_from_Human_Proteome_Map'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set misc_libraries = MultiChoiceField(
name = 'misc_libraries',
label = 'Miscellaneous Libraries',
description = 'No libraries selected by default',
choices = [
'Chromosome_Location_hg19',
'Data_Acquisition_Method_Most_Popular_Genes',
'Enrichr_Libraries_Most_Popular_Genes',
'Genes_Associated_with_NIH_Grants',
'HMDB_Metabolites',
'HomoloGene',
'InterPro_Domains_2019',
'NIH_Funded_PIs_2017_AutoRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_GeneRIF_ARCHS4_Predictions',
'NIH_Funded_PIs_2017_Human_AutoRIF',
'NIH_Funded_PIs_2017_Human_GeneRIF',
'Pfam_Domains_2019',
'Pfam_InterPro_Domains',
'Table_Mining_of_CRISPR_Studies'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set legacy_libraries = MultiChoiceField(
name = 'legacy_libraries',
label = 'Legacy Libraries',
description = 'No libraries selected by default',
choices = [
'BioCarta_2013',
'BioCarta_2015',
'ChEA_2013',
'ChEA_2015',
'Chromosome_Location',
'Disease_Signatures_from_GEO_down_2014',
'Disease_Signatures_from_GEO_up_2014',
'Drug_Perturbations_from_GEO_2014',
'ENCODE_Histone_Modifications_2013',
'ENCODE_TF_ChIP-seq_2014',
'GO_Biological_Process_2013',
'GO_Biological_Process_2015',
'GO_Biological_Process_2017',
'GO_Biological_Process_2017b',
'GO_Cellular_Component_2013',
'GO_Cellular_Component_2015',
'GO_Cellular_Component_2017',
'GO_Cellular_Component_2017b',
'GO_Molecular_Function_2013',
'GO_Molecular_Function_2015',
'GO_Molecular_Function_2017',
'GO_Molecular_Function_2017b',
'HumanCyc_2015',
'KEA_2013',
'KEGG_2013',
'KEGG_2015',
'KEGG_2016',
'MGI_Mammalian_Phenotype_2013',
'MGI_Mammalian_Phenotype_2017',
'MGI_Mammalian_Phenotype_Level_3',
'MGI_Mammalian_Phenotype_Level_4',
'NCI-Nature_2015',
'Panther_2015',
'Reactome_2013',
'Reactome_2015',
'TargetScan_microRNA',
'Tissue_Protein_Expression_from_ProteomicsDB',
'WikiPathways_2013',
'WikiPathways_2015',
'WikiPathways_2016'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
{% set crowd_libraries = MultiChoiceField(
name = 'crowd_libraries',
label = 'Crowd Libraries',
description = 'No libraries selected by default',
choices = [
'Aging_Perturbations_from_GEO_down',
'Aging_Perturbations_from_GEO_up',
'Disease_Perturbations_from_GEO_down',
'Disease_Perturbations_from_GEO_up',
'Drug_Perturbations_from_GEO_down',
'Drug_Perturbations_from_GEO_up',
'Gene_Perturbations_from_GEO_down',
'Gene_Perturbations_from_GEO_up',
'Ligand_Perturbations_from_GEO_down',
'Ligand_Perturbations_from_GEO_up',
'MCF7_Perturbations_from_GEO_down',
'MCF7_Perturbations_from_GEO_up',
'Microbe_Perturbations_from_GEO_down',
'Microbe_Perturbations_from_GEO_up',
'RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO',
'SysMyo_Muscle_Gene_Sets'
],
default = [],
section = 'ENRICHR_LIBS'
) %}
%%appyter code_exec
data_filename = {{ data_filename }}
metadata_filename = {{ metadata_filename }}
n_neighbors = {{ n_neighbors }}
min_cluster_dist = {{ min_cluster_dist }}
top_n_genes = {{ top_n_genes }}
top_n_genes_enrichment = {{ top_n_genes_enrichment }}
do_l1000 = {{ do_l1000 }}
use_weighted_score = {{ use_weighted_score }}
transcription_libraries = {{ transcription_libraries }}
pathway_libraries = {{ pathway_libraries }}
ontology_libraries = {{ ontology_libraries }}
disease_drug_libraries = {{ disease_drug_libraries }}
cell_type_libraries = {{ cell_type_libraries }}
misc_libraries = {{ misc_libraries }}
legacy_libraries = {{ legacy_libraries }}
crowd_libraries = {{ crowd_libraries }}
if data_filename == '' or metadata_filename == '':
print("One or both user-uploaded files missing, use example GEO data.")
data_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_data_cleaned.txt'
metadata_filename = 'https://appyters.maayanlab.cloud/storage/RNAseq_Data_Metadata_Analysis/GSE159266_metadata_cleaned.txt'
print(data_filename + '\n' + metadata_filename)
###Output
_____no_output_____
###Markdown
1. Import Datasets
Load RNA-seq gene read counts and associated sample metadata into dataframes.
###Code
def load_dataframe(file):
''' Load a file by downloading it or reading it if already downloaded.
'''
ext = os.path.splitext(file)[1]
if ext in {'.tsv', '.txt'}:
df = pd.read_csv(file, sep='\t', index_col=0)
elif ext == '.csv':
df = pd.read_csv(file, index_col=0)
else:
raise Exception('Unrecognized file format', ext)
    # Fix any type coercion on identifiers
df.index = df.index.astype(str)
df.columns = df.columns.astype(str)
return df
data_index = "symbol"
metadata_index = "sample_id"
print(f"Loading user-uploaded data...")
df_data = load_dataframe(data_filename).sort_index()
df_metadata = load_dataframe(metadata_filename).sort_index()
df_data.index.name = "symbol"
df_metadata.index.name = "sample_id"
print("Data loaded!")
###Output
_____no_output_____
###Markdown
1a. RNA-seq Data
###Code
figure_legend("Table 1", "RNA-seq data", "The RNA-seq data contains a row per gene and a column per sample.")
display(df_data.head())
###Output
_____no_output_____
###Markdown
1b. Metadata
###Code
figure_legend("Table 2","Metadata", "The column indices are sample metadata attributes, while the row indices are sample IDs corresponding to the columns of the RNA-seq data.")
display(df_metadata.head())
###Output
_____no_output_____
###Markdown
Listed below are all the metadata categories with >1 unique value and at least 1 repeated value. These categories will be used to cluster samples later in the analysis.
###Code
features = []
for col in df_metadata.columns:
if len(df_metadata[col].unique()) > 1 and len(df_metadata[col].unique()) < len(df_metadata[col]):
features.append(col)
else:
continue
# features = df_metadata.columns.values
features
df_metadata = df_metadata[features]
###Output
_____no_output_____
###Markdown
2. Normalize Data
Given the highly variable nature of expression levels across different genes, it is necessary to normalize the read counts before proceeding.
###Code
# create dataframe to display sample stats
df_library_size = pd.DataFrame(
{
'n_expressed_genes': df_data[df_data > 0].count(),
'log_n_reads': np.log2(df_data.sum()),
'n_reads': df_data.sum(),
}).sort_values('n_reads', ascending=False)
df_library_size.index.name = "sample_id"
figure_legend("Table 3","Library size", "By default, the first five entries are shown. A gene read is counted toward n_reads for a single sample if its value is greater than 0.")
display(df_library_size.head())
###Output
_____no_output_____
###Markdown
Below, the library distribution is shown.
###Code
sns.displot(df_library_size["log_n_reads"]); plt.show()
figure_legend("Figure 1","Library size distribution")
###Output
_____no_output_____
###Markdown
Two versions of the dataset are normalized: one with just the `top_n_genes` most variable genes and one with all genes. The former will be used to compute clusters after dimensionality reduction, and the latter to compute the characteristic direction (up or down) of each gene in a cluster.
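Explicitly, each raw count $x$ is transformed as $\log_2(x + 1)$; quantile normalization then maps each sample onto a common reference distribution so that all samples share the same distribution of values; finally, each gene is converted to z-scores across samples, $z = \frac{x - \mu_{gene}}{\sigma_{gene}}$, where $\mu_{gene}$ and $\sigma_{gene}$ are that gene's mean and standard deviation across all samples.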
###Code
# copy full dataset for computing characteristic directions later
df_data_norm_all_genes = df_data.copy()
# compute log2(x+1)
df_data_norm = log2_normalize(df_data, offset=1)
df_data_norm_all_genes = log2_normalize(df_data_norm_all_genes, offset=1)
# quantile normalize each sample
df_data_norm = qnorm.quantile_normalize(df_data_norm, axis=1)
df_data_norm_all_genes = qnorm.quantile_normalize(df_data_norm_all_genes, axis=1)
# take top_n_genes most variable rows
df_data_norm = filter_by_var(df_data_norm, top_n=top_n_genes)
# convert to zscores
df_data_norm = pd.DataFrame(zscore(df_data_norm, axis=1), index=df_data_norm.index, columns=df_data_norm.columns)
df_data_norm_all_genes = pd.DataFrame(zscore(df_data_norm_all_genes, axis=1), index=df_data_norm_all_genes.index, columns=df_data_norm_all_genes.columns)
figure_legend("Table 4","Normalized RNA-seq data for most variable genes", "Counts are filtered for the most variable genes. The resulting dataset is log transformed and normalized, then converted to z-scores.")
display(df_data_norm.head())
# plot the first gene distribution
gene1 = df_data_norm.index.values[0]
gene1_plt = sns.displot(df_data_norm.iloc[0, :])
gene1_plt.set(xlabel='Z-score', ylabel='Number of samples', title=f'Z-score distribution of {gene1}')
plt.show()
figure_legend("Figure 2",f"Sample gene expression distibution for {gene1}", f"In this dataset, {gene1} is the most variably expressed across all samples.")
# plot a single RNA-seq sample distribution
sample_plt = sns.displot(df_data_norm.iloc[:, 0])
sample_plt.set(xlabel='Z-score', ylabel='Number of genes', title=f'Z-score distribution of all genes in {df_data_norm.columns[0]}')
plt.show()
figure_legend("Figure 4",f"RNA-seq profile distribution for sample {df_data_norm.columns[0]}")
###Output
_____no_output_____
###Markdown
3. Reduce Data Dimensionality
Now that the data has been loaded and normalized, the most variable genes across the dataset can be identified and visualized with hierarchical clustering and heatmaps. Dimensionality reduction makes it easier to differentiate the samples by reducing the number of attributes that must be considered.
3a. Principal Component Analysis
PCA is used first to reduce the dimensionality of the dataset while still retaining most of its variability. In PCA, a large number of dimensions -- in this case, the expression levels of the most variable genes -- is reduced to a few new dimensions that capture most of the relevant information in the original attributes. First, all data values are scaled to [0, 1].
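Here, `MinMaxScaler` rescales each column (that is, each sample) via $x' = \frac{x - x_{min}}{x_{max} - x_{min}}$, so that every sample's values span exactly $[0, 1]$ before PCA is applied.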
###Code
pca_scaler = MinMaxScaler()
df_data_norm[df_data_norm.columns.tolist()] = pca_scaler.fit_transform(df_data_norm[df_data_norm.columns.tolist()])
df_data_norm.head()
###Output
_____no_output_____
###Markdown
Instead of manually setting the number of PCA components, the number of components is chosen automatically so that the retained components explain at least 95% of the variance.
###Code
# PCA
data_norm_pca = PCA(
random_state=42,
n_components=0.95
)
data_norm_pca.fit(df_data_norm.values.T)
df_data_norm_pca = pd.DataFrame(
data_norm_pca.transform(df_data_norm.values.T),
index=df_data_norm.T.index
)
df_data_norm_pca.columns = [
f'PCA-{c}' # ({r:.3f})'
for c, r in zip(df_data_norm_pca.columns, data_norm_pca.explained_variance_ratio_)
]
df_data_norm_pca.index.name = "sample_id"
figure_legend("Table 5","Principle components of RNA-seq data", "The top principle components are the projections of each datapoint onto the axes along which there is the most variation in the dataset.")
display(df_data_norm_pca.head())
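# Quick sanity check (illustrative): report how many components were retained
# and how much of the total variance they explain, given the 95% target above.
cum_var = np.cumsum(data_norm_pca.explained_variance_ratio_)
print(f"{len(cum_var)} PCA components retained, explaining {cum_var[-1]:.1%} of the variance")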
###Output
_____no_output_____
###Markdown
The data can now be plotted with the [React-Scatter-Board](https://github.com/MaayanLab/react-scatter-board) package. The points can be shaped and colored by various metadata categories, with the default being the first two metadata columns. They can also be individually searched by sample_id.
###Code
# combine metadata with RNA-seq data; note this will fail if sample_ids are
# not exactly matched between both datasets
pca_data = merge(
df_data_norm_pca[["PCA-0", "PCA-1"]],
df_library_size,
df_metadata
)
# name columns for plotting purposes
pca_data = pca_data.rename(columns={'PCA-0': 'x', 'PCA-1': 'y'})
pca_data['sample_id'] = pca_data.index
# normalize dimensions to -10, 10
pca_min, pca_max = -10, 10
pca_x_min, pca_x_max = pca_data['x'].min(), pca_data['x'].max()
pca_y_min, pca_y_max = pca_data['y'].min(), pca_data['y'].max()
pca_data['x'] = (pca_data['x'] - pca_x_min) / (pca_x_max - pca_x_min) * (pca_max - pca_min) + pca_min
pca_data['y'] = (pca_data['y'] - pca_y_min) / (pca_y_max - pca_y_min) * (pca_max - pca_min) + pca_min
pca_scatter_data = pca_data.to_dict('records')
color_def = features[0]
shape_def = features[1]
ScatterBoard(
id='pca-scatterboard',
is3d=False,
data=pca_scatter_data,
shapeKey=shape_def,
colorKey=color_def,
labelKeys=['sample_id'],
searchKeys=['sample_id'],
width=600,
height=600
)
###Output
_____no_output_____
###Markdown
**Figure 5:** *First two PCA components of RNA-seq data.* Points are labeled by Sample ID and can be color- or shape-coded by any of the metadata categories using the dropdown menus. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.
3b. Uniform Manifold Approximation and Projection
The dimensionality of the dataset is further reduced by performing UMAP on the PCA components. Parameters such as `n_neighbors` and `min_dist` are set according to defaults used by the Seurat R Package for single cell genomics analysis.
###Code
data_norm_umap = UMAP(
random_state=42,
n_components=2,
n_neighbors=n_neighbors if df_data_norm_pca.shape[1] > n_neighbors else df_data_norm_pca.shape[1]-1,
metric='cosine',
min_dist=min_cluster_dist,
)
n_pca_components = df_data_norm_pca.shape[1]
data_norm_umap.fit(df_data_norm_pca.iloc[:, :n_pca_components].values)
# keep only first two UMAP components
df_data_norm_umap = pd.DataFrame(
data_norm_umap.transform(df_data_norm_pca.iloc[:, :n_pca_components].values),
columns=['UMAP-0', 'UMAP-1'],
index=df_data_norm_pca.index,
)
# project data onto first two UMAP components for visualization
umap_data = merge(
df_data_norm_umap[["UMAP-0", "UMAP-1"]],
df_library_size,
df_metadata
)
umap_data = umap_data.rename(columns={'UMAP-0': 'x', 'UMAP-1': 'y'})
umap_data['sample_id'] = umap_data.index
# normalize to (-10, 10)
umap_min, umap_max = -10, 10
umap_x_min, umap_x_max = umap_data['x'].min(), umap_data['x'].max()
umap_y_min, umap_y_max = umap_data['y'].min(), umap_data['y'].max()
umap_data['x'] = (umap_data['x'] - umap_x_min) / (umap_x_max - umap_x_min) * (umap_max - umap_min) + umap_min
umap_data['y'] = (umap_data['y'] - umap_y_min) / (umap_y_max - umap_y_min) * (umap_max - umap_min) + umap_min
umap_scatter_data = umap_data.to_dict('records')
color_def = features[0]
shape_def = features[1]
ScatterBoard(
id='umap-scatterboard',
is3d=False,
data=umap_scatter_data,
shapeKey=shape_def,
colorKey=color_def,
labelKeys=['sample_id'],
searchKeys=['sample_id'],
width=600,
height=600
)
###Output
_____no_output_____
###Markdown
**Figure 6:** *First two UMAP components of RNA-seq data.* The datapoints are again labeled by sample ID, and can be color- or shape-coded by any of the metadata categories using the dropdown menu. Points can also be isolated by searching by sample ID. Scroll to zoom, drag to move around.
4. Clustering
The first two UMAP components are used from here on out. Sample clusters are computed with the k-means method. The total number of clusters is determined by first testing a range of candidate cluster counts and computing a silhouette score for each; the silhouette score measures how similar a sample is to its own cluster relative to the other clusters. The goal is to maximize both the similarity within clusters and the separation between clusters, so the ideal number of clusters is the one that produces the highest silhouette score (or the highest weighted score, when the weighted option is enabled).
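When the weighted option is enabled, each candidate number of clusters $k$ is additionally scored as $\text{weighted}(k) = 0.7\, s_k + 0.3\, \frac{k}{k_{max}}$, where $s_k$ is the average silhouette score obtained with $k$ clusters and $k_{max}$ is the largest number of clusters tested; the second term rewards solutions with more clusters, matching `calc_weighted_score` in the cell below.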
###Code
silhouette_scores = []
# set max clusters
max_clusters = math.ceil(df_data_norm_umap.shape[0]/2)
# function for weighting results with more clusters
def calc_weighted_score(sil_score, k, max_k):
return sil_score*0.7 + k/max_k*0.3
cluster_range = range(2, max_clusters)
for n in cluster_range:
# apply k-means clustering for each possible k
X = df_data_norm_umap.values
clusterer = KMeans(n_clusters=n, random_state=42).fit(X)
y_pred = clusterer.predict(X)
# The silhouette_score gives the average value for all the samples
silhouette_avg = silhouette_score(X, y_pred, metric='cosine')
# Compute a weighted score that rewards higher numbers of clusters
weighted_score = calc_weighted_score(silhouette_avg, n, max_clusters)
silhouette_scores.append({
"N Clusters": n,
"Silhouette Score": silhouette_avg,
"Weighted Score": weighted_score
})
# Labeling the clusters
centers = clusterer.cluster_centers_
# use unweighted or weighted scores
points = {}
threshold = 0.3
for s in silhouette_scores:
if use_weighted_score:
points[s["N Clusters"]] = s["Weighted Score"]
else:
points[s["N Clusters"]] = s["Silhouette Score"]
silhouette_scores = pd.DataFrame(silhouette_scores)
if use_weighted_score:
figure_legend("Table 6", "Weighted silhouette scores by number of clusters", "Values are sorted by the highest weighted score.")
    display(silhouette_scores.sort_values(["Weighted Score"], ascending=False).head().reset_index().drop(columns=['index']))
else:
figure_legend("Table 6", "Silhouette scores by number of clusters", "Values are sorted by the highest silhouette score.")
    display(silhouette_scores.sort_values(["Silhouette Score"], ascending=False).head().reset_index().drop(columns=['index']))
best_unweighted = silhouette_scores.sort_values('Silhouette Score').iloc[-1].to_dict()
best_weighted = silhouette_scores.sort_values('Weighted Score').iloc[-1].to_dict()
best = {"Silhouette Score": best_unweighted, "Weighted Score": best_weighted}
if use_weighted_score:
k = int(best['Weighted Score']['N Clusters'])
else:
k = int(best['Silhouette Score']['N Clusters'])
print(f"Ideal k: {k} clusters")
# plot the weighted and unweighted scores as a function of # of clusters
colors = {"Silhouette Score": "#7C88FB", "Weighted Score": "#00CC96"}
for score_type in ["Silhouette Score", "Weighted Score"]:
plt.plot(silhouette_scores['N Clusters'], silhouette_scores[score_type], label=score_type, color=colors[score_type])
plt.scatter([best[score_type]['N Clusters']], [best[score_type][score_type]], label=f"Best {score_type}: {int(best[score_type]['N Clusters'])} clusters", color=colors[score_type])
plt.axvline(k, label = f"Ideal k: {k} clusters", color ="#EF553B", alpha=0.8,dashes=(3,3))
plt.legend()
plt.ylabel('Score')
plt.xlabel('Number of Clusters')
plt.show()
figure_legend("Figure 7", "Cluster size selection", "The dotted line indicates the value of the 'ideal' <i>k</i> as chosen by the selected scoring method. This value will be used in subsequent clustering.")
# Compute the k-means dataframe using the ideal number of clusters
km = KMeans(n_clusters=k, random_state=42)
km_clusters = km.fit_predict(df_data_norm_umap.values)
df_data_norm_km = pd.DataFrame({
'Cluster': [
str(c)
for c in km_clusters
]}, index=df_data_norm_umap.index)
print(f'Computed {len(df_data_norm_km["Cluster"].unique())} clusters')
# Map each cluster to a color for later plots
clusters = df_data_norm_km["Cluster"].unique()
plotly_colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
cluster_colors = {}
i = 0
for c in clusters:
cluster_colors[c] = plotly_colors[i % len(plotly_colors)]
i += 1
def cluster_heading(cluster):
display(HTML(f'''
<center>
<div style='background-color:{cluster_colors[cluster] + '98'};
width:100%;height:3rem;display:flex;align-items:center;
justify-content:center;color:white;font-size:2rem'>
<center>Cluster {cluster}</center>
</div>
</center>'''))
###Output
_____no_output_____
###Markdown
5. Differential Expression
Next, the differential expression for each cluster is computed. The Characteristic Direction method is used for identifying differentially expressed genes among the different clusters.
###Code
# Get differential expression for each cluster, using the dataset containing all genes
diff_expr = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
diff_expr[f"Cluster {cluster} CD"] = characteristic_direction(
# expression outside of this cluster
df_data_norm_all_genes.loc[:, df_data_norm_all_genes.columns.difference(samples.index)],
# expression in this cluster
df_data_norm_all_genes.loc[:, samples.index],
)['CD-coefficient']
df_diff_expr = pd.DataFrame(diff_expr)
df_diff_expr = df_diff_expr.sort_values(by='Cluster 0 CD',ascending=True)
df_diff_expr['Symbol'] = df_diff_expr.index.values
figure_legend("Table 7", "Differential expression of genes by cluster", "By default, the top 5 most differentially expressed genes are shown, along with the corresponding characteristic directions for each cluster.")
display(df_diff_expr.head())
###Output
_____no_output_____
###Markdown
Logistic regression is performed for each metadata category to determine which categories most accurately predict cluster designations for each data point. ROC curves are also plotted for categories with the top two highest AUC scores.
###Code
# LR
aucs = {}
rocs = {}
for cluster, samples in df_data_norm_km.groupby('Cluster'):
aucs[cluster] = {}
rocs[cluster] = []
for feature in features:
lr = LogisticRegression()
X = df_metadata.copy()
X = X[feature]
X = pd.merge(X, df_data_norm_km, left_index = True, right_index = True)
# drop NAs, and move on if dataset is empty
        X = X.replace("not reported", None)
X = X.dropna()
if (X.shape[0] == 0): continue
cluster_data = X["Cluster"]
X = X.drop(columns= ["Cluster"])
# one-hot encode non numerical data
if (not isinstance(X[feature][0], (int, float, complex))):
X = pd.get_dummies(X[feature], prefix=feature)
y_true = (cluster_data == cluster)
if (len(y_true.unique()) < 2): # if there is only one class in the dataset
print(f"Not enough data to classify cluster {cluster} based on category {feature}")
aucs[cluster][feature] = np.nan
continue
lr.fit(X, y_true)
y_score = lr.predict_proba(X)[:, 1]
auc_score = roc_auc_score(y_true, y_score)
aucs[cluster][feature] = auc_score
# save the ROCs
rocs[cluster].append({"auc":auc_score, "lr": lr, "X": X, "y_true":y_true, "title": f'Predictions of cluster {cluster} by category {feature}'})
df_cluster_aucs = pd.DataFrame(aucs)
df_cluster_aucs.index.name="Category"
# sort features by avg AUC across all clusters
df_cluster_aucs["avg"] = [ np.mean(df_cluster_aucs.T[f]) for f in df_cluster_aucs.index.values ]
df_cluster_aucs = df_cluster_aucs.sort_values(by = "avg", ascending=False)
df_cluster_aucs = df_cluster_aucs.drop(columns = "avg")
cols = [('Cluster', col) for col in df_cluster_aucs.columns ]
df_cluster_aucs.columns = pd.MultiIndex.from_tuples(cols)
figure_legend("Table 8", "Average AUC scores for top predictive metadata categories, by cluster", "Scores for the top 5 metadata categories for predicting clusters, as determined by the average AUC score across all clusters, are shown. Higher AUC scores correspond to better classifiers for distinguishing whether or not a datapoint belongs to a certain cluster.")
display(df_cluster_aucs.head(5))
# plot top 2 ROCs for each cluster
plt.rc('font', size=16)
for cluster, plots in rocs.items():
plots.sort(reverse=True, key=lambda x: x["auc"])
cluster_heading(cluster)
if len(plots) < 2:
best_rocs = plots
else:
best_rocs = plots[:2]
num_plots = len(best_rocs)
figure,axes = plt.subplots(int(math.ceil(num_plots / 2.)), 2, figsize=(15,(len(best_rocs)*3.5)))
axes = axes.flatten()
for i in range(len(axes)):
if i >= len(best_rocs):
axes[i].remove()
else:
plot = best_rocs[i]
fig = plot_roc_curve(plot["lr"], plot["X"], plot["y_true"], ax=axes[i])
axes[i].set_title('\n'.join(wrap(plot["title"], 40)))
figure.tight_layout(pad=2)
plt.show()
figure_legend("Figure 8", "ROCs for top cluster-predicting metadata categories")
plt.rcdefaults()
###Output
_____no_output_____
###Markdown
6. Identify Up- and Down-Regulated Genes
Find the most up- and down-regulated genes for each cluster, for visualization in a heatmap and for enrichment analysis.
###Code
# Merge data
df_clustered_umap = pd.merge(left=df_data_norm_km, left_on="sample_id", right=df_data_norm_umap, right_on="sample_id")
# Get top Genes for each cluster
top_genes = {}
all_top_genes = []
heatmap_top_n = 100
for cluster in df_clustered_umap['Cluster'].unique():
cd_col = f'Cluster {cluster} CD'
if cd_col in df_diff_expr.columns:
# top up genes
up_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=False).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
# top down genes
dn_genes = df_diff_expr.loc[df_diff_expr[cd_col].sort_values(ascending=True).iloc[:top_n_genes_enrichment].index, 'Symbol'].values
else:
        raise Exception(f'Cannot find CD column for cluster {cluster}')
all_top_genes.append(up_genes[:heatmap_top_n])
all_top_genes.append(dn_genes[:heatmap_top_n])
# save results
top_genes[cluster] = (up_genes, dn_genes)
all_top_genes = [item for sublist in all_top_genes for item in sublist] # flatten all genes to one list
###Output
_____no_output_____
###Markdown
Data corresponding to only the top 100 up- and down-regulated genes for each cluster is selected for visualization in a heatmap, with log-transformation and normalization proceeding as before.
###Code
df_data_norm_heatmap_f = df_data.loc[all_top_genes, :]
# compute log normalization of matrix
df_data_norm_heatmap_f = log2_normalize(df_data_norm_heatmap_f, offset=1)
# convert to zscores
# df_data_norm_heatmap_f = zscore_normalize(df_data_norm_heatmap_f)
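# zscore with axis=1 standardizes each gene (row) across samples, so heatmap colors are comparable between genes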
df_data_norm_heatmap_f = pd.DataFrame(zscore(df_data_norm_heatmap_f, axis=1), index=df_data_norm_heatmap_f.index, columns=df_data_norm_heatmap_f.columns)
# Plot heatmap
cases = df_data_norm_heatmap_f.columns
heatmap_cluster_colors = [ cluster_colors[x] for x in df_clustered_umap.loc[cases, :]["Cluster"] ]
sns.clustermap(df_data_norm_heatmap_f,xticklabels=False,col_colors = heatmap_cluster_colors); plt.show()
figure_legend("Figure 9", "Heatmap of most differentially expressed genes", "Color coding along the top edge indicates cluster designation of the corresponding sample.")
###Output
_____no_output_____
###Markdown
7. Enrichment Analysis with Enrichr
Perform enrichment analysis for each cluster by querying the [Enrichr](https://maayanlab.cloud/Enrichr/) API. The background libraries are the default libraries from Enrichr. A link is provided to download the results.
###Code
# enrichment analysis libraries
enrichr_libraries = OrderedDict([
('Diseases/Drugs', disease_drug_libraries),
('Ontologies', ontology_libraries),
('Cell Type', cell_type_libraries),
('Pathways', pathway_libraries),
('Transcription', transcription_libraries),
('Legacy', legacy_libraries),
('Crowd', crowd_libraries)
])
# handle no selected libraries
all_empty = True
for key, libs in enrichr_libraries.items():
if len(libs) > 0:
all_empty = False
break
if all_empty:
enrichr_libraries = OrderedDict([
('Diseases/Drugs', ['GWAS_Catalog_2019']),
('Ontologies', ['GO_Biological_Process_2018', 'MGI_Mammalian_Phenotype_Level_4_2019']),
('Pathways', ['KEGG_2019_Human', 'KEGG_2019_Mouse']),
('Transcription', ['ENCODE_TF_ChIP-seq_2015'])
])
# Util functions
def enrichr_link_from_genes(genes, description='', enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
''' Functional access to Enrichr API
'''
time.sleep(1)
resp = requests.post(enrichr_link + '/addList', files={
'list': (None, '\n'.join(genes)),
'description': (None, description),
})
if resp.status_code != 200:
raise Exception('Enrichr failed with status {}: {}'.format(
resp.status_code,
resp.text,
))
# wait a tinybit before returning link (backoff)
time.sleep(3)
result = resp.json()
return dict(result, link=enrichr_link + '/enrich?dataset=' + resp.json()['shortId'])
def enrichr_get_top_results(userListId, bg, enrichr_link='https://amp.pharm.mssm.edu/Enrichr'):
time.sleep(1)
resp = requests.get(enrichr_link + '/enrich?userListId={}&backgroundType={}'.format(userListId, bg))
if resp.status_code != 200:
raise Exception('Enrichr failed with status {}: {}'.format(
resp.status_code,
resp.text,
))
time.sleep(3)
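# the two unnamed trailing columns hold extra score fields returned by the API that are not used here (they are dropped before display)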
return pd.DataFrame(resp.json()[bg], columns=['rank', 'term', 'pvalue', 'zscore', 'combinedscore', 'overlapping_genes', 'adjusted_pvalue', '', ''])
# Get Enrichr links for each cluster
enrichr_links = {}
for cluster, (up_genes, dn_genes) in top_genes.items():
up_link, dn_link = None, None
if up_genes.size:
try:
up_link = enrichr_link_from_genes(up_genes, f'cluster {cluster} up')
except:
print(f'Enrichr failed for cluster {cluster} up genes')
else:
print(f'cluster {cluster} up: empty')
if dn_genes.size:
try:
dn_link = enrichr_link_from_genes(dn_genes, f'cluster {cluster} down')
except:
print(f'Enrichr failed for cluster {cluster} down genes')
else:
print(f'cluster {cluster} down: empty')
enrichr_links[cluster] = (up_link, dn_link)
# Grab top results for each cluster
all_enrichr_results = []
for cluster, (up_link, dn_link) in enrichr_links.items():
for link_type, link in [('up', up_link), ('down', dn_link)]:
if link is None:
continue
for category, libraries in enrichr_libraries.items():
for library in libraries:
try:
results = enrichr_get_top_results(link['userListId'], library).sort_values('pvalue').iloc[:5]
results['library'] = library
results['category'] = category
results['direction'] = link_type
results['cluster'] = cluster
all_enrichr_results.append(results)
except:
print('{}: {} {} {} cluster {} failed, continuing'.format(link, library, category, link_type, cluster))
df_enrichr_results = pd.concat(all_enrichr_results).reset_index()
###Output
_____no_output_____
###Markdown
Enrichment results are organized in table format below (Table 10). A description of each of the scores reported can be found on the [Enrichr help page](https://maayanlab.cloud/Enrichr/help#background&q=4). The full table can also be downloaded as a CSV.
###Code
# Display a dataframe with clickable enrichr links
figure_legend("Table 10","Enrichment analysis results from Enrichr", "Results are grouped by expression direction (up/down) and gene set library. Within groups, results are sorted by lowest p-value (highest rank) first.")
df_clickable = df_enrichr_results.copy().drop(columns=[''])
table_html = df_clickable.to_html(escape=False)
display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
download_button(df_enrichr_results.to_csv(), 'Download Enrichr results', 'Enrichr results.csv')
###Output
_____no_output_____
###Markdown
To view the full Enrichr results for the directional gene sets of each cluster, please use the links below. On the webpage that opens, you can explore and visualize how the selected input gene set compares against each of the background libraries available in Enrichr.
###Code
for cluster in enrichr_links.keys():
up_link = enrichr_links[cluster][0]['link']
dn_link = enrichr_links[cluster][1]['link']
display(Markdown(f"[Full Enrichr results for Cluster {cluster} up-regulated genes]({up_link})"))
display(Markdown(f"[Full Enrichr results for Cluster {cluster} down-regulated genes]({dn_link})"))
###Output
_____no_output_____
###Markdown
7a. Enrichr Result Barplots
Horizontal barplots are used to display the top Enrichr results for each cluster, by library and characteristic expression direction.
###Code
# Make horizontal barplots to visualize top Enrichr results
clusters = df_enrichr_results["cluster"].unique()
for cluster in clusters:
cluster_results = df_enrichr_results.loc[df_enrichr_results["cluster"] == cluster, :]
libraries = cluster_results["library"].unique()
num_rows = len(libraries)
count = 1 # keep track of which subplot we're on
fig = plt.figure(figsize=(15,5*num_rows))
for library in cluster_results["library"].unique():
library_results = cluster_results.loc[cluster_results["library"] == library, :]
for direction in library_results["direction"].unique():
plot_results = library_results.loc[cluster_results["direction"] == direction, :]
plot_results = plot_results.sort_values("pvalue",ascending=False)
labels = plot_results["term"]
labels = [ '\n'.join(wrap(l, 20)) for l in labels ]
values = plot_results["pvalue"]
values = -np.log(values)
# normalize values to map from 0-1 -> color, with opacity also based on normalized pvalue
cmap = plt.get_cmap('cool')
norm_values = [ 0.3 + (x - min(values))/(max(values) - min(values))*0.7 for x in values]
colors = [ [*cmap(val)[:3], 0.4 + 0.2*val] for val in norm_values]
# plot result
ax = fig.add_subplot(num_rows,2,count)
ax.barh(labels,values,color = colors)
ax.set_title(f'{library}\n{direction} genes')
ax.set_xlabel('-log(pvalue)')
count += 1
cluster_heading(cluster)
fig.tight_layout(pad=3, w_pad=2, h_pad=6)
plt.show()
display(HTML("<br><br>"))
figure_legend("Figure 11", "Enrichment results by cluster", "Bar plots indicate the negative log of the p-value for the specified term. One plot is presented per cluster, per gene-set library, per expression direction (up/down).")
###Output
_____no_output_____
###Markdown
7b. GSEA Running Sum Visualizations
While the above barplots display the top enriched terms for each cluster in each direction, individual enriched terms can also be compared to the tissue data using a random-walk [GSEA running sum visualization](https://github.com/MaayanLab/react-GSEA/tree/master).
First, each of the four default background libraries from Enrichr can be queried and saved as a JSON object which maps terms to their complete genesets.
###Code
libresp = {}
for lib in df_enrichr_results['library'].unique():
resp = requests.get('https://maayanlab.cloud/Enrichr/geneSetLibrary?mode=json&libraryName=' + lib)
if resp.status_code == 200:
libresp[lib] = resp.json()[lib]['terms']
else:
print(f"Failed to access library {lib}, continuing")
###Output
_____no_output_____
###Markdown
For each cluster, the most enriched term for that cluster from each library can then be compared against the most up-regulated genes in the cluster. Below, GSEA plots display the overlap between the genes from each cluster and their most enriched genesets. The x-axis of each plot is a list of genes in the tissue sample or uploaded data, ranked by expression level. The y-axis measures the running enrichment score: the score increases when a gene is in both the input gene set and the library gene set, and decreases otherwise. The peak of the plot gives the enrichment score for the library gene set when compared to the input.
###Code
# iterate through each cluster
for cluster in clusters:
cluster_heading(cluster)
# iterate through each library for each cluster
for lib in libresp.keys():
# obtain the most enriched library term for the cluster in the up direction
up_df = df_enrichr_results[
df_enrichr_results.direction.eq('up')
& df_enrichr_results.cluster.eq(cluster)
& df_enrichr_results.library.eq(lib)]
try:
top_up_term = up_df[up_df['rank'] == 1]['term'].iloc[0]
except:
display(HTML(f"<div style='font-size:1rem;'>Results unavailable for cluster {cluster} {lib}</div>"))
continue
# store the geneset for the most enriched term
top_up_set = libresp[lib][top_up_term].keys()
display(HTML(f"<div style='font-size:1.25rem;'><b>{top_up_term}</b> <br></div><div style='font-size:1rem;'>Most enriched term from {lib} for samples in Cluster {cluster}</div>"))
# display the GSEA plot comparing the enriched genes and the top up-regulated cluster genes
display(ReactGSEA(
data=dataFromResult(
input_set=top_up_set,
ranked_entities=df_diff_expr['Cluster ' + cluster + ' CD']
.sort_values(ascending=False)
.iloc[:math.ceil((df_diff_expr.shape[0]/2))]
.index.tolist()
)
))
###Output
_____no_output_____
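###Markdown
As a rough, self-contained sketch of the running-sum idea described above (using made-up gene names, and independent of the ReactGSEA widget rendered by this notebook), the score steps up whenever the next ranked gene belongs to the library gene set and steps down otherwise; its peak is the enrichment score.
###Code
# Toy running-sum sketch (hypothetical gene names; not part of the analysis above).
# np and plt are already imported earlier in this notebook, but re-importing is harmless.
import numpy as np
import matplotlib.pyplot as plt
toy_ranked = ['G1', 'G2', 'G3', 'G4', 'G5', 'G6', 'G7', 'G8']  # genes ranked by expression
toy_set = {'G2', 'G3', 'G6'}                                   # a hypothetical library gene set
hit_step = 1 / len(toy_set)                                    # step up when a ranked gene is in the set
miss_step = 1 / (len(toy_ranked) - len(toy_set))               # step down otherwise
running = np.cumsum([hit_step if g in toy_set else -miss_step for g in toy_ranked])
plt.plot(running)
plt.axhline(0, color='grey', linewidth=0.5)
plt.xlabel('Rank in toy list')
plt.ylabel('Running enrichment score')
plt.show()
###Output
_____no_output_____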
###Markdown
8. L1000 Analysis
If selected during user input, the most up- and down-regulated genes from each cluster, as identified above, can be input into the [L1000FWD](https://amp.pharm.mssm.edu/L1000FWD/) API, which will then return the most similar and opposite gene expression signatures from the L1000 database. Links are provided to the interactive L1000FWD projections for each set of results.
###Code
def l1000fwd_results_from_genes(up_genes, down_genes, description='', l100fwd_link='http://amp.pharm.mssm.edu/L1000FWD/'):
''' Functional access to L1000FWD API
'''
time.sleep(1)
response = requests.post(l100fwd_link + 'sig_search', json={
'up_genes': list(up_genes),
'down_genes': list(down_genes),
})
l1000fwd_results = {}
if response.status_code != 200:
raise Exception('L1000FWD failed with status {}: {}'.format(
response.status_code,
response.text,
))
if 'KeyError' in response.text:
l1000fwd_results['result_url'] = None
else:
# Get ID and URL
result_id = response.json()['result_id']
l1000fwd_results['result_url'] = 'https://amp.pharm.mssm.edu/l1000fwd/vanilla/result/'+result_id
l1000fwd_results['result_id'] = result_id
# Get Top
l1000fwd_results['signatures'] = requests.get(l100fwd_link + 'result/topn/' + result_id).json()
# wait a tinybit before returning link (backoff)
time.sleep(1)
return l1000fwd_results
def l1000fwd_sig_link(sig_id):
return 'https://amp.pharm.mssm.edu/dmoa/sig/' + sig_id
def get_signature_by_id(sig_id):
response = requests.get("http://amp.pharm.mssm.edu/L1000FWD/sig/" + sig_id)
if response.status_code != 200:
raise Exception('L1000FWD signature query failed with status {}: {}'.format(
response.status_code,
response.text,
))
return response.json()
def display_l1000fwd_results(l1000fwd_results, plot_counter,cluster_id,nr_drugs=7, height=300):
# Check if results
if l1000fwd_results['result_url']:
# Display cluster title
display(HTML('<br><br>'))
cluster_heading(cluster)
# Display link to the interactive L1000FWD results
display(HTML(f"<a href='{l1000fwd_results['result_url']}' target='_blank'> View L1000FWD for cluster {cluster_id}</a>"))
# Display tables
for direction, signature_list in l1000fwd_results['signatures'].items():
# Fix dataframe
rename_dict = {'sig_id': 'Signature ID', 'pvals': 'P-value', 'qvals': 'FDR', 'zscores': 'Z-score', 'combined_scores': 'Combined Score'}
signature_dataframe = pd.DataFrame(signature_list)[list(rename_dict.keys())].rename(columns=rename_dict).sort_values('P-value').rename_axis('Rank')
signature_dataframe.index = [x + 1 for x in range(len(signature_dataframe.index))]
signature_csv = signature_dataframe.to_csv(sep=",")
# Display table
pd.set_option('display.max_colwidth', None)  # do not truncate cell contents when rendering HTML
signature_dataframe['Signature ID'] = [f'<a href={l1000fwd_sig_link(x)} target="_blank">{x}</a>' for x in signature_dataframe['Signature ID']]
table_html = signature_dataframe.to_html(escape=False, classes='w-100')
display(HTML(f'<h3>{direction.title()} Signatures: </h3>'))
display(HTML(f'<style>.w-100{{width: 100% !important;}}</style><div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
# Display download button
download_button(signature_csv, f'Download {direction.title()} Signatures', f'Cluster {cluster_id} L1000FWD {direction.title()} signatures.csv')
# Link
display(HTML('Full results available at: <a href="{result_url}" target="_blank">{result_url}</a>.'.format(**l1000fwd_results)))
# Display error
else:
display(Markdown('### No results were found.\n This is likely due to the fact that the gene identifiers were not recognized by L1000FWD. Please note that L1000FWD currently only supports HGNC gene symbols (https://www.genenames.org/). If your dataset uses other gene identifier systems, such as Ensembl IDs or Entrez IDs, consider converting them to HGNC. Automated gene identifier conversion is currently under development.'))
if do_l1000:
plot_counter = 0
all_l1000fwd_results = {}
figure_header("Figure 14", "Most similar and opposite L1000 signatures, by cluster")
for cluster, (up_genes, dn_genes) in top_genes.items():
try:
results = l1000fwd_results_from_genes(up_genes,dn_genes)
all_l1000fwd_results[cluster] = results
display_l1000fwd_results(results,plot_counter,cluster)
plot_counter += 1
except:
print(f'L1000FWD API failed for cluster {cluster}, continuing')
figure_legend("Figure 14", "Most similar and opposite L1000 signatures, by cluster", "Results are sorted by smallest p-value.")
###Output
_____no_output_____
###Markdown
In the case of disease state RNA-seq data, the reverse signatures provide a potential set of drugs that could perturb the cells/tissues towards a "healthy" direction. These may present novel treatments for patients whose samples belong to a certain cluster.
###Code
if do_l1000:
df_drugs = pd.read_csv("https://amp.pharm.mssm.edu/l1000fwd/download/Drugs_metadata.csv")
# Load top drug suggestions for each cluster based on the drugs used to produce the top five opposite signatures
drug_results = {}
for cluster, results in all_l1000fwd_results.items():
opposite_sigs = results["signatures"]["opposite"][:5]
sig_ids = [sig["sig_id"] for sig in opposite_sigs]
pert_ids = []
for sig_id in sig_ids:
try:
signature = get_signature_by_id(sig_id)
pert_ids.append(signature["pert_id"])
except:
print(f'L1000FWD API failed for cluster {cluster}, sig_id {sig_id}, continuing')
df_cluster_drugs = df_drugs[df_drugs["pert_id"].isin(pert_ids)].copy()
df_cluster_drugs["cluster"] = cluster
df_cluster_drugs = df_cluster_drugs[["cluster", *list(filter(lambda x: x!="cluster", df_cluster_drugs.columns))]]
drug_results[cluster] = df_cluster_drugs
df_all_drugs = pd.concat(drug_results).reset_index()
if do_l1000:
figure_legend("Table 13", "Drugs used to produce most opposite signatures for each cluster", "Each entry is a drug/chemical used for perturbation in the L1000 experiments that resulted in a gene-expression signature most opposite to that of the specified cluster.")
df_clickable = df_all_drugs.copy()
df_clickable['pert_url'] = df_clickable["pert_url"].apply(make_clickable)
table_html = df_clickable.to_html(escape=False)
display(HTML(f'<div style="max-height: 250px; overflow-y: auto; margin-bottom: 25px;">{table_html}</div>'))
download_button(df_all_drugs.to_csv(), 'Download L1000FWD drug results', 'L1000FWD drugs.csv')
###Output
_____no_output_____ |
docs/tut/3-Renderers/3.1-Introduction-to-QRenderers.ipynb | ###Markdown
Introduction to QRenderers
For convenience, let's begin by enabling [automatic reloading of modules](https://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html?highlight=autoreload) when they change.
###Code
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
Import Qiskit Metal
###Code
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, Headings
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
from qiskit_metal.qlibrary.qubits.transmon_cross import TransmonCross
from qiskit_metal.renderers.renderer_gds.gds_renderer import QGDSRenderer
Headings.h1('The default_options in a QComponent are different than the default_options in QRenderers.')
###Output
_____no_output_____
###Markdown

###Code
TransmonPocket.default_options
QGDSRenderer.default_options
###Output
_____no_output_____
###Markdown
A renderer needs to inherit from QRenderer
For example, QGDSRenderer inherits from QRenderer. When any QRenderer is registered within QDesign, the QRenderer instance has options, which hold the latest set of values for default_options. The GUI can also update these options. An example of updating options is further below in this notebook.
A user can customize things in two ways:
1. Directly update the options that originated from default_options, for either a QComponent or a QRenderer.
2. Pass options to a QComponent, which will be placed in a QGeometry table and then used by the QRenderer.
How do options from a QRenderer get placed within the QGeometry table?
We set this up so that older QComponents can be agnostic of newer QRenderers. The "rate-limiting factor" is to have the QComponent denote in its metadata which QGeometry tables it will write to. For this example, we will discuss the "junction" table. More details are in the notebook at "tutorials/4 Plugin Developer". If the QComponent identifies the table which it is aware of, and if QGDSRenderer wants to add a column to that table with a default value, then the QComponent will pass the option from QGDSRenderer to the QGeometry table without doing anything with it.
An example of this below is `gds_cell_name='FakeJunction_01'`. This is passed through to QGeometry when a QComponent is instantiated. The QGDSRenderer has a default, which is not editable at run time, but it can be customized when a QComponent is instantiated.
###Code
Headings.h1('How does a QRenderer get registered within QDesign?')
###Output
_____no_output_____
###Markdown
By default, QRenderers are registered within QDesign during QDesign's init
The list of QRenderers which will be registered is in qiskit_metal.config.py; the dictionary `renderers_to_load` has the name of the QRenderer as the key, and the class name and path as values.
Presently, the GDS and Ansys QRenderers are registered during init.
###Code
design = designs.DesignPlanar()
# Use GDS QRenderer for remaining examples. Can do similar things with Ansys QRenderer.
#an_ansys = design._renderers['ansys']
#an_ansys = design._renderers.ansys
#a_gds = design._renderers['gds']
a_gds = design._renderers.gds
gui = MetalGUI(design)
design.overwrite_enabled = True
Headings.h1('Populate QDesign to demonstrate exporting to GDS format.')
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
# Allow running the same cell here multiple times to overwrite changes
design.overwrite_enabled = True
## Custom options for all the transmons
options = dict(
# Some options we want to modify from the defaults
# (see below for defaults)
pad_width = '425 um',
pad_gap = '80 um',
pocket_height = '650um',
# Adding 4 connectors (see below for defaults)
connection_pads=dict(
a = dict(loc_W=+1,loc_H=+1),
b = dict(loc_W=-1,loc_H=+1, pad_height='30um'),
c = dict(loc_W=+1,loc_H=-1, pad_width='200um'),
d = dict(loc_W=-1,loc_H=-1, pad_height='50um')
)
)
###Output
_____no_output_____
###Markdown
Note: The cell name denoted by "gds_cell_name" will be the cell selected from design.renderers.gds.options['path_filename'] when design.renderers.gds.export_to_gds() is executed.
###Code
## Create 4 TransmonPockets
q1 = TransmonPocket(design, 'Q1', options = dict(
pos_x='+2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_02', **options))
q2 = TransmonPocket(design, 'Q2', options = dict(
pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', gds_cell_name='FakeJunction_02', **options))
q3 = TransmonPocket(design, 'Q3', options = dict(
pos_x='-2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_01',**options))
q4 = TransmonPocket(design, 'Q4', options = dict(
pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', gds_cell_name='my_other_junction', **options))
## Rebuild the design
gui.rebuild()
gui.autoscale()
#Connect using techniques explained earlier notebooks.
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander
RouteMeander.get_template_options(design)
options = Dict(
meander=Dict(
lead_start='0.1mm',
lead_end='0.1mm',
asymmetry='0 um')
)
def connect(component_name: str, component1: str, pin1: str, component2: str, pin2: str,
length: str, asymmetry='0 um', flip=False, fillet='50um'):
"""Connect two pins with a CPW."""
myoptions = Dict(
fillet=fillet,
pin_inputs=Dict(
start_pin=Dict(
component=component1,
pin=pin1),
end_pin=Dict(
component=component2,
pin=pin2)),
lead=Dict(
start_straight='0.13mm',
end_straight='0.13mm'
),
total_length=length)
myoptions.update(options)
myoptions.meander.asymmetry = asymmetry
myoptions.meander.lead_direction_inverted = 'true' if flip else 'false'
return RouteMeander(design, component_name, myoptions)
asym = 90
cpw1 = connect('cpw1', 'Q1', 'd', 'Q2', 'c', '5.7 mm', f'+{asym}um', fillet='25um')
cpw2 = connect('cpw2', 'Q3', 'c', 'Q2', 'a', '5.4 mm', f'-{asym}um', flip=True, fillet='100um')
cpw3 = connect('cpw3', 'Q3', 'a', 'Q4', 'b', '5.3 mm', f'+{asym}um', fillet='75um')
cpw4 = connect('cpw4', 'Q1', 'b', 'Q4', 'd', '5.5 mm', f'-{asym}um', flip=True)
gui.rebuild()
gui.autoscale()
gui.screenshot()
Headings.h1('Exporting a GDS file.')
#QDesign enables GDS renderer during init.
a_gds = design.renderers.gds
# An alternate way to invoke the gds commands without using a_gds:
# design.renderers.gds.export_to_gds()
#Show the options for GDS
a_gds.options
###Output
_____no_output_____
###Markdown
To make the junction table work correctly, the GDS Renderer needs the correct path to the gds file which holds the cells
Each cell is a junction to be placed in a Transmon. A sample gds file is provided in the directory `qiskit_metal/tutorials/resources`.
It contains three cells, named "FakeJunction_01", "FakeJunction_02", and "my_other_junction".
The default name used by the GDS Renderer is "my_other_junction". If you want to customize and select a junction through the options, you can pass it when a qcomponent is being added to QDesign. This allows an already prepared e-beam pattern for a given junction structure to be automatically imported and placed at the correct location.
###Code
a_gds.options['path_filename'] = '../resources/Fake_Junctions.GDS'
###Output
_____no_output_____
###Markdown
Do you want the GDS Renderer to fix any short segments in your QDesign when using fillet?
###Code
#If you have a fillet_value and there are LineSegments that are shorter than 2*fillet_value,
#When true, the short segments will not be fillet'd.
a_gds.options['short_segments_to_not_fillet'] = 'True'
scale_fillet = 2.0
a_gds.options['check_short_segments_by_scaling_fillet'] = scale_fillet
# Export GDS file for all components in design.
#def export_to_gds(self, file_name: str, highlight_qcomponents: list = []) -> int:
# Please change the path where you want to write a GDS file.
#Examples below.
#a_gds.export_to_gds("../../../gds-files/GDS QRenderer Notebook.gds")
a_gds.export_to_gds('GDS QRenderer Notebook.gds')
# Export a GDS file which contains only few components.
# You will probably want to put the exported file in a specific directory.
# Please give the full path for output.
a_gds.export_to_gds("four_qcomponents.gds",
highlight_qcomponents=['cpw1', 'cpw4', 'Q1', 'Q3'])
###Output
_____no_output_____
###Markdown
How to "execute" exporting an QRenderer from GUI vs notebook?Within the GUI, there are icons: GDS, HFSS and Q3D.Example for GDS:Select the components that you want to export from QGeometry Tables. Select the path/file_name and the same thing should happen as the cells above.
###Code
Headings.h1('QUESTION: Where is the geometry of a QComponent placed?')
###Output
_____no_output_____
###Markdown
Answer: QGeometry tables! What is QGeometry? All QRenderers use the QGeometry tables to export from QDesign. Each table is a Pandas DataFrame. We can get all the QGeometry of a QComponent. There are several kinds, such as `path`, `poly`, and `junction`.
###Code
#Many ways to view the QGeometry tables.
#If you want to view, uncomment below lines and and run it.
#design.qgeometry.tables
#design.qgeometry.tables['path']
#design.qgeometry.tables['poly']
design.qgeometry.tables['junction']
###Output
_____no_output_____
###Markdown
Let us look at all the polygons used to create qubit `q1`
The poly table holds the polygons identified from QComponents.
###Code
q1.qgeometry_table('poly')
###Output
_____no_output_____
###Markdown
Paths are lines. These can have a width.
###Code
q1.qgeometry_table('path')
###Output
_____no_output_____
###Markdown
The junction table is handled differently by each QRenderer. What does GDS do with the "junction" table?
This is better explained in the folder 5 All QRenderers/5.2 GDS/GDS QRenderer notebook.
###Code
q1.qgeometry_table('junction')
###Output
_____no_output_____
###Markdown
Geometric boundary of a QComponent?
Return the bounding box of the geometry, for example: `q1.qgeometry_bounds()`. The function returns a tuple containing the (minx, miny, maxx, maxy) bound values for the bounds of the component as a whole.
###Code
for name, qcomponent in design.components.items():
print(f"{name:10s} : {qcomponent.qgeometry_bounds()}")
###Output
Q1 : [ 2.125 -0.325 2.975 0.325]
Q2 : [-0.325 -1.325 0.325 -0.475]
Q3 : [-2.975 -0.325 -2.125 0.325]
Q4 : [-0.325 0.475 0.325 1.325]
cpw1 : [ 0.22 -0.54399198 2.125 -0.07600802]
cpw2 : [-2.125 -0.55810289 -0.22 -0.06189711]
cpw3 : [-2.125 0.07552405 -0.22 0.54447595]
cpw4 : [0.22 0.07576603 2.125 0.54423397]
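###Markdown
As a small aside (a sketch, not part of the original tutorial), the per-component bounds printed above can be combined into a single overall extent for the design:
###Code
import numpy as np

# Stack each component's (minx, miny, maxx, maxy) bounds, then take element-wise
# extremes to get one bounding box that encloses every component in the design.
all_bounds = np.array([qc.qgeometry_bounds() for qc in design.components.values()])
overall = [*all_bounds[:, :2].min(axis=0), *all_bounds[:, 2:].max(axis=0)]
print(f"Overall extent (minx, miny, maxx, maxy): {overall}")
###Output
_____no_output_____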
###Markdown
Qiskit Metal Version
###Code
metal.about();
# gui.main_window.close()
###Output
_____no_output_____ |
04 - Python tools for data analysis.ipynb | ###Markdown
[(précédent)](03%20-%20-%20-%20Python%20-%20intermediate.ipynb) | [(index)](00%20-%20-%20-%20Introduction%20-%20to%20-%20Python.ipynb) | [(suivant)](05%20-%20-%20-%20Introduction%20-%20to%20-%20data%20-%20as%20-%20a%20-%20science.ipynb) Outils Python pour l'analyse de données Objectifs d'apprentissages : Identifier les concepts de raisonnement éthique qui peuvent influence nos analyses et les résultats des données. Apprendre et mettre en application les méthodes de base des bibliothèques essentielles à l'analyse de données, Numpy, Pandas et Matplotlib. Les données sont devenues le langage le plus important de notre époque - elles servent de source à l'intelligence des machine automatisées, à l'analyse prédictive des diagnostiques médicaux. La diminution rapide des coûts et la facilité d'accès aux besoins de base de ces systèmes - données, logiciels, calcul distribué, capteurs - conduisent à l'adoption et la croissance de la prise de décision bâtie sur des données. Comme il devient de plus en plus simple de collecter des données au sujet des individus et des systèmes, différents types de professionnels - qui n'ont jamais été formé sur ces besoins - se débattent avec des capacités insuffisantes pour l'analyse et la gestion de donées, ainsi que les risques éthiques émergeant de la possession et des conséquences de ces données et outils.Avant d'entamer la formation technique, quelques considérations éthiques sur l'utilisation des données. ÉthiqueLes ordinateurs ne prennent pas de décision. Ils fournissent des résultats dépendant strictement des données fournies en entrée et des algorithmes appliqués pour les analyser. L'assistance que les ordinateurs peuvent fournir pour la prise de décision ne remplace pas la responsibilité humaine.Les données et de les algorithmes utilisées pour justifier des décisions doivent pouvoir être passés en revue. "L'ordinateur a dit « non »" n'est pas une justification acceptable.Nos actions en tant que *data scientists* sont destinées à convaincre d'agir ou de penser autrement, sur la simple base de la crédibillité de notre analyse, et informé par les données.Les processus par lesquels nous examinons et expliquons ce que nous considérons comme des comportements humains bons ou mauvais font partie de l'éthique.Cas d'usage: _Polygal_ est un gel fait à base de betterave et de pectine de pommes ; utilisé sur un patient sévèrement blessé, il était supposé réduire les saignements. Pour vérifier cette hypothèse, [Sigmund Rascher](https://en.wikipedia.org/wiki/Sigmund_Rascher) l'utilisa sur des humains blessés par balle ou amputés sans anesthésie.Pendant la deuxième Guerre Mondiale, sous la direction d'officiers supérieurs Nazis, de très violentes expériences médicales furent pratiquées sur des prisonniers de guerre et des civils considérés comme sous-humains par le régime nazi. Après la guerre, 20 médecins furent jugés pour crimes de guerre et crimes contre l'humanité lors du [procès des médécins](https://en.wikipedia.org/wiki/Doctors%27_trial) tenu à Nuremberg de 1946 à 1949.En 1947 Kurt Blome - le chef adjoint de la santé du 3ème Reich, un scientifique nazi de haut rang - fut acquitté de crimes de guerre sur intervention musclée des États-Unis. 
Dans les deux mois suivants, l'armée américaine le debriefa pour apprendre tout ce qu'il savait sur les armes biologiques.Pensez vous que les État-Unis ont eu raison ou tort d'offrir à Blome l'immunité en échange de ses connaissances ?Nombre des expériences menées par les Nazis soulèvent des dilemmes éthiques, par exemple : l'immersion de prisonniers dans de l'eau glaciale pour observer le résultat et tester des techniques de sauvetage d'hypothermie ; des expérences de pression de haute altitude et de décompression ; des tests de sulfanilamide pour le traitement de gangrènes et autres infections bactériennes.Pensez vous qu'il serait acceptable ou pas d'utiliser ces données dans vos travaux de recherche et d'analyse ? Pour en savoir plus.L'éthique fournit un cadre théorique pour décrire le monde non pas tel qu'il est, mais tel qu'il *devrait* ou *ne devrait pas* être. Cela permet de décrire un résultat idéal, et de consider tout ce que l'on connaît ou ne connaît pas qui puisse empêcher ou détourner le résultat désiré.impede or confound our desired result.Cas d'usage: Un homme nigérien voyage aux États-Unis. Après l'une des sessions, il va aux toilettes pour se laver les mains. Le distributeur automatique de savon ne reconnaît pas ses mains sous le détecteur. Un américan blanc le voit perdu et place ses propres mains sous l'appreil. Celui distribue due savon. Le Nigérien essaye à nouveau, mais l'appareil l'ignore à nouveau.Comment une telle chose peut se produire ? Y-a-t'il des considérations éthiques à l'affaire ?Pour en savoir plus.Lorsque l'on considère des résultats éthiques, on utilise les termes *bon* ou *mauvais* pour décrire les jugements sur les gens ou les choses, et *juste* ou *injuste* pour décrire le résultats d'actions spécifiques. Il faut noter cependant que, si juste ou injuste est parfois évident, on se retrouve souvent face à des dilemmes éthiques.Pour déterminer si une action est juste ou non, il faut confronter la tension entre l'intention de l'action et ses conséquences. Est-ce que seules les intentions comptent ? Ou devrions nous seulement considérer les effets ? Et jusqu'à quelle limite doit on pousser les jugements de l'enchaînement suivant : une motivation juste_ menant à une intention _juste_, pour effectuer une ation _juste_ menant à uniquement de _bonnes_ conséquences. Comment évaluer cela face aux faits impossibles à déterminer au moment de la prise de décision ?Il convient aussi de considérer les intérêts en compétition pour ce qui consitue un effet positive ou négatif. A effedt positif pour l'individu prenant la décision peut se révéler une mauvaise décision pour bien d'autres. 
Inversement, une personne altruiste peut agir uniquement au bénéfice des autres à son propre détrimentLes problèmes éthiques n'exigent pas systétmatiquement une analyse des faits pour justifier une décision donnée, mais ils ont un certain nombre de caractéristiques : * _ public_ : le processus par lequel un choix éthique est effectué est connu de tous les participants ; * _ informel_ : le processus ne peut pas toujours être codifié en terme de loi ou de système légal ; * _rationnel_ : malgré l'informalité, la logique utilisée doit être accessible et pouvoir être défendue ; * _impartial_ : toute décision se doit de ne pas favoriser un groupe ou une personne en particulier.Plutôt que d'imposer une ensemble de règles à suivre, l'éthique fournir un cadre dans lequel on peut déterminer si nos objectifs sont conformes à nos valeurs, et si le processus par lequel nous prenons des décisions peut être validé et vérifié par d'autres personnes.Quel que soit le degré de sophistication que les machines atteindront, et à moins que nous visions à bâtir une société "de machines pour les machines", les humains seront toujours requis pour décider de quelles considérations éthiques doivent être prises en compte.Il y a des limites à ce que le travail d'analyse peut accomplir, et il est de la responsabilité des individus produisant cette analyse de s'assurer que les hypothèses, les doutes et le contexte soient documentés en même temps que les résultats. Il est particulièrement critique que chacun se sente responsible de soulever les inquiétudes qui pourraient exister autour des données source utilisées dans l'analyse, y compris si l'usage de données personnelles est légitime, ou si les données source et les algorithmes de traitement de ces données sont fiables, L'analyse de donnéesCeci fournit une brève introduction à certains des outils d'analyse de données utilisé en Python. Cela ne couvre pas les approches à suivre pour l'analyse de données, qui devra être étudié séparément.. Les tableaux NumpyL'analyse de données requiert souvent d'effectuer des opérations sur de longues listes de données. Numpy est une suite d'outils puissante permettant de travailler rapidement et facilement avec des listes de données entières. Nous référerons à ces listes de données sous le nom de *array* (tableaux) - et si vous êtes familiers avec cette branche des mathématiques, vous pouvez imager ces fonctions comme s'appliquant à des matrices.La convention veut que l'import de Numpy se fasse sous l'étiquette np : `import numpy as np`.Nous allons aussi générer de nombreuses listes de nombre flottants aléatoires pour nos exercices. Nous allons utiliser Python à cette fin avec le module `random`.
###Code
import numpy as np
import random
def generate_float_list(lwr, upr, num):
"""
Return a list of num random decimal floats ranged between lwr and upr.
Range(lwr, upr) creates a list of every integer between lwr and upr.
random.sample takes num integers from the range list, chosen randomly.
"""
int_list = random.sample(range(lwr, upr), num)
return [x/100 for x in int_list]
# Crée deux listes
height = generate_float_list(100, 220, 10)
weight = generate_float_list(5000, 20000, 10)
# Convertir en Numpy arrays
np_height = np.array(height)
np_weight = np.array(weight)
print(np_height)
print(np_weight)
###Output
[1.33 2.07 2.11 1.74 2.13 1.5 1.76 1.49 1.05 2.09]
[155.33 172.64 157.02 166.43 133.5 166.17 124.89 115.04 111.62 129.7 ]
###Markdown
Les carnets de notes Jupyter fournissent une fonction de chronométrage utile. En préfixant une ligne de code avec `%time`, vous obtiendrez le temps qu'a mis le code à tourner.
Ceci est particulièrement important pour les opérations impliquant de lourds traitements de données, pour lesquelles toute amélioration de performance peut s'avérer précieuse.
Nous pouvons maintenant effectuer des opérations directement sur l'ensemble des valeurs contenues dans les tableaux Numpy. Voici deux méthodes faciles à utiliser.
Syntaxe
- Calculs sur tous les éléments : on peut traiter les tableaux Numpy tout comme s'il s'agissait d'entiers ou de nombres flottants individuels. Pour cela, les tableaux doivent avoir la même forme (à savoir, le même nombre d'éléments). On peut par ailleurs appliquer les opérations arithmétiques classiques avec un nombre simple (elles s'appliquent alors à chaque élément).
- Filtrage : on peut filtrer rapidement les tableaux Numpy avec des opérations booléennes ; par exemple `np_array[np_array > num]` ; pour obtenir une réponse booléenne, `np_array > num`.
###Code
# Calcul l'indice de masse corporel (IMC, BMI en anglais) en fonction des tailles et poids dans nos tableaux
# Chronométrez le calcul - cela ne prendra pas longtemps
%time bmi = np_weight / np_height ** 2
print(bmi)
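# Opération élément par élément avec un scalaire (petit exemple supplémentaire) : convertir les tailles en centimètres
print(np_height * 100)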
# Un BMI > 35 indique une obésité sévère. Voyons qui est concerné dans notre échantillon
# Pour une réponse booléenne
print(bmi > 35)
# Ou pour lister seulement les valeurs du BMI au dessus de 35
print(bmi[bmi > 35])
###Output
CPU times: user 67 µs, sys: 43 µs, total: 110 µs
Wall time: 270 µs
[ 87.81163435 40.29032183 35.26874958 54.97093407 29.42537856
73.85333333 40.31831095 51.8174857 101.24263039 29.69254367]
[ True True True True False True True True True False]
[ 87.81163435 40.29032183 35.26874958 54.97093407 73.85333333
40.31831095 51.8174857 101.24263039]
###Markdown
PandasNous avons brièvement essayer Pandas lors de [notre session sur les modules](03 - Python intermediate.ipynbBuilt-in-modules).La description donnée pour Pandas était alors :_**pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**._soit_**pandas** est un paquet Python qui founrit des structures rapides, souples et expressives conçues pour rendre les opérations sur des données relationnelles ou étiquettées à la fois simple et intuitif. Il vise à être la brique fondamentale haute-niveau pour l'analyse de données concrète et pragmatique en Python. Il a par ailleurs le but plus large de devenir **le plus puissant et le plus souple outil d'analyse et de manipulation des données open-source de tous les langages de programmations**._Pandas est développé par [Wes McKinney](http://wesmckinney.com/) et est soutenu par une formidable et très active communauté de développemen. Wes préfère voir pandas écrit en minuscule.Pandas est bâti sur Numpy - ils sont donc fortement liés et intégrés. Pandas permet de manipuler des données soit sous forme de série (`Series`) (de manière semblable à Numpy mais avec des fonctionnalités supplémentaires), ou bien sous forme tabulaire avec des lignes de valeurs et des colonnes nommées (semblable à un tableau Excel).Cette forme tabulaire est appelée un `DataFrame`. Pandas fonctionne bien avec Jupyter Notebook et celui-ci permet d'afficher de manière satisfaisante - il faut pour cela s'assurer que la dernière ligne du bloc de code est le nom du `DataFrame`.Par convention, pandas est importé sous l'étiquette pd, `import pandas as pd`.Le tutoriel suivant est dérivé directement de 'pandas en 10 minutes' dans la [documentation de Pandas](https://pandas.pydata.org/pandas-docs/stable/10min.html). À noter que ceci ne reproduit l'intégralité du tutoriel qui peut être complété sur le site originel. Création d'objetsPour créer une `Series`, passez une liste de valeurs et laissez pandas créer un index d'entiers par défaut.
###Code
import pandas as pd
import numpy as np
s = pd.Series([1,3,5,np.nan,6,8])
s
###Output
_____no_output_____
###Markdown
Notez que `np.nan` est la manière dont Numpy représente la valeur "pas-un-nombre" (nan pour "not a number"). Par exemple, `0/0` appliqué à un tableau Numpy donne `np.nan` au lieu de lever une erreur. Cela permet d'effectuer des opérations complexes de manière protégée et de gérer les dégâts après coup.
Créons un `DataFrame` en utilisant un tableau numpy, avec un index horodaté et des colonnes étiquetées.
###Code
# Créons une liste de dates formattées ISO (YYYYMMDD)
dates = pd.date_range('20130101', periods=6)
dates
# Créons un DataFrame qui utilise cette liste de données comme index
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
df
###Output
_____no_output_____
###Markdown
Il est aussi possible d'ajouter des données textuelles et numériques avec un index généré automatiquement.
###Code
dict = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
"capital": ["Brasilia", "Moscow", "New Dehli", "Beijing", "Pretoria"],
"area": [8.516, 17.10, 3.286, 9.597, 1.221],
"population": [200.4, 143.5, 1252, 1357, 52.98] }
brics = pd.DataFrame(dict)
brics
###Output
_____no_output_____
###Markdown
Les nombres à gauche du tableau forment l'index. Celui-ci permet de faire référence à une ligne donnée. Il est cependant possible d'utiliser votre propre index avec pandas. On pourrait utiliser l'une des colonnes existantes comme index (du moment qu'elle ne contient que des valeurs uniques) ou ajouter un index spécialisé.
###Code
# Utilisons le code pays ISO à deux lettres comme index
brics.index = ["BR", "RU", "IN", "CH", "SA"]
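# Autre possibilité (esquisse) : utiliser une colonne existante aux valeurs uniques comme index
brics.set_index("country").head(2)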
brics
###Output
_____no_output_____
###Markdown
Visualiser les données
Pandas fonctionne avec des jeux de données extrêmement larges, y compris plusieurs millions de lignes. Afficher de telles données demande beaucoup d'espace et, pour avoir juste un aperçu de ces données (puisqu'en général, nous travaillerons dessus de manière symbolique), cela peut s'avérer déplaisant. Heureusement, pandas fournit un certain nombre de façons de visualiser et passer en revue les données.
Syntaxe
- Voir les lignes du haut et du bas de votre tableau (dataframe) avec `df.head()` ou `df.tail(num)`, dans lequel `num` est le nombre de lignes à afficher
- Voir l'index, les colonnes et les données numpy sous-jacentes avec `df.index`, `df.columns` et `df.values`
- Obtenir un résumé statistique de vos données avec `df.describe()`
- Transposer les données (i.e. échanger les lignes et les colonnes) avec `df.T`
- Trier suivant un axe avec `df.sort_index(axis=1, ascending=False)` où `axis=1` signifie l'axe des colonnes, et `axis=0` les lignes
- Trier par valeur avec `df.sort_values(by=column)`
###Code
# Head (tête de liste)
df.head()
# Tail (queue)
df.tail(3)
# Index
df.index
# Values (valeurs)
df.values
# Résumé statisque
df.describe()
# Transpose
df.T
# Tri suivant un axe
df.sort_index(axis=1, ascending=False)
# Tri des valeurs
df.sort_values(by="B")
###Output
_____no_output_____
###Markdown
Sélections
Une des premières étapes en analyse des données est tout simplement de filtrer les données et d'obtenir les morceaux qui nous intéressent le plus.
Pandas fournit de nombreuses approches pour restreindre rapidement les données à ce que l'on désire.
Syntaxe
- Sélectionner une seule colonne en faisant référence au dataframe comme à un dictionnaire, avec `df[column]` ou bien, si le nom de la colonne est en un seul mot, avec `df.column`. Cela renvoie une `Series`
- Sélectionner une tranche de manière similaire à la méthode utilisée pour une liste Python, avec `df[]`, par exemple `df[:3]`, ou par indice, `df["20130102":"20130104"]`
- Utiliser `.loc` pour sélectionner par étiquette, tel que :
  - Obtenir une ligne via une étiquette, par exemple `df.loc[index[0]]`
  - Obtenir une sélection sur plusieurs axes avec une étiquette avec `df.loc[:, ["A", "B"]]` où le premier `:` indique une tranche de lignes, et la deuxième liste `["A", "B"]` indique une liste de colonnes
- Comme avec Numpy, on peut obtenir une sélection à base de booléens avec par ex. `df[df.A > num]`
Il y a _beaucoup_ plus de façons de filtrer les données et d'y accéder, de même que de nombreuses méthodes pour affecter des données dans les tableaux `DataFrame`, mais ceci devrait suffire pour le moment.
###Code
# By column
df.A
# By slice
df["20130102":"20130104"]
# Cross-section
df.loc[dates[0]]
# Multi-axis
df.loc[:, ["A", "B"]]
# Boolean indexing
df[df.A > 0]
###Output
_____no_output_____
###Markdown
Matplotlib
Dans cette dernière section, nous allons faire connaissance avec _Matplotlib_, une bibliothèque puissante et largement utilisée pour les représentations graphiques en Python. Jupyter Notebook fournit une instruction "magique" pour créer des graphes directement dans un carnet de notes avec la commande `%matplotlib inline`.
Matplotlib, Numpy et Pandas forment un trio incontournable de l'analyse de données.
Gardez bien en tête que ceci est une introduction très limitée aux capacités de ces bibliothèques.
###Code
import matplotlib.pyplot as plt
# Ceci permettra que les graphes de Matplotlib s'affichent directement dans le carnet de note Jupyter
%matplotlib inline
# Produce a random timeseries
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
# Get the cumulative sum of the random numbers generated to mimic a historic data series
ts = ts.cumsum()
# And magically plot
ts.plot()
# De même avec un tableau DataFrame
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
# And plot, this time creating a figure and adding a plot and legend to it
plt.figure()
df.plot()
plt.legend(loc='best')
###Output
_____no_output_____
###Markdown
[(previous)](03%20-%20-%20-%20Python%20-%20intermediate.ipynb) | [(index)](00%20-%20-%20-%20Introduction%20-%20to%20-%20Python.ipynb) | [(next)](05%20-%20-%20-%20Introduction%20-%20to%20-%20data%20-%20as%20-%20a%20-%20science.ipynb) Python tools for data analysis Learning outcomes: Identify concepts in ethical reasoning which may influence our analysis and results from data. Learn and apply a basic set of methods from the core data analysis libraries of Numpy, Pandas and Matplotlib. Data has become the most important language of our era, informing everything from intelligence in automated machines, to predictive analytics in medical diagnostics. The plunging cost and easy accessibility of the raw requirements for such systems - data, software, distributed computing, and sensors - are driving the adoption and growth of data-driven decision-making. As it becomes ever-easier to collect data about individuals and systems, a diverse range of professionals - who have never been trained for such requirements - grapple with inadequate analytic and data management skills, as well as the ethical risks arising from the possession and consequences of such data and tools.Before we go on with the technical training, consider the following on the ethics of the data we use. EthicsComputers cannot make decisions. Their output is an absolute function of the data provided as input, and the algorithms applied to analyse that input. The aid of computers in decision-making does not override human responsibility and accountability.It should be expected that both data and algorithms should stand up to scrutiny so as to justify any and all decisions made as a result of their output. "Computer says no," is not an unquestionable statement.Our actions - as data scientists - are intended to persuade people to act or think other than the way they currently do based on nothing more than the strength of our analysis, and informed by data.The process by which we examine and explain why what we consider to be right or wrong, is considered right or wrong in matters of human conduct, belongs to the study of ethics.Case-study: _Polygal_ was a gel made from beet and apple pectin. Administered to a severely wounded patient, it was supposed to reduce bleeding. To test this hypothesis, [Sigmund Rascher](https://en.wikipedia.org/wiki/Sigmund_Rascher) administered a tablet to human subjects who were then shot or - without anesthesia - had their limbs amputated.During the Second World War, and under the direction of senior Nazi officers, medical experiments of quite unusual violence were conducted on prisoners of war and civilians regarded by the Nazi regime as sub-human. After the war, twenty medical doctors were tried for war crimes and crimes against humanity at the [Doctor's Trial](https://en.wikipedia.org/wiki/Doctors%27_trial) held in Nuremberg from 1946 to 1949.In 1947 Kurt Blome - Deputy Reich Health Leader, a high-ranking Nazi scientist - was acquitted of war crimes on the strength of intervention by the United States. 
Within two months, he was being debriefed by the US military who wished to learn everything he knew about biological warfare.Do you feel the US was "right" or "wrong" to offer Blome immunity from prosecution in exchange for what he knew?There were numerous experiments conducted by the Nazis that raise ethical dilemmas, including: immersing prisoners in freezing water to observe the result and test hypothermia revival techniques; high altitude pressure and decompression experiments; sulfanilamide tests for treating gangrene and other bacterial infections.Do you feel it would be "right" or "wrong" to use these data in your research and analysis? Further reading.Whereas everything else we do can describe human behaviour as it _is_, ethics provides a theoretical framework to describe the world as it _should_, or _should not_ be. It gives us full range to describe an ideal outcome, and to consider all that we know and do not know which may impede or confound our desired result.Case-study: A Nigerian man travels to a conference in the United States. After one of the sessions, he goes to the bathroom to wash his hands. The electronic automated soap dispensor does not recognise his hands beneath the sensor. A white American sees his confusion and places his hands beneath the device. Soap is dispensed. The Nigerian man tries again. It still does not recognise him.How would something like this happen? Is it an ethical concern?Further reading.When we consider ethical outcomes, we use the terms _good_ or _bad_ to describe judgements about people or things, and we use _right_ or _wrong_ to refer to the outcome of specific actions. Understand, though, that - while right and wrong may sometimes be obvious - we are often stuck in ethical dilemmas.How we consider whether an action is right or wrong comes down to the tension between what was intended by an action, and what the consequences of that action were. Are only intensions important? Or should we only consider outcomes? And how absolutely do you want to judge this chain: the _right_ motivation, leading to the _right_ intention, performing the _right_ action, resulting in only _good_ consequences. How do we evaluate this against what it may be impossible to know at the time, even if that information will become available after a decision is made?We also need to consider competing interests in good and bad outcomes. A good outcome for the individual making the decision may be a bad decision for numerous others. 
Conversely, an altruistic person may act only for the benefit of others even to their own detriment.Ethical problems do not always require a call to facts to justify a particular decision, but they do have a number of characteristics: - _Public_: the process by which we arrive at an ethical choice is known to all participants; - _Informal_: the process cannot always be codified into law like a legal system; - _Rational_: despite the informality, the logic used must be accessible and defensible; - _Impartial_: any decision must not favour any group or person;Rather than imposing a specific set of rules to be obeyed, ethics provides a framework in which we may consider whether what we are setting out to achieve conforms to our values, and whether the process by which we arrive at our decision can be validated and inspected by others.No matter how sophisticated our automated machines become, unless our intention is to construct a society "of machines, for machines", people will always be needed to decide on what ethical considerations must be taken into account.There are limits to what analysis can achieve, and it is up to the individuals producing that analysis to ensure that any assumptions, doubts, and requirements are documented along with their results. Critically, it is also each individual's personal responsibility to raise any concerns with the source data used in the analysis, including whether personal data are being used legitimately, or whether the source data are at all trustworthy, as well as the algorithms used to process those data and produce a result. Data analysisThis will be a very brief introduction to some tools used in data analysis in Python. This will not provide insight into the approaches to performing analysis, which is left to self-study, or to modules elsewhere in this series. Numpy arraysData analysis often involves performing operations on large lists of data. Numpy is a powerful suite of tools permitting you to work quickly and easily with complete data lists. We refer to these lists as arrays, and - if you are familiar with the term from mathematics - you can think of these as matrix methods.By convention, we import Numpy as np; `import numpy as np`.We're also going to want to be generating a lot of lists of random floats for these exercises, and that's tedious to write. Let's get Python to do this for us using the `random` module.
###Code
import numpy as np
import random
def generate_float_list(lwr, upr, num):
"""
Return a list of num random decimal floats ranged between lwr and upr.
Range(lwr, upr) creates a list of every integer between lwr and upr.
random.sample takes num integers from the range list, chosen randomly.
"""
int_list = random.sample(range(lwr, upr), num)
return [x/100 for x in int_list]
# Create two lists
height = generate_float_list(100, 220, 10)
weight = generate_float_list(5000, 20000, 10)
# Convert these to Numpy arrays
np_height = np.array(height)
np_weight = np.array(weight)
print(np_height)
print(np_weight)
###Output
[1.54 1.75 1.43 2.03 1.51 1.59 1.19 1.72 1.13 2.09]
[ 70.08 166.31 170.51 174.34 89.29 69.13 137.76 96.66 123.97 95.73]
###Markdown
There is a useful timer function built in to Jupyter Notebook. Start any line of code with `%time` and you'll get output on how long the code took to run.
This is important when working with data-intensive operations where you want to squeeze out every drop of efficiency by optimising your code.
We can now perform operations directly on all the values in these Numpy arrays. Here are two simple methods to use.
Syntax
- Element-wise calculations: you can treat Numpy arrays as you would individual floats or integers. Note, the arrays must either have the same shape (i.e. number of elements), or you can perform element-wise operations (applied to each item in the array) with a single float or int
- Filtering: you can quickly filter Numpy arrays by performing boolean operations, e.g. `np_array[np_array > num]`, or, for a purely boolean response, `np_array > num`
###Code
# Calculate body-mass index based on the heights and weights in our arrays
# Time the calculation ... it won't be long
%time bmi = np_weight / np_height ** 2
print(bmi)
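# Element-wise operation with a single scalar (a small extra example): convert heights from metres to centimetres
print(np_height * 100)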
# Any BMI > 35 is considered severely obese. Let's see who in our sample is at risk.
# Get a boolean response
print(bmi > 35)
# Or print only BMI values above 35
print(bmi[bmi > 35])
###Output
Wall time: 0 ns
[29.54967111 54.30530612 83.38305052 42.30629231 39.16056313 27.34464618
97.28126545 32.67306652 97.08669434 21.91570706]
[False True True True True False True False True False]
[54.30530612 83.38305052 42.30629231 39.16056313 97.28126545 97.08669434]
###Markdown
Pandas
We briefly experimented with Pandas back in [Built-in modules](03 - Python intermediate.ipynb#Built-in-modules).
The description given for Pandas there was:
_**pandas** is a Python package providing fast, flexible, and expressive data structures designed to make working with "relational" or "labeled" data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python. Additionally, it has the broader goal of becoming **the most powerful and flexible open source data analysis / manipulation tool available in any language**._
Pandas is developed by [Wes McKinney](http://wesmckinney.com/) and has a marvelous and active development community. Wes prefers pandas to be written in the lower-case (I'll alternate).
Underneath Pandas is Numpy, so they are closely related and tightly integrated. Pandas allows you to manipulate data either as a `Series` (similarly to Numpy, but with added features), or in a tabular form with rows of values and named columns (similar to the way you may think of an Excel spreadsheet).
This tabular form is known as a `DataFrame`. Pandas works well with Jupyter Notebook and you can output nicely formatted dataframes (just make sure the last line of your code block is the name of the dataframe).
The convention is to import pandas as pd, `import pandas as pd`.
The following tutorial is taken directly from the '10 minutes to pandas' section of the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/10min.html). Note, this isn't the complete tutorial, and you can continue there.
Object creation
Create a `Series` by passing a list of values, and letting pandas create a default integer index.
###Code
import pandas as pd
import numpy as np
s = pd.Series([1,3,5,np.nan,6,8])
s
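# The Series above contains a missing value (np.nan); a few common ways to handle it:
s.isna()     # boolean mask of the missing entries
s.dropna()   # drop them
s.fillna(0)  # or fill them with a default value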
###Output
_____no_output_____
###Markdown
Note that `np.nan` is Numpy's default way of presenting a value as "not-a-number". For instance, dividing zero by zero returns `np.nan` (with a warning) rather than raising an error. This means you can perform complex operations relatively safely and sort out the damage afterwards.Create a DataFrame by passing a numpy array, with a datetime index and labeled columns.
###Code
# Create a date range starting at an ISO-formatted date (YYYYMMDD)
dates = pd.date_range('20130101', periods=6)
dates
# Create a dataframe using the date range we created above as the index
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
df
###Output
_____no_output_____
###Markdown
We can also mix text and numeric data with an automatically-generated index.
###Code
# Use a plain dictionary (named `data` so we don't shadow the built-in `dict`)
data = {"country": ["Brazil", "Russia", "India", "China", "South Africa"],
        "capital": ["Brasilia", "Moscow", "New Delhi", "Beijing", "Pretoria"],
        "area": [8.516, 17.10, 3.286, 9.597, 1.221],
        "population": [200.4, 143.5, 1252, 1357, 52.98] }
brics = pd.DataFrame(data)
brics
###Output
_____no_output_____
###Markdown
The numbers down the left-hand side of the table are called the index. This permits you to reference a specific row. However, Pandas permits you to set your own index, as we did where we set a date range index. You could set one of the existing columns as an index (as long as it consists of unique values) or you could set a new custom index.
###Code
# Set the ISO two-letter country codes as the index
brics.index = ["BR", "RU", "IN", "CH", "SA"]
brics
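# With the custom index in place, rows can be looked up by label, e.g. India:
brics.loc["IN"]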
###Output
_____no_output_____
###Markdown
Viewing dataPandas can work with exceptionally large datasets, including millions of rows. Presenting that takes up space and, if you only want to see what your data looks like (since, most of the time, you can work with it symbolically), then that can be painful. Fortunately, pandas comes with a number of ways of viewing and reviewing your data. Syntax See the top and bottom rows of your dataframe with `df.head()` or `df.tail(num)` where `num` is an integer number of rows See the index, columns and underlying numpy data with `df.index`, `df.columns` and `df.values` Get a quick statistical summary of your data with `df.describe()` Transpose your data with `df.T` Sort by an axis with `df.sort_index(axis=1, ascending=False)` where `axis=1` refers to columns, and `axis=0` refers to rows Sort by values with `df.sort_values(by=column)`
###Code
# Head
df.head()
# Tail
df.tail(3)
# Index
df.index
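# Columns
df.columns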
# Values
df.values
# Statistical summary
df.describe()
# Transpose
df.T
# Sort by an axis
df.sort_index(axis=1, ascending=False)
# Sort by values
df.sort_values(by="B")
###Output
_____no_output_____
###Markdown
SelectionsOne of the first steps in data analysis is simply to filter your data and get at slices you're most interested in. Pandas has numerous approaches to quickly get only what you want. Syntax Select a single column by addressing the dataframe as you would a dictionary, with `df[column]` or, if the column name is a single word, with `df.column`. This returns a series Select a slice in the way you would a Python list, with `df[]`, e.g. `df[:3]`, or by slicing the indices, `df["20130102":"20130104"]` Use `.loc` to select by specific labels, such as: Get a cross-section based on a label, with e.g. `df.loc[dates[0]]` Get a multi-axis selection by label, with `df.loc[:, ["A", "B"]]` where the first `:` indicates the slice of rows, and the second list `["A", "B"]` indicates the list of columns As you would with Numpy, you can get a boolean-based selection, with e.g. `df[df.A > num]` There are a _lot_ more ways to filter and access data, as well as methods to set data in your dataframes, but this will be enough for now.
###Code
# By column
df.A
# By slice
df["20130102":"20130104"]
# Cross-section
df.loc[dates[0]]
# Multi-axis
df.loc[:, ["A", "B"]]
# Boolean indexing
df[df.A > 0]
###Output
_____no_output_____
###Markdown
MatplotlibIn this last section, you get to meet _Matplotlib_, a fairly ubiquitous and powerful Python plotting library. Jupyter Notebook has some "magic" we can use in the line `%matplotlib inline` which permits us to draw charts directly in this notebook.Matplotlib, Numpy and Pandas form the three most important and ubiquitous tools in data analysis.Note that this is the merest sliver of an introduction to what you can do with these libraries.
###Code
import matplotlib.pyplot as plt
# This bit of magic code will allow your Matplotlib plots to be shown directly in your Jupyter Notebook.
%matplotlib inline
# Produce a random timeseries
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
# Get the cumulative sum of the random numbers generated to mimic a historic data series
ts = ts.cumsum()
# And magically plot
ts.plot()
# And do the same thing with a dataframe
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
# And plot, this time creating a figure and adding a plot and legend to it
plt.figure()
df.plot()
plt.legend(loc='best')
###Output
_____no_output_____ |
docs/ipynb/field-read-write.ipynb | ###Markdown
Reading and writing fieldsThere are two main file formats to which a `discretisedfield.Field` object can be saved:- [VTK](https://vtk.org/) for visualisation using e.g., [ParaView](https://www.paraview.org/) or [Mayavi](https://docs.enthought.com/mayavi/mayavi/)- OOMMF [Vector Field File Format (OVF)](https://math.nist.gov/oommf/doc/userguide12a5/userguide/Vector_Field_File_Format_OV.html) for exchanging fields with micromagnetic simulators.Let us say we have a nanosphere sample:$$x^2 + y^2 + z^2 \le r^2$$with $r=5\,\text{nm}$. The space is discretised into cells with dimensions $(0.5\,\text{nm}, 0.5\,\text{nm}, 0.5\,\text{nm})$. The value of the field at the point $(x, y, z)$ is $(-cy, cx, cz)$, with $c=10^{9}$. The norm of the field inside the sphere is $10^{6}$.Let us first build that field.
###Code
import discretisedfield as df
r = 5e-9
cell = (0.5e-9, 0.5e-9, 0.5e-9)
mesh = df.Mesh(p1=(-r, -r, -r), p2=(r, r, r), cell=cell)
def norm_fun(pos):
x, y, z = pos
if x**2 + y**2 + z**2 <= r**2:
return 1e6
else:
return 0
def value_fun(pos):
x, y, z = pos
c = 1e9
return (-c*y, c*x, c*z)
field = df.Field(mesh, dim=3, value=value_fun, norm=norm_fun)
###Output
_____no_output_____
###Markdown
Let us have a quick view of the field we created
###Code
# NBVAL_IGNORE_OUTPUT
field.plane('z').k3d_vector(color_field=field.z)
###Output
_____no_output_____
###Markdown
Writing the field to a fileThe main method for saving a field to different file formats is `discretisedfield.Field.write()`. It takes `filename` as an argument, which is a string with one of the following extensions:- `'.vtk'` for saving in the VTK format- `'.ovf'`, `'.omf'`, `'.ohf'` for saving in the OVF formatLet us first save the field in a VTK file.
###Code
vtkfilename = 'my_vtk_file.vtk'
field.write(vtkfilename)
###Output
_____no_output_____
###Markdown
We can check if the file was saved in the current directory.
###Code
import os
os.path.isfile(f'./{vtkfilename}')
###Output
_____no_output_____
###Markdown
Now, we can delete the file:
###Code
os.remove(f'./{vtkfilename}')
###Output
_____no_output_____
###Markdown
Next, we can save the field in the OVF format and check whether it was created in the current directory.
###Code
omffilename = 'my_omf_file.omf'
field.write(omffilename)
os.path.isfile(f'./{omffilename}')
###Output
_____no_output_____
###Markdown
There are three possible representations of an OVF file: one ASCII (`txt`) and two binary (`bin4` or `bin8`). The ASCII `txt` representation is the default when `discretisedfield.Field.write()` is called. If a different representation is required, it can be passed via the `representation` argument.
###Code
field.write(omffilename, representation='bin8')
os.path.isfile(f'./{omffilename}')
###Output
_____no_output_____
###Markdown
Reading the OVF fileThe method for reading OVF files is a class method `discretisedfield.Field.fromfile()`. By passing a `filename` argument, it reads the file and creates a `discretisedfield.Field` object. It is not required to pass the representation of the OVF file to the `discretisedfield.Field.fromfile()` method, because it can retrieve it from the content of the file.
###Code
read_field = df.Field.fromfile(omffilename)
###Output
_____no_output_____
###Markdown
As previously, we can quickly visualise the field
###Code
# NBVAL_IGNORE_OUTPUT
read_field.plane('z').k3d_vector(color_field=read_field.z)
###Output
_____no_output_____
###Markdown
Finally, we can delete the OVF file we created.
###Code
os.remove(f'./{omffilename}')
###Output
_____no_output_____ |
analysis/ool2020/block_silhouette_action_sequence_exploration.ipynb | ###Markdown
ActionsPlacement of a block: only certain placements are possible at any one time, i.e. those just above the current tower (including in holes).Consider the set of actions as the possible block placements *inside the silhouette* plus an error action: placing a block anywhere outside the silhouette.
###Code
blocks = np.array([[1,2],[2,1],[2,2],[2,4],[4,2]]) # block dimensions
locations = np.arange(0,8) # locations to place blocks- inside 8x8 grid only
###Output
_____no_output_____
###Markdown
Maps of silhouette
###Code
target_dir = os.path.join(stim_dir,'hand_selected_e2_subset')
## load in list of structures
file_list = os.listdir(target_dir)
file_list = [x for x in file_list if not (x.startswith('.'))]
print('Loaded {} structures.'.format(len(file_list)))
target_blocks_dict = {}
## loop through list of good sequences, and build list of versions, each containing a trial list
Meta = [] ## initialize list of all trial lists
for i,f in enumerate(file_list):
target = pd.read_json(os.path.join(target_dir,f)) ## stim list
targetName = f.split('.')[0]
targetBlocks = list(target['blocks'].values)
target_blocks_dict[targetName] = targetBlocks
stimWidth = 8
worldWidth = 18
worldHeight = 13
target_maps = {}
for i, (target_name, block_list) in enumerate(target_blocks_dict.items()):
#block_list = target_blocks_dict['hand_selected_009']
target_map = np.zeros([worldWidth,worldHeight])
for block in block_list:
# add block to map
width = block['width']
height = block['height']
blockLeft = int((worldWidth-stimWidth)/2) + block['x']
blockBottom = block['y']
blockTop = blockBottom + height
blockRight = blockLeft + width
#console.log('width', width);
#console.log('height', height);
#console.log('blockLeft', blockLeft);
#console.log('blockBottom', blockBottom);
for y in range(blockBottom,blockTop):
for x in range(blockLeft, blockRight):
target_map[x][y] = 1
target_maps[target_name] = target_map
for i, (target_name, target_map) in enumerate(target_maps.items()):
silhouette = 1*np.logical_not(target_map)
silhouette = np.rot90(silhouette)
fig, ax = plt.subplots()
ax.axis('off')
ax.imshow(silhouette)
###Output
_____no_output_____
###Markdown
Enumerate actions within silhouette
###Code
silhouette = 1*np.logical_not(target_maps['hand_selected_012'])
silhouette = np.rot90(silhouette)
fig, ax = plt.subplots()
ax.axis('off')
ax.imshow(silhouette)
tm = target_maps['hand_selected_012']
tm[5:12,0:8] # Slice only the silhouette
###Output
_____no_output_____
###Markdown
For a current world state: find the set of viable actions; iterate through layers (keeping track of floors?); for each block find viable locations, or for each location find viable blocks. Some of these paths will lead to dead ends (a sketch of such an enumeration follows below). Actual distributions of human behavior: look at the sequences of actions people actually take and see how close they are. Do they end up closer (by rep 4)? Similarity: closer if the same blocks end up in the same place, and if the same blocks are placed at a similar time.
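A minimal sketch of such an enumeration (an assumption for illustration, not code from this project): `world` and `silhouette` are hypothetical 2D arrays indexed `[x][y]` in the same way as `target_map` above, `blocks` is the (width, height) list defined earlier, and placements are gravity-dropped, so positions tucked into holes beneath overhangs are ignored here.

```python
import numpy as np

def viable_placements(world, silhouette, blocks):
    """Enumerate (w, h, x, y) placements that drop onto the current tower,
    stay inside the grid, and lie entirely inside the silhouette."""
    n_cols, n_rows = world.shape
    actions = []
    for w, h in blocks:
        for x in range(n_cols - w + 1):
            # resting height: one cell above the highest filled cell under the block
            heights = []
            for i in range(w):
                col = world[x + i]
                heights.append(int(np.nonzero(col)[0].max()) + 1 if col.any() else 0)
            y = max(heights)
            if y + h > n_rows:
                continue
            if silhouette[x:x + w, y:y + h].all():
                actions.append((w, h, x, y))
    return actions
```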
###Code
dfi['usableDiscreteWorld'] = dfi['discreteWorld'].apply(lambda a: 1+(-1)*np.array(ast.literal_eval(a)))
dfi['flatDiscreteWorld'] = dfi['discreteWorld'].apply(lambda a: (1+(-1)*np.array(ast.literal_eval(a))).flatten())
# We have sequences of world states, of length n where n is the number of blocks placed
# Each layer is a block placement
dfi['flatDiscreteWorld'][0]
###Output
_____no_output_____
###Markdown
Vector representing where blocks have been placed. Want to look at sequences of these. If people complete the silhouette, these vectors will be the same. Differing numbers of blocks mean the lists of these vectors will be different lengths. Discrete World State
###Code
# get world state EVERY SECOND
# i.e. the flatDiscreteWorld for the highest time preceding i
# for every participant, for every structure
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
n_targets = df['targetName'].nunique()
n_ppts = df['gameID'].nunique()
m = np.zeros((n_targets,n_ppts,61,13*18)) #number of seconds, number of squares in grid
for t, t_name in enumerate(targets):
for p, p_id in enumerate(ppts):
subset = dfi[(dfi.targetName==t_name) & (dfi.gameID==p_id) & (dfi.phase=='pre')]
prev_world = np.zeros(13*18)
i = 0
for index, row in subset.iterrows():
while ((i*1000 < row['relativePlacementTime']) & ((i*1000)<60000)):
m[t,p,i,:] = prev_world
i += 1
prev_world = row['flatDiscreteWorld']
while (i < 60):
m[t,p,i,:] = prev_world
i += 1
# get world state EVERY 10 SECONDS
# i.e. the flatDiscreteWorld for the highest time preceding i
# PRE PHASE, for every participant, for every structure
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
n_targets = df['targetName'].nunique()
n_ppts = df['gameID'].nunique()
m = np.zeros((n_targets,n_ppts,6,13*18)) #number of seconds, number of squares in grid
for t, t_name in enumerate(targets):
for p, p_id in enumerate(ppts):
subset = dfi[(dfi.targetName==t_name) & (dfi.gameID==p_id) & (dfi.phase=='pre')]
prev_world = np.zeros(13*18)
i = 0
for index, row in subset.iterrows():
while ((i*10000 < row['relativePlacementTime']) & ((i*10000)<60000)):
m[t,p,i,:] = prev_world
i += 1
prev_world = row['flatDiscreteWorld']
while (i < 6):
m[t,p,i,:] = prev_world
i += 1
# get world state EVERY 10 SECONDS
# i.e. the flatDiscreteWorld for the highest time preceding i
# POST PHASE, for every participant, for every structure
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
n_targets = df['targetName'].nunique()
n_ppts = df['gameID'].nunique()
m2 = np.zeros((n_targets,n_ppts,6,13*18)) #number of seconds, number of squares in grid
for t, t_name in enumerate(targets):
for p, p_id in enumerate(ppts):
subset = dfi[(dfi.targetName==t_name) & (dfi.gameID==p_id) & (dfi.phase=='post')]
prev_world = np.zeros(13*18)
i = 0
for index, row in subset.iterrows():
while ((i*10000 < row['relativePlacementTime']) & ((i*10000)<60000)):
m2[t,p,i,:] = prev_world
i += 1
prev_world = row['flatDiscreteWorld']
while (i < 6):
m2[t,p,i,:] = prev_world
i += 1
# (targets, participants, seconds, squares)
# for one target, get all participants
mat = m[3,:,50,:]
###Output
_____no_output_____
###Markdown
Explore matrix creation (compare ppt world state for a given structure over time)
###Code
# Create rdm for a given structure and time step (all ppts, one phase)
mat = m[3,:,1,:]
n_rows = mat.shape[0]
rdm = np.zeros((n_rows,n_rows))
for i in range (0, n_rows): # row dim
for j in range (0, n_rows): # row dim
rdm[i,j] = distance.euclidean(mat[i,:],mat[j,:])
img = plt.matshow(rdm)
img.set_cmap('hot')
plt.axis('off')
plt.savefig("test.png", bbox_inches='tight')
plt.colorbar()
# Use biclustering to arrange rows and columns.
# This seems ok, except you have to specify the amount of clusters and the initial random state seems to have quite a large effect
mat = m[1,:,2,:]
n_rows = mat.shape[0]
rdm = np.zeros((n_rows,n_rows))
for i in range (0, n_rows): # row dim
for j in range (0, n_rows): # row dim
rdm[i,j] = distance.euclidean(mat[i,:],mat[j,:])
clustering = SpectralBiclustering(n_clusters=5, random_state=0).fit(rdm) # https://scikit-learn.org/stable/auto_examples/bicluster/plot_spectral_biclustering.html
order = clustering.row_labels_
sorted_rdm = rdm[np.argsort(clustering.row_labels_)]
sorted_rdm = sorted_rdm[:, np.argsort(clustering.column_labels_)]
img1 = plt.matshow(rdm)
plt.axis('off')
plt.colorbar()
img2 = plt.matshow(sorted_rdm)
plt.axis('off')
img1.set_cmap('hot')
img2.set_cmap('hot')
plt.colorbar()
importlib.reload(scoring)
# Create and display RDMs for one structure (all ppts, one phase)
mat = m[3,:,:,:]
n_rows = mat.shape[0]
time_steps = mat.shape[1]
rdm = np.zeros((n_rows,n_rows, time_steps))
for step in range(0,time_steps):
for i in range (0, n_rows): # row dim
for j in range (0, n_rows): # row dim
rdm[i,j,step] = scoring.get_jaccard(mat[i,step,:],mat[j,step,:])
for step in range(0,time_steps):
img = plt.matshow(rdm[:,:,step])
img.set_cmap('hot')
plt.axis('off')
plt.savefig("test.png", bbox_inches='tight')
plt.colorbar()
###Output
_____no_output_____
###Markdown
Sequences of world states by action
###Code
# Spread flatDiscreteWorld over columns
dfic = dfi[dfi.condition=='repeated']
dfic = dfic[['targetName','gameID','blockNum','repetition','flatDiscreteWorld']]
# Create index names
# for i in range(0,len(dfi['flatDiscreteWorld'][0])):
# inds.append('w_' + str(i).zfill(3))
# Create columns from values in flatDiscreteWorld
world_cols = dfic.apply(lambda x: pd.Series(x['flatDiscreteWorld']), axis=1)
dfic = pd.concat([dfic, world_cols], axis=1)
# Make empty values for non-existent blockNums
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
reps = np.sort(df['repetition'].unique())
max_actions = dfic['blockNum'].max()
# create multi-index
block_placement_index = (
pd.MultiIndex
.from_product(
iterables=[targets, ppts, reps, range(1,max_actions + 1)],
names=['targetName', 'gameID','repetition','blockNum']
)
)
# set indexes, then replace with full product index, filling blank values with NaNs
world_states = dfic.set_index(['targetName', 'gameID','repetition','blockNum']).\
reindex(index=block_placement_index, fill_value=np.nan).reset_index()
# Get scores for each pair of repetitions
importlib.reload(scoring)
rep_pairs = list(combinations(reps,2))
def get_scores(x):
tmp_df = pd.DataFrame(rep_pairs,
columns=['rep_a','rep_b'])
#tmp_df = tmp_df.assign(score=lambda r: x[x.repetition==r.rep_a]['flatDiscreteWorld'])
tmp_df['score'] = tmp_df.apply(lambda r: scoring.get_jaccard(\
x[x.repetition==r.rep_a].iloc[:,5:].values,\
x[x.repetition==r.rep_b].iloc[:,5:].values),\
axis=1)
return tmp_df
scores_df = world_states.groupby(['targetName','gameID','blockNum']).apply(lambda x: get_scores(x))\
.reset_index()
rep_pairs = list(combinations(reps,2))
rep_pairs
# Add convenient names for pairs of repetitions
scores_df['reps'] = scores_df.apply(lambda row: str(int(row.rep_a)) + ' to ' + str(int(row.rep_b)), axis = 1)
scores_sample = scores_df[scores_df.reps.isin(['0 to 1','1 to 2','2 to 3'])]
g = sns.FacetGrid(scores_sample, col="targetName", col_wrap=2, height=6, hue="reps")
g.map(sns.lineplot, "blockNum", "score");
g.map(sns.scatterplot, "blockNum", "score");
plt.legend(bbox_to_anchor=(1.0,1))
# Label pairs of repetitions by amount of overlap
def same_diff_from_jaccard(j):
if np.isnan(j):
return 'no values'
elif j==1:
return 'same'
elif j==0:
return 'no overlap'
else:
return 'overlap'
v_s = np.vectorize(same_diff_from_jaccard)
scores_df['samediff'] = v_s(scores_df.score)
scores_sample = scores_df[(scores_df.targetName==targets[0]) & (scores_df.reps.isin(['0 to 1','1 to 2','2 to 3']))]
# g = sns.FacetGrid(scores_sample, col="reps", col_wrap=2, height=6, hue="samediff")
# g.map(sns.barplot, "blockNum", );
# plt.legend(bbox_to_anchor=(1.0,1))
scores_df.groupby(['targetName','blockNum','reps'])['samediff'].value_counts()
# calculate all distances
# each small multiple is a target structure
# rows and columns are repetitions
# each value is a within participant, within structure, within action index, across repetition distance
# each layer is an action
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
reps = np.sort(df['repetition'].unique())
n_targets = df['targetName'].nunique()
n_ppts = df['gameID'].nunique()
n_reps = df['repetition'].nunique()
n_grid_squares = 13*18
max_actions = df['numBlocks'].max()
dfic = dfi[['targetName','gameID','blockNum','condition','repetition','flatDiscreteWorld']]
dfic = dfic[dfic.condition=='repeated']
n_rows = n_reps
n_cols = n_reps
all_dists = np.zeros((n_targets, n_ppts, max_actions, n_rows, n_cols))
df_dists = pd.DataFrame(columns=['target', 'participant', 'block_num','rep_a','rep_b','dist'])
for t in range(0, n_targets):
for p in range(0, n_ppts):
for a in range(0, max_actions):
for rep_a in range (0, n_rows): # row dim
for rep_b in range (0, n_cols): # col dim
world_a = all_worlds[t,p,a,rep_a,:]
world_b = all_worlds[t,p,a,rep_b,:]
all_dists[t,p,a,rep_a,rep_b] = scoring.get_jaccard(world_a,world_b)
# df_dists = df_dists.append({'target': t,
# 'participant': p,
# 'block_num': a,
# 'rep_a': rep_a,
# 'rep_b': rep_b,
# 'dist':scoring.get_jaccard(world_a,world_b)}, ignore_index=True)
v_jaccard = np.vectorize(scoring.get_jaccard)
img = plt.matshow(all_dists[1,3,8,:,:])
img.set_cmap('hot')
plt.axis('off')
plt.colorbar()
for action in range(0, max_actions):
action_dists = all_dists[1,:,action,:,:]
img.set_cmap('hot')
norm = plt.Normalize(0, 1)
img = plt.matshow(np.nanmean(action_dists, axis=0), norm=norm)
plt.axis('off')
plt.colorbar()
plt.set_cmap('hot')
cmap = plt.cm.get_cmap()
cmap.set_bad(color='green')
norm = plt.Normalize(0, 1)
for action in range(0, max_actions):
img = plt.matshow(all_dists[0,5,action,:,:], cmap = cmap, norm=norm)
plt.axis('off')
plt.colorbar()
#all_dists = (n_targets, n_ppts, max_actions, n_rows, n_cols)
mean_dists_ppts = np.mean(all_dists[:,:,:,:,:], axis = 1)
mean_dists_ppts.shape
df_dists['reps'] = df_dists.apply(lambda row: str(int(row.rep_a)) + ' to ' + str(int(row.rep_b)), axis = 1)
#fig = plt.figure(figsize=(10,6))
g = sns.FacetGrid(df_dists, row="target", hue="rep_a")
g.map(sns.scatterplot, "block_num", "dist")
fig = plt.figure()
fig.set_size_inches(12, 16)
sns.set(style="ticks", rc={"lines.linewidth": 1})
g = sns.FacetGrid(df_dists, col="target", hue="reps", col_wrap=2, height=8)
g.map(sns.scatterplot, "block_num", "dist", s=15)
g.map(sns.lineplot, "block_num", "dist")
#g.map(sns.lineplot, "block_num", "dist")
df_dists.reps.unique()
fig = plt.figure()
fig.set_size_inches(12, 16)
sns.set(style="ticks", rc={"lines.linewidth": 1})
g = sns.FacetGrid(df_dists[(df_dists.target==0) & (df_dists.participant.isin(range(0,5)))], col="reps", hue="participant", col_wrap=4, height=6)
g.map(sns.scatterplot, "block_num", "dist", s=15)
g.map(sns.pointplot, "block_num", "dist")
target = 0
fig = plt.figure(figsize=(10, 20))
for t in range(0, n_targets):
plt.subplot(4,2,t+1)
plt.plot(mean_dists_ppts[t,:,0,1], label="1 to 2", color=line_cmap[0])
#plt.plot(mean_dists_ppts[t,:,0,2], label="1 to 3", color=line_cmap[1])
#plt.plot(mean_dists_ppts[t,:,0,3], label="1 to 4", color=line_cmap[2])
plt.plot(mean_dists_ppts[t,:,1,2], label="2 to 3", color=line_cmap[3])
#plt.plot(mean_dists_ppts[t,:,1,3], label="2 to 4", color=line_cmap[4])
plt.plot(mean_dists_ppts[t,:,2,3], label="3 to 4", color=line_cmap[5])
plt.legend()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
target = 0
fig = plt.figure(figsize=(10, 20))
for t in range(0, n_targets):
plt.subplot(4,2,t+1)
plt.plot(mean_dists_ppts[t,:,0,1], label="1 to 2", color=line_cmap[0])
plt.scatter(x = all_dists[:,t,1,0,1], label="1 to 2", color=line_cmap[0])
#plt.plot(mean_dists_ppts[t,:,0,2], label="1 to 3", color=line_cmap[1])
#plt.plot(mean_dists_ppts[t,:,0,3], label="1 to 4", color=line_cmap[2])
plt.plot(mean_dists_ppts[t,:,1,2], label="2 to 3", color=line_cmap[3])
#plt.plot(mean_dists_ppts[t,:,1,3], label="2 to 4", color=line_cmap[4])
plt.plot(mean_dists_ppts[t,:,2,3], label="3 to 4", color=line_cmap[5])
plt.legend()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
###Output
_____no_output_____
###Markdown
At each time point: relative proportion of full/partial/zero overlap. Each point has a distribution of Jaccard numbers. Over time, integration: area under curves.
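One way to summarise a whole build with a single number, as suggested above, is to integrate the Jaccard-over-time curve. A minimal sketch, assuming `jaccard_per_step` is a 1D slice such as `all_dists[t, p, :, r1, r2]` with NaNs for missing actions:

```python
import numpy as np

def overlap_area(jaccard_per_step):
    """Area under the Jaccard curve across block placements (higher = more overlap)."""
    vals = np.asarray(jaccard_per_step, dtype=float)
    vals = vals[~np.isnan(vals)]
    return np.trapz(vals) if vals.size > 1 else float(vals.sum())
```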
###Code
target = 0
fig = plt.figure(figsize=(10, 20))
for t in range(0, n_targets):
plt.subplot(4,2,t+1)
plt.plot(all_dists[t,1,:,0,1], label="1 to 2", color=line_cmap[0])
plt.plot(all_dists[t,1,:,0,2], label="1 to 3", color=line_cmap[1])
plt.plot(all_dists[t,1,:,0,3], label="1 to 4", color=line_cmap[2])
plt.plot(all_dists[t,1,:,1,2], label="2 to 3", color=line_cmap[3])
plt.plot(all_dists[t,1,:,1,3], label="2 to 4", color=line_cmap[4])
plt.plot(all_dists[t,1,:,2,3], label="3 to 4", color=line_cmap[5])
plt.legend()
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
###Output
_____no_output_____
###Markdown
Test Jaccard
###Code
arr1, arr2 = np.zeros((13,18)), np.zeros((13,18))
arr1[0:2,6:10] = 1
plt.matshow(arr1)
print('arr1: ', np.count_nonzero(arr1))
arr2[0:2,7:11] = 1
print('arr2: ', np.count_nonzero(arr2))
plt.matshow(arr2)
print('Jaccard: ', scoring.get_jaccard(arr1,arr2))
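# For reference, a minimal sketch of an intersection-over-union score like the one
# scoring.get_jaccard presumably computes on these binary maps (an assumption, not
# the project's actual implementation):
def jaccard_sketch(a, b):
    a = np.asarray(a).astype(bool).ravel()
    b = np.asarray(b).astype(bool).ravel()
    union = np.logical_or(a, b).sum()
    return np.logical_and(a, b).sum() / union if union else 1.0

print('Sketch Jaccard: ', jaccard_sketch(arr1, arr2))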
importlib.reload(rda)
all_dists, all_values = rda.rda_from_df(dfi[(dfi.gameID == ppts[0])],
row_col='repetition',
value_col='flatDiscreteWorld',
small_multiple_col='targetName',
layer_col='blockNum')
all_dists.shape
###Output
_____no_output_____
###Markdown
Action-centric analysis of build sequences Create Dataframe and Matrices
###Code
# Make action dataframe
dfa = dfi[['gameID','trialNum','targetName','repetition','blockNum','condition','phase_extended','y_index','x_index','width_discrete','height_discrete']]
dfa = dfa.rename(columns = {'y_index':'y',
'x_index':'x',
'width_discrete':'w',
'height_discrete':'h'})
targets = np.sort(df['targetName'].unique())
ppts = np.sort(df['gameID'].unique())
extended_phases = ['pre', 'repetition 1', 'repetition 2', 'post']
###Output
_____no_output_____
###Markdown
Experiment with one structure
###Code
# Choose which sequences to compare
truncating = True;
pptA = ppts[15]
targetA = targets[5]
phaseA = extended_phases[0]
pptB = ppts[15]
targetB = targets[5]
phaseB = extended_phases[3]
###Output
_____no_output_____
###Markdown
Calculate distance matrix and visualize
###Code
# Calculate distance matrix
# select two sets of actions e.g. compare one ppts attempts at one structure pre to post
s1 = dfa[(dfa.gameID == pptA) & (dfa.targetName == targetA) & (dfa.phase_extended == phaseA)]
s2 = dfa[(dfa.gameID == pptB) & (dfa.targetName == targetB) & (dfa.phase_extended == phaseB)]
# truncate to length of smaller set of actions
n_actions = min(len(s1),len(s2))
if truncating:
s1 = s1.iloc[0:n_actions]
s2 = s2.iloc[0:n_actions]
# apply distance metric between each 4-tuple in the sequence (but append s1 to s2)
s1_s2 = s1.append(s2)
dist_mat = s1_s2.apply(lambda r:
s1_s2.apply(lambda r2:
distance.euclidean(r[['x','y','w','h']],r2[['x','y','w','h']]),
axis=1),
axis=1)
# Plot matrix and structures
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(24,8),
gridspec_kw={
'width_ratios': [1, 1.6, 1.6]})
ax[0].axis('off')
ax[0].imshow(dist_mat,vmin=0,vmax=10)
ax[0].axvline(x=(len(s1))-0.5, ymin=0, ymax=2*(len(s1))+1,color='white')
ax[0].axhline(y=(len(s1))-0.5, xmin=0, xmax=2*(len(s1))+1,color='white')
ax[0].set_title('A B')
print('A: left, top, B: right, bottom')
# fig2, ax2 = plt.subplots(figsize=(3,3))
drawing.draw_reconstruction_subplot(df=df[df.phase_extended == phaseA],gameID=pptA,targetName=targetA, ax=ax[1], n_colors = n_actions)
#draw_from_actions_subplot(dfa[(dfa.phase_extended == phaseA) & (dfa.gameID==pptA) & (dfa.targetName==targetA)], ax[1])
ax[1].set_title('A')
ax[1].axis('off')
# fig3, ax2 = plt.subplots(figsize=(3,3))
drawing.draw_reconstruction_subplot(df=df[df.phase_extended == phaseB],gameID=pptB,targetName=targetB, ax=ax[2], n_colors = n_actions)
# draw_from_actions_subplot(dfa[(dfa.phase_extended == phaseB) & (dfa.gameID==pptB) & (dfa.targetName==targetB)], ax[2])
ax[2].set_title('B')
ax[2].axis('off')
###Output
A: left, top, B: right, bottom
###Markdown
1. Directly compare internal structures of each sequence. Different measures are going to do different things here. Flattening upper triangles and taking cosine distance.
###Code
AA = dist_mat.values[0:len(s1),0:len(s1)]
AB = dist_mat.values[len(s1):,0:len(s1)]
BA = dist_mat.values[0:len(s1),len(s1):]
BB = dist_mat.values[len(s1):,len(s1):]
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(6,2))
ax[0].axis('off')
ax[0].imshow(AA, vmin=0,vmax=10)
ax[0].set_title('AA')
ax[1].axis('off')
ax[1].imshow(AB, vmin=0,vmax=10)
ax[1].set_title('AB')
ax[2].axis('off')
ax[2].imshow(BA, vmin=0,vmax=10)
ax[2].set_title('BA')
ax[3].axis('off')
ax[3].imshow(BB, vmin=0,vmax=10)
ax[3].set_title('BB')
###Output
_____no_output_____
###Markdown
Cosine distance between upper triangles
###Code
assert (truncating == True) | (len(s1)==len(s2))
AA_triu = AA[np.triu_indices(n_actions, k=1)]
BB_triu = BB[np.triu_indices(n_actions, k=1)]
distance.cosine(AA_triu, BB_triu)
###Output
_____no_output_____
###Markdown
Decay function to find matches and near matches
###Code
def exp_decay(x):
return np.e**(-0.5 * (x/1)**2)
AB_filtered = np.vectorize(exp_decay)(AB)
fig = plt.figure()
plt.imshow(AB_filtered)
AB_filtered
###Output
_____no_output_____
###Markdown
2. Detect block-diagonal structure. Not sure how to go about this. Clustering? Wait until we are using chunkier structures. 3. Looking at between-sequence distances. Find a measure of similarity that takes into account the fact that people might make the same placement but at different times. The diagonal tells us if the same action was performed at the same time. Any off-diagonals should be taken into account but downweighted in some way. What does it mean to have a high score < 1 (less than max) here? 'Very similar action' is not particularly meaningful here. Explore Linear Sum Assignment (still working with one example from above). This finds a mapping between two sequences that minimizes the total distance between matched elements. It is being used here to find a 1-1 correspondence between block placements in two build sequences.
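To see what `scipy.optimize.linear_sum_assignment` returns before applying it to the real distance matrices, here is a small toy example (illustrative only, not experiment data):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: cost[i, j] = distance between action i of sequence A
# and action j of sequence B
cost = np.array([[0.0, 2.0, 9.0],
                 [2.0, 0.0, 7.0],
                 [9.0, 7.0, 0.0]])

row_ind, col_ind = linear_sum_assignment(cost)
print(col_ind)                        # which B action each A action is matched to
print(cost[row_ind, col_ind].sum())   # total cost of the optimal 1-1 matching
print(np.abs(row_ind - col_ind))      # temporal displacement of each matched action
```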
###Code
# check displacements are equal in AB and BA case
AA = dist_mat.values[0:len(s1),0:len(s1)]
AB = dist_mat.values[len(s1):,0:len(s1)]
BA = dist_mat.values[0:len(s1),len(s1):]
BB = dist_mat.values[len(s1):,len(s1):]
sum_displacement_AB = sum(np.abs(np.arange(AB.shape[0]) - linear_sum_assignment(AB)[1]))
sum_displacement_BA = sum(np.abs(np.arange(BA.shape[0]) - linear_sum_assignment(BA)[1]))
print('Total displacement AB: ', sum_displacement_AB)
print('Total displacement BA: ', sum_displacement_BA)
###Output
Total displacement AB: 6
Total displacement BA: 6
###Markdown
It turns out they are not always equal! Linear sum assignment can have multiple optima, with different arrangements
###Code
# Visualise the sort returned by linear_sum_assignment
AB_sorted = AB[:,linear_sum_assignment(AB)[1]]
BA_sorted = BA[:,linear_sum_assignment(BA)[1]]
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(8,2))
ax[0].axis('off')
ax[0].imshow(BA, vmin=0,vmax=10)
ax[0].set_title('BA')
ax[1].axis('off')
ax[1].imshow(BA_sorted, vmin=0,vmax=10)
ax[1].set_title('BA_sorted')
ax[2].axis('off')
ax[2].imshow(AB, vmin=0,vmax=10)
ax[2].set_title('AB')
ax[3].axis('off')
ax[3].imshow(AB_sorted, vmin=0,vmax=10)
ax[3].set_title('AB_sorted')
# Scaled displacement
AA = dist_mat.values[0:len(s1),0:len(s1)]
AB = dist_mat.values[len(s1):,0:len(s1)]
BA = dist_mat.values[0:len(s1),len(s1):]
BB = dist_mat.values[len(s1):,len(s1):]
sum_displacement_AB = np.mean((np.abs(np.arange(AB.shape[0]) - linear_sum_assignment(AB)[1])/(len(AB))))
sum_displacement_BA = np.mean((np.abs(np.arange(BA.shape[0]) - linear_sum_assignment(BA)[1])/(len(BA))))
print('Total displacement AB: ', sum_displacement_AB)
print('Total displacement BA: ', sum_displacement_BA)
###Output
Total displacement AB: 0.16666666666666666
Total displacement BA: 0.16666666666666666
###Markdown
Experimentation
###Code
# for all participants, for all structures that are built 4 times
truncating = True;
dfa_repeated = dfa[dfa.condition=='repeated']
displacements = np.zeros([len(ppts),len(targets),4,4])
displacements = np.full_like(displacements, np.nan, dtype=np.double)
for i_p, ppt in enumerate(ppts):
for i_t, target in enumerate(targets):
dfar = dfa_repeated[(dfa_repeated.gameID==ppt) & (dfa_repeated.targetName==target)]
# check if any row exists for this ppt, structure pair
if (dfar[(dfar.gameID==ppt) & (dfar.targetName==target)]['x'].any()):
for i_pa, phaseA in enumerate(extended_phases):
for i_pb, phaseB in enumerate(extended_phases):
if phaseA != phaseB:
# Calculate distance matrix
# select two sets of actions e.g. compare one ppts attempts at one structure pre to post
s1 = dfar[(dfar.phase_extended == phaseA)]
s2 = dfar[(dfar.phase_extended == phaseB)]
# truncate to length of smaller set of actions
n_actions = min(len(s1),len(s2))
if truncating:
s1 = s1.iloc[0:n_actions]
s2 = s2.iloc[0:n_actions]
BA = s1.apply(lambda r:
s2.apply(lambda r2:
distance.euclidean(r[['x','y','w','h']], r2[['x','y','w','h']]),
axis=1),
axis=1)
#sum_displacement_AB = np.mean((np.abs(np.arange(AB.shape[0]) - linear_sum_assignment(AB)[1])/(len(AB))))
sum_displacement_BA = np.mean((np.abs(np.arange(BA.shape[0]) - linear_sum_assignment(BA)[1])/(BA.shape[0])), dtype='float128')
displacements[i_p, i_t, i_pa, i_pb] = sum_displacement_BA
# Find any <row, structure> pairs that have different A->B from B->A values
# linear_sum_assignment finds a solution that optimizes Euclidean distance. There may be several of these.
# The pairs below indicate that different pairs were found
displacements2 = displacements.copy()  # work on a copy so the NaN markers in `displacements` are preserved
displacements2[np.isnan(displacements2)] = -1
same = True
for i_p, ppt in enumerate(ppts):
for i_t, target in enumerate(targets):
arr = np.reshape(displacements2[i_p,i_t,:,:],(4,4))
same = same & (arr.transpose() == arr).all()
if(not (arr.transpose() == arr).all()):
print(i_p, i_t)
same
# Inspect phase displacement matrices for one participant and structure
displacements[3,5,:,:]
# Inspect mean displacement matrices for all structures
np.nanmean(displacements, axis=0, keepdims=True)
# Inspect mean displacement matrices for all structures, all participants
phase_differences = np.nanmean(np.nanmean(displacements, axis=0, keepdims=True), axis=1)[0]
phase_differences
phase_differences_sem = stats.sem(np.nanmean(displacements, axis=0, keepdims=True), axis=1)
phase_differences_sem
phase_differences_se = np.nanstd(np.nanmean(displacements, axis=0), axis=0)/(np.sqrt(8))
phase_differences_se
plt.errorbar(x = range(1,4),
y = [phase_differences[0,1],phase_differences[1,2],phase_differences[2,3]],
yerr=[phase_differences_se[0,1],phase_differences_se[1,2],phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to rep 1','rep 1 to rep 2','rep 2 to post'])
plt.title('Comparing average displacements between consecutive phases')
plt.errorbar(x = range(1,4),
y = [phase_differences[0,3],phase_differences[1,3],phase_differences[2,3]],
yerr=[phase_differences_se[0,3],phase_differences_se[1,3],phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to post','rep 1 to post','rep 2 to post'])
plt.title('Comparing average displacements between each phase and posttest')
# for all participants, for all structures that are built 4 times
truncating = True;
dfa_repeated = dfa[dfa.condition=='repeated']
raw_displacements = np.full([len(ppts), len(targets), 4, 4], np.nan, dtype=np.double)
for i_p, ppt in enumerate(ppts):
for i_t, target in enumerate(targets):
dfar = dfa_repeated[(dfa_repeated.gameID==ppt) & (dfa_repeated.targetName==target)]
if (dfar[(dfar.gameID==ppt) & (dfar.targetName==target)]['x'].any()):
for i_pa, phaseA in enumerate(extended_phases):
for i_pb, phaseB in enumerate(extended_phases):
if phaseA != phaseB:
# Calculate distance matrix
# select two sets of actions e.g. compare one ppts attempts at one structure pre to post
s1 = dfar[(dfar.phase_extended == phaseA)]
s2 = dfar[(dfar.phase_extended == phaseB)]
# truncate to length of smaller set of actions
n_actions = min(len(s1),len(s2))
if truncating:
s1 = s1.iloc[0:n_actions]
s2 = s2.iloc[0:n_actions]
BA = s1.apply(lambda r:
s2.apply(lambda r2:
distance.euclidean(r[['x','y','w','h']], r2[['x','y','w','h']]),
axis=1),
axis=1)
#sum_displacement_AB = np.mean((np.abs(np.arange(AB.shape[0]) - linear_sum_assignment(AB)[1])/(len(AB))))
sum_displacement_BA = sum(np.abs(np.arange(BA.shape[0]) - linear_sum_assignment(BA)[1]))
raw_displacements[i_p, i_t, i_pa, i_pb] = sum_displacement_BA
# Inspect mean displacement matrices for all structures, all participants
raw_phase_differences = np.nanmean(np.nanmean(raw_displacements, axis=0, keepdims=True), axis=1)[0]
raw_phase_differences
raw_phase_differences_se = np.nanstd(np.nanmean(raw_displacements, axis=0), axis=0)/(np.sqrt(8))
raw_phase_differences_se
plt.errorbar(x = range(1,4),
y = [raw_phase_differences[0,1],raw_phase_differences[1,2],raw_phase_differences[2,3]],
yerr=[raw_phase_differences_se[0,1],raw_phase_differences_se[1,2],raw_phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to rep 1','rep 1 to rep 2','rep 2 to post'])
plt.title('Comparing average displacements between consecutive phases')
plt.errorbar(x = range(1,4),
y = [raw_phase_differences[0,3],raw_phase_differences[1,3],raw_phase_differences[2,3]],
yerr=[raw_phase_differences_se[0,3],raw_phase_differences_se[1,3],raw_phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to post','rep 1 to post','rep 2 to post'])
plt.title('Comparing average displacements between each phase and posttest')
# for all participants, for all structures that are built 4 times
# Meaned but not scaled
truncating = True;
dfa_repeated = dfa[dfa.condition=='repeated']
unscaled_displacements = np.full([len(ppts), len(targets), 4, 4], np.nan, dtype=np.double)
for i_p, ppt in enumerate(ppts):
for i_t, target in enumerate(targets):
dfar = dfa_repeated[(dfa_repeated.gameID==ppt) & (dfa_repeated.targetName==target)]
# check if any row exists for this ppt, structure pair
if (dfar[(dfar.gameID==ppt) & (dfar.targetName==target)]['x'].any()):
for i_pa, phaseA in enumerate(extended_phases):
for i_pb, phaseB in enumerate(extended_phases):
if phaseA != phaseB:
# Calculate distance matrix
# select two sets of actions e.g. compare one ppts attempts at one structure pre to post
s1 = dfar[(dfar.phase_extended == phaseA)]
s2 = dfar[(dfar.phase_extended == phaseB)]
# truncate to length of smaller set of actions
n_actions = min(len(s1),len(s2))
if truncating:
s1 = s1.iloc[0:n_actions]
s2 = s2.iloc[0:n_actions]
BA = s1.apply(lambda r:
s2.apply(lambda r2:
distance.euclidean(r[['x','y','w','h']], r2[['x','y','w','h']]),
axis=1),
axis=1)
#sum_displacement_AB = np.mean((np.abs(np.arange(AB.shape[0]) - linear_sum_assignment(AB)[1])/(len(AB))))
sum_displacement_BA = np.mean((np.abs(np.arange(BA.shape[0]) - linear_sum_assignment(BA)[1])), dtype='float128')
unscaled_displacements[i_p, i_t, i_pa, i_pb] = sum_displacement_BA
# Inspect mean displacement matrices for all structures, all participants
unscaled_phase_differences = np.nanmean(np.nanmean(unscaled_displacements, axis=0, keepdims=True), axis=1)[0]
unscaled_phase_differences
unscaled_phase_differences_se = np.nanstd(np.nanmean(unscaled_displacements, axis=0), axis=0)/(np.sqrt(8))
unscaled_phase_differences_se
plt.errorbar(x = range(1,4),
y = [unscaled_phase_differences[0,1],unscaled_phase_differences[1,2],unscaled_phase_differences[2,3]],
yerr=[unscaled_phase_differences_se[0,1],unscaled_phase_differences_se[1,2],unscaled_phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to rep 1','rep 1 to rep 2','rep 2 to post'])
plt.title('Comparing average displacements between consecutive phases')
plt.errorbar(x = range(1,4),
y = [unscaled_phase_differences[0,3],unscaled_phase_differences[1,3],unscaled_phase_differences[2,3]],
yerr=[unscaled_phase_differences_se[0,3],unscaled_phase_differences_se[1,3],unscaled_phase_differences_se[2,3]])
plt.xticks([1,2,3],['pre to post','rep 1 to post','rep 2 to post'])
plt.title('Comparing average displacements between each phase and posttest')
###Output
_____no_output_____ |
PyTorchPractice1.ipynb | ###Markdown
TensorWarm-up: numpyBefore introducing PyTorch, we will first implement a two-layer network using numpy.Numpy provides an n-dimensional array object, and many functions for manipulating these arrays. Numpy is a generic framework for scientific computing; it does not know anything about computation graphs, or deep learning, or gradients. However, we can easily use numpy to fit such a network to random data by manually implementing the forward and backward passes through the network using numpy operations.
###Code
import numpy as np
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
for t in range(500):
# Forward pass: compute predicted y
h = x.dot(w1)
h_relu = np.maximum(h,0)
y_pred = h_relu.dot(w2)
# Compute and print loss
    loss = np.square(y_pred - y).sum()
print(t, loss)
# Backprop to compute gradients of w1 and w2 with respect to loss
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.T.dot(grad_y_pred)
grad_h_relu = grad_y_pred.dot(w2.T)
grad_h = grad_h_relu.copy()
grad_h[h < 0] = 0
grad_w1 = x.T.dot(grad_h)
#Update weights
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
###Output
[Output truncated: each of the 500 iterations printed the full (64, 10) loss matrix rather than a scalar; the printed values shrink to the order of 1e-8 and smaller by iterations 474-475.]
1.58007617e-10 1.49364261e-09 4.96205216e-12 5.56970632e-10
8.68215692e-10 1.44768816e-10]
[4.02097874e-09 1.55067663e-09 1.84798617e-09 3.92341597e-10
1.36691838e-09 2.05599469e-09 1.47033199e-10 2.46101937e-10
4.65381197e-10 1.95596585e-12]
[1.37067007e-08 5.03152843e-11 7.95501546e-10 3.47502245e-10
8.50164073e-11 6.00590757e-12 1.09880474e-09 3.92728873e-11
3.01270337e-10 9.01480245e-10]
[1.44593034e-08 1.86767736e-09 1.22577819e-12 1.37987678e-09
1.07744750e-10 7.82988975e-10 8.07234910e-10 6.59422846e-10
9.74330026e-10 3.93302880e-09]
[1.74681687e-08 2.49357326e-09 1.03241980e-09 8.71928797e-10
1.76926323e-09 9.90439630e-11 2.91170456e-11 9.67149568e-10
1.20929999e-11 5.38079753e-11]
[4.21657521e-07 4.53649990e-08 1.05277181e-07 7.03368192e-09
9.38654531e-08 6.89476592e-08 8.96065358e-09 1.98452064e-09
1.40565416e-08 2.71259524e-10]
[7.60814122e-09 2.01388082e-08 9.28035219e-09 6.95020209e-10
7.37598561e-09 6.91864665e-12 9.53151143e-11 4.16058525e-09
1.24537516e-11 4.00548609e-10]]
476 [[2.03353352e-09 3.46837549e-09 2.48866665e-09 2.09542442e-11
1.03995110e-09 5.56109108e-10 2.59880459e-11 4.55698450e-10
1.48980273e-11 1.77707237e-10]
[2.80675887e-08 5.83187710e-09 1.31490090e-08 1.97898023e-09
4.10987601e-09 4.76588422e-09 3.26744267e-10 6.59643478e-10
5.22343040e-11 2.09394341e-10]
[2.60339371e-08 4.08951469e-10 3.63598851e-09 1.59696684e-09
1.23033698e-09 2.37343878e-09 1.29559372e-09 5.07561153e-10
1.35119672e-09 2.52018596e-10]
[7.59993534e-09 6.75736757e-11 4.45352669e-10 7.00434496e-10
9.47449452e-13 6.35968940e-12 5.70791364e-10 1.49005577e-11
3.97006786e-11 3.90861571e-10]
[1.09720368e-08 1.36869081e-09 4.09753016e-09 5.02716844e-10
1.44059137e-09 4.16192462e-10 2.46459265e-10 1.76773471e-10
6.01931600e-11 3.63001007e-12]
[2.77583182e-10 9.87433953e-11 5.62775384e-10 1.75334772e-12
1.26288539e-10 1.41391296e-10 1.26513803e-10 1.89248867e-11
3.65489776e-12 4.00603301e-11]
[7.92246899e-09 1.03629498e-10 9.24339455e-11 9.92093907e-11
3.91384642e-11 3.03650369e-11 1.28702909e-09 1.04689488e-11
5.71644813e-10 6.27631512e-11]
[1.29092545e-10 1.17036435e-10 3.06243672e-11 2.47065039e-11
9.88053205e-13 8.01280179e-12 8.29277418e-11 7.58171414e-12
2.80989438e-12 2.78305932e-11]
[3.06204259e-10 2.72529460e-09 3.28052695e-09 1.81717684e-13
2.36116632e-09 6.24434316e-10 5.06441257e-10 4.43532757e-10
1.25577876e-10 9.38168131e-10]
[2.21254009e-10 5.49496997e-10 2.32993997e-09 3.17830171e-11
1.26710359e-09 8.90165295e-10 1.58980397e-11 1.49878664e-10
1.44536063e-11 6.06926657e-10]
[3.19909224e-12 5.28249677e-10 9.65571926e-10 1.14246421e-14
1.38948688e-10 1.60530173e-10 1.11475929e-10 6.10198815e-11
3.12743045e-11 3.07271669e-10]
[3.15285330e-10 4.88640725e-11 9.22588538e-12 1.71727396e-11
5.19324405e-12 1.18027899e-11 3.33063422e-12 1.49493082e-12
2.64208264e-11 4.60898343e-11]
[2.11983607e-11 8.72316049e-09 1.98605231e-09 2.80139547e-11
1.25618973e-09 2.11036964e-10 1.15929961e-09 1.08883355e-09
8.65473117e-10 4.95670586e-10]
[2.04319543e-09 1.13410824e-09 1.42856374e-09 6.28516880e-11
5.91631428e-10 4.36061097e-10 2.67116058e-11 2.09085230e-10
4.09083856e-13 8.50104018e-11]
[5.14994277e-10 2.06682660e-09 1.27063610e-09 8.58939614e-11
1.01724289e-09 6.33848609e-11 2.15098909e-11 1.85949298e-09
2.03353111e-10 9.89333010e-10]
[1.23161638e-10 1.52429295e-10 6.89953110e-12 8.50707889e-13
4.66975861e-11 8.01883409e-11 2.32820948e-11 1.79228633e-10
1.29430694e-10 2.98347111e-10]
[6.58673770e-09 6.50081571e-09 8.52290432e-09 2.15592489e-10
4.88882242e-09 4.67812374e-09 1.73428392e-10 7.02306602e-10
7.32872622e-10 1.81290448e-09]
[1.02913799e-08 1.11137117e-08 1.05463428e-08 4.60137333e-10
3.12774996e-09 1.03812670e-09 2.04418413e-10 1.44907721e-09
1.53488192e-11 1.71596645e-10]
[2.89609767e-08 9.45047981e-08 8.74674786e-08 1.01461151e-09
2.24566266e-08 6.30356905e-09 6.74234806e-08 1.48603834e-08
1.34246725e-08 1.33250340e-08]
[1.02981781e-09 1.25390606e-08 3.16668218e-09 5.54226477e-10
2.30832375e-09 4.91839599e-10 2.54689020e-10 3.41758032e-09
1.75745617e-09 6.86823088e-09]
[2.04118504e-09 1.08152087e-10 7.70149238e-12 3.47000376e-12
1.85129651e-11 2.46001887e-10 5.92063749e-11 1.90844026e-10
6.46814167e-11 2.04087093e-12]
[2.02329146e-08 2.03591785e-08 1.86943613e-08 3.99536747e-11
1.30607834e-08 4.77913555e-10 9.32828398e-11 6.00439656e-09
9.39449302e-10 5.37292563e-10]
[3.80936843e-10 4.46032681e-10 9.18130433e-11 2.20296541e-12
3.88007504e-11 9.88841267e-12 8.54819791e-10 1.01601249e-13
1.00068756e-10 4.81726556e-11]
[2.32267129e-09 1.32602455e-11 8.09781148e-11 2.54204565e-10
2.63418452e-10 9.78652658e-15 4.83309588e-12 3.08146976e-12
1.90187046e-12 2.81063414e-11]
[1.29909562e-09 1.80887136e-10 2.10477147e-10 3.93708788e-11
7.53468768e-15 3.29993184e-12 1.57444006e-10 1.39725354e-10
1.28500457e-10 1.74531512e-10]
[2.08409926e-09 4.11227111e-09 7.77369852e-10 4.17533131e-10
1.13832319e-10 1.97500664e-12 7.92635401e-10 5.90914351e-10
5.45141926e-10 2.08100696e-09]
[6.61385374e-09 1.38823635e-09 8.09622210e-10 6.84546665e-11
1.28182749e-09 3.69875487e-10 7.38739987e-11 1.85708064e-11
1.15350010e-11 2.27420833e-11]
[7.43820897e-10 2.28839292e-10 5.97005810e-10 1.91352376e-11
1.24302251e-10 1.40012986e-12 2.23663318e-10 2.41581046e-11
1.41583259e-10 1.11367446e-14]
[2.22178014e-08 2.12738538e-09 2.66792383e-09 8.52307314e-10
1.02390340e-09 5.28648406e-10 1.10701758e-09 6.02703482e-10
2.06109569e-11 1.60572601e-10]
[1.02614895e-09 1.39650094e-09 2.93320116e-09 8.43765056e-11
1.12017189e-09 1.82912657e-09 4.12417740e-10 1.90706340e-10
1.31317348e-11 2.70401999e-09]
[5.48825420e-09 6.62741444e-09 6.08752316e-09 3.13643932e-10
1.02894217e-09 2.11957316e-10 3.53149226e-09 1.08781288e-09
7.76821670e-10 2.96450647e-09]
[2.57983330e-09 1.67201901e-09 4.37915142e-09 1.92769313e-11
4.60536800e-10 6.14216036e-10 4.33324394e-11 1.15942545e-10
1.42398258e-12 2.37826046e-11]
[3.30284190e-09 4.37605590e-09 9.55970279e-10 1.59814039e-10
3.55746864e-10 3.11213107e-11 9.41357986e-11 5.28675811e-10
4.08827245e-10 1.00798811e-11]
[3.66194865e-10 7.88370464e-11 2.30742073e-10 1.61291597e-11
1.93114690e-13 6.11921142e-13 7.43291285e-11 4.38352477e-11
2.69547756e-11 6.97670982e-11]
[8.06121955e-09 2.36632797e-11 1.52779776e-09 3.54615038e-10
8.28888701e-10 1.29890653e-09 8.74417759e-10 3.87863674e-11
3.74799679e-10 1.63265124e-10]
[3.98290637e-08 1.10845646e-08 9.97896715e-09 1.11531240e-09
7.75054670e-09 1.02239103e-09 9.05957417e-10 9.05430392e-10
5.41687694e-10 1.74342757e-09]
[3.96567006e-10 1.61108060e-08 4.70983217e-09 5.35595263e-12
2.22218951e-09 3.11693905e-10 2.96698795e-09 3.84104673e-09
9.72234314e-10 5.97627468e-09]
[7.23494875e-09 8.74237991e-10 1.01313786e-09 6.57327456e-10
3.94496980e-10 2.01613790e-11 1.20981302e-10 2.40511481e-10
5.06563948e-11 3.31615938e-12]
[1.61740218e-09 2.33626845e-09 2.32769051e-09 7.36265598e-11
2.03148226e-09 3.56971571e-10 9.22847351e-11 5.24889081e-10
6.01069007e-11 6.16993301e-10]
[3.97589193e-09 1.06152079e-09 2.39771250e-10 6.02422248e-10
1.70789661e-10 7.86447401e-10 1.34567796e-09 2.62662527e-10
1.00046797e-09 3.51309477e-11]
[3.09699172e-10 2.02668604e-09 7.12391008e-10 1.06944705e-10
6.45378285e-10 3.27294983e-13 1.03682446e-09 1.15145899e-09
6.66079167e-10 3.01952619e-10]
[3.72192669e-09 8.21094972e-10 3.33363644e-09 4.82451614e-12
2.18568732e-09 8.63501397e-10 1.56165376e-11 3.23255421e-11
4.24149335e-10 1.93193022e-10]
[1.62169446e-08 1.56168254e-08 4.35674462e-09 2.08661276e-11
3.03071150e-09 3.75502781e-09 7.65220078e-09 5.80585916e-09
1.97835316e-09 1.02033723e-09]
[6.62362644e-09 1.99819841e-09 3.69710530e-10 2.53621627e-10
4.34944442e-10 3.22675468e-10 1.65950122e-09 8.32860925e-10
9.60112108e-12 2.37822064e-10]
[1.27594800e-09 1.38657639e-09 1.12281124e-10 1.00639713e-11
1.52623589e-10 9.91119170e-11 2.15441565e-11 4.90876376e-10
6.34285320e-11 1.19262832e-10]
[1.56136350e-10 4.11177826e-11 2.99000900e-10 4.45144039e-11
2.29291800e-10 1.75680382e-11 1.13677440e-10 3.69505626e-11
5.36114886e-11 2.09184950e-11]
[1.23010884e-11 1.42465464e-13 3.41223998e-11 7.81266828e-12
1.43611097e-11 6.89077467e-12 1.09441118e-10 4.35300298e-14
4.75080164e-11 2.76429450e-13]
[4.47345533e-09 3.34539898e-09 4.74502605e-09 7.55384107e-12
1.57888954e-09 1.89198681e-09 3.89642845e-11 5.75743209e-10
6.04067989e-10 6.94239073e-10]
[3.16651733e-10 9.68597302e-10 6.02228058e-10 2.55719596e-11
8.08414165e-10 1.31466960e-10 5.32776151e-11 2.28588717e-10
6.34054076e-11 7.43679461e-10]
[1.32438349e-08 1.28829294e-09 2.34024538e-09 7.33323287e-10
2.39768060e-09 4.07464567e-11 1.48959318e-10 8.46750559e-10
1.08465345e-09 3.79748901e-13]
[5.61058485e-08 8.44752122e-09 2.11285304e-08 5.57057769e-09
8.34253788e-09 7.61597123e-12 9.53908757e-12 1.20975274e-09
1.78747319e-09 1.18193893e-11]
[5.15439062e-10 2.12913271e-08 9.92072093e-09 3.34117130e-10
1.12902607e-08 1.56235610e-09 5.54772554e-11 4.16582026e-09
4.30768165e-10 1.02425010e-08]
[1.80487702e-10 4.41664696e-10 2.69642030e-09 5.50575525e-11
1.15876807e-09 1.03889843e-09 4.74196704e-14 1.04805251e-11
1.34858342e-12 9.32539449e-10]
[7.84079135e-09 8.92494370e-09 1.98741683e-08 8.45399083e-15
5.63151216e-09 4.73132309e-09 4.76795461e-10 2.91950044e-10
4.94914697e-10 7.45983747e-14]
[5.25314164e-10 1.32098555e-09 2.19613268e-10 7.00752174e-11
6.94871296e-11 9.56732337e-12 5.75707421e-10 1.61469265e-10
3.99286166e-10 7.95955818e-11]
[5.47773249e-09 3.12805562e-10 1.17110167e-09 2.67537213e-10
3.32683484e-10 2.29993717e-10 4.29807921e-11 8.55847253e-12
4.12992736e-11 9.09844490e-13]
[3.31480810e-08 8.00885677e-09 8.06513748e-09 5.05689774e-10
3.02848829e-09 1.90079826e-09 3.47907595e-10 1.98162451e-09
3.08493466e-11 1.56329723e-11]
[1.27363523e-08 2.52439590e-09 4.43074868e-09 1.84483798e-10
1.50652798e-10 1.42404806e-09 4.88138104e-12 5.29297916e-10
8.28889583e-10 1.37678164e-10]
[3.84013044e-09 1.47250151e-09 1.75317945e-09 3.75318568e-10
1.30156683e-09 1.95620651e-09 1.39949578e-10 2.32559506e-10
4.43908399e-10 1.71379959e-12]
[1.30522171e-08 4.83800163e-11 7.58774554e-10 3.30685672e-10
8.05521884e-11 5.67993973e-12 1.04614096e-09 3.76390949e-11
2.86451336e-10 8.56459201e-10]
[1.37680130e-08 1.77299648e-09 1.34834884e-12 1.31271261e-09
1.04516275e-10 7.47999132e-10 7.67145700e-10 6.25914391e-10
9.28102920e-10 3.73528092e-09]
[1.66453496e-08 2.36751528e-09 9.83932865e-10 8.31631496e-10
1.68468199e-09 9.30118989e-11 2.85530556e-11 9.20191038e-10
1.18700222e-11 5.10873746e-11]
[4.02626336e-07 4.33200303e-08 1.00533371e-07 6.71316690e-09
8.96506696e-08 6.58480117e-08 8.55947688e-09 1.89678667e-09
1.34220703e-08 2.58541861e-10]
[7.23638085e-09 1.91298074e-08 8.81228207e-09 6.60909193e-10
7.00891029e-09 6.68970386e-12 9.02842464e-11 3.94942859e-09
1.17979227e-11 3.78912604e-10]]
477 [[1.94549670e-09 3.30991897e-09 2.37325092e-09 1.98136940e-11
9.90196343e-10 5.30679720e-10 2.47445400e-11 4.34145657e-10
1.40969219e-11 1.68298323e-10]
[2.67550647e-08 5.58480432e-09 1.25769756e-08 1.88587250e-09
3.92904254e-09 4.56117780e-09 3.11793586e-10 6.31695362e-10
4.98402635e-11 2.02604650e-10]
[2.48635640e-08 3.92492414e-10 3.47995736e-09 1.52374379e-09
1.17690808e-09 2.26660102e-09 1.23619240e-09 4.83829851e-10
1.29047661e-09 2.40993242e-10]
[7.24007636e-09 6.34462955e-11 4.22432526e-10 6.68689839e-10
8.93575780e-13 6.07073028e-12 5.45232779e-10 1.39653175e-11
3.81853335e-11 3.73994206e-10]
[1.04682145e-08 1.30204901e-09 3.90199529e-09 4.80076448e-10
1.37135704e-09 3.97821536e-10 2.34751094e-10 1.67919132e-10
5.70718675e-11 3.14444879e-12]
[2.65686917e-10 9.36779066e-11 5.35290598e-10 1.72551324e-12
1.20122716e-10 1.34072199e-10 1.20630203e-10 1.80986629e-11
3.61748646e-12 3.77393632e-11]
[7.56001117e-09 9.81662627e-11 8.90513556e-11 9.45441143e-11
3.77817125e-11 2.91626107e-11 1.22894950e-09 9.80113155e-12
5.45237834e-10 5.90044339e-11]
[1.24496104e-10 1.12142846e-10 2.92589181e-11 2.32370446e-11
9.32458271e-13 7.68629919e-12 7.93062287e-11 7.23523315e-12
2.78007908e-12 2.63043868e-11]
[2.97474141e-10 2.60324869e-09 3.14121508e-09 1.85650709e-13
2.26231402e-09 5.97885704e-10 4.80410896e-10 4.24864005e-10
1.19789584e-10 8.99773257e-10]
[2.12171802e-10 5.23900802e-10 2.22471042e-09 3.01564172e-11
1.20884530e-09 8.50682090e-10 1.51096794e-11 1.43225091e-10
1.39621336e-11 5.77650082e-10]
[3.18450626e-12 5.06481327e-10 9.23568319e-10 5.29617482e-15
1.33172655e-10 1.53471190e-10 1.06439799e-10 5.86604129e-11
3.01580199e-11 2.93041539e-10]
[3.04953930e-10 4.64140121e-11 9.12216790e-12 1.68836844e-11
4.78308124e-12 1.16101631e-11 3.37478776e-12 1.46330226e-12
2.58577867e-11 4.40443657e-11]
[2.06634228e-11 8.31497598e-09 1.89696406e-09 2.67757477e-11
1.19917817e-09 2.02379636e-10 1.10272366e-09 1.03857410e-09
8.23671825e-10 4.73396541e-10]
[1.95408533e-09 1.08582438e-09 1.36789601e-09 6.03024125e-11
5.66436614e-10 4.17223605e-10 2.54020708e-11 2.00493528e-10
3.66210259e-13 8.14904061e-11]
[4.90320551e-10 1.96922622e-09 1.21334444e-09 8.22905666e-11
9.69774637e-10 5.99149791e-11 2.00683081e-11 1.77337785e-09
1.92865191e-10 9.46709166e-10]
[1.17409317e-10 1.45041679e-10 6.48055615e-12 7.82248173e-13
4.43196725e-11 7.65891039e-11 2.23390027e-11 1.70855130e-10
1.22717878e-10 2.83648662e-10]
[6.30593001e-09 6.20040899e-09 8.13784690e-09 2.06406724e-10
4.65911280e-09 4.46662653e-09 1.66301233e-10 6.68920330e-10
7.01030623e-10 1.72330268e-09]
[9.77443351e-09 1.05960451e-08 1.00480331e-08 4.35896494e-10
2.97926148e-09 9.86620586e-10 1.96562050e-10 1.38271488e-09
1.49128016e-11 1.63861838e-10]
[2.75856000e-08 9.02714665e-08 8.35900848e-08 9.66776596e-10
2.14441358e-08 6.02413822e-09 6.43357503e-08 1.41981338e-08
1.28067236e-08 1.27216842e-08]
[9.71695297e-10 1.19464412e-08 3.02747674e-09 5.27322454e-10
2.19690985e-09 4.70732337e-10 2.42272114e-10 3.26293162e-09
1.67267230e-09 6.54130746e-09]
[1.94231520e-09 1.02068933e-10 7.43106123e-12 3.28382217e-12
1.76969820e-11 2.33210181e-10 5.69282771e-11 1.82035898e-10
6.12384790e-11 1.99718133e-12]
[1.93124220e-08 1.93877507e-08 1.78304767e-08 3.85805684e-11
1.24525972e-08 4.58006405e-10 8.78632890e-11 5.72096025e-09
8.90888249e-10 5.10507156e-10]
[3.65436345e-10 4.33596376e-10 8.90547617e-11 2.17916880e-12
3.75267284e-11 9.18349110e-12 8.23137280e-10 7.28607229e-14
9.71445414e-11 4.57872762e-11]
[2.21278198e-09 1.26904022e-11 7.92977464e-11 2.40417226e-10
2.52569826e-10 9.94163132e-16 4.61404353e-12 3.06913043e-12
2.18303153e-12 2.62688996e-11]
[1.23621208e-09 1.71329002e-10 2.01139727e-10 3.74802118e-11
8.77540676e-15 2.95308178e-12 1.49095824e-10 1.32908444e-10
1.21664124e-10 1.65161894e-10]
[1.98942745e-09 3.92239141e-09 7.42848327e-10 3.98327642e-10
1.08598445e-10 1.89272633e-12 7.56245528e-10 5.64053759e-10
5.20048218e-10 1.98675992e-09]
[6.31735072e-09 1.32692962e-09 7.74823728e-10 6.54495506e-11
1.22598145e-09 3.53942936e-10 7.06692113e-11 1.77627042e-11
1.11845739e-11 2.19520590e-11]
[7.08559262e-10 2.19075586e-10 5.68349581e-10 1.80546574e-11
1.18050053e-10 1.31756920e-12 2.13440800e-10 2.31735429e-11
1.35684333e-10 3.09768508e-15]
[2.11837604e-08 2.03395609e-09 2.54612396e-09 8.11263057e-10
9.78703403e-10 5.05348733e-10 1.05692529e-09 5.73574459e-10
1.93545451e-11 1.54003132e-10]
[9.68075072e-10 1.33961944e-09 2.80613313e-09 7.99220648e-11
1.06980734e-09 1.74473933e-09 3.91805222e-10 1.82958310e-10
1.24662449e-11 2.57661304e-09]
[5.22627623e-09 6.31428082e-09 5.79533130e-09 2.98648957e-10
9.79917632e-10 2.01364052e-10 3.36640101e-09 1.03492482e-09
7.41502322e-10 2.82126448e-09]
[2.45796162e-09 1.59230402e-09 4.17268418e-09 1.85438886e-11
4.39191421e-10 5.85803016e-10 4.13306121e-11 1.10234846e-10
1.39469487e-12 2.24768955e-11]
[3.14721001e-09 4.17132376e-09 9.11256669e-10 1.52112194e-10
3.39313690e-10 2.96153147e-11 8.97345223e-11 5.03473222e-10
3.90071112e-10 9.65369323e-12]
[3.45018245e-10 7.70073183e-11 2.22701866e-10 1.48423916e-11
1.56085922e-13 5.38617104e-13 7.09486131e-11 4.22697012e-11
2.58595066e-11 6.79796338e-11]
[7.70019336e-09 2.24567769e-11 1.45584147e-09 3.38868048e-10
7.90785362e-10 1.23774148e-09 8.34054441e-10 3.68770479e-11
3.57326268e-10 1.54974297e-10]
[3.80058851e-08 1.05817102e-08 9.53866730e-09 1.06384472e-09
7.40423502e-09 9.80924121e-10 8.66310657e-10 8.64547818e-10
5.18756401e-10 1.66792314e-09]
[3.76873885e-10 1.53849866e-08 4.50291137e-09 5.15126552e-12
2.12256172e-09 2.97652048e-10 2.83177186e-09 3.67045175e-09
9.27708344e-10 5.70562765e-09]
[6.87802272e-09 8.36012955e-10 9.69065401e-10 6.23821211e-10
3.77407555e-10 1.88656468e-11 1.15514451e-10 2.30321597e-10
4.83517367e-11 2.88080800e-12]
[1.54240998e-09 2.22015256e-09 2.21296599e-09 7.05002378e-11
1.93116330e-09 3.40176639e-10 8.75315067e-11 4.97691915e-10
5.66700632e-11 5.84512062e-10]
[3.78169043e-09 1.00322238e-09 2.29850419e-10 5.72019142e-10
1.63729902e-10 7.48051217e-10 1.27456148e-09 2.48653818e-10
9.49173647e-10 3.39351151e-11]
[2.93774065e-10 1.91378353e-09 6.71074485e-10 1.01712458e-10
6.11750236e-10 3.41525126e-13 9.80264590e-10 1.08722133e-09
6.29769335e-10 2.87053791e-10]
[3.55499062e-09 7.80341200e-10 3.17372864e-09 4.65347969e-12
2.08365254e-09 8.22185515e-10 1.49476333e-11 3.04466148e-11
4.05304979e-10 1.83498640e-10]
[1.54263131e-08 1.48827683e-08 4.17064035e-09 1.95275586e-11
2.89149230e-09 3.55532594e-09 7.27379072e-09 5.54038194e-09
1.88122036e-09 9.84862554e-10]
[6.30707048e-09 1.90655863e-09 3.60367534e-10 2.38832514e-10
4.17645550e-10 3.00439723e-10 1.57580907e-09 7.98294148e-10
9.11014631e-12 2.18329968e-10]
[1.21853305e-09 1.31321397e-09 1.06549304e-10 9.41957319e-12
1.45034245e-10 9.36998498e-11 2.13606063e-11 4.67609006e-10
5.94999242e-11 1.13366064e-10]
[1.48783142e-10 3.97084656e-11 2.83855509e-10 4.24218365e-11
2.17292439e-10 1.65369138e-11 1.08586499e-10 3.54244128e-11
5.11338858e-11 1.97301839e-11]
[1.15777471e-11 1.20010734e-13 3.29612038e-11 7.37707869e-12
1.36655698e-11 6.38568185e-12 1.04362238e-10 4.87289940e-14
4.52958423e-11 2.67343900e-13]
[4.28376721e-09 3.19367987e-09 4.53334307e-09 7.16195826e-12
1.50755507e-09 1.80612211e-09 3.73135136e-11 5.49764738e-10
5.76776217e-10 6.61708073e-10]
[2.98448946e-10 9.21102053e-10 5.73067068e-10 2.42162908e-11
7.67217691e-10 1.25455556e-10 5.00184437e-11 2.17150419e-10
6.00420458e-11 7.03602101e-10]
[1.26083551e-08 1.23171901e-09 2.23680059e-09 6.98550965e-10
2.28934256e-09 3.81154929e-11 1.41419171e-10 8.09307709e-10
1.03364761e-09 3.01418335e-13]
[5.34477443e-08 8.04546528e-09 2.01351985e-08 5.30310268e-09
7.95363861e-09 6.93718829e-12 9.27694129e-12 1.15139916e-09
1.70147731e-09 1.13775779e-11]
[4.90159140e-10 2.02740151e-08 9.44155706e-09 3.17127993e-10
1.07365320e-08 1.48455037e-09 5.32504590e-11 3.96607906e-09
4.10199982e-10 9.73957393e-09]
[1.69106909e-10 4.24948072e-10 2.57969927e-09 5.22114907e-11
1.10638281e-09 9.89640766e-10 1.52116487e-14 1.01962666e-11
1.17721389e-12 8.88083117e-10]
[7.48901712e-09 8.53461071e-09 1.89932828e-08 1.80248218e-14
5.38430934e-09 4.52051356e-09 4.55219688e-10 2.79322890e-10
4.71483552e-10 5.63916835e-14]
[4.96913049e-10 1.25622919e-09 2.06239855e-10 6.78439384e-11
6.61935060e-11 9.34660256e-12 5.47129254e-10 1.52588178e-10
3.79132234e-10 7.54539363e-11]
[5.22569094e-09 2.98462268e-10 1.11903457e-09 2.54290447e-10
3.16905649e-10 2.20381168e-10 4.11850247e-11 8.08095379e-12
3.92634646e-11 8.99929242e-13]
[3.15990074e-08 7.62850225e-09 7.69042361e-09 4.81884117e-10
2.88880866e-09 1.81535311e-09 3.33246088e-10 1.88893732e-09
2.97395412e-11 1.51760699e-11]
[1.21374317e-08 2.40015641e-09 4.21792482e-09 1.76065794e-10
1.43677793e-10 1.35702127e-09 4.80980312e-12 5.03656629e-10
7.91542889e-10 1.30952160e-10]
[3.66635905e-09 1.39783991e-09 1.66447442e-09 3.59203722e-10
1.24017097e-09 1.86168885e-09 1.33090477e-10 2.19811179e-10
4.23938314e-10 1.48621604e-12]
[1.24283003e-08 4.64241080e-11 7.24256117e-10 3.14742063e-10
7.61972204e-11 5.37385464e-12 9.95651329e-10 3.60248417e-11
2.72978183e-10 8.13418399e-10]
[1.31106021e-08 1.68269789e-09 1.41976698e-12 1.24839735e-09
1.01225038e-10 7.13672248e-10 7.29570573e-10 5.94209660e-10
8.83521541e-10 3.54726747e-09]
[1.58578450e-08 2.24830524e-09 9.38519274e-10 7.93619753e-10
1.60461793e-09 8.71394638e-11 2.78801146e-11 8.76181712e-10
1.17759818e-11 4.85991003e-11]
[3.84441473e-07 4.13722284e-08 9.59993031e-08 6.41039943e-09
8.56369005e-08 6.28874080e-08 8.17380740e-09 1.81138661e-09
1.28258368e-08 2.46267268e-10]
[6.88263185e-09 1.81709385e-08 8.36652712e-09 6.27918880e-10
6.66019447e-09 6.49707102e-12 8.55926906e-11 3.75015783e-09
1.12116728e-11 3.58434755e-10]]
478 [[1.86029790e-09 3.15868410e-09 2.26329186e-09 1.88882431e-11
9.43309624e-10 5.06322204e-10 2.35784793e-11 4.13791552e-10
1.32343046e-11 1.59628588e-10]
[2.55021137e-08 5.34835005e-09 1.20307741e-08 1.79740281e-09
3.75642143e-09 4.36625130e-09 2.97354095e-10 6.05221260e-10
4.73720460e-11 1.95970973e-10]
[2.37457716e-08 3.77676477e-10 3.32617909e-09 1.45583605e-09
1.12684179e-09 2.16734530e-09 1.18076696e-09 4.60998459e-10
1.23073545e-09 2.27957445e-10]
[6.89989543e-09 5.93777998e-11 4.00291503e-10 6.36774093e-10
8.25900194e-13 5.76537166e-12 5.21452900e-10 1.29410210e-11
3.65723388e-11 3.57471699e-10]
[9.99187791e-09 1.23758735e-09 3.71407257e-09 4.57537590e-10
1.30548948e-09 3.79547179e-10 2.23975679e-10 1.58798345e-10
5.41949797e-11 2.77919300e-12]
[2.55019424e-10 8.87948802e-11 5.08729251e-10 1.65712024e-12
1.14133344e-10 1.27012680e-10 1.15313270e-10 1.72203200e-11
3.45721170e-12 3.55807658e-11]
[7.21222367e-09 9.29978599e-11 8.60277116e-11 9.02391584e-11
3.64971731e-11 2.80790256e-11 1.17285948e-09 9.13338649e-12
5.20689414e-10 5.55636035e-11]
[1.20184560e-10 1.07552379e-10 2.80407781e-11 2.19293502e-11
8.88517709e-13 7.32270735e-12 7.59642266e-11 6.97228008e-12
2.68762113e-12 2.47479616e-11]
[2.88261229e-10 2.48628366e-09 3.01019183e-09 1.89617650e-13
2.16722498e-09 5.73894719e-10 4.56203446e-10 4.07295543e-10
1.13740403e-10 8.63463080e-10]
[2.04073921e-10 4.98941846e-10 2.12297060e-09 2.87801664e-11
1.15329228e-09 8.11843775e-10 1.42712775e-11 1.36308250e-10
1.33804558e-11 5.50352740e-10]
[3.29273176e-12 4.84640999e-10 8.83360762e-10 7.85195325e-15
1.27189305e-10 1.46536729e-10 1.01203238e-10 5.60787649e-11
2.87359422e-11 2.79978454e-10]
[2.95255178e-10 4.39003826e-11 8.95967142e-12 1.63218456e-11
4.43794977e-12 1.14283753e-11 3.46484654e-12 1.48519541e-12
2.50562375e-11 4.21453408e-11]
[2.00171826e-11 7.92554198e-09 1.81340360e-09 2.57128771e-11
1.14487917e-09 1.94374478e-10 1.04955688e-09 9.91176970e-10
7.83472265e-10 4.51901592e-10]
[1.87005127e-09 1.03897532e-09 1.30865364e-09 5.76414033e-11
5.42000327e-10 3.98384540e-10 2.43023312e-11 1.91770221e-10
3.65265975e-13 7.78680291e-11]
[4.65952829e-10 1.87635775e-09 1.15946724e-09 7.86659188e-11
9.25028847e-10 5.64669817e-11 1.87866104e-11 1.69192649e-09
1.82900218e-10 9.05043421e-10]
[1.12083709e-10 1.37753087e-10 6.10492815e-12 7.53494165e-13
4.19186770e-11 7.30793419e-11 2.15078743e-11 1.62542769e-10
1.16903385e-10 2.69770141e-10]
[6.04097170e-09 5.91605597e-09 7.76730924e-09 1.97923837e-10
4.44085570e-09 4.26182581e-09 1.59578567e-10 6.37531234e-10
6.70325937e-10 1.63863144e-09]
[9.28363262e-09 1.01017808e-08 9.57325251e-09 4.12980928e-10
2.83749903e-09 9.37409564e-10 1.88907118e-10 1.31906790e-09
1.45830354e-11 1.56482930e-10]
[2.62750684e-08 8.62206154e-08 7.98845428e-08 9.22584660e-10
2.04752921e-08 5.75796150e-09 6.13865415e-08 1.35621734e-08
1.22200235e-08 1.21452546e-08]
[9.15118834e-10 1.13771945e-08 2.89017262e-09 5.01927742e-10
2.09217572e-09 4.50265625e-10 2.28950728e-10 3.11414315e-09
1.59256324e-09 6.23504694e-09]
[1.84854805e-09 9.62967065e-11 7.07150591e-12 3.08042192e-12
1.69188311e-11 2.21461739e-10 5.48601588e-11 1.73278733e-10
5.80475404e-11 1.95147298e-12]
[1.84369443e-08 1.84634092e-08 1.70032506e-08 3.71439469e-11
1.18728354e-08 4.38724020e-10 8.26569826e-11 5.44963069e-09
8.45588005e-10 4.85134261e-10]
[3.50677339e-10 4.21219496e-10 8.65754059e-11 2.15393929e-12
3.63521279e-11 8.54805003e-12 7.92747239e-10 4.80637489e-14
9.42039696e-11 4.36405517e-11]
[2.10096332e-09 1.25733986e-11 7.62424939e-11 2.28437751e-10
2.42945276e-10 1.33236125e-16 4.46644550e-12 3.15806798e-12
1.85935241e-12 2.37866459e-11]
[1.17731852e-09 1.62252253e-10 1.91981518e-10 3.56926962e-11
1.30050569e-14 2.71010450e-12 1.41101400e-10 1.26261811e-10
1.15206802e-10 1.56603749e-10]
[1.89784939e-09 3.74138032e-09 7.09134534e-10 3.80353830e-10
1.03582893e-10 1.79624539e-12 7.21091185e-10 5.37985881e-10
4.96748621e-10 1.89737861e-09]
[6.03270146e-09 1.26799043e-09 7.42451184e-10 6.25875307e-11
1.17228041e-09 3.39453991e-10 6.75668443e-11 1.70571062e-11
1.07973823e-11 2.12046858e-11]
[6.68130407e-10 2.08791431e-10 5.40156276e-10 1.73302179e-11
1.12845987e-10 1.29473382e-12 2.04343963e-10 2.16495227e-11
1.29276520e-10 9.69622444e-15]
[2.01919145e-08 1.94213104e-09 2.43452090e-09 7.74164326e-10
9.34782164e-10 4.84113604e-10 1.00936305e-09 5.48982101e-10
1.92029429e-11 1.47612390e-10]
[9.14896441e-10 1.28482130e-09 2.68554225e-09 7.55375380e-11
1.02279625e-09 1.66438028e-09 3.72692168e-10 1.75457770e-10
1.16903447e-11 2.45379597e-09]
[4.97856787e-09 6.01640084e-09 5.51827970e-09 2.84004528e-10
9.33496199e-10 1.91578080e-10 3.20980065e-09 9.85005450e-10
7.07158471e-10 2.68538846e-09]
[2.34364710e-09 1.51633178e-09 3.97477490e-09 1.77006412e-11
4.18651419e-10 5.58219278e-10 3.93478021e-11 1.04597713e-10
1.33535281e-12 2.12357820e-11]
[2.99748004e-09 3.97656417e-09 8.69055008e-10 1.45040809e-10
3.23600695e-10 2.81358060e-11 8.56602276e-11 4.80006585e-10
3.71443985e-10 9.24358632e-12]
[3.25321096e-10 7.54393052e-11 2.14531338e-10 1.37457654e-11
1.23222050e-13 4.88392053e-13 6.76924846e-11 4.08692072e-11
2.49223770e-11 6.60640934e-11]
[7.35273938e-09 2.13085101e-11 1.38826305e-09 3.24275556e-10
7.54353646e-10 1.18000506e-09 7.94939728e-10 3.51690383e-11
3.41284969e-10 1.47143727e-10]
[3.62718203e-08 1.00999486e-08 9.11492598e-09 1.01404056e-09
7.07378663e-09 9.40002913e-10 8.28878528e-10 8.24373365e-10
4.96747999e-10 1.59679141e-09]
[3.57692685e-10 1.46918217e-08 4.30373255e-09 4.90761219e-12
2.02724316e-09 2.84007693e-10 2.70224287e-09 3.50623006e-09
8.85920021e-10 5.44741561e-09]
[6.54123655e-09 7.99238406e-10 9.26474400e-10 5.91804212e-10
3.60991877e-10 1.76773412e-11 1.10066361e-10 2.20359327e-10
4.64291660e-11 2.46081104e-12]
[1.47150818e-09 2.10932659e-09 2.10408510e-09 6.74485104e-11
1.83548514e-09 3.24050109e-10 8.29248994e-11 4.71606880e-10
5.38306427e-11 5.53705119e-10]
[3.59902212e-09 9.47841472e-10 2.19953531e-10 5.42995079e-10
1.56734059e-10 7.11498722e-10 1.20784133e-09 2.35510483e-10
8.99207357e-10 3.29474657e-11]
[2.79476526e-10 1.80901316e-09 6.33537101e-10 9.61513004e-11
5.79909601e-10 3.11538473e-13 9.28060242e-10 1.02899434e-09
5.95827021e-10 2.71833644e-10]
[3.39408327e-09 7.41814856e-10 3.02430958e-09 4.52469748e-12
1.98590274e-09 7.84168470e-10 1.42233739e-11 2.88565219e-11
3.86636986e-10 1.73760818e-10]
[1.46718084e-08 1.41839009e-08 3.99370360e-09 1.83323208e-11
2.75870148e-09 3.36521293e-09 6.91531791e-09 5.28841864e-09
1.78826437e-09 9.50231095e-10]
[6.00686500e-09 1.82024349e-09 3.50595586e-10 2.25358571e-10
4.01204863e-10 2.80189604e-10 1.49618281e-09 7.65486780e-10
8.68425195e-12 2.00461462e-10]
[1.16272713e-09 1.24378409e-09 1.01394209e-10 8.77550948e-12
1.37819603e-10 8.83501718e-11 2.10510405e-11 4.45797544e-10
5.55404160e-11 1.07652004e-10]
[1.41452187e-10 3.82653588e-11 2.69051848e-10 4.04025972e-11
2.05930155e-10 1.55364456e-11 1.03717819e-10 3.39989666e-11
4.86792169e-11 1.84536939e-11]
[1.08400862e-11 1.01163552e-13 3.16841762e-11 7.04394978e-12
1.30761376e-11 6.05231009e-12 9.94803738e-11 5.01917304e-14
4.30935548e-11 2.57505350e-13]
[4.10006205e-09 3.04915165e-09 4.33230073e-09 6.71558245e-12
1.43957496e-09 1.72481190e-09 3.56503793e-11 5.25380785e-10
5.51129509e-10 6.30896737e-10]
[2.81508560e-10 8.75889522e-10 5.45467896e-10 2.28298649e-11
7.28501641e-10 1.19653885e-10 4.70228767e-11 2.06343441e-10
5.63699004e-11 6.66871249e-10]
[1.20077206e-08 1.17647098e-09 2.13685993e-09 6.64181786e-10
2.18576528e-09 3.58351533e-11 1.34578226e-10 7.71993989e-10
9.85562930e-10 2.19303875e-13]
[5.09107766e-08 7.66312711e-09 1.91913809e-08 5.04985071e-09
7.58319193e-09 6.27096688e-12 8.97078499e-12 1.09654893e-09
1.61866165e-09 1.09231823e-11]
[4.66207289e-10 1.93045719e-08 8.98723489e-09 3.01400140e-10
1.02095171e-08 1.41159498e-09 5.11543923e-11 3.77621984e-09
3.90314552e-10 9.26119324e-09]
[1.58908920e-10 4.08679837e-10 2.46748505e-09 4.94293869e-11
1.05654027e-09 9.42072787e-10 1.06856837e-15 9.91491383e-12
1.06260141e-12 8.45054571e-10]
[7.15168051e-09 8.16388005e-09 1.81508092e-08 2.01451307e-14
5.14950810e-09 4.31815058e-09 4.34995254e-10 2.67824645e-10
4.50043495e-10 4.09362942e-14]
[4.75681366e-10 1.19308576e-09 1.97279822e-10 6.35994927e-11
6.27602965e-11 8.88381688e-12 5.22266709e-10 1.45268414e-10
3.61683625e-10 7.15829599e-11]
[4.98327904e-09 2.85176877e-10 1.06928733e-09 2.42707482e-10
3.02309446e-10 2.10890733e-10 3.92881733e-11 7.70034355e-12
3.76172611e-11 8.75227174e-13]
[3.01176979e-08 7.26651252e-09 7.33478280e-09 4.59725373e-10
2.75688205e-09 1.73448359e-09 3.18897561e-10 1.80109168e-09
2.88519555e-11 1.47462727e-11]
[1.15655981e-08 2.28189053e-09 4.01682473e-09 1.68162092e-10
1.37053169e-10 1.29369911e-09 4.71288334e-12 4.79436012e-10
7.55916881e-10 1.24478150e-10]
[3.50126809e-09 1.32723684e-09 1.57892781e-09 3.43608428e-10
1.18098164e-09 1.77120116e-09 1.26695777e-10 2.07548549e-10
4.04396346e-10 1.28675748e-12]
[1.18359042e-08 4.46034332e-11 6.91012225e-10 2.99371558e-10
7.21838642e-11 5.08610731e-12 9.47822219e-10 3.44939292e-11
2.59666965e-10 7.73061394e-10]
[1.24828312e-08 1.59736058e-09 1.54283206e-12 1.18799243e-09
9.81180451e-11 6.82018051e-10 6.93272494e-10 5.63762982e-10
8.41458481e-10 3.36909597e-09]
[1.51127983e-08 2.13436602e-09 8.94604968e-10 7.56629759e-10
1.52795369e-09 8.18494636e-11 2.73116624e-11 8.33389499e-10
1.15545744e-11 4.61610877e-11]
[3.67084610e-07 3.95042004e-08 9.16821496e-08 6.11632118e-09
8.17914622e-08 6.00609891e-08 7.80620630e-09 1.73087570e-09
1.22488440e-08 2.35120504e-10]
[6.54623860e-09 1.72609314e-08 7.94532818e-09 5.96729805e-10
6.32858312e-09 6.27143107e-12 8.10311736e-11 3.56205506e-09
1.06140591e-11 3.39169606e-10]]
479 [[1.77687100e-09 3.01291345e-09 2.16210654e-09 1.82144306e-11
8.98021014e-10 4.83378670e-10 2.25605015e-11 3.94660411e-10
1.26266349e-11 1.51342381e-10]
[2.43104781e-08 5.12128431e-09 1.15083937e-08 1.71259749e-09
3.59089639e-09 4.17904236e-09 2.83694180e-10 5.79556534e-10
4.52387946e-11 1.89553953e-10]
[2.26770025e-08 3.62797171e-10 3.18423913e-09 1.39056533e-09
1.08091282e-09 2.07641047e-09 1.12514480e-09 4.40400407e-10
1.17580623e-09 2.16475808e-10]
[6.57280550e-09 5.56318369e-11 3.79556624e-10 6.07126228e-10
7.69443119e-13 5.50914057e-12 4.98359207e-10 1.20567260e-11
3.52185437e-11 3.41708375e-10]
[9.53408482e-09 1.17657513e-09 3.53569571e-09 4.36586897e-10
1.24303449e-09 3.62319048e-10 2.13511998e-10 1.50369859e-10
5.12428900e-11 2.44499942e-12]
[2.44131933e-10 8.41857296e-11 4.84011974e-10 1.62934994e-12
1.08521457e-10 1.20468896e-10 1.10006647e-10 1.64480705e-11
3.36801281e-12 3.34705566e-11]
[6.88265932e-09 8.81302148e-11 8.29331892e-11 8.59480019e-11
3.52182453e-11 2.69764554e-11 1.11993165e-09 8.55056827e-12
4.96565970e-10 5.22468927e-11]
[1.15844743e-10 1.03042742e-10 2.67844336e-11 2.06060123e-11
8.45791108e-13 7.02029167e-12 7.26654886e-11 6.64791257e-12
2.64162368e-12 2.33338212e-11]
[2.79799792e-10 2.37445668e-09 2.88334720e-09 1.79569710e-13
2.07599067e-09 5.50455235e-10 4.32946812e-10 3.89898064e-10
1.08390206e-10 8.28511159e-10]
[1.95798343e-10 4.75376932e-10 2.02638164e-09 2.73227839e-11
1.10048850e-09 7.75171389e-10 1.35317946e-11 1.29967589e-10
1.29348821e-11 5.24351215e-10]
[3.28602841e-12 4.63884157e-10 8.44924177e-10 7.08609273e-15
1.21659497e-10 1.39976945e-10 9.63920088e-11 5.36496667e-11
2.75760961e-11 2.67249973e-10]
[2.85343474e-10 4.16119164e-11 8.87257135e-12 1.59104363e-11
4.11656494e-12 1.13150345e-11 3.51547547e-12 1.46211163e-12
2.44164763e-11 4.02344408e-11]
[1.95571439e-11 7.55380732e-09 1.73300033e-09 2.45322691e-11
1.09279685e-09 1.86500405e-10 9.98492780e-10 9.45301280e-10
7.46326897e-10 4.31437208e-10]
[1.78905057e-09 9.94529520e-10 1.25283440e-09 5.52707735e-11
5.18531895e-10 3.80826734e-10 2.31832764e-11 1.83891024e-10
3.48889961e-13 7.45647526e-11]
[4.43702503e-10 1.78750221e-09 1.10823750e-09 7.54830054e-11
8.82059667e-10 5.32623059e-11 1.75605685e-11 1.61391203e-09
1.73682263e-10 8.65142123e-10]
[1.06761814e-10 1.30956032e-10 5.74507304e-12 7.12084913e-13
3.97104641e-11 6.96547412e-11 2.06592909e-11 1.54806267e-10
1.10967514e-10 2.56527671e-10]
[5.78223247e-09 5.64496548e-09 7.41325735e-09 1.90317643e-10
4.23390532e-09 4.06672661e-09 1.52969151e-10 6.07703190e-10
6.41822038e-10 1.55762195e-09]
[8.81729529e-09 9.63157843e-09 9.12013279e-09 3.91304631e-10
2.70285765e-09 8.90839492e-10 1.81586402e-10 1.25866390e-09
1.41459200e-11 1.49449768e-10]
[2.50289341e-08 8.23538627e-08 7.63452568e-08 8.80055191e-10
1.95504718e-08 5.50427882e-09 5.85750543e-08 1.29562994e-08
1.16590782e-08 1.15956602e-08]
[8.63005018e-10 1.08368421e-08 2.76076186e-09 4.77504387e-10
1.99216175e-09 4.30681989e-10 2.17083640e-10 2.97231032e-09
1.51521441e-09 5.94191521e-09]
[1.75845927e-09 9.09722516e-11 6.77737439e-12 2.93034217e-12
1.61790358e-11 2.10009754e-10 5.27587251e-11 1.65301353e-10
5.48395777e-11 1.91932910e-12]
[1.75967550e-08 1.75855575e-08 1.62146802e-08 3.58605315e-11
1.13208977e-08 4.20437076e-10 7.79289840e-11 5.19095000e-09
8.03173587e-10 4.60930399e-10]
[3.36057794e-10 4.08944505e-10 8.40910711e-11 2.15431241e-12
3.51667270e-11 7.92710475e-12 7.63114980e-10 3.16465133e-14
9.16029016e-11 4.15832094e-11]
[1.99602688e-09 1.23713030e-11 7.32862358e-11 2.16593457e-10
2.33625345e-10 2.03997280e-15 4.29794095e-12 3.17833730e-12
1.63343283e-12 2.16434168e-11]
[1.12013664e-09 1.53835346e-10 1.83201937e-10 3.41757743e-11
1.83787441e-14 2.46431417e-12 1.33596965e-10 1.20164005e-10
1.08870493e-10 1.48280965e-10]
[1.81210143e-09 3.56901116e-09 6.77445068e-10 3.62678308e-10
9.88800165e-11 1.72346958e-12 6.88055286e-10 5.13542641e-10
4.73723540e-10 1.81159535e-09]
[5.76318087e-09 1.21116190e-09 7.11559661e-10 5.96052964e-11
1.12078002e-09 3.25295101e-10 6.46256877e-11 1.62949719e-11
1.03682064e-11 2.03899848e-11]
[6.34128689e-10 1.99290577e-10 5.13623412e-10 1.63283828e-11
1.07370674e-10 1.24682617e-12 1.95091917e-10 2.04431776e-11
1.23863327e-10 8.16363937e-15]
[1.92524645e-08 1.85697721e-09 2.32393697e-09 7.36684475e-10
8.93007761e-10 4.63167306e-10 9.63681834e-10 5.22740990e-10
1.79546477e-11 1.41456038e-10]
[8.63008372e-10 1.23206558e-09 2.57006700e-09 7.16812383e-11
9.77348542e-10 1.58753574e-09 3.54194445e-10 1.68230637e-10
1.10705045e-11 2.33770911e-09]
[4.74062843e-09 5.73188150e-09 5.25442491e-09 2.70494372e-10
8.89125148e-10 1.82137286e-10 3.06006233e-09 9.37024818e-10
6.74994433e-10 2.55570955e-09]
[2.23265575e-09 1.44437292e-09 3.78672757e-09 1.70346420e-11
3.99238006e-10 5.32214733e-10 3.75513653e-11 9.94197409e-11
1.31316199e-12 2.00643093e-11]
[2.85657023e-09 3.79049320e-09 8.28453009e-10 1.37976615e-10
3.08518370e-10 2.67941251e-11 8.16500805e-11 4.57209737e-10
3.54386824e-10 8.85271519e-12]
[3.06246650e-10 7.39257630e-11 2.06777940e-10 1.28098951e-11
9.36141154e-14 4.35319393e-13 6.46803579e-11 3.96037233e-11
2.38804054e-11 6.41924582e-11]
[7.02457622e-09 2.02580606e-11 1.32314638e-09 3.09689591e-10
7.19404906e-10 1.12439489e-09 7.58257250e-10 3.34027851e-11
3.25217019e-10 1.39755692e-10]
[3.46125105e-08 9.64070375e-09 8.71073641e-09 9.67166113e-10
6.75838188e-09 9.01052194e-10 7.92792310e-10 7.86437348e-10
4.76108899e-10 1.52864839e-09]
[3.40096889e-10 1.40306044e-08 4.11407415e-09 4.73239440e-12
1.93643284e-09 2.71199214e-10 2.57935838e-09 3.35036354e-09
8.45169243e-10 5.20083453e-09]
[6.21883592e-09 7.64253912e-10 8.85787715e-10 5.61824069e-10
3.45505230e-10 1.65286215e-11 1.05000625e-10 2.11001082e-10
4.42931334e-11 2.11143048e-12]
[1.40308255e-09 2.00464373e-09 2.00023897e-09 6.46262788e-11
1.74504616e-09 3.08855309e-10 7.86596418e-11 4.47211311e-10
5.07909265e-11 5.24450765e-10]
[3.42260775e-09 8.95442839e-10 2.10715431e-10 5.16079140e-10
1.50184866e-10 6.76990149e-10 1.14389619e-09 2.22904366e-10
8.53188857e-10 3.18648089e-11]
[2.65402675e-10 1.70968777e-09 5.97854318e-10 9.11261924e-11
5.49578099e-10 2.90627653e-13 8.78299915e-10 9.73388445e-10
5.64359370e-10 2.57429784e-10]
[3.24221640e-09 7.04936146e-10 2.88141893e-09 4.34845369e-12
1.89249269e-09 7.47540862e-10 1.35896318e-11 2.72703715e-11
3.68467223e-10 1.64567049e-10]
[1.39570213e-08 1.35172850e-08 3.82363414e-09 1.71305145e-11
2.63181356e-09 3.18583247e-09 6.57375083e-09 5.04691451e-09
1.70083676e-09 9.16784398e-10]
[5.71901855e-09 1.73809859e-09 3.41165010e-10 2.12947900e-10
3.85506360e-10 2.61089927e-10 1.42099520e-09 7.34423798e-10
8.20160095e-12 1.83940184e-10]
[1.11068010e-09 1.17782050e-09 9.63068714e-11 8.24460077e-12
1.30862121e-10 8.34304746e-11 2.08230695e-11 4.24652085e-10
5.21342101e-11 1.02359237e-10]
[1.34510099e-10 3.68007328e-11 2.55340781e-10 3.84105325e-11
1.95078519e-10 1.46656886e-11 9.89828984e-11 3.24432055e-11
4.64810087e-11 1.73337725e-11]
[1.01860508e-11 7.94361236e-14 3.03677026e-11 6.76261610e-12
1.25290073e-11 5.71711130e-12 9.48183848e-11 5.52043311e-14
4.08705670e-11 2.58790794e-13]
[3.92658640e-09 2.91074684e-09 4.13931561e-09 6.38833470e-12
1.37453312e-09 1.64667382e-09 3.41310933e-11 5.01606053e-10
5.26209354e-10 6.01304037e-10]
[2.65162928e-10 8.32561309e-10 5.18732991e-10 2.16472440e-11
6.91739537e-10 1.13922198e-10 4.41156417e-11 1.95660961e-10
5.31115414e-11 6.31757598e-10]
[1.14328163e-08 1.12392736e-09 2.04171820e-09 6.32092914e-10
2.08713272e-09 3.36248350e-11 1.27940791e-10 7.36852277e-10
9.38822696e-10 1.53764336e-13]
[4.85009591e-08 7.29841211e-09 1.82906983e-08 4.80726543e-09
7.22943936e-09 5.68785573e-12 8.71183189e-12 1.04380886e-09
1.54101426e-09 1.04938923e-11]
[4.43235619e-10 1.83803422e-08 8.55465203e-09 2.86582470e-10
9.70794060e-09 1.34191634e-09 4.90963735e-11 3.59471094e-09
3.71905611e-10 8.80631755e-09]
[1.48655029e-10 3.92997697e-10 2.36031218e-09 4.70068017e-11
1.00838178e-09 8.96799618e-10 2.19831009e-15 9.63983172e-12
9.16061789e-13 8.04747481e-10]
[6.83105881e-09 7.80845058e-09 1.73443324e-08 2.54659020e-14
4.92471471e-09 4.12406806e-09 4.15476646e-10 2.56499158e-10
4.29097826e-10 2.91656264e-14]
[4.50182288e-10 1.13435234e-09 1.85664182e-10 6.14669511e-11
5.97784256e-11 8.63387387e-12 4.96607449e-10 1.37329885e-10
3.43557279e-10 6.78655961e-11]
[4.75434382e-09 2.72425327e-10 1.02127304e-09 2.31327866e-10
2.88264543e-10 2.01671245e-10 3.75890895e-11 7.30864801e-12
3.58604834e-11 8.63386555e-13]
[2.87127861e-08 6.92073394e-09 6.99505462e-09 4.37822696e-10
2.63067084e-09 1.65661236e-09 3.05347829e-10 1.71649498e-09
2.78615858e-11 1.42831028e-11]
[1.10228129e-08 2.16919137e-09 3.82510543e-09 1.60429404e-10
1.30673648e-10 1.23303704e-09 4.63360761e-12 4.56116367e-10
7.21273103e-10 1.18335104e-10]
[3.34265897e-09 1.26048329e-09 1.49841695e-09 3.29043055e-10
1.12464062e-09 1.68571460e-09 1.20497179e-10 1.96269853e-10
3.86124280e-10 1.11752018e-12]
[1.12690024e-08 4.29006935e-11 6.59400957e-10 2.85193093e-10
6.83512340e-11 4.79045500e-12 9.02113354e-10 3.31322153e-11
2.47386784e-10 7.34455841e-10]
[1.18886975e-08 1.51653146e-09 1.64385255e-12 1.12956578e-09
9.50400889e-11 6.51354756e-10 6.59094546e-10 5.35438252e-10
8.00310935e-10 3.19992886e-09]
[1.43985450e-08 2.02669961e-09 8.52871182e-10 7.22177361e-10
1.45521086e-09 7.67811353e-11 2.66846397e-11 7.93223777e-10
1.14223604e-11 4.38074114e-11]
[3.50506510e-07 3.77233111e-08 8.75506826e-08 5.83670354e-09
7.81199251e-08 5.73621273e-08 7.45607579e-09 1.65339422e-09
1.17001237e-08 2.24059856e-10]
[6.22671212e-09 1.63954517e-08 7.54421666e-09 5.66844761e-10
6.01364331e-09 6.08527272e-12 7.67780404e-11 3.38216841e-09
1.00969360e-11 3.20740765e-10]]
480 [[1.69926731e-09 2.87474207e-09 2.06229239e-09 1.73330960e-11
8.55326196e-10 4.61078602e-10 2.14950254e-11 3.76054394e-10
1.18293887e-11 1.43416161e-10]
[2.31728752e-08 4.90426645e-09 1.10076221e-08 1.63215425e-09
3.43287613e-09 3.99971283e-09 2.70645239e-10 5.55191553e-10
4.30272429e-11 1.83345919e-10]
[2.16537487e-08 3.47844636e-10 3.04946429e-09 1.32716103e-09
1.03313542e-09 1.98075822e-09 1.07358072e-09 4.19169673e-10
1.12364370e-09 2.07602518e-10]
[6.26333572e-09 5.20862524e-11 3.59843529e-10 5.78329563e-10
7.17390932e-13 5.26430285e-12 4.76376215e-10 1.11753954e-11
3.37903460e-11 3.26651435e-10]
[9.09924838e-09 1.11845094e-09 3.36563786e-09 4.16231287e-10
1.18353379e-09 3.45826153e-10 2.03633545e-10 1.42251751e-10
4.85837791e-11 2.14396660e-12]
[2.34135440e-10 7.98375684e-11 4.60101514e-10 1.56668896e-12
1.03240739e-10 1.14223420e-10 1.05041094e-10 1.56550798e-11
3.24721170e-12 3.14701343e-11]
[6.56666854e-09 8.34851658e-11 7.99492714e-11 8.20421233e-11
3.40143301e-11 2.59645717e-11 1.06940486e-09 7.94938223e-12
4.73952964e-10 4.91747389e-11]
[1.11881085e-10 9.87534931e-11 2.56083900e-11 1.94474006e-11
8.03586624e-13 6.72055906e-12 6.95544049e-11 6.36971471e-12
2.56818535e-12 2.19838877e-11]
[2.71091490e-10 2.26763061e-09 2.76226764e-09 1.80229570e-13
1.98799114e-09 5.27615095e-10 4.10888058e-10 3.73582578e-10
1.03139754e-10 7.94918352e-10]
[1.88219616e-10 4.52884753e-10 1.93380968e-09 2.60772039e-11
1.05010182e-09 7.40017490e-10 1.28132123e-11 1.23717392e-10
1.24113845e-11 4.99629003e-10]
[3.31615134e-12 4.43997927e-10 8.08081611e-10 7.90029420e-15
1.16503422e-10 1.33915777e-10 9.18235720e-11 5.12166406e-11
2.63931108e-11 2.55127318e-10]
[2.76274079e-10 3.94284497e-11 8.75430246e-12 1.53729190e-11
3.81619759e-12 1.11747119e-11 3.56550940e-12 1.46566337e-12
2.36943151e-11 3.84310669e-11]
[1.89925082e-11 7.20029053e-09 1.65607630e-09 2.34965714e-11
1.04322165e-09 1.78933302e-10 9.50128502e-10 9.01922488e-10
7.10342659e-10 4.11999830e-10]
[1.71254590e-09 9.51909827e-10 1.19920388e-09 5.28705382e-11
4.96120035e-10 3.64550855e-10 2.21093609e-11 1.76105341e-10
3.37832543e-13 7.15108846e-11]
[4.21998504e-10 1.70313788e-09 1.05922454e-09 7.22249190e-11
8.41177592e-10 5.02203432e-11 1.64262631e-11 1.54015212e-09
1.64655781e-10 8.27068608e-10]
[1.01829033e-10 1.24409781e-10 5.42572135e-12 6.81096471e-13
3.75894753e-11 6.63905965e-11 1.98555682e-11 1.47312947e-10
1.05626221e-10 2.43917091e-10]
[5.53652779e-09 5.38581651e-09 7.07493391e-09 1.82670272e-10
4.03654943e-09 3.88066766e-09 1.46748185e-10 5.79036694e-10
6.13957424e-10 1.48038018e-09]
[8.37464714e-09 9.18251183e-09 8.68988612e-09 3.70800577e-10
2.57438506e-09 8.46666432e-10 1.74561341e-10 1.20085835e-09
1.38061753e-11 1.42721559e-10]
[2.38401160e-08 7.86594792e-08 7.29616933e-08 8.39866955e-10
1.86673469e-08 5.26154579e-09 5.58919228e-08 1.23765018e-08
1.11252655e-08 1.10709042e-08]
[8.13353753e-10 1.03221928e-08 2.63743709e-09 4.54902223e-10
1.89647853e-09 4.11650175e-10 2.06040896e-10 2.83546481e-09
1.44235041e-09 5.66266259e-09]
[1.67391322e-09 8.58811301e-11 6.48565746e-12 2.74548753e-12
1.54622146e-11 1.99211612e-10 5.07871362e-11 1.57478288e-10
5.19852301e-11 1.88314341e-12]
[1.67981822e-08 1.67471922e-08 1.54632155e-08 3.45209153e-11
1.07937481e-08 4.02739305e-10 7.33357344e-11 4.94519488e-09
7.62189846e-10 4.37899914e-10]
[3.22344042e-10 3.97110373e-10 8.16250845e-11 2.13082311e-12
3.40698582e-11 7.37611153e-12 7.34732817e-10 1.77493342e-14
8.88469043e-11 3.96099622e-11]
[1.89703002e-09 1.20444212e-11 7.09909065e-11 2.05533804e-10
2.24417799e-10 7.79778710e-15 4.13828844e-12 3.20511992e-12
1.54801201e-12 1.98259186e-11]
[1.06662120e-09 1.45796277e-10 1.74791808e-10 3.25769874e-11
2.39627641e-14 2.24144244e-12 1.26440301e-10 1.14177097e-10
1.03112200e-10 1.40448551e-10]
[1.72911082e-09 3.40423976e-09 6.46974173e-10 3.46281042e-10
9.43477229e-11 1.64328414e-12 6.56238567e-10 4.89950750e-10
4.52287267e-10 1.72960855e-09]
[5.50362514e-09 1.15717636e-09 6.81927389e-10 5.69537927e-11
1.07157802e-09 3.11757352e-10 6.17645075e-11 1.56237688e-11
1.00165129e-11 1.96252039e-11]
[6.03393342e-10 1.90587236e-10 4.89541561e-10 1.53965524e-11
1.01937026e-10 1.14709374e-12 1.86287439e-10 1.96436485e-11
1.18685834e-10 2.26163468e-15]
[1.83533525e-08 1.77472370e-09 2.21981762e-09 7.01997941e-10
8.53104001e-10 4.43336689e-10 9.19972336e-10 4.98822381e-10
1.71900320e-11 1.35571148e-10]
[8.14776226e-10 1.18157453e-09 2.45967615e-09 6.78303783e-11
9.34018407e-10 1.51442160e-09 3.36675430e-10 1.61496331e-10
1.04159356e-11 2.22726103e-09]
[4.51551894e-09 5.46103217e-09 5.00354751e-09 2.57314671e-10
8.46780547e-10 1.73122365e-10 2.91745842e-09 8.91795493e-10
6.43883396e-10 2.43234690e-09]
[2.12834732e-09 1.37554774e-09 3.60707450e-09 1.63020304e-11
3.80720683e-10 5.07370713e-10 3.57568069e-11 9.43591148e-11
1.26795392e-12 1.89635027e-11]
[2.72100594e-09 3.61341654e-09 7.89924458e-10 1.31519479e-10
2.94242906e-10 2.54650129e-11 7.79106242e-11 4.35745589e-10
3.37675815e-10 8.48820198e-12]
[2.88681061e-10 7.23989606e-11 1.99170642e-10 1.18552235e-11
6.85108933e-14 3.87973925e-13 6.17510422e-11 3.82946828e-11
2.29904448e-11 6.23340597e-11]
[6.70848126e-09 1.92312580e-11 1.26130651e-09 2.96215939e-10
6.86177769e-10 1.07162270e-09 7.22957252e-10 3.18055154e-11
3.10355940e-10 1.32817699e-10]
[3.30324096e-08 9.20210912e-09 8.32394985e-09 9.21990632e-10
6.45699933e-09 8.63643515e-10 7.58435916e-10 7.49985468e-10
4.55975353e-10 1.46334788e-09]
[3.22965408e-10 1.33979323e-08 3.93274399e-09 4.52238758e-12
1.84923164e-09 2.58769768e-10 2.46168425e-09 3.20035601e-09
8.06775306e-10 4.96593184e-09]
[5.91322524e-09 7.30683092e-10 8.47133840e-10 5.33033461e-10
3.30461394e-10 1.55002871e-11 1.00210715e-10 2.01821473e-10
4.24276148e-11 1.79959098e-12]
[1.33829096e-09 1.90475637e-09 1.90203701e-09 6.18471432e-11
1.65874657e-09 2.94315155e-10 7.45727627e-11 4.23946957e-10
4.82077100e-11 4.96675040e-10]
[3.25628902e-09 8.46151204e-10 2.01804872e-10 4.90036770e-10
1.43776380e-10 6.43834778e-10 1.08368487e-09 2.11157085e-10
8.08864282e-10 3.08019328e-11]
[2.52326556e-10 1.61594818e-09 5.64065569e-10 8.62286181e-11
5.20660299e-10 2.79249390e-13 8.31091307e-10 9.21371121e-10
5.34229220e-10 2.43729251e-10]
[3.09582626e-09 6.70045774e-10 2.74542987e-09 4.22217064e-12
1.80359000e-09 7.12710412e-10 1.29592882e-11 2.58365556e-11
3.51567593e-10 1.55885985e-10]
[1.32750513e-08 1.28824891e-08 3.66091459e-09 1.60660138e-11
2.51090665e-09 3.01585867e-09 6.24958347e-09 4.81709823e-09
1.61703838e-09 8.84482413e-10]
[5.44646976e-09 1.65950843e-09 3.31835017e-10 2.00981788e-10
3.70384465e-10 2.43292979e-10 1.34937967e-09 7.04317424e-10
7.79526119e-12 1.68693562e-10]
[1.06016976e-09 1.11556475e-09 9.14854425e-11 7.68707843e-12
1.24283905e-10 7.87675697e-11 2.05520690e-11 4.04793702e-10
4.87606959e-11 9.73419120e-11]
[1.28112958e-10 3.54177587e-11 2.42235836e-10 3.66052205e-11
1.84790059e-10 1.38373565e-11 9.45433454e-11 3.10066851e-11
4.42564455e-11 1.62660685e-11]
[9.53231313e-12 6.24556615e-14 2.91389086e-11 6.46237033e-12
1.19920677e-11 5.40491844e-12 9.03734684e-11 5.70556308e-14
3.89097392e-11 2.59670063e-13]
[3.75838450e-09 2.77868562e-09 3.95592640e-09 6.00075155e-12
1.31237778e-09 1.57230001e-09 3.26239554e-11 4.79279050e-10
5.02676799e-10 5.73160957e-10]
[2.50205419e-10 7.91493054e-10 4.93517794e-10 2.04160464e-11
6.56799278e-10 1.08498124e-10 4.14381151e-11 1.85730134e-10
4.99269801e-11 5.98584145e-10]
[1.08875120e-08 1.07361832e-09 1.95065274e-09 6.01209308e-10
1.99299400e-09 3.15374659e-11 1.21692208e-10 7.03075734e-10
8.94837825e-10 1.03498454e-13]
[4.62015262e-08 6.95143401e-09 1.74326884e-08 4.57761832e-09
6.89249431e-09 5.14177332e-12 8.44409225e-12 9.94042643e-10
1.46631282e-09 1.00814845e-11]
[4.21602658e-10 1.75012920e-08 8.14204719e-09 2.72389951e-10
9.23155701e-09 1.27570858e-09 4.71338892e-11 3.42236315e-09
3.53965903e-10 8.37383998e-09]
[1.39300117e-10 3.78038272e-10 2.25795043e-09 4.45722916e-11
9.62959620e-10 8.54384559e-10 1.69047937e-14 9.38714628e-12
8.11130157e-13 7.66502993e-10]
[6.52328968e-09 7.46875655e-09 1.65743432e-08 2.88968532e-14
4.70963943e-09 3.93870034e-09 3.97001843e-10 2.45739426e-10
4.09420021e-10 1.88176237e-14]
[4.30290738e-10 1.07725281e-09 1.77475740e-10 5.77747066e-11
5.65820963e-11 8.24988894e-12 4.73710263e-10 1.30771777e-10
3.27810655e-10 6.44230413e-11]
[4.53478819e-09 2.60304520e-10 9.75396512e-10 2.20730522e-10
2.74944655e-10 1.92929467e-10 3.59564424e-11 6.97578796e-12
3.42888760e-11 8.50762014e-13]
[2.73684347e-08 6.59182563e-09 6.67167531e-09 4.17558812e-10
2.50992123e-09 1.58210328e-09 2.92302202e-10 1.63671738e-09
2.69817666e-11 1.38327228e-11]
[1.05052135e-08 2.06242098e-09 3.64199793e-09 1.53190818e-10
1.24547659e-10 1.17559136e-09 4.49807955e-12 4.34495154e-10
6.88808218e-10 1.12622942e-10]
[3.19252535e-09 1.19695436e-09 1.42174239e-09 3.14746638e-10
1.07104042e-09 1.60426209e-09 1.14691836e-10 1.85433136e-10
3.68402695e-10 9.65279836e-13]
[1.07315973e-08 4.12374071e-11 6.29193119e-10 2.71340368e-10
6.47330352e-11 4.51896700e-12 8.58679182e-10 3.17349618e-11
2.35383965e-10 6.97854858e-10]
[1.13209650e-08 1.43946732e-09 1.72466912e-12 1.07448700e-09
9.20313596e-11 6.21803251e-10 6.26756201e-10 5.07981405e-10
7.61655026e-10 3.03967868e-09]
[1.37215008e-08 1.92439352e-09 8.12777772e-10 6.88568616e-10
1.38597802e-09 7.20474573e-11 2.61011987e-11 7.54608333e-10
1.11988921e-11 4.15936052e-11]
[3.34677059e-07 3.60218562e-08 8.36108225e-08 5.56997012e-09
7.46149755e-08 5.47892353e-08 7.12046099e-09 1.57995916e-09
1.11744952e-08 2.13406936e-10]
[5.92183578e-09 1.55736271e-08 7.16329175e-09 5.38785901e-10
5.71452499e-09 5.89898069e-12 7.27506085e-11 3.21193706e-09
9.56673943e-12 3.03346127e-10]]
481 [[1.62522821e-09 2.74319739e-09 1.96629802e-09 1.64414732e-11
[verbose training output truncated: iterations 482–487 each print a large array of per-sample class probabilities (values on the order of 1e-15 to 1e-7); the raw numeric dump is omitted for readability]
3.11330750e-11 4.49781680e-13]
[9.60542338e-10 1.33262241e-09 1.33595328e-09 4.55621629e-11
1.16338717e-09 2.09950014e-10 5.14537269e-11 2.92489883e-10
3.27353501e-11 3.40075179e-10]
[2.29923540e-09 5.68648273e-10 1.48801023e-10 3.42013292e-10
1.05860735e-10 4.53859783e-10 7.42446051e-10 1.44257543e-10
5.57255141e-10 2.43970088e-11]
[1.76682912e-10 1.08889937e-09 3.75358831e-10 5.88602160e-11
3.57425780e-10 1.78961196e-13 5.66288437e-10 6.26556452e-10
3.63730693e-10 1.66759343e-10]
[2.24292762e-09 4.69762160e-10 1.95529467e-09 3.32505225e-12
1.28850765e-09 5.09638965e-10 9.33545039e-12 1.73394373e-11
2.52859648e-10 1.06809583e-10]
[9.35336226e-09 9.20020427e-09 2.69844384e-09 1.00873702e-11
1.80726267e-09 2.05503742e-09 4.38724732e-09 3.47423998e-09
1.13689094e-09 6.85968406e-10]
[3.86855251e-09 1.20062066e-09 2.71711761e-10 1.34419406e-10
2.79509781e-10 1.47724461e-10 9.40224469e-10 5.25577036e-10
5.40572089e-12 8.99928092e-11]
[7.66672986e-10 7.62480432e-10 6.40065150e-11 4.76836883e-12
8.66026936e-11 5.24499763e-11 1.81907500e-11 2.88951738e-10
3.06076521e-11 6.83273436e-11]
[9.01119379e-11 2.72290890e-11 1.67152381e-10 2.59171455e-11
1.26488077e-10 9.03612017e-12 6.83323499e-11 2.27840186e-11
3.15004023e-11 1.03286856e-11]
[6.09731565e-12 4.08482886e-15 2.19494283e-11 4.77053222e-12
8.83289944e-12 3.62852643e-12 6.46450177e-11 7.87539798e-14
2.72253297e-11 2.20224066e-13]
[2.76903768e-09 2.00839177e-09 2.87672439e-09 3.99495435e-12
9.49348727e-10 1.13793928e-09 2.39044328e-11 3.47028920e-10
3.64272448e-10 4.10365042e-10]
[1.65505597e-10 5.55787291e-10 3.48427854e-10 1.38279524e-11
4.57235049e-10 7.74339463e-11 2.65799823e-11 1.28849651e-10
3.27906288e-11 4.09663386e-10]
[7.73172706e-09 7.79564827e-10 1.41781097e-09 4.24350653e-10
1.44175627e-09 2.02048003e-11 8.55825034e-11 5.07160378e-10
6.39506271e-10 5.54285846e-15]
[3.29044840e-08 4.94349536e-09 1.24555750e-08 3.24815892e-09
4.93405446e-09 2.43787172e-12 6.70558046e-12 7.05462389e-10
1.03677308e-09 7.62781912e-12]
[2.97219354e-10 1.24168469e-08 5.76311348e-09 1.91037365e-10
6.49146182e-09 8.95578529e-10 3.55139840e-11 2.42605702e-09
2.51149606e-10 5.88739065e-09]
[8.85808598e-11 2.86812899e-10 1.65399805e-09 3.08374779e-11
6.96458834e-10 6.06616328e-10 3.47967522e-13 7.62522660e-12
3.03394630e-13 5.43603365e-10]
[4.72652741e-09 5.46913676e-09 1.20605932e-08 6.36578694e-14
3.44545895e-09 2.85670207e-09 2.88537837e-10 1.82108024e-10
2.94278611e-10 2.83609157e-15]
[3.03450423e-10 7.54186784e-10 1.22110432e-10 4.14078298e-11
3.97451873e-11 6.32775867e-12 3.36922233e-10 9.03227413e-11
2.32132915e-10 4.43414342e-11]
[3.25822867e-09 1.89040071e-10 7.07810286e-10 1.58309685e-10
1.97404081e-10 1.41378961e-10 2.61907952e-11 4.89935355e-12
2.49039754e-11 7.29063245e-13]
[1.95825566e-08 4.69023845e-09 4.78749556e-09 2.99619003e-10
1.80556433e-09 1.14789303e-09 2.15304776e-10 1.16906984e-09
2.12208407e-11 1.11405807e-11]
[7.50035412e-09 1.44875371e-09 2.58628390e-09 1.10853582e-10
8.94677967e-11 8.40233382e-10 3.95083668e-12 3.06349011e-10
4.97958703e-10 7.90248477e-11]
[2.31068267e-09 8.33419159e-10 9.85188626e-10 2.31393370e-10
7.61627562e-10 1.13397204e-09 8.10687078e-11 1.24813968e-10
2.65827564e-10 2.77868836e-13]
[7.62067326e-09 3.11600284e-11 4.53409964e-10 1.92123262e-10
4.41828559e-11 3.01723199e-12 6.08101776e-10 2.36063039e-11
1.66870181e-10 4.88087347e-10]
[8.04121538e-09 1.00027196e-09 2.25966117e-12 7.57199443e-10
7.32526243e-11 4.50134904e-10 4.40059403e-10 3.52764270e-10
5.40057580e-10 2.12019180e-09]
[9.78713868e-09 1.33914329e-09 5.81520676e-10 4.94889594e-10
9.85351293e-10 4.61456407e-11 2.19791308e-11 5.33666612e-10
9.89488545e-12 2.90847542e-11]
[2.42162236e-07 2.60801949e-08 6.05624512e-08 4.01441070e-09
5.40972179e-08 3.97157487e-08 5.15857199e-09 1.14766675e-09
8.10479265e-09 1.52704750e-10]
[4.17338027e-09 1.08665150e-08 4.98524243e-09 3.77042554e-10
3.99768632e-09 4.70731153e-12 4.98726573e-11 2.23632151e-09
6.62934039e-12 2.05352060e-10]]
488 [[1.18387849e-09 1.97451189e-09 1.41586971e-09 1.18962271e-11
5.79686304e-10 3.16958982e-10 1.46414688e-11 2.55754065e-10
7.53384098e-12 9.34941051e-11]
[1.57968529e-08 3.46560335e-09 7.71174683e-09 1.11065258e-09
2.39404232e-09 2.81647508e-09 1.85464389e-10 3.92846685e-10
2.91309263e-11 1.40035574e-10]
[1.49829777e-08 2.52424280e-10 2.13783098e-09 9.17238606e-10
7.29211151e-10 1.38318509e-09 7.39005743e-10 2.86335766e-10
7.74256383e-10 1.37926397e-10]
[4.25645544e-09 3.07212323e-11 2.35306691e-10 3.93594545e-10
3.91814391e-13 3.61079074e-12 3.31958216e-10 6.19276317e-12
2.43387058e-11 2.28091097e-10]
[6.25959599e-09 7.46493147e-10 2.27106404e-09 2.85183253e-10
7.98900801e-10 2.38459641e-10 1.39238420e-10 9.18995655e-11
3.16337087e-11 6.16569159e-13]
[1.66184109e-10 5.21935624e-11 3.07483162e-10 1.27762240e-12
6.93471265e-11 7.43473933e-11 7.30642878e-11 1.04873049e-11
2.49393696e-12 1.92827876e-11]
[4.50996621e-09 5.41831263e-11 5.96983106e-11 5.59785169e-11
2.54851434e-11 1.88711010e-11 7.39203173e-10 4.64855649e-12
3.25793173e-10 3.01830951e-11]
[8.35726733e-11 7.01276755e-11 1.79340597e-11 1.19928438e-11
5.14105053e-13 4.68272180e-12 4.89488395e-11 4.53340148e-12
2.09838960e-12 1.36360188e-11]
[2.10550690e-10 1.56983226e-09 1.95799372e-09 1.52819025e-13
1.40773419e-09 3.77230548e-10 2.70789550e-10 2.64540962e-10
6.96609135e-11 5.70273944e-10]
[1.35765869e-10 3.07497370e-10 1.33168615e-09 1.75370732e-11
7.20982131e-10 5.10633199e-10 8.31083822e-12 8.43017597e-11
9.13584987e-12 3.38899603e-10]
[3.28959135e-12 3.12975901e-10 5.66345387e-10 8.97067172e-15
8.17082711e-11 9.29742234e-11 6.18945920e-11 3.58234077e-11
1.86359869e-11 1.76058646e-10]
[2.10587479e-10 2.56642176e-11 7.73745968e-12 1.20407650e-11
2.07322298e-12 9.74742467e-12 3.74587474e-12 1.37077222e-12
1.87591981e-11 2.66553788e-11]
[1.49984542e-11 4.90662403e-09 1.15199376e-09 1.64984333e-11
7.19848250e-10 1.28591348e-10 6.38811035e-10 6.18801192e-10
4.78413986e-10 2.84730465e-10]
[1.20158419e-09 6.70395024e-10 8.43738664e-10 3.73793882e-11
3.49023628e-10 2.53608613e-10 1.52058912e-11 1.24493919e-10
2.40900435e-13 5.02403103e-11]
[2.82815154e-10 1.15675845e-09 7.35997562e-10 5.11883184e-11
5.75909583e-10 3.14763369e-11 9.57765626e-12 1.05712826e-09
1.07974031e-10 5.77684185e-10]
[6.96224864e-11 8.26912357e-11 3.37008277e-12 4.42802051e-13
2.43161276e-11 4.53259211e-11 1.44346956e-11 9.91798047e-11
7.06894668e-11 1.62969154e-10]
[3.90686902e-09 3.69873395e-09 4.87316963e-09 1.31325386e-10
2.75403371e-09 2.66828500e-09 1.04731661e-10 3.94410724e-10
4.30516634e-10 9.87882800e-10]
[5.54971800e-09 6.26783327e-09 5.90068679e-09 2.41262754e-10
1.74434436e-09 5.63418150e-10 1.26823160e-10 8.24687209e-10
1.09940037e-11 9.85921694e-11]
[1.61717879e-08 5.44796643e-08 5.07603769e-08 5.76657012e-10
1.28996809e-08 3.66657286e-09 3.84227560e-08 8.58304344e-09
7.64213928e-09 7.64419222e-09]
[5.07699614e-10 6.99510173e-09 1.82656760e-09 3.06435918e-10
1.28123576e-09 2.87791390e-10 1.34543255e-10 1.95133018e-09
9.71835189e-10 3.85061709e-09]
[1.12585412e-09 5.40927630e-11 4.57102449e-12 1.75370682e-12
1.07104080e-11 1.30724051e-10 3.73774571e-11 1.06963474e-10
3.36307796e-11 1.56795356e-12]
[1.15756322e-08 1.13344459e-08 1.05791853e-08 2.56398437e-11
7.37146942e-09 2.85659121e-10 4.53300220e-11 3.35506157e-09
5.02100759e-10 2.90883150e-10]
[2.30541146e-10 3.10987567e-10 6.39559620e-11 1.92657782e-12
2.61381948e-11 4.07004969e-12 5.40627339e-10 1.87395024e-14
6.92114433e-11 2.68010088e-11]
[1.26698315e-09 9.79803576e-12 5.41932679e-11 1.34327079e-10
1.62469147e-10 1.37022999e-13 3.06343752e-12 3.23207274e-12
1.00622559e-12 9.36262823e-12]
[7.19973603e-10 9.50596397e-11 1.20532172e-10 2.26203475e-11
6.32493121e-14 1.00240780e-12 8.16116940e-11 7.60806597e-11
6.63984927e-11 9.10869704e-11]
[1.19175655e-09 2.33264100e-09 4.48156194e-10 2.38704761e-10
6.51320398e-11 1.16177235e-12 4.49635961e-10 3.37909486e-10
3.11248693e-10 1.19548886e-09]
[3.81006821e-09 8.03678927e-10 4.83582796e-10 3.92512719e-11
7.48147703e-10 2.21787353e-10 4.31617039e-11 1.10016606e-11
7.49479752e-12 1.46616261e-11]
[3.92644469e-10 1.31017616e-10 3.28404437e-10 1.00288678e-11
6.91559594e-11 8.22088190e-13 1.29610952e-10 1.25451115e-11
8.28094743e-11 2.19304914e-15]
[1.25245337e-08 1.23311119e-09 1.53912434e-09 4.77085364e-10
5.91805799e-10 3.12153692e-10 6.35607069e-10 3.43302605e-10
1.25507199e-11 9.65480937e-11]
[5.14633724e-10 8.44716711e-10 1.72920333e-09 4.39943439e-11
6.50593802e-10 1.03875897e-09 2.24796294e-10 1.15535128e-10
6.56693735e-12 1.51198467e-09]
[3.05697514e-09 3.70944041e-09 3.38211636e-09 1.73118971e-10
5.74781653e-10 1.15720883e-10 1.99164581e-09 5.98831969e-10
4.42563389e-10 1.63851459e-09]
[1.45004272e-09 9.31595216e-10 2.44770646e-09 1.17179030e-11
2.59471615e-10 3.46019001e-10 2.44075422e-11 6.25433487e-11
9.92672758e-13 1.20442786e-11]
[1.84671393e-09 2.46309516e-09 5.39563614e-10 8.87986146e-11
2.01310003e-10 1.70447174e-11 5.34402497e-11 2.96463619e-10
2.30128745e-10 6.02183367e-12]
[1.79243101e-10 6.01116358e-11 1.47798642e-10 6.44871733e-12
9.13520538e-17 1.46875684e-13 4.26108604e-11 2.92132540e-11
1.66134231e-11 4.91363798e-11]
[4.64478453e-09 1.27133592e-11 8.59883064e-10 2.06310310e-10
4.70709368e-10 7.29874035e-10 4.94054099e-10 2.14126433e-11
2.12948697e-10 8.79474703e-11]
[2.27227195e-08 6.34230051e-09 5.78898631e-09 6.30396729e-10
4.48057516e-09 6.14189473e-10 5.31525550e-10 5.13669894e-10
3.22726938e-10 1.03006223e-09]
[2.14304955e-10 9.26526110e-09 2.73913564e-09 3.24100365e-12
1.28103760e-09 1.78851516e-10 1.69592671e-09 2.22231271e-09
5.55845215e-10 3.42939199e-09]
[3.95391173e-09 5.10153056e-10 5.91990441e-10 3.51012002e-10
2.31686165e-10 9.11678490e-12 6.86245221e-11 1.42103944e-10
2.97505897e-11 3.55429989e-13]
[9.15697656e-10 1.26604439e-09 1.27084900e-09 4.35745537e-11
1.10595261e-09 2.00020403e-10 4.88292686e-11 2.77628227e-10
3.09657299e-11 3.21891587e-10]
[2.18782113e-09 5.37356973e-10 1.42539180e-10 3.25035603e-10
1.01337951e-10 4.31786677e-10 7.03366178e-10 1.36732895e-10
5.28575322e-10 2.35010711e-11]
[1.67987476e-10 1.02927212e-09 3.54165543e-10 5.57055445e-11
3.38879886e-10 1.64656498e-13 5.36080340e-10 5.92841537e-10
3.44424531e-10 1.58051639e-10]
[2.14225940e-09 4.46618211e-10 1.86276120e-09 3.23982855e-12
1.22795462e-09 4.86011139e-10 8.91578744e-12 1.63403619e-11
2.41057979e-10 1.01270581e-10]
[8.89705085e-09 8.76867970e-09 2.58305458e-09 9.44012068e-12
1.72438595e-09 1.94533772e-09 4.17139170e-09 3.31590493e-09
1.08126465e-09 6.61470906e-10]
[3.68428199e-09 1.14633093e-09 2.63903844e-10 1.26890307e-10
2.68432013e-10 1.37462731e-10 8.92980933e-10 5.04029236e-10
5.12332028e-12 8.20720859e-11]
[7.32070011e-10 7.22352650e-10 6.07458312e-11 4.42728828e-12
8.22669860e-11 4.94786698e-11 1.78841898e-11 2.75059763e-10
2.86814258e-11 6.50608680e-11]
[8.57434050e-11 2.62124965e-11 1.58585003e-10 2.46992009e-11
1.19746336e-10 8.49986194e-12 6.52257517e-11 2.17545788e-11
3.00291298e-11 9.66510649e-12]
[5.70879086e-12 1.81927385e-15 2.10756818e-11 4.56731006e-12
8.44461533e-12 3.43485245e-12 6.16154895e-11 8.11709864e-14
2.58816402e-11 2.15051007e-13]
[2.65073966e-09 1.91756599e-09 2.74886108e-09 3.72585840e-12
9.06379926e-10 1.08677690e-09 2.28620282e-11 3.31021609e-10
3.47687986e-10 3.91376937e-10]
[1.56142787e-10 5.28440066e-10 3.31418061e-10 1.30566636e-11
4.34274111e-10 7.38112636e-11 2.49609693e-11 1.22273799e-10
3.08676309e-11 3.88302069e-10]
[7.36339479e-09 7.44561717e-10 1.35462666e-09 4.03723394e-10
1.37646568e-09 1.89777997e-11 8.14096019e-11 4.83968239e-10
6.09364630e-10 1.39918940e-14]
[3.13479641e-08 4.70889371e-09 1.18715779e-08 3.09307163e-09
4.70422326e-09 2.17877779e-12 6.49157724e-12 6.71678453e-10
9.86891377e-10 7.34222288e-12]
[2.82763325e-10 1.18231528e-08 5.48489892e-09 1.81511681e-10
6.17336844e-09 8.51597160e-10 3.39882530e-11 2.30937305e-09
2.39054081e-10 5.60058091e-09]
[8.29156885e-11 2.75727686e-10 1.58171036e-09 2.93240342e-11
6.64823567e-10 5.77569032e-10 4.09854096e-13 7.42362422e-12
2.53698705e-13 5.17885417e-10]
[4.51331836e-09 5.23096882e-09 1.15247787e-08 6.85996351e-14
3.29514343e-09 2.72876691e-09 2.75715836e-10 1.74520795e-10
2.80677692e-10 6.20750582e-15]
[2.89347437e-10 7.16599678e-10 1.16280085e-10 3.91792732e-11
3.77059373e-11 6.04791189e-12 3.21252977e-10 8.58284123e-11
2.21335965e-10 4.21382596e-11]
[3.10794952e-09 1.80632845e-10 6.76017165e-10 1.50980278e-10
1.88283890e-10 1.35263610e-10 2.50360374e-11 4.66249034e-12
2.37693913e-11 7.22395349e-13]
[1.86628042e-08 4.46714734e-09 4.56643679e-09 2.84825021e-10
1.72359315e-09 1.09639159e-09 2.05842850e-10 1.11659649e-09
2.05564152e-11 1.07893201e-11]
[7.14915687e-09 1.37772731e-09 2.46267663e-09 1.06024954e-10
8.53061719e-11 8.01101628e-10 3.86972960e-12 2.91142670e-10
4.75231298e-10 7.52129443e-11]
[2.20688179e-09 7.91440064e-10 9.34991909e-10 2.21383747e-10
7.25242312e-10 1.07912707e-09 7.71621947e-11 1.17946240e-10
2.53759010e-10 2.21679292e-13]
[7.25749988e-09 2.99206341e-11 4.32698039e-10 1.82880391e-10
4.18610125e-11 2.85214652e-12 5.78984730e-10 2.26298215e-11
1.58901160e-10 4.63946574e-10]
[7.65793936e-09 9.49491609e-10 2.31472885e-12 7.20365072e-10
7.08987838e-11 4.30046277e-10 4.18308257e-10 3.34944538e-10
5.13956080e-10 2.01341864e-09]
[9.32686275e-09 1.27144242e-09 5.54356391e-10 4.72074255e-10
9.38208571e-10 4.33264860e-11 2.14253073e-11 5.07795457e-10
9.70122923e-12 2.76667152e-11]
[2.31224517e-07 2.49041477e-08 5.78348894e-08 3.83165526e-09
5.16654457e-08 3.79291836e-08 4.92731321e-09 1.09659139e-09
7.74039627e-09 1.45584440e-10]
[3.97012902e-09 1.03222705e-08 4.73331104e-09 3.58585712e-10
3.79880195e-09 4.55406387e-12 4.72553233e-11 2.12278153e-09
6.30327662e-12 1.94226644e-10]]
489 [[1.13066799e-09 1.88356420e-09 1.35201144e-09 1.14882982e-11
5.51869511e-10 3.02641479e-10 1.39260495e-11 2.43785484e-10
7.15279817e-12 8.88626781e-11]
[1.50578737e-08 3.31804968e-09 7.37633943e-09 1.05864902e-09
2.28852996e-09 2.69542409e-09 1.76914001e-10 3.76218722e-10
2.77450942e-11 1.35282431e-10]
[1.43085809e-08 2.42615425e-10 2.04489932e-09 8.76921119e-10
6.98955893e-10 1.32456759e-09 7.05913525e-10 2.73552366e-10
7.39466978e-10 1.30282713e-10]
[4.05578329e-09 2.87366970e-11 2.23140373e-10 3.74650214e-10
3.63147607e-13 3.43001319e-12 3.16387697e-10 5.78419650e-12
2.32400368e-11 2.18729515e-10]
[5.97342869e-09 7.09732158e-10 2.16199604e-09 2.71998801e-10
7.60742698e-10 2.27634997e-10 1.32777170e-10 8.70075060e-11
2.99817162e-11 5.17288488e-13]
[1.59095909e-10 4.95678195e-11 2.92253822e-10 1.28593117e-12
6.59564560e-11 7.06074967e-11 7.04350115e-11 9.93481360e-12
2.46554728e-12 1.85631932e-11]
[4.30332203e-09 5.13554798e-11 5.75667145e-11 5.33671321e-11
2.45690902e-11 1.81300999e-11 7.05668723e-10 4.33909165e-12
3.10985168e-10 2.84268562e-11]
[8.05198585e-11 6.71374911e-11 1.71565995e-11 1.13012960e-11
4.89588585e-13 4.48281564e-12 4.68170310e-11 4.33131357e-12
2.04793952e-12 1.28677691e-11]
[2.03943329e-10 1.49919044e-09 1.87557863e-09 1.48411958e-13
1.34822888e-09 3.61721976e-10 2.57062947e-10 2.53262332e-10
6.64319123e-11 5.46816402e-10]
[1.30216213e-10 2.93046108e-10 1.27076696e-09 1.66986835e-11
6.88004683e-10 4.87490176e-10 7.88966279e-12 8.03820662e-11
8.80726857e-12 3.22970019e-10]
[3.24878407e-12 2.99632831e-10 5.41454894e-10 9.00578745e-15
7.81816400e-11 8.87828981e-11 5.89679229e-11 3.42744452e-11
1.78685174e-11 1.68171424e-10]
[2.03345861e-10 2.43510404e-11 7.61980730e-12 1.16587828e-11
1.90965680e-12 9.57859625e-12 3.73462471e-12 1.34526927e-12
1.81802780e-11 2.54233308e-11]
[1.45505391e-11 4.67724184e-09 1.10070108e-09 1.57989543e-11
6.87147375e-10 1.23323389e-10 6.07852739e-10 5.90337626e-10
4.55127076e-10 2.71877117e-10]
[1.14919654e-09 6.41875839e-10 8.07163139e-10 3.57744065e-11
3.33937148e-10 2.42333595e-10 1.45002588e-11 1.19336442e-10
2.22956616e-13 4.81874297e-11]
[2.69164686e-10 1.10228826e-09 7.03050214e-10 4.90012980e-11
5.49191178e-10 2.96988128e-11 8.92478336e-12 1.00852455e-09
1.02425922e-10 5.52488862e-10]
[6.63654581e-11 7.86012851e-11 3.16580312e-12 4.24697732e-13
2.30287045e-11 4.32135492e-11 1.38608826e-11 9.44263123e-11
6.72174850e-11 1.55024517e-10]
[3.73899169e-09 3.52890389e-09 4.65137682e-09 1.25998043e-10
2.62568533e-09 2.54636095e-09 1.00316133e-10 3.75852635e-10
4.11823764e-10 9.39167418e-10]
[5.27217911e-09 5.97584540e-09 5.62157464e-09 2.28548063e-10
1.66168894e-09 5.35508725e-10 1.21821280e-10 7.86804998e-10
1.06664627e-11 9.41625557e-11]
[1.54073547e-08 5.20349197e-08 4.85086140e-08 5.50255024e-10
1.23177467e-08 3.50481562e-09 3.66658835e-08 8.19926069e-09
7.29218503e-09 7.29896605e-09]
[4.78930767e-10 6.66382079e-09 1.74503896e-09 2.91849755e-10
1.21980674e-09 2.75063144e-10 1.27903290e-10 1.86179651e-09
9.25039590e-10 3.66909748e-09]
[1.07150864e-09 5.10579102e-11 4.38744383e-12 1.65986965e-12
1.02373064e-11 1.23919245e-10 3.60539865e-11 1.01930242e-10
3.18922646e-11 1.55999619e-12]
[1.10483121e-08 1.07949123e-08 1.00892566e-08 2.46971763e-11
7.02865148e-09 2.73727947e-10 4.27081650e-11 3.19641044e-09
4.76668582e-10 2.76468574e-10]
[2.21060670e-10 3.01349653e-10 6.19519674e-11 1.89187588e-12
2.52538657e-11 3.76512290e-12 5.20075255e-10 2.67990832e-14
6.69213838e-11 2.55307593e-11]
[1.20786650e-09 9.28826916e-12 5.31543620e-11 1.27171231e-10
1.55632363e-10 1.69676244e-13 2.88980286e-12 3.14850539e-12
1.19569994e-12 8.66718337e-12]
[6.85211609e-10 9.01083010e-11 1.15169394e-10 2.15682711e-11
6.98704537e-14 8.97061737e-13 7.73590564e-11 7.23875686e-11
6.28145691e-11 8.63325479e-11]
[1.13752895e-09 2.22527942e-09 4.28064863e-10 2.27367773e-10
6.21921941e-11 1.12956766e-12 4.27651894e-10 3.22137648e-10
2.96878670e-10 1.14389644e-09]
[3.63899899e-09 7.68058305e-10 4.62909578e-10 3.75530430e-11
7.15232000e-10 2.12475917e-10 4.14858923e-11 1.05004031e-11
7.26082859e-12 1.42533716e-11]
[3.73730649e-10 1.25140498e-10 3.12999649e-10 9.46127022e-12
6.56466774e-11 7.71101935e-13 1.23573237e-10 1.19755162e-11
7.91831666e-11 4.60608740e-16]
[1.19409061e-08 1.17822035e-09 1.47029982e-09 4.54601112e-10
5.65272097e-10 2.98679265e-10 6.06862391e-10 3.27597282e-10
1.20553729e-11 9.24510777e-11]
[4.85635964e-10 8.09855729e-10 1.65494007e-09 4.16035522e-11
6.21697030e-10 9.91133013e-10 2.13297832e-10 1.10715306e-10
6.18673889e-12 1.44155750e-09]
[2.91128185e-09 3.53447030e-09 3.22075281e-09 1.64535266e-10
5.47518344e-10 1.10128256e-10 1.89704158e-09 5.69533285e-10
4.22059330e-10 1.56124228e-09]
[1.38180070e-09 8.87261859e-10 2.33168868e-09 1.11318222e-11
2.47505222e-10 3.29670536e-10 2.35925060e-11 5.96035859e-11
9.43096085e-13 1.11649921e-11]
[1.75973228e-09 2.34776532e-09 5.14459967e-10 8.45732893e-11
1.91928709e-10 1.62279570e-11 5.09461116e-11 2.82426746e-10
2.19442406e-10 5.75213585e-12]
[1.68820453e-10 5.86601969e-11 1.42373458e-10 5.98722194e-12
1.46730178e-15 1.25239031e-13 4.05523126e-11 2.82057044e-11
1.59371530e-11 4.78198232e-11]
[4.43654799e-09 1.20766647e-11 8.19754728e-10 1.97458891e-10
4.48904429e-10 6.95723677e-10 4.71962101e-10 2.03163486e-11
2.03399051e-10 8.37941287e-11]
[2.16840365e-08 6.05400805e-09 5.53178637e-09 6.01070788e-10
4.28047417e-09 5.88595404e-10 5.08374191e-10 4.90017563e-10
3.09052434e-10 9.86023266e-10]
[2.03700997e-10 8.84800944e-09 2.61809599e-09 3.15182460e-12
1.22368116e-09 1.70949284e-10 1.61629923e-09 2.12241736e-09
5.30265269e-10 3.27771701e-09]
[3.76014837e-09 4.87882497e-10 5.65904759e-10 3.33438659e-10
2.21694864e-10 8.49918519e-12 6.51244482e-11 1.35823877e-10
2.83786258e-11 2.46613998e-13]
[8.73365144e-10 1.20333071e-09 1.20799411e-09 4.17031725e-11
1.05158332e-09 1.90640811e-10 4.62523904e-11 2.63165843e-10
2.92569537e-11 3.05173993e-10]
[2.08156206e-09 5.07634467e-10 1.36372504e-10 3.08753481e-10
9.70551090e-11 4.10842403e-10 6.66392568e-10 1.29478738e-10
5.01126383e-10 2.27388119e-11]
[1.59709223e-10 9.72878581e-10 3.34122881e-10 5.27194290e-11
3.21242482e-10 1.54403065e-13 5.07430650e-10 5.60959475e-10
3.25822056e-10 1.49702001e-10]
[2.04567089e-09 4.24631339e-10 1.77447624e-09 3.15703014e-12
1.17029411e-09 4.63337270e-10 8.59341292e-12 1.54324825e-11
2.30438306e-10 9.62114335e-11]
[8.46339274e-09 8.35709733e-09 2.47271638e-09 8.83135995e-12
1.64519521e-09 1.84167926e-09 3.96607807e-09 3.16471590e-09
1.02820399e-09 6.37531240e-10]
[3.50849978e-09 1.09463675e-09 2.56215459e-10 1.19781338e-10
2.57858797e-10 1.27831077e-10 8.48200335e-10 4.83407123e-10
4.86630229e-12 7.46889707e-11]
[6.98828460e-10 6.84051531e-10 5.77774113e-11 4.13192831e-12
7.81221048e-11 4.67165300e-11 1.75490629e-11 2.62087108e-10
2.67992177e-11 6.17686596e-11]
[8.15997105e-11 2.52354080e-11 1.50469535e-10 2.35340025e-11
1.13385680e-10 8.01376002e-12 6.22583235e-11 2.07830031e-11
2.85137963e-11 9.06351062e-12]
[5.35801192e-12 5.09945891e-16 2.02253111e-11 4.29452813e-12
8.08960414e-12 3.26628627e-12 5.93125761e-11 8.93631309e-14
2.46525064e-11 1.80442351e-13]
[2.53755714e-09 1.83065670e-09 2.62610899e-09 3.52875586e-12
8.65382453e-10 1.03753254e-09 2.18911285e-11 3.16127112e-10
3.32072917e-10 3.73138414e-10]
[1.47109515e-10 5.02245911e-10 3.15360375e-10 1.23363986e-11
4.12343507e-10 7.03334214e-11 2.33684622e-11 1.15953990e-10
2.90829670e-11 3.67887793e-10]
[7.01229580e-09 7.11262138e-10 1.29414303e-09 3.84185243e-10
1.31427024e-09 1.77949251e-11 7.74900236e-11 4.61820258e-10
5.80756122e-10 2.87000892e-14]
[2.98662954e-08 4.48516383e-09 1.13155136e-08 2.94565831e-09
4.48485559e-09 1.94751757e-12 6.31504518e-12 6.39417252e-10
9.39133400e-10 7.06116409e-12]
[2.69101795e-10 1.12571080e-08 5.22073491e-09 1.72614110e-10
5.87042287e-09 8.09472820e-10 3.27732243e-11 2.19893150e-09
2.27688993e-10 5.32404598e-09]
[7.75746346e-11 2.64974941e-10 1.51284138e-09 2.78398023e-11
6.34491506e-10 5.49823434e-10 4.71321344e-13 7.20650574e-12
2.12536995e-13 4.93114019e-10]
[4.31011513e-09 5.00284454e-09 1.10127881e-08 7.64184749e-14
3.15108128e-09 2.60603162e-09 2.63901753e-10 1.67280736e-10
2.67637325e-10 6.99946864e-15]
[2.74794581e-10 6.81135304e-10 1.10045807e-10 3.74654338e-11
3.58311531e-11 5.83693790e-12 3.05862406e-10 8.13317850e-11
2.10516068e-10 3.99508061e-11]
[2.96484267e-09 1.72518333e-10 6.45923449e-10 1.44102255e-10
1.79557033e-10 1.29428097e-10 2.40342901e-11 4.42185352e-12
2.27618654e-11 7.17589869e-13]
[1.77919309e-08 4.25513718e-09 4.35446875e-09 2.71132820e-10
1.64460154e-09 1.04678762e-09 1.96369490e-10 1.06484968e-09
1.97974181e-11 1.02781021e-11]
[6.81315495e-09 1.31004028e-09 2.34517218e-09 1.01252163e-10
8.13441223e-11 7.63552008e-10 3.78239251e-12 2.77116608e-10
4.53964516e-10 7.15197213e-11]
[2.10740435e-09 7.51688510e-10 8.87336938e-10 2.11978781e-10
6.90741928e-10 1.02723748e-09 7.35887396e-11 1.11436226e-10
2.42264955e-10 1.86278893e-13]
[6.91105546e-09 2.87431834e-11 4.13066091e-10 1.74126832e-10
3.96371410e-11 2.68323080e-12 5.51212937e-10 2.16902695e-11
1.51331974e-10 4.40772472e-10]
[7.29285241e-09 9.01264354e-10 2.35638695e-12 6.85327415e-10
6.85869674e-11 4.10636180e-10 3.97603330e-10 3.17974212e-10
4.89681207e-10 1.91255888e-09]
[8.88697967e-09 1.20742780e-09 5.28382291e-10 4.50357437e-10
8.93543717e-10 4.06440594e-11 2.08402078e-11 4.83282228e-10
9.53737427e-12 2.62607865e-11]
[2.20774145e-07 2.37813382e-08 5.52306184e-08 3.65594392e-09
4.93451936e-08 3.62231764e-08 4.70478256e-09 1.04729333e-09
7.39471376e-09 1.38978676e-10]
[3.77656542e-09 9.80471973e-09 4.49415612e-09 3.40800681e-10
3.60985468e-09 4.41575526e-12 4.47886833e-11 2.01574824e-09
5.97095874e-12 1.83602757e-10]]
490 [[1.08095834e-09 1.79731314e-09 1.28912191e-09 1.08917667e-11
5.25778662e-10 2.88684232e-10 1.32909491e-11 2.32251821e-10
6.70808680e-12 8.42159993e-11]
[1.43535048e-08 3.17695854e-09 7.05455440e-09 1.00872023e-09
2.18787193e-09 2.57943968e-09 1.68715234e-10 3.60302179e-10
2.63891391e-11 1.30685822e-10]
[1.36655712e-08 2.33038253e-10 1.95528844e-09 8.36615675e-10
6.68197564e-10 1.26391382e-09 6.74244262e-10 2.60398202e-10
7.05564824e-10 1.24095044e-10]
[3.86517405e-09 2.68864097e-11 2.11652850e-10 3.57190056e-10
3.37268527e-13 3.28370313e-12 3.02575970e-10 5.35289063e-12
2.22927364e-11 2.08980344e-10]
[5.70118562e-09 6.74654192e-10 2.05853789e-09 2.59449408e-10
7.24287208e-10 2.17316263e-10 1.26668262e-10 8.23466157e-11
2.84593931e-11 4.26966378e-13]
[1.52524050e-10 4.69580202e-11 2.78057840e-10 1.23789573e-12
6.26751663e-11 6.69402548e-11 6.72011703e-11 9.46846672e-12
2.37090269e-12 1.74208779e-11]
[4.10575147e-09 4.86320540e-11 5.54222861e-11 5.08623499e-11
2.36969069e-11 1.74136135e-11 6.73634756e-10 4.04461385e-12
2.96823486e-10 2.67184587e-11]
[7.75953743e-11 6.43437028e-11 1.64077185e-11 1.06347828e-11
4.65969062e-13 4.28636955e-12 4.48278011e-11 4.16467448e-12
1.97782747e-12 1.20977419e-11]
[1.97302372e-10 1.43194461e-09 1.79631275e-09 1.45566951e-13
1.29104201e-09 3.46609058e-10 2.43977853e-10 2.42730882e-10
6.31441630e-11 5.24567929e-10]
[1.25051480e-10 2.79138684e-10 1.21301566e-09 1.58874949e-11
6.56388088e-10 4.65446188e-10 7.45940981e-12 7.65784032e-11
8.43991952e-12 3.07726337e-10]
[3.25709242e-12 2.86774749e-10 5.18181813e-10 8.88619275e-15
7.47822103e-11 8.49906360e-11 5.61463834e-11 3.27900529e-11
1.70669795e-11 1.60769729e-10]
[1.96407313e-10 2.30690580e-11 7.48200865e-12 1.13032355e-11
1.75419486e-12 9.39956678e-12 3.74157031e-12 1.33512231e-12
1.76088322e-11 2.43012341e-11]
[1.40829960e-11 4.45852188e-09 1.05180130e-09 1.51093145e-11
6.55998879e-10 1.18279274e-10 5.78576722e-10 5.63244041e-10
4.33087060e-10 2.59556691e-10]
[1.09926956e-09 6.14136627e-10 7.72499738e-10 3.43009823e-11
3.19377191e-10 2.32081313e-10 1.38370235e-11 1.14194465e-10
2.16411263e-13 4.62033177e-11]
[2.55824825e-10 1.05025239e-09 6.71772254e-10 4.69277325e-11
5.23877737e-10 2.80303304e-11 8.36814336e-12 9.62088547e-10
9.71046232e-11 5.27905712e-10]
[6.32836845e-11 7.46598875e-11 2.99698598e-12 3.99443218e-13
2.17820325e-11 4.11831121e-11 1.33321589e-11 8.98884396e-11
6.39926586e-11 1.47448677e-10]
[3.58027338e-09 3.36706223e-09 4.43987028e-09 1.20807945e-10
2.50275492e-09 2.43021173e-09 9.61903542e-11 3.58296314e-10
3.93602296e-10 8.93303006e-10]
[5.00809536e-09 5.69698550e-09 5.35676129e-09 2.16744015e-10
1.58278357e-09 5.09057311e-10 1.16992331e-10 7.50659632e-10
1.03735816e-11 8.99077904e-11]
[1.46795100e-08 4.96984343e-08 4.63568080e-08 5.24949470e-10
1.17620955e-08 3.35004706e-09 3.49890051e-08 7.83222467e-09
6.95800528e-09 6.96868797e-09]
[4.51324756e-10 6.34698166e-09 1.66609287e-09 2.77621281e-10
1.16173889e-09 2.63122878e-10 1.20957973e-10 1.77690621e-09
8.80739620e-10 3.49729095e-09]
[1.01963589e-09 4.81574004e-11 4.19757546e-12 1.56894894e-12
9.78777174e-12 1.17582416e-10 3.46827932e-11 9.71107234e-11
3.02130312e-11 1.51286977e-12]
[1.05464974e-08 1.02817642e-08 9.62068597e-09 2.37306509e-11
6.70147287e-09 2.62122224e-10 4.02066088e-11 3.04432777e-09
4.53264638e-10 2.62653104e-10]
[2.11983515e-10 2.91960692e-10 6.00051878e-11 1.86346424e-12
2.44237108e-11 3.49555165e-12 5.00351491e-10 3.54629735e-14
6.47496131e-11 2.43053843e-11]
[1.14723879e-09 9.14483831e-12 5.10323373e-11 1.20667534e-10
1.49585985e-10 1.83078043e-13 2.80550944e-12 3.16370346e-12
1.02227889e-12 7.77538953e-12]
[6.52540085e-10 8.54074746e-11 1.09955849e-10 2.05851231e-11
7.50078531e-14 8.02188902e-13 7.31839542e-11 6.88327506e-11
5.95438800e-11 8.17417481e-11]
[1.08553803e-09 2.12246646e-09 4.09004807e-10 2.17004517e-10
5.93331097e-11 1.08050223e-12 4.07904947e-10 3.07408630e-10
2.83467601e-10 1.09215883e-09]
[3.47500296e-09 7.33746356e-10 4.43546546e-10 3.58070379e-11
6.83819756e-10 2.03536752e-10 3.95731019e-11 1.00758043e-11
7.02669968e-12 1.36673946e-11]
[3.53137749e-10 1.19342452e-10 2.97362314e-10 9.03930957e-12
6.27130094e-11 7.51697704e-13 1.18232167e-10 1.12132151e-11
7.55587278e-11 1.43237045e-15]
[1.13833401e-08 1.12565588e-09 1.40459561e-09 4.33327169e-10
5.40010838e-10 2.85858455e-10 5.79421487e-10 3.12903207e-10
1.16977907e-11 8.86189078e-11]
[4.58953080e-10 7.76535190e-10 1.58320935e-09 3.94110226e-11
5.94329778e-10 9.45314954e-10 2.02949518e-10 1.06107735e-10
5.81743522e-12 1.37320889e-09]
[2.77324603e-09 3.36784116e-09 3.06691890e-09 1.56685598e-10
5.21587042e-10 1.04681909e-10 1.80909399e-09 5.42115599e-10
4.02546334e-10 1.48580138e-09]
[1.31749413e-09 8.45028287e-10 2.22163446e-09 1.06849962e-11
2.35988883e-10 3.14344181e-10 2.24307399e-11 5.65990345e-11
9.06045745e-13 1.05775942e-11]
[1.67629442e-09 2.23808551e-09 4.90462670e-10 8.05600863e-11
1.83081080e-10 1.54297450e-11 4.86343653e-11 2.69140733e-10
2.09007037e-10 5.51238330e-12]
[1.59115400e-10 5.71932244e-11 1.37164102e-10 5.53630002e-12
4.04992657e-15 1.07593382e-13 3.86953105e-11 2.72447207e-11
1.53345655e-11 4.63512762e-11]
[4.23676791e-09 1.14589092e-11 7.81325308e-10 1.88672526e-10
4.28221862e-10 6.63046344e-10 4.49839815e-10 1.93482297e-11
1.94167153e-10 7.95580592e-11]
[2.06938402e-08 5.77865534e-09 5.28624091e-09 5.73156764e-10
4.08909211e-09 5.64043807e-10 4.86321266e-10 4.67372264e-10
2.95898143e-10 9.43772485e-10]
[1.93528863e-10 8.44892373e-09 2.50240007e-09 3.01489483e-12
1.16863279e-09 1.63173497e-10 1.54264080e-09 2.02772328e-09
5.06353406e-10 3.12897967e-09]
[3.57640745e-09 4.66310934e-10 5.41341423e-10 3.16551266e-10
2.12001282e-10 7.93697443e-12 6.20725830e-11 1.30045712e-10
2.72112371e-11 1.82754159e-13]
[8.33025108e-10 1.14330838e-09 1.14908620e-09 3.99908163e-11
9.99506723e-10 1.81695425e-10 4.38264318e-11 2.49528643e-10
2.77765443e-11 2.89068427e-10]
[1.98123102e-09 4.79723112e-10 1.30656500e-10 2.93262931e-10
9.28500225e-11 3.90846664e-10 6.31460777e-10 1.22576178e-10
4.74982026e-10 2.19371970e-11]
[1.51997658e-10 9.20030655e-10 3.15366907e-10 4.98977401e-11
3.04266116e-10 1.42206460e-13 4.80422077e-10 5.31494116e-10
3.08731226e-10 1.41450004e-10]
[1.95335043e-09 4.03677411e-10 1.69051605e-09 3.03915790e-12
1.11536912e-09 4.41678081e-10 8.17909286e-12 1.46028937e-11
2.19799993e-10 9.10907385e-11]
[8.05029432e-09 7.96518508e-09 2.36691574e-09 8.25607053e-12
1.56978416e-09 1.74349938e-09 3.77112466e-09 3.02052740e-09
9.77669982e-10 6.14532359e-10]
[3.34166550e-09 1.04516541e-09 2.48759203e-10 1.13080831e-10
2.47634009e-10 1.18858215e-10 8.05535322e-10 4.63535252e-10
4.63789052e-12 6.79370193e-11]
[6.66894074e-10 6.48170980e-10 5.47801175e-11 3.85523331e-12
7.41098776e-11 4.41019222e-11 1.72008225e-11 2.49846928e-10
2.50490775e-11 5.87970166e-11]
[7.74926282e-11 2.43126536e-11 1.42632083e-10 2.23788201e-11
1.07417710e-10 7.52679964e-12 5.94202179e-11 1.99172003e-11
2.71479889e-11 8.45196260e-12]
[5.01628143e-12 1.94735260e-17 1.94302002e-11 4.13355633e-12
7.73940880e-12 3.07921742e-12 5.64311601e-11 8.94287665e-14
2.34824365e-11 1.82480304e-13]
[2.42814383e-09 1.74774367e-09 2.50933370e-09 3.33296406e-12
8.26310769e-10 9.90583485e-10 2.08895802e-11 3.02029772e-10
3.17277497e-10 3.55676961e-10]
[1.38717047e-10 4.77604633e-10 3.00002056e-10 1.16774584e-11
3.91535410e-10 6.70133684e-11 2.19629432e-11 1.10143107e-10
2.73230501e-11 3.48642091e-10]
[6.67873802e-09 6.79325562e-10 1.23655131e-09 3.65589235e-10
1.25477914e-09 1.66805066e-11 7.37310599e-11 4.40660329e-10
5.53688776e-10 4.49754089e-14]
[2.84533756e-08 4.27236675e-09 1.07849711e-08 2.80486593e-09
4.27596784e-09 1.74047684e-12 6.09516209e-12 6.08879206e-10
8.93630682e-10 6.76895123e-12]
[2.56043429e-10 1.07188505e-08 4.96869249e-09 1.64127125e-10
5.58262808e-09 7.69576301e-10 3.14724088e-11 2.09347830e-09
2.16684828e-10 5.06334442e-09]
[7.28309006e-11 2.54633220e-10 1.44658106e-09 2.63930766e-11
6.06065309e-10 5.23643435e-10 5.43390786e-13 6.96018865e-12
1.86199744e-13 4.69442182e-10]
[4.11586454e-09 4.78508491e-09 1.05235417e-08 8.12982618e-14
3.01343028e-09 2.48921620e-09 2.52189267e-10 1.60300106e-10
2.55388380e-10 1.13067544e-14]
[2.62097297e-10 6.47236666e-10 1.04603308e-10 3.55278237e-11
3.40081519e-11 5.62095635e-12 2.91678035e-10 7.72439341e-11
2.00541611e-10 3.78501591e-11]
[2.82779559e-09 1.64854642e-10 6.16809648e-10 1.37377988e-10
1.71278252e-10 1.23762482e-10 2.29511453e-11 4.20264117e-12
2.17472825e-11 7.00604869e-13]
[1.69598156e-08 4.05357565e-09 4.15272280e-09 2.58513414e-10
1.56925670e-09 9.99623028e-10 1.87957397e-10 1.01521898e-09
1.91669925e-11 9.94842758e-12]
[6.49308105e-09 1.24571458e-09 2.23324032e-09 9.66180332e-11
7.75881988e-11 7.27795986e-10 3.69015331e-12 2.63735945e-10
4.33435353e-10 6.80018317e-11]
[2.01250908e-09 7.13780111e-10 8.42066333e-10 2.02844350e-10
6.57878044e-10 9.77617585e-10 7.00313507e-11 1.05249634e-10
2.31168182e-10 1.43200731e-13]
[6.58210142e-09 2.76051537e-11 3.94173533e-10 1.65794411e-10
3.75492988e-11 2.52707772e-12 5.24901226e-10 2.07790659e-11
1.43990363e-10 4.18764142e-10]
[6.94537505e-09 8.55653138e-10 2.39960776e-12 6.51851441e-10
6.62883292e-11 3.91945699e-10 3.78080573e-10 3.01755335e-10
4.66231455e-10 1.81682194e-09]
[8.46964820e-09 1.14638791e-09 5.03737572e-10 4.29615784e-10
8.50817674e-10 3.81208605e-11 2.03062351e-11 4.59863265e-10
9.29550805e-12 2.49553894e-11]
[2.10798470e-07 2.27092293e-08 5.27444076e-08 3.48935200e-09
4.71294454e-08 3.45975454e-08 4.49335686e-09 1.00062447e-09
7.06149512e-09 1.32328858e-10]
[3.59278440e-09 9.31364548e-09 4.26735958e-09 3.23784358e-10
3.43011465e-09 4.27632027e-12 4.24133493e-11 1.91447960e-09
5.65750736e-12 1.73661622e-10]]
491 [[1.03333244e-09 1.71493792e-09 1.22987220e-09 1.03863832e-11
5.00738496e-10 2.75676675e-10 1.26718772e-11 2.21435263e-10
6.35382412e-12 7.97653725e-11]
[1.36825094e-08 3.04151141e-09 6.74788341e-09 9.61452405e-10
2.09150217e-09 2.46894155e-09 1.60939842e-10 3.45008271e-10
2.51874492e-11 1.26302297e-10]
[1.30493625e-08 2.23590517e-10 1.87256815e-09 7.98942022e-10
6.39706323e-10 1.20950369e-09 6.42590791e-10 2.48341478e-10
6.74295404e-10 1.18340822e-10]
[3.68254437e-09 2.52047855e-11 2.00689974e-10 3.40495205e-10
3.11995140e-13 3.12835899e-12 2.89191615e-10 4.98468184e-12
2.14316860e-11 1.99895703e-10]
[5.43961937e-09 6.41833541e-10 1.96020579e-09 2.47566194e-10
6.89439540e-10 2.07620740e-10 1.20679259e-10 7.81755424e-11
2.70021725e-11 3.32508391e-13]
[1.46008054e-10 4.45153592e-11 2.64458627e-10 1.19594221e-12
5.95708395e-11 6.34121840e-11 6.40650537e-11 9.03809198e-12
2.30046623e-12 1.63399129e-11]
[3.91782291e-09 4.60829517e-11 5.33866182e-11 4.85148806e-11
2.28585287e-11 1.67640350e-11 6.43327015e-10 3.76373858e-12
2.83117976e-10 2.51290361e-11]
[7.47672061e-11 6.16021066e-11 1.57151490e-11 1.00154580e-11
4.38238681e-13 4.09295802e-12 4.28926208e-11 3.97546745e-12
1.93320387e-12 1.14067336e-11]
[1.91001910e-10 1.36763905e-09 1.72051630e-09 1.41509614e-13
1.23672569e-09 3.32405431e-10 2.31579528e-10 2.32327582e-10
6.01660926e-11 5.03235780e-10]
[1.19861509e-10 2.66163451e-10 1.15793400e-09 1.51171989e-11
6.26084256e-10 4.44438498e-10 7.09607224e-12 7.31219619e-11
8.10701929e-12 2.92887707e-10]
[3.19965396e-12 2.74651015e-10 4.95588667e-10 8.49175506e-15
7.15094957e-11 8.11807178e-11 5.35042266e-11 3.14515138e-11
1.63355058e-11 1.53232976e-10]
[1.89626671e-10 2.18983395e-11 7.33730079e-12 1.09306995e-11
1.61131490e-12 9.18058509e-12 3.72686573e-12 1.30807407e-12
1.71146090e-11 2.32285397e-11]
[1.36892126e-11 4.24981934e-09 1.00532611e-09 1.44492083e-11
6.26185220e-10 1.13517055e-10 5.50566812e-10 5.37162394e-10
4.12571763e-10 2.47948932e-10]
[1.05144533e-09 5.87788140e-10 7.39411695e-10 3.27864594e-11
3.05779136e-10 2.21367353e-10 1.31988632e-11 1.09399131e-10
2.09713024e-13 4.40590433e-11]
[2.43560664e-10 1.00057378e-09 6.41862994e-10 4.49344214e-11
4.99512930e-10 2.64298298e-11 7.80910005e-12 9.17626185e-10
9.21861239e-11 5.05050583e-10]
[6.03017910e-11 7.09768709e-11 2.80876834e-12 3.82483865e-13
2.06228122e-11 3.93119051e-11 1.28016816e-11 8.55928813e-11
6.07511828e-11 1.40174165e-10]
[3.42609356e-09 3.21264090e-09 4.23697179e-09 1.15928033e-10
2.38609819e-09 2.31884605e-09 9.21658873e-11 3.41573882e-10
3.76709727e-10 8.49055937e-10]
[4.75760284e-09 5.43161730e-09 5.10334033e-09 2.05350010e-10
1.50772762e-09 4.83735276e-10 1.12369765e-10 7.16211543e-10
1.00337074e-11 8.58225218e-11]
[1.39863437e-08 4.74681096e-08 4.42995510e-08 5.00930121e-10
1.12314223e-08 3.20196741e-09 3.33899025e-08 7.48207081e-09
6.63885893e-09 6.65361450e-09]
[4.25850294e-10 6.04696862e-09 1.59145628e-09 2.64223665e-10
1.10594451e-09 2.51642275e-10 1.14824578e-10 1.69621149e-09
8.38491231e-10 3.33203937e-09]
[9.70211804e-10 4.54779313e-11 4.00772946e-12 1.47584598e-12
9.33990248e-12 1.11561276e-10 3.33184909e-11 9.26423248e-11
2.85515657e-11 1.47650434e-12]
[1.00657315e-08 9.79245613e-09 9.17466432e-09 2.28489149e-11
6.38973309e-09 2.51000875e-10 3.78963724e-11 2.90040259e-09
4.29986730e-10 2.49545147e-10]
[2.03185675e-10 2.82764555e-10 5.81627557e-11 1.82252802e-12
2.35625200e-11 3.25038585e-12 4.81188980e-10 4.49092792e-14
6.27983631e-11 2.31144707e-11]
[1.08986027e-09 8.98449086e-12 4.90791965e-11 1.14560394e-10
1.43745377e-10 2.03745142e-13 2.70751531e-12 3.16358981e-12
9.03678628e-13 7.01512994e-12]
[6.21044234e-10 8.10645321e-11 1.04838915e-10 1.96593484e-11
7.71980789e-14 7.20751429e-13 6.93017355e-11 6.55527643e-11
5.63244899e-11 7.73936115e-11]
[1.03639700e-09 2.02461646e-09 3.90664595e-10 2.07091097e-10
5.65898263e-11 1.04204345e-12 3.89267264e-10 2.93461389e-10
2.70336055e-10 1.04295515e-09]
[3.31925268e-09 7.00868242e-10 4.25013435e-10 3.41649809e-11
6.53712553e-10 1.95113877e-10 3.78150544e-11 9.62847365e-12
6.73866625e-12 1.31588788e-11]
[3.35183086e-10 1.13886090e-10 2.82910072e-10 8.55856066e-12
5.96905843e-11 7.17804862e-13 1.12891062e-10 1.06054062e-11
7.23627856e-11 1.01995692e-15]
[1.08537908e-08 1.07543512e-09 1.34148441e-09 4.12759840e-10
5.16020840e-10 2.73476297e-10 5.53366024e-10 2.98275372e-10
1.11662266e-11 8.50057073e-11]
[4.33158636e-10 7.44668469e-10 1.51495989e-09 3.73077410e-11
5.67670544e-10 9.02146105e-10 1.92962227e-10 1.01804974e-10
5.51730182e-12 1.30899010e-09]
[2.64120601e-09 3.20883663e-09 2.92097406e-09 1.49168441e-10
4.96995714e-10 9.95733780e-11 1.72498860e-09 5.15841178e-10
3.84231093e-10 1.41409181e-09]
[1.25547423e-09 8.04954404e-10 2.11645745e-09 1.02557498e-11
2.25020826e-10 2.99584908e-10 2.13812163e-11 5.37881859e-11
8.87195888e-13 9.99899787e-12]
[1.59737177e-09 2.13327728e-09 4.67687577e-10 7.67244566e-11
1.74616034e-10 1.46671565e-11 4.63578104e-11 2.56373371e-10
1.99383344e-10 5.28330983e-12]
[1.49897945e-10 5.57181771e-11 1.31989467e-10 5.10571718e-12
8.06132161e-15 9.67449760e-14 3.69413229e-11 2.62796021e-11
1.46614636e-11 4.49910542e-11]
[4.04690993e-09 1.08948895e-11 7.45097274e-10 1.80293077e-10
4.08482269e-10 6.32076088e-10 4.28954183e-10 1.83952293e-11
1.85020485e-10 7.55272020e-11]
[1.97463934e-08 5.51697968e-09 5.05183445e-09 5.46553820e-10
3.90646911e-09 5.40378654e-10 4.64980319e-10 4.46147028e-10
2.83284357e-10 9.02380842e-10]
[1.84060272e-10 8.06823829e-09 2.39162069e-09 2.88417050e-12
1.11595709e-09 1.55767202e-10 1.47279406e-09 1.93730135e-09
4.83151735e-10 2.98746190e-09]
[3.40098425e-09 4.45851185e-10 5.17576377e-10 3.00410848e-10
2.02798368e-10 7.42820675e-12 5.92506545e-11 1.24472794e-10
2.59870368e-11 1.31865847e-13]
[7.94475221e-10 1.08658262e-09 1.09226895e-09 3.82505237e-11
9.50162426e-10 1.73067047e-10 4.15770911e-11 2.36596418e-10
2.62166229e-11 2.73813491e-10]
[1.88499415e-09 4.53325705e-10 1.25080745e-10 2.78488609e-10
8.89832684e-11 3.71712572e-10 5.98104838e-10 1.16109262e-10
4.50761596e-10 2.11128940e-11]
[1.44282952e-10 8.68993666e-10 2.97423183e-10 4.72857977e-11
2.88599998e-10 1.37561222e-13 4.54558986e-10 5.02272733e-10
2.92141104e-10 1.34348686e-10]
[1.86601463e-09 3.83427138e-10 1.61051711e-09 2.92268136e-12
1.06327469e-09 4.20783429e-10 7.82513463e-12 1.37372120e-11
2.09645385e-10 8.64024019e-11]
[7.65833219e-09 7.59114418e-09 2.26576828e-09 7.71837299e-12
1.49782020e-09 1.65051608e-09 3.58540843e-09 2.88248322e-09
9.29937396e-10 5.92467259e-10]
[3.18239384e-09 9.97903110e-10 2.41323670e-10 1.06714635e-10
2.37877412e-10 1.10606823e-10 7.65118013e-10 4.44430620e-10
4.37719917e-12 6.16956643e-11]
[6.36732062e-10 6.13655818e-10 5.21273465e-11 3.60727169e-12
7.05127836e-11 4.16157740e-11 1.68794214e-11 2.37946382e-10
2.34710900e-11 5.58806558e-11]
[7.37421312e-11 2.34278588e-11 1.35127842e-10 2.13392684e-11
1.01758573e-10 7.04268688e-12 5.67269932e-11 1.90685110e-11
2.59448597e-11 7.95198085e-12]
[4.71242270e-12 4.45101178e-16 1.86374098e-11 3.96108128e-12
7.39145328e-12 2.89928130e-12 5.37815432e-11 9.42638770e-14
2.23302303e-11 1.78911417e-13]
[2.32422601e-09 1.66842426e-09 2.39781820e-09 3.15040252e-12
7.88922299e-10 9.46101740e-10 1.99771819e-11 2.88444761e-10
3.02911664e-10 3.39169057e-10]
[1.30836533e-10 4.54275865e-10 2.85560280e-10 1.10274440e-11
3.71756783e-10 6.39723471e-11 2.06350764e-11 1.04635398e-10
2.57994187e-11 3.29989652e-10]
[6.35946764e-09 6.49294452e-10 1.18155064e-09 3.47964217e-10
1.19793153e-09 1.56252776e-11 7.00652149e-11 4.21025942e-10
5.27846206e-10 5.55748260e-14]
[2.71095691e-08 4.06926925e-09 1.02799370e-08 2.67106954e-09
4.07652888e-09 1.53949099e-12 5.90505256e-12 5.79710959e-10
8.50716999e-10 6.51217943e-12]
[2.43652022e-10 1.02056456e-08 4.72962074e-09 1.55934403e-10
5.30884699e-09 7.31792976e-10 3.02104027e-11 1.99294144e-09
2.06439576e-10 4.81550992e-09]
[6.81377929e-11 2.44695874e-10 1.38379526e-09 2.50410101e-11
5.78368143e-10 4.98771223e-10 6.04808292e-13 6.76589858e-12
1.48146069e-13 4.47128132e-10]
[3.93064815e-09 4.57639773e-09 1.00565897e-08 8.54622439e-14
2.88172885e-09 2.37772787e-09 2.40950169e-10 1.53464756e-10
2.43497109e-10 1.68620229e-14]
[2.49020584e-10 6.15153249e-10 9.91155479e-11 3.39223976e-11
3.23086417e-11 5.39053485e-12 2.77798395e-10 7.32349495e-11
1.90919129e-10 3.59377446e-11]
[2.69771688e-09 1.57463237e-10 5.89235422e-10 1.31010995e-10
1.63351647e-10 1.18435062e-10 2.19547543e-11 3.99772883e-12
2.07370825e-11 6.86892018e-13]
[1.61682399e-08 3.86146827e-09 3.96085321e-09 2.46624989e-10
1.49706625e-09 9.55260846e-10 1.79965907e-10 9.67801750e-10
1.84698818e-11 9.66778156e-12]
[6.18868792e-09 1.18442153e-09 2.12699394e-09 9.22407210e-11
7.39907857e-11 6.93877748e-10 3.60909384e-12 2.50952594e-10
4.13630813e-10 6.46893937e-11]
[1.92170859e-09 6.77735834e-10 7.99340247e-10 1.94011143e-10
6.26833379e-10 9.30217423e-10 6.66006209e-11 9.94633434e-11
2.20756998e-10 1.05756146e-13]
[6.26835567e-09 2.64859103e-11 3.75992760e-10 1.57772539e-10
3.55360650e-11 2.40165350e-12 4.99752864e-10 1.98879275e-11
1.37275148e-10 3.97780072e-10]
[6.61519383e-09 8.12417139e-10 2.45389057e-12 6.20195903e-10
6.41292072e-11 3.74481806e-10 3.59537408e-10 2.86545433e-10
4.43548334e-10 1.72571295e-09]
[8.07023392e-09 1.08874143e-09 4.80221808e-10 4.09803219e-10
8.10270151e-10 3.57690992e-11 1.97285990e-11 4.37851267e-10
9.09842002e-12 2.38001879e-11]
[2.01274196e-07 2.16858969e-08 5.03659015e-08 3.32945941e-09
4.50113347e-08 3.30397719e-08 4.29172233e-09 9.55805533e-10
6.74482780e-09 1.26080922e-10]
[3.41777757e-09 8.84665922e-09 4.05199242e-09 3.07729812e-10
3.25952565e-09 4.13777513e-12 4.01876810e-11 1.81784008e-09
5.37807193e-12 1.64192096e-10]]
492 [[9.86435504e-10 1.63601219e-09 1.17445483e-09 1.00109304e-11
4.76736921e-10 2.63162499e-10 1.21341972e-11 2.11230822e-10
6.05075260e-12 7.56592933e-11]
[1.30425438e-08 2.91217670e-09 6.45351617e-09 9.16196705e-10
1.99941875e-09 2.36264180e-09 1.53507377e-10 3.30466566e-10
2.39683619e-11 1.21964044e-10]
[1.24622428e-08 2.14970694e-10 1.78929689e-09 7.62917888e-10
6.12221217e-10 1.15559702e-09 6.13962648e-10 2.36702000e-10
6.42747368e-10 1.12142634e-10]
[3.50942047e-09 2.35729285e-11 1.90336464e-10 3.24584998e-10
2.90799206e-13 2.99437758e-12 2.76477452e-10 4.60413278e-12
2.05816550e-11 1.90931674e-10]
[5.19152962e-09 6.10132094e-10 1.86616971e-09 2.36129529e-10
6.56498272e-10 1.98186598e-10 1.15106356e-10 7.39460042e-11
2.55835373e-11 2.70671848e-13]
[1.39930674e-10 4.22001281e-11 2.51379688e-10 1.15485510e-12
5.66288686e-11 6.01106208e-11 6.11690177e-11 8.60114805e-12
2.21307878e-12 1.53881938e-11]
[3.73793340e-09 4.36553820e-11 5.14609454e-11 4.62434873e-11
2.20342462e-11 1.60980418e-11 6.14079610e-10 3.50424414e-12
2.70241960e-10 2.36665241e-11]
[7.20991118e-11 5.90133756e-11 1.50147216e-11 9.43960543e-12
4.15797138e-13 3.91997933e-12 4.10453900e-11 3.81409742e-12
1.87280970e-12 1.07544444e-11]
[1.84695820e-10 1.30630988e-09 1.64777282e-09 1.39392557e-13
1.18418439e-09 3.18466688e-10 2.19790923e-10 2.22672555e-10
5.72368155e-11 4.82578371e-10]
[1.15077199e-10 2.53517505e-10 1.10517049e-09 1.43923748e-11
5.97385400e-10 4.24327204e-10 6.71731727e-12 6.96363892e-11
7.79112234e-12 2.79173430e-10]
[3.16669935e-12 2.62822817e-10 4.74051422e-10 8.13927013e-15
6.84717595e-11 7.76539362e-11 5.09881671e-11 3.00145841e-11
1.56533706e-11 1.46424255e-10]
[1.83186206e-10 2.07632933e-11 7.21733266e-12 1.05798063e-11
1.48497887e-12 9.01502960e-12 3.70977628e-12 1.29059849e-12
1.65840201e-11 2.21469293e-11]
[1.32528254e-11 4.05122137e-09 9.60725694e-10 1.38325516e-11
5.97744927e-10 1.08867642e-10 5.23943069e-10 5.12653341e-10
3.92692497e-10 2.36689414e-10]
[1.00620573e-09 5.62510318e-10 7.07655045e-10 3.13547038e-11
2.92616733e-10 2.11753748e-10 1.25862243e-11 1.04697763e-10
2.01851247e-13 4.22359377e-11]
[2.31695729e-10 9.53579299e-10 6.13132896e-10 4.30302677e-11
4.76375831e-10 2.49348773e-11 7.28744936e-12 8.75681767e-10
8.75115877e-11 4.82914967e-10]
[5.74790056e-11 6.74145662e-11 2.65443737e-12 3.60536593e-13
1.95188154e-11 3.74609118e-11 1.22969083e-11 8.14455426e-11
5.77965140e-11 1.33362361e-10]
[3.27912419e-09 3.06489576e-09 4.04412496e-09 1.11192048e-10
2.27489679e-09 2.21287861e-09 8.82852520e-11 3.25340022e-10
3.60288332e-10 8.07191174e-10]
[4.51958901e-09 5.17814875e-09 4.86286620e-09 1.94703123e-10
1.43617624e-09 4.59877076e-10 1.07926328e-10 6.83268559e-10
9.74211513e-12 8.19789952e-11]
[1.33254857e-08 4.53367783e-08 4.23343124e-08 4.77967901e-10
1.07247484e-08 3.06071765e-09 3.18635263e-08 7.14729828e-09
6.33451250e-09 6.35325849e-09]
[4.01550527e-10 5.76016597e-09 1.52017669e-09 2.51591726e-10
1.05300827e-09 2.40536845e-10 1.09024050e-10 1.61819192e-09
7.98110097e-10 3.17538350e-09]
[9.23514969e-10 4.29047625e-11 3.84047149e-12 1.39031379e-12
8.92458198e-12 1.05808047e-10 3.20392710e-11 8.82783655e-11
2.70430106e-11 1.44619294e-12]
[9.60801984e-09 9.32618332e-09 8.74939367e-09 2.19848429e-11
6.09230828e-09 2.40397968e-10 3.56759571e-11 2.76295492e-09
4.08270140e-10 2.37145119e-10]
[1.94779842e-10 2.73857428e-10 5.63139275e-11 1.79568811e-12
2.27774592e-11 3.01312165e-12 4.62834770e-10 5.51043155e-14
6.07506344e-11 2.20288513e-11]
[1.03510688e-09 8.80566889e-12 4.71964807e-11 1.08789418e-10
1.38094494e-10 2.17022861e-13 2.61012392e-12 3.15881689e-12
8.03375641e-13 6.30172515e-12]
[5.91288327e-10 7.68023806e-11 1.00214214e-10 1.87490713e-11
8.30771104e-14 6.40881220e-13 6.56660867e-11 6.22976360e-11
5.32816807e-11 7.33575279e-11]
[9.89090658e-10 1.93109593e-09 3.73096428e-10 1.97666429e-10
5.40144618e-11 9.95818687e-13 3.71242912e-10 2.79989815e-10
2.58103722e-10 9.96091343e-10]
[3.16990253e-09 6.69770419e-10 4.06903123e-10 3.26050375e-11
6.24915527e-10 1.86853485e-10 3.61612930e-11 9.23975144e-12
6.47774443e-12 1.26721621e-11]
[3.18684894e-10 1.08793421e-10 2.69727623e-10 8.07927983e-12
5.67222201e-11 6.68885780e-13 1.07902704e-10 1.01542990e-11
6.92170039e-11 2.80156403e-16]
[1.03471688e-08 1.02733293e-09 1.28191494e-09 3.93515372e-10
4.92846692e-10 2.61755174e-10 5.28362753e-10 2.85045658e-10
1.08523067e-11 8.14079667e-11]
[4.09012347e-10 7.14070289e-10 1.44962368e-09 3.53602047e-11
5.42536741e-10 8.60601960e-10 1.83506657e-10 9.76961654e-11
5.20939978e-12 1.24721153e-09]
[2.51576289e-09 3.05768710e-09 2.78140195e-09 1.42017603e-10
4.73408483e-10 9.46431555e-11 1.64470633e-09 4.91034605e-10
3.66673012e-10 1.34597137e-09]
[1.19694669e-09 7.66579726e-10 2.01631286e-09 9.83598666e-12
2.14627356e-10 2.85662921e-10 2.03425025e-11 5.10530793e-11
8.61291480e-13 9.46909623e-12]
[1.52173032e-09 2.03363062e-09 4.45914953e-10 7.30951661e-11
1.66536170e-10 1.39475373e-11 4.42310488e-11 2.44368172e-10
1.89973425e-10 5.05288363e-12]
[1.41246777e-10 5.42927775e-11 1.27128868e-10 4.71689177e-12
1.28176530e-14 8.18411208e-14 3.52688128e-11 2.53741359e-11
1.40883636e-11 4.36061193e-11]
[3.86480887e-09 1.03396902e-11 7.10283737e-10 1.72316312e-10
3.89604188e-10 6.02391404e-10 4.08948006e-10 1.75329322e-11
1.76554684e-10 7.16628056e-11]
[1.88445704e-08 5.26596036e-09 4.82731503e-09 5.21156338e-10
3.73186530e-09 5.17822015e-10 4.44772882e-10 4.25483566e-10
2.71290524e-10 8.63842127e-10]
[1.74887960e-10 7.70427433e-09 2.28559977e-09 2.75822390e-12
1.06587595e-09 1.48707820e-10 1.40562724e-09 1.85072461e-09
4.61232982e-10 2.85229894e-09]
[3.23461808e-09 4.26192287e-10 4.94886776e-10 2.85147765e-10
1.93966178e-10 6.94722474e-12 5.65127639e-11 1.19062381e-10
2.48766707e-11 8.77997680e-14]
[7.57750612e-10 1.03238610e-09 1.03885500e-09 3.66504815e-11
9.03254910e-10 1.64953362e-10 3.94195926e-11 2.24285793e-10
2.48558930e-11 2.59423920e-10]
[1.79381979e-09 4.28374300e-10 1.19683325e-10 2.64612719e-10
8.51436542e-11 3.53588079e-10 5.66811492e-10 1.09995178e-10
4.27274344e-10 2.04094917e-11]
[1.37173353e-10 8.21373545e-10 2.80539201e-10 4.47937130e-11
2.73445279e-10 1.33456833e-13 4.30178427e-10 4.75465444e-10
2.76648641e-10 1.27177095e-10]
[1.78184671e-09 3.64542216e-10 1.53438167e-09 2.81911047e-12
1.01327276e-09 4.01115320e-10 7.45494801e-12 1.30109643e-11
1.99904410e-10 8.17685896e-11]
[7.28466906e-09 7.23525981e-09 2.16882523e-09 7.21636306e-12
1.42908876e-09 1.56256580e-09 3.40916718e-09 2.75134148e-09
8.84362107e-10 5.70886635e-10]
[3.03100501e-09 9.52807842e-10 2.34181123e-10 1.00735050e-10
2.28465664e-10 1.02787326e-10 7.26697759e-10 4.26113391e-10
4.16289901e-12 5.59779680e-11]
[6.07694958e-10 5.81283992e-10 4.95253492e-11 3.37364403e-12
6.69732534e-11 3.93004875e-11 1.65203647e-11 2.26814025e-10
2.19150540e-11 5.30598678e-11]
[7.01754678e-11 2.25505507e-11 1.28141940e-10 2.03224251e-11
[Verbose per-iteration output truncated: iterations through 499 each print a large array of near-zero values in scientific notation]
7.76507023e-09 2.23068442e-09 2.29699480e-08 5.18793529e-09
4.56177875e-09 4.59674196e-09]
[2.66440033e-10 4.09991070e-09 1.10105924e-09 1.78128917e-10
7.47616356e-10 1.75510796e-10 7.52939202e-11 1.16619226e-09
5.65370330e-10 2.26667271e-09]
[6.52948010e-10 2.85777883e-11 2.81170388e-12 9.27950295e-13
6.49697884e-12 7.33046823e-11 2.43513808e-11 6.29323760e-11
1.85316569e-11 1.18611276e-12]
[6.93387720e-09 6.63053511e-09 6.27546908e-09 1.67667495e-11
4.36460611e-09 1.77568583e-10 2.34553591e-11 1.96779711e-09
2.83933554e-10 1.65885416e-10]
[1.44913054e-10 2.17646174e-10 4.47597136e-11 1.55978459e-12
1.78279176e-11 1.76314770e-12 3.51750375e-10 1.26579237e-13
4.80424535e-11 1.56456341e-11]
[7.29094458e-10 7.11271266e-12 3.73278569e-11 7.50001421e-11
1.03555582e-10 3.54475788e-13 2.00136287e-12 2.92463118e-12
6.46041603e-13 3.05199811e-12]
[4.19628594e-10 5.29791193e-11 7.24990982e-11 1.35654599e-11
1.04064694e-13 2.69272042e-13 4.47970166e-11 4.37398921e-11
3.63746152e-11 5.01113831e-11]
[7.14821604e-10 1.38709195e-09 2.70909595e-10 1.43018348e-10
3.93450785e-11 7.38758451e-13 2.66815988e-10 2.02555648e-10
1.86070112e-10 7.20776171e-10]
[2.29639170e-09 4.86913096e-10 3.00704412e-10 2.35624050e-11
4.56134196e-10 1.38302474e-10 2.63179366e-11 6.80886681e-12
5.00926631e-12 9.68830577e-12]
[2.19655696e-10 7.83692105e-11 1.90208209e-10 5.59718209e-12
4.03581895e-11 5.09716266e-13 7.84883606e-11 6.90921700e-12
5.05643001e-11 3.35132555e-16]
[7.40847541e-09 7.47139952e-10 9.30093060e-10 2.80798907e-10
3.57760413e-10 1.92303302e-10 3.82226085e-10 2.05365749e-10
8.12266154e-12 6.03773037e-11]
[2.74275734e-10 5.31542030e-10 1.06455648e-09 2.42101619e-11
3.95643766e-10 6.19179485e-10 1.29181881e-10 7.28496090e-11
3.47545505e-12 8.89679955e-10]
[1.78941974e-09 2.18087669e-09 1.97650639e-09 1.00494090e-10
3.37833354e-10 6.66933210e-11 1.17860172e-09 3.47011436e-10
2.64041529e-10 9.52967606e-10]
[8.56683852e-10 5.45295676e-10 1.43692179e-09 7.37806380e-12
1.53232155e-10 2.04451793e-10 1.45381318e-11 3.55922559e-11
6.82757162e-13 6.43941255e-12]
[1.08434282e-09 1.45429582e-09 3.19449258e-10 5.19552317e-11
1.19415960e-10 9.83337463e-12 3.18166661e-11 1.74463347e-10
1.35831614e-10 3.72803830e-12]
[9.30405605e-11 4.49184783e-11 9.75018638e-11 2.65492440e-12
5.91377966e-14 2.23612122e-14 2.54745179e-11 1.98290727e-11
1.05819627e-11 3.48887606e-11]
[2.80032643e-09 7.20492357e-12 5.08102238e-10 1.25386890e-10
2.80265883e-10 4.30448351e-10 2.92949587e-10 1.24190604e-11
1.27065653e-10 4.98021791e-11]
[1.35835549e-08 3.80314064e-09 3.51126631e-09 3.73773768e-10
2.70888767e-09 3.83637810e-10 3.25594026e-10 3.05658902e-10
2.00132971e-10 6.34900044e-10]
[1.22627215e-10 5.57783209e-09 1.66394655e-09 2.02562602e-12
7.73316868e-10 1.07589017e-10 1.01520390e-09 1.34466670e-09
3.33133144e-10 2.06222517e-09]
[2.27786504e-09 3.10922989e-10 3.61646188e-10 1.98027393e-10
1.42023170e-10 4.31374272e-12 4.05816488e-11 8.75630861e-11
1.82145424e-11 6.07737150e-15]
[5.43812753e-10 7.22420382e-10 7.30459732e-10 2.69442843e-11
6.34025785e-10 1.17641085e-10 2.72645354e-11 1.55034872e-10
1.68925282e-11 1.77586480e-10]
[1.26888644e-09 2.88068250e-10 8.80990440e-11 1.85031768e-10
6.25034985e-11 2.49454734e-10 3.88557280e-10 7.52972517e-11
2.94741772e-10 1.58459248e-11]
[9.62355718e-11 5.53380103e-10 1.86694111e-10 3.05269764e-11
1.87987966e-10 8.17623543e-14 2.93432111e-10 3.23516909e-10
1.88428672e-10 8.72192485e-11]
[1.29085069e-09 2.55777007e-10 1.09280607e-09 2.23215444e-12
7.23716674e-10 2.86914796e-10 5.36705745e-12 8.71045030e-12
1.43875891e-10 5.59949572e-11]
[5.13443271e-09 5.16988975e-09 1.59632841e-09 4.48612551e-12
1.02900847e-09 1.06483643e-09 2.39633533e-09 1.98464734e-09
6.22680537e-10 4.39772118e-10]
[2.15481944e-09 6.89659584e-10 1.88719787e-10 6.72113346e-11
1.72105387e-10 6.12071942e-11 5.06923188e-10 3.17592267e-10
2.89872007e-12 2.73945353e-11]
[4.38941725e-10 3.97667722e-10 3.45404238e-11 2.05172283e-12
4.66428981e-11 2.61682941e-11 1.40422377e-11 1.61684087e-10
1.36568518e-11 3.73384362e-11]
[4.91642193e-11 1.73150089e-11 8.80346171e-11 1.44377794e-11
6.57673872e-11 4.25423817e-12 3.90439776e-11 1.33555423e-11
1.74941630e-11 4.65236571e-12]
[2.79532509e-12 1.87865875e-14 1.35076748e-11 2.76498001e-12
5.18283922e-12 1.81059989e-12 3.65453924e-11 9.58406664e-14
1.49187768e-11 1.57762354e-13]
[1.63523014e-09 1.15176227e-09 1.66534005e-09 1.95308871e-12
5.44843122e-10 6.53666119e-10 1.39365324e-11 1.99314522e-10
2.09421009e-10 2.31641198e-10]
[8.18470572e-11 3.03396498e-10 1.91888649e-10 7.05819617e-12
2.45945859e-10 4.35324409e-11 1.24197668e-11 6.89186731e-11
1.58935766e-11 2.14039713e-10]
[4.30607902e-09 4.50019126e-10 8.19912506e-10 2.34081582e-10
8.27001345e-10 9.36711981e-12 4.69342763e-11 2.89783817e-10
3.59543028e-10 2.27880253e-13]
[1.84096997e-08 2.75843873e-09 7.00532796e-09 1.80758361e-09
2.78278570e-09 5.52224150e-13 4.46272110e-12 3.92098993e-10
5.72737193e-10 4.65377517e-12]
[1.64173047e-10 6.89487966e-09 3.18520651e-09 1.03945546e-10
3.55136305e-09 4.88604631e-10 2.19335269e-11 1.34533863e-09
1.39315462e-10 3.22188166e-09]
[4.03627366e-11 1.77524376e-10 9.67851768e-10 1.65303973e-11
3.99061467e-10 3.37238223e-10 1.09003836e-12 5.24122825e-12
2.15722364e-14 3.02067284e-10]
[2.71724593e-09 3.20332576e-09 6.99292288e-09 1.16203145e-13
2.01542344e-09 1.64838095e-09 1.67439206e-10 1.08901516e-10
1.66955893e-10 7.08531610e-14]
[1.68958571e-10 4.09192588e-10 6.58747493e-11 2.25479479e-11
2.15386189e-11 3.90784139e-12 1.89177591e-10 4.84567835e-11
1.29609712e-10 2.34507040e-11]
[1.84863962e-09 1.09046821e-10 4.08713111e-10 8.93935624e-11
1.11842179e-10 8.31066255e-11 1.52463962e-11 2.67370528e-12
1.43852175e-11 5.55006162e-13]
[1.10307929e-08 2.61904628e-09 2.71154653e-09 1.68530586e-10
1.02746231e-09 6.61684629e-10 1.26765773e-10 6.60239295e-10
1.39596561e-11 7.47466186e-12]
[4.21407734e-09 7.92258559e-10 1.43940110e-09 6.38874250e-11
5.06144538e-11 4.73183066e-10 2.96172320e-12 1.68474206e-10
2.85498165e-10 4.32752987e-11]
[1.32947132e-09 4.48422908e-10 5.26038322e-10 1.36187393e-10
4.24908503e-10 6.26226044e-10 4.48629333e-11 6.32114570e-11
1.52085592e-10 5.84184183e-17]
[4.24280662e-09 1.90924830e-11 2.58794388e-10 1.06370560e-10
2.29848560e-11 1.50404816e-12 3.37340632e-10 1.41487597e-11
9.27633736e-11 2.64680806e-10]
[4.47856736e-09 5.36100784e-10 2.63102325e-12 4.16204426e-10
4.87605650e-11 2.58953514e-10 2.40216724e-10 1.88985934e-10
2.99748742e-10 1.14425641e-09]
[5.48888822e-09 7.19552355e-10 3.27177274e-10 2.80680723e-10
5.48895320e-10 2.14956928e-11 1.57204002e-11 2.94829922e-10
7.52213104e-12 1.59372991e-11]
[1.39028954e-07 1.49955546e-08 3.48324252e-08 2.29178723e-09
3.11570559e-08 2.28656801e-08 2.96913741e-09 6.62593903e-10
4.67003999e-09 8.57721861e-11]
[2.29382014e-09 5.86387234e-09 2.67676375e-09 2.05116100e-10
2.16683038e-09 3.19582887e-12 2.60962984e-11 1.20148123e-09
3.53196471e-12 1.04724329e-10]]
###Markdown
 PyTorch: TensorsA **Tensor** is conceptually identical to a numpy array: a Tensor is an n-dimensional array, and PyTorch provides many functions for operating on these Tensors. Behind the scenes, Tensors can keep track of a computational graph and gradients, but they're also useful as a generic tool for scientific computing.Unlike numpy, PyTorch Tensors can utilize GPUs to accelerate their numeric computations. To run a PyTorch Tensor on a GPU, you simply need to cast it to a new datatype.Here we use PyTorch Tensors to fit a two-layer network to random data. Like the numpy example above we need to manually implement the forward and backward passes through the network:
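As a hedged aside (not part of the original tutorial cell), the same two-layer fit can also rely on the computational-graph tracking mentioned above, letting autograd derive the backward pass instead of hand-coding it; a minimal sketch:
###Code
# Minimal autograd variant of the fit below (illustrative sketch, not the tutorial's code)
import torch

dtype = torch.float
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# requires_grad=True asks PyTorch to record operations on these tensors
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    loss.backward()                   # populates w1.grad and w2.grad
    with torch.no_grad():             # update weights outside the recorded graph
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        w1.grad.zero_()
        w2.grad.zero_()
###Output
_____no_output_____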
###Code
# -*- coding: utf-8 -*-
!nvcc --version
import torch

dtype = torch.float
device = torch.device("cpu")
device = torch.device("cuda:0")  # overrides the line above; requires a CUDA-capable GPU

# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random input and output data
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype)
w2 = torch.randn(H, D_out, device=device, dtype=dtype)

learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

    # Update weights using gradient descent
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
###Output
_____no_output_____ |
notebooks/NER_Evaluation.ipynb | ###Markdown
 Named Entity Recognition (NER) NER is a task in information extraction that locates and classifies named entities in a body of text, such as people, locations, times, and numerical values. The first step in knowledge graph construction is to identify the named entities in a text and use them as anchors for building relationships to other entities in the graph. In this notebook, we evaluate several methods of entity extraction and justify the choice of the best-performing one. We will use the following text example in this notebook:
###Code
starwars_text = 'Darth_Vader, also known by his birth name Anakin Skywalker, is a fictional character in the Star Wars franchise. Darth Vader appears in the original film trilogy as a pivotal antagonist whose actions drive the plot, while his past as Anakin Skywalker and the story of his corruption are central to the narrative of the prequel trilogy. The character was created by George Lucas and has been portrayed by numerous actors. His appearances span the first six Star Wars films, as well as Rogue One, and his character is heavily referenced in Star Wars: The Force Awakens. He is also an important character in the Star Wars expanded universe of television series, video games, novels, literature and comic books. Originally a Jedi who was prophesied to bring balance to the Force, he falls to the dark side of the Force and serves the evil Galactic Empire at the right hand of his Sith master, Emperor Palpatine (also known as Darth Sidious).'
starwars_text
###Output
_____no_output_____
###Markdown
spaCy
###Code
import spacy
import pandas as pd
import nltk
nlp = spacy.load('en_core_web_lg')
###Output
_____no_output_____
###Markdown
Small Text Example
###Code
doc = nlp('darthvader is also known by his birth name anakinskywalker.')
results = pd.DataFrame(columns=['Text', 'Start', 'End', 'Label'])
for ent in doc.ents:
results = results.append({'Text':ent.text, 'Start':ent.start_char, 'End':ent.end_char, 'Label':ent.label_}, ignore_index=True)
results
###Output
_____no_output_____
###Markdown
Large Text Example
###Code
doc = nlp(starwars_text)
results = pd.DataFrame(columns=['Text', 'Start', 'End', 'Label'])
for ent in doc.ents:
results = results.append({'Text':ent.text, 'Start':ent.start_char, 'End':ent.end_char, 'Label':ent.label_}, ignore_index=True)
results
###Output
_____no_output_____
###Markdown
For larger bodies of text, spaCy does a good job of identifying named entities of various types. We can compare this performance with Stanford NER. Stanford NER with NLTK Tokenizers Small Text Example
###Code
sentences = nltk.sent_tokenize('Darth Vader is also known by his birth name Anakin Skywalker.')
ner_tagger = nltk.tag.StanfordNERTagger("../stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz", "../stanford-ner-2018-10-16/stanford-ner.jar")
ner_dict = {}
results = []
for sent in sentences:
words = [token for token in nltk.word_tokenize(sent)]
tagged = ner_tagger.tag(words)
results += tagged
for res in results:
ner_dict[res[0]] = res[1]
ner_dict
###Output
_____no_output_____
###Markdown
Large Text Example
###Code
sentences = nltk.sent_tokenize(starwars_text)
ner_tagger = nltk.tag.StanfordNERTagger("../stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz", "../stanford-ner-2018-10-16/stanford-ner.jar")
ner_dict = {}
results = []
for sent in sentences:
words = [token for token in nltk.word_tokenize(sent)]
tagged = ner_tagger.tag(words)
results += tagged
for res in results:
ner_dict[res[0]] = res[1]
ner_dict
###Output
_____no_output_____
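###Markdown
 As a rough, hedged comparison (assuming the `results` DataFrame from the spaCy cell and the `ner_dict` from the Stanford cell above are still in memory), we can count how many of spaCy's entity strings share at least one token that Stanford also tagged as an entity:
###Code
# Tokens Stanford tagged with anything other than 'O' (i.e. recognized entity tokens)
stanford_entity_tokens = {tok for tok, tag in ner_dict.items() if tag != 'O'}

# spaCy entity strings that share at least one token with a Stanford entity token
overlap = [text for text in results['Text']
           if any(tok in stanford_entity_tokens for tok in text.split())]

print(f"spaCy entities: {len(results)}")
print(f"Stanford entity tokens: {len(stanford_entity_tokens)}")
print(f"spaCy entities overlapping Stanford: {len(overlap)}")
###Output
_____no_output_____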
###Markdown
Stanford NER with spaCy Tokenizers Small Text Example
###Code
nlp = spacy.lang.en.English()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
doc = nlp('Darth Vader is also known by his birth name Anakin Skywalker.')
sentences = [sent.string.strip() for sent in doc.sents]
ner_tagger = nltk.tag.StanfordNERTagger("../stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz", "../stanford-ner-2018-10-16/stanford-ner.jar")
ner_dict = {}
results = []
nlp = spacy.lang.en.English()
tokenizer = spacy.tokenizer.Tokenizer(nlp.vocab)
for sent in sentences:
words = [token.orth_ for token in tokenizer(sent)]
tagged = ner_tagger.tag(words)
results += tagged
for res in results:
ner_dict[res[0]] = res[1]
ner_dict
###Output
_____no_output_____
###Markdown
Large Text Example
###Code
nlp = spacy.lang.en.English()
nlp.add_pipe(nlp.create_pipe('sentencizer'))
doc = nlp(starwars_text)
sentences = [sent.string.strip() for sent in doc.sents]
ner_tagger = nltk.tag.StanfordNERTagger("../stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz", "../stanford-ner-2018-10-16/stanford-ner.jar")
ner_dict = {}
results = []
nlp = spacy.lang.en.English()
tokenizer = spacy.tokenizer.Tokenizer(nlp.vocab)
for sent in sentences:
words = [token.orth_ for token in tokenizer(sent)]
tagged = ner_tagger.tag(words)
results += tagged
for res in results:
ner_dict[res[0]] = res[1]
ner_dict
###Output
_____no_output_____ |
tutorial/01 - Optimization and Math/03 - Sphere function, vectorized.ipynb | ###Markdown
Sphere function, vectorizedIn the previous example, we solved the constrained Rosenbrock problem. This was a 2-dimensional problem, so we created two variables: $x$ and $y$.However, imagine we had a problem with 100 variables. It'd be pretty tedious to create these variables individually and do the math on each variable one-by-one. (Not to mention the fact that it'd be slow - rule 1 of scientific Python is to vectorize everything!)So, what we can do instead is (you guessed it) create variables that are vectors. Think of a vectorized variable as a box that contains $n$ entries, each of which is a scalar variable.Let's demonstrate this by finding the minimum of the n-dimensional sphere problem. The sphere problem, mathematically, is a simple quadratic program:$$ \underset{x}{\text{minimize }} \sum x_i^2 $$
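As a hedged aside (not from the original tutorial), the same vectorized variable can also take constraints applied to every entry at once; `opti.subject_to` is assumed here to mirror the CasADi `Opti` API that AeroSandbox wraps, so treat this as a sketch rather than canonical usage:
###Code
# Sketch: elementwise constraint on a vectorized variable (assumes subject_to mirrors CasADi)
import aerosandbox as asb
import aerosandbox.numpy as np

opti = asb.Opti()

x = opti.variable(init_guess=np.ones(shape=10))  # 10-dimensional variable

opti.subject_to(x >= 0.5)       # one call constrains all 10 entries elementwise
opti.minimize(np.sum(x ** 2))

sol = opti.solve()
print(sol.value(x))             # expected to sit at the bound, roughly 0.5 everywhere
###Output
_____no_output_____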
###Code
import aerosandbox as asb
import aerosandbox.numpy as np # Whoa! What is this? Why are we writing this instead of `import numpy as np`? Don't worry, we'll talk about this in the next tutorial :)
N = 100 # Let's optimize in 100-dimensional space.
opti = asb.Opti()
# Define optimization variables
x = opti.variable(
init_guess=np.ones(shape=N) # Creates a variable with an initial guess that is [1, 1, 1, 1,...] with N entries.
) # Note that the fact that we're declaring a vectorized variable was *inferred* automatically the shape of our initial guess.
# Define objective
f = np.sum(x ** 2)
opti.minimize(f)
# Optimize
sol = opti.solve()
# Extract values at the optimum
x_opt = sol.value(x)
# Print values
print(f"x = {x_opt}")
###Output
This is Ipopt version 3.12.3, running with linear solver mumps.
NOTE: Other linear solvers might be more efficient (see Ipopt documentation).
Number of nonzeros in equality constraint Jacobian...: 0
Number of nonzeros in inequality constraint Jacobian.: 0
Number of nonzeros in Lagrangian Hessian.............: 100
Total number of variables............................: 100
variables with only lower bounds: 0
variables with lower and upper bounds: 0
variables with only upper bounds: 0
Total number of equality constraints.................: 0
Total number of inequality constraints...............: 0
inequality constraints with only lower bounds: 0
inequality constraints with lower and upper bounds: 0
inequality constraints with only upper bounds: 0
iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls
0 1.0000000e+002 0.00e+000 2.00e+000 0.0 0.00e+000 - 0.00e+000 0.00e+000 0
1 0.0000000e+000 0.00e+000 0.00e+000 -11.0 1.00e+000 - 1.00e+000 1.00e+000f 1
Number of Iterations....: 1
(scaled) (unscaled)
Objective...............: 0.0000000000000000e+000 0.0000000000000000e+000
Dual infeasibility......: 0.0000000000000000e+000 0.0000000000000000e+000
Constraint violation....: 0.0000000000000000e+000 0.0000000000000000e+000
Complementarity.........: 0.0000000000000000e+000 0.0000000000000000e+000
Overall NLP error.......: 0.0000000000000000e+000 0.0000000000000000e+000
Number of objective function evaluations = 2
Number of objective gradient evaluations = 2
Number of equality constraint evaluations = 0
Number of inequality constraint evaluations = 0
Number of equality constraint Jacobian evaluations = 0
Number of inequality constraint Jacobian evaluations = 0
Number of Lagrangian Hessian evaluations = 1
Total CPU secs in IPOPT (w/o function evaluations) = 0.001
Total CPU secs in NLP function evaluations = 0.000
EXIT: Optimal Solution Found.
solver : t_proc (avg) t_wall (avg) n_eval
nlp_f | 0 ( 0) 0 ( 0) 2
nlp_grad_f | 0 ( 0) 0 ( 0) 3
nlp_hess_l | 0 ( 0) 0 ( 0) 1
total | 2.00ms ( 2.00ms) 1.99ms ( 1.99ms) 1
x = [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0.]
|
Checksum Bot.ipynb | ###Markdown
Checksum Function
###Code
import hashlib
import random
from bs4 import BeautifulSoup
import json
import requests
import pandas as pd
## This function calculates the SHA-1 checksum of a file's contents (passed in as bytes)
def get_checksum(file_content):
    sha = hashlib.sha1()
    sha.update(file_content)
    digest = sha.hexdigest()
    return digest
###Output
_____no_output_____
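###Markdown
 For a large file on disk, the same digest can be computed without loading everything into memory by feeding SHA-1 fixed-size chunks. This is a hedged illustration; the path is hypothetical and not part of the wiki data:
###Code
def get_file_checksum(path, chunk_size=8192):
    sha = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            sha.update(chunk)
    return sha.hexdigest()

# Example usage (hypothetical local file):
# print(get_file_checksum('some_local_download.csv'))
###Output
_____no_output_____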
###Markdown
Obtaining Data We query the wiki to retrieve the names of all the DataDownload pages along with their Content URL
###Code
query = """PREFIX wiki: <http://localhost:8080/enigma_dev/index.php/Special:URIResolver/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX enigma: <https://w3id.org/enigma#>
SELECT ?w ?a
WHERE
{
?w enigma:hasContentUrl ?a.
}
ORDER BY ?w"""
# `url` (the SPARQL endpoint) and `replace` (the URI prefix stripped from results below)
# are assumed to be defined in an earlier cell of the original notebook.
response = requests.post(url, data = {'query': query})
res = json.loads(response.text)
query_results=[]
print("Data Downloads:")
for item in res['results']['bindings']:
w1 = item['w']['value'].replace(replace,"")
a1 = item['a']['value'].replace(replace,"")
query_results.append([w1,a1])
df = pd.DataFrame(query_results)
df.columns=['Data Download','Content URL']
df.head(20)
###Output
Data Downloads:
###Markdown
Logging into Wiki
###Code
#First we log in to the wiki
S = requests.Session()
URL = "http://organicdatacuration.org/enigma_dev/api.php"
# Retrieve login token
PARAMS_0 = {
'action':"query",
'meta':"tokens",
'type':"login",
'format':"json"
}
DATA = S.get(url=URL, params=PARAMS_0).json()
LOGIN_TOKEN = DATA['query']['tokens']['logintoken']
print("Login Token: ",LOGIN_TOKEN)
# Go to http://organicdatacuration.org/enigma_new/index.php/Special:BotPasswords for lgname & lgpassword, and add them below
PARAMS_1 = {
'action':"login",
'lgname':"",
'lgpassword':"",
'lgtoken':LOGIN_TOKEN,
'format':"json"
}
DATA = S.post(URL, data=PARAMS_1).json()
print(DATA)
##Given a page and checksum value, the function writes the checksum value to the wiki
def update_checksum_wiki(page_to_write,checksum_value):
text_to_append="{{#set:|Checksum (E)="+checksum_value+"}}"
PARAMS_2 = {
"action": "query",
"meta": "tokens",
"format": "json"
}
R = S.get(url=URL, params=PARAMS_2)
DATA = R.json()
CSRF_TOKEN = DATA['query']['tokens']['csrftoken']
# Step 4: POST request to edit a page
PARAMS_EDIT = {
"action": "edit",
"title": page_to_write,
"section": "new",
"format": "json",
"text": text_to_append,
"token": CSRF_TOKEN,
}
R = S.post(URL, data=PARAMS_EDIT)
DATA = R.json()
print(DATA)
###Output
_____no_output_____
###Markdown
Update Checksums in Wiki
###Code
##Cell to update all checksum values for the Data Downloads in the DataFrame
for idx, row in df.iterrows():
    page_name = row[0]
    # checksum is not calculated for files in Google Drive
    if page_name.endswith('.csv'):
        continue
    else:
        url = row[1]
        response = S.get(url)
        checksum = get_checksum(response.content)
        update_checksum_wiki(page_name, checksum)
###Output
_____no_output_____ |
Theano Gradient Descent.ipynb | ###Markdown
Simple Numpy Implementation
###Code
# Imports added so this cell runs standalone (assumed to live in an earlier cell of the
# original notebook). `orthogonal` is assumed to be an external helper (not shown) that
# returns a random orthogonal matrix of the given dimension.
import numpy as np
import matplotlib.pyplot as pl
from itertools import islice
import theano as th
import theano.tensor as T


def random_psd_matrix(dim, rgen=np.random):
    # Random positive semi-definite matrix: O diag(|evals|) O^T
    O = orthogonal(dim, randn=rgen.randn)
    evals = np.abs(rgen.randn(dim))
    return O @ np.diag(evals) @ O.T
def np_gaussian_kernel(mu, sigma):
def kernel(x):
diff = x - mu
z = np.sum(diff * np.tensordot(diff, sigma, axes=(-1, 0)), axis=-1)
return np.exp(-z)
def grad_kernel(x):
return - np.tensordot((x - mu), sigma, axes=(-1, 0)) * kernel(x)
return kernel, grad_kernel
def np_gradient_descent_estimator(df, x_init, eta=1.0):
x = x_init
while True:
x = x + eta * df(x)
yield x
DIM = 2
MU = np.zeros(DIM)
SIGMA = random_psd_matrix(DIM)
STEPS = 30
g, grad_g = np_gaussian_kernel(MU, SIGMA)
pl.figure(0, figsize=(5, 5))
xx, yy = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
coords = np.array([xx.ravel(), yy.ravel()]).T
zz = g(coords).reshape(xx.shape)
pl.contourf(xx, yy, zz, levels=np.linspace(np.min(zz), np.max(zz), 25))
x_init = np.random.randn(DIM)
solution = np_gradient_descent_estimator(grad_g, x_init)
path = np.array(list(islice(solution, STEPS))).T
pl.plot(path[0], path[1], marker='o', markersize=5)
###Output
_____no_output_____
###Markdown
Theano Manual Gradient descent
###Code
def th_gaussian_kernel(mu_v, sigma_v):
mu = T.constant(mu_v)
sigma = T.constant(sigma_v)
kernel = lambda x: T.exp(- T.dot((x - mu), T.dot(sigma, (x - mu))))
grad_kernel = lambda x: - T.dot(sigma, x) * kernel(x)
return kernel, grad_kernel
def th_gradient_descent_estimator(mu, sigma, steps, x_init, eta=1.0):
kernel, grad_kernel = th_gaussian_kernel(mu, sigma)
x = th.shared(x_init)
grad_kernel = T.grad(kernel(x), x)
step = th.function([], x, updates=[(x, x + eta * grad_kernel)])
for _ in range(steps - 1):
step()
return step()
pl.figure(0, figsize=(5, 5))
xx, yy = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
coords = np.array([xx.ravel(), yy.ravel()]).T
zz = g(coords).reshape(xx.shape)
pl.contourf(xx, yy, zz, levels=np.linspace(np.min(zz), np.max(zz), 25))
pl.plot(path[0], path[1], marker='o', markersize=5)
DIM = 5000
MU = np.zeros(DIM)
SIGMA = random_psd_matrix(DIM)
STEPS = 1000
x_init = np.random.randn(DIM)
grad_g = lambda x: - np.dot(SIGMA, x) * np.exp(-np.dot(x, np.dot(SIGMA, x)))
solution = np_gradient_descent_estimator(grad_g, x_init)
result_np = list(islice(solution, STEPS))[-1]
%%time
solution = np_gradient_descent_estimator(grad_g, x_init)
result_np = list(islice(solution, STEPS))[-1]
%%time
result_th = th_gradient_descent_estimator(MU, SIGMA, STEPS, x_init)
result_th - result_np
###Output
_____no_output_____ |
PCA Final Project.ipynb | ###Markdown
from sklearn.preprocessing import StandardScaler
features = ['sepal length', 'sepal width', 'petal length', 'petal width']
# Separating out the features
x = df.loc[:, features].values
# Separating out the target
y = df.loc[:, ['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)

row = ['sex', 'age', 'education', 'currentSmoker', 'cigsPerDay', 'BPMeds', 'prevalentStroke', 'prevalentHyp', 'diabetes', 'totChol', 'BMI', 'heartRate', 'glucose', 'sysBP', 'diaBP']

from sklearn.preprocessing import StandardScaler
features = ['male', 'age', 'education', 'currentSmoker', 'cigsPerDay', 'BPMeds', 'prevalentStroke', 'prevalentHyp', 'diabetes', 'totChol', 'sysBP', 'diaBP', 'BMI', 'heartRate', 'glucose', 'TenYearCHD']
# Separating out the features
x = framhrt_df.loc[:, features].values
# Separating out the target
y = framhrt_df.loc[:, ['target']].values
# Standardizing the features
x = StandardScaler().fit_transform(x)
###Code
# Imports assumed from earlier cells of the original notebook, added so the cell runs standalone
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.RandomState(1)   # random state for reproducible sample data
X = np.dot(rng.rand(2, 2), rng.randn(2, 200)).T
plt.scatter(X[:, 0], X[:, 1])
plt.axis('equal');
#The fit learns some quantities from the data, most importantly the "components" and "explained variance"
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
print(pca.components_)
print(pca.explained_variance_)
def draw_vector(v0, v1, ax=None):
ax = ax or plt.gca()
arrowprops=dict(arrowstyle='->',
linewidth=2,
shrinkA=0, shrinkB=0)
ax.annotate('', v1, v0, arrowprops=arrowprops)
# plot data
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
for length, vector in zip(pca.explained_variance_, pca.components_):
v = vector * 3 * np.sqrt(length)
draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal');
pca = PCA(n_components=1)
pca.fit(X)
X_pca = pca.transform(X)
print("original shape: ", X.shape)
print("transformed shape:", X_pca.shape)
#The light points are the original data, while the orange points are the projected version
#leaves only the component(s) of the data with the highest variance
X_new = pca.inverse_transform(X_pca)
plt.scatter(X[:, 0], X[:, 1], alpha=0.2)
plt.scatter(X_new[:, 0], X_new[:, 1], alpha=0.8)
plt.axis('equal');
###Output
_____no_output_____
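###Markdown
 As a hedged aside, the fraction of total variance retained by the single kept component can be read directly off the fitted PCA object from the cell above:
###Code
# Variance retained by the 1-component projection fitted above
print(pca.explained_variance_ratio_)
print(f"variance retained: {pca.explained_variance_ratio_.sum():.3f}")
###Output
_____no_output_____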
###Markdown
from sklearn.datasets import load_digits
digits = load_digits()
digits.data.shape
###Code
# Loading the dataframe using pandas
#data = pd.read_csv('Desktop\Train_data.csv')
framhrt_df=pd.read_csv("framingham.csv")
# Dataframe dimensions
#rows, columns = data.shape
rows, columns = framhrt_df.shape #framhrt_df
framhrt_df.isnull().sum()
#heart_df.isnull().sum()
###Output
_____no_output_____ |
redes neuronales/Untitled.ipynb | ###Markdown
 Neural network to determine whether a person is diabetic or not. By Johan Sebastian Fuentes Ortega. Based on the page: https://stackabuse.com/creating-a-neural-network-from-scratch-in-python
###Code
import numpy as np
from matplotlib import pyplot as plt
# Sigmoid function
def sigmoid(x):
    return 1/(1+np.exp(-x))

# Derivative of the sigmoid
def sigmoid_der(x):
    return sigmoid(x)*(1-sigmoid(x))

# First, randomly generate 100 linearly spaced points between -10 and 10
input = np.linspace(-10, 10, 100)

# Plot the input values against their sigmoid values, marked in red
plt.plot(input, sigmoid(input), c="r")
plt.show()

# This is the original feature set
feature_set = np.array([[0,1,0],[0,0,1],[1,0,0],[1,1,0],[1,1,1]])

# The labels are the observed values: the data that say whether the person has diabetes
labels = np.array([[1,0,0,1,1]])
labels = labels.reshape(5,1)

# The seed is set so the same random values are produced every time this file is run
np.random.seed(42)

# Generate a 3x1 matrix of weights
weights = np.random.rand(3,1)
bias = np.random.rand(1)

# Set the learning rate to 5%
lr = 0.05

# Train the algorithm on our data 20,000 times
for epoch in range(20000):
    inputs = feature_set

    # Feedforward step 1:
    # dot product of the original set with the weights, plus the bias, to produce the scalar
    XW = np.dot(feature_set, weights) + bias

    # Feedforward step 2:
    # pass the dot product through the sigmoid
    z = sigmoid(XW)

    # Backpropagation step 1:
    # the error is the sigmoid output minus the labels
    error = z - labels
    # Here we can watch the error shrink
    print(error.sum())

    # Backpropagation step 2:
    # the derivative gives 2(z - labels); the constant 2 is dropped,
    # so the derivative of the cost with respect to the prediction is just the error
    dcost_dpred = error
    # The derivative of the prediction with respect to z is the derivative of the sigmoid
    dpred_dz = sigmoid_der(z)
    # Product of the two derivatives above
    z_delta = dcost_dpred * dpred_dz

    # Transpose the original feature set
    inputs = feature_set.T
    # Multiply the learning rate by the transposed data and z_delta to update the weights;
    # the learning rate controls how quickly the algorithm converges
    weights -= lr * np.dot(inputs, z_delta)

    # Finally update the bias (b), giving the function: z = x1*w1 + x2*w2 + x3*w3 + b
    for num in z_delta:
        bias -= lr * num

# Predict the case where a person smokes, to find the probability that they have diabetes
single_point = np.array([1,0,0])
# Take the sigmoid of the dot product of the person's features with the learned weights, plus the bias
result = sigmoid(np.dot(single_point, weights) + bias)
# Finally, print the probability of having diabetes or not
print(result)

single_point = np.array([0,1,0])
result = sigmoid(np.dot(single_point, weights) + bias)
print(result)
###Output
[0.99837029]
|
NaturalLanguageProcessing/SequenceModelsAndLSTM.ipynb | ###Markdown
Sequence Models and Long-Short Term Memory NetworksFrom the [PyTorch tutorial on sequence models and LSTM](https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html):> At this point, we have seen various feed-forward networks. That is, there is no state maintained by the network at all. This might not be the behavior we want. Sequence models are central to NLP: they are models where there is some sort of dependence through time between your inputs. The classical example of a sequence model is the Hidden Markov Model for part-of-speech tagging. Another example is the conditional random field. > A recurrent neural network is a network that maintains some kind of state. For example, its output could be used as part of the next input, so that information can propogate along as the network passes over the sequence. In the case of an LSTM, for each element in the sequence, there is a corresponding *hidden state* $h_t$, which in principle can contain information from arbitrary points earlier in the sequence. We can use the hidden state to predict words in a language model, part-of-speech tags, and a myriad of other things. LSTMs in PyTorch PyTorch's LSTM expects 3D tensors of shape (*seq_len, batch, input_size*). For example, in the WordEmbeddings notebook we used an embedding layer with 128-dimensional embeddings. If we had input sequences of length 10, and a minibatch size of 64, the input tensor to the LSTM would have size (10, 64, 128).
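To make the shape convention concrete, here is a small hedged sketch (the numbers simply match the example above; they are not from the original notebook):
###Code
# Illustration of the (seq_len, batch, input_size) convention described above
import torch
import torch.nn as nn

seq_len, batch, input_size, hidden_size = 10, 64, 128, 32
lstm = nn.LSTM(input_size, hidden_size)

x = torch.randn(seq_len, batch, input_size)   # shape (10, 64, 128)
out, (h_n, c_n) = lstm(x)

print(out.shape)   # torch.Size([10, 64, 32]) -- one hidden state per timestep
print(h_n.shape)   # torch.Size([1, 64, 32])  -- final hidden state
###Output
_____no_output_____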
###Code
%matplotlib inline
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
torch.manual_seed(1)
# Simple example of an LSTM
lstm = nn.LSTM(3, 3) # Input dim is 3, output dim is 3
inputs = [torch.randn(1, 3) for _ in range(5)] # A 5-long sequence of 1 x 3 tensors
# Initialize the hidden state
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))
for i in inputs:
# Step through the sequence one element at a time. After each step, hidden contains the hidden state.
out, hidden = lstm(i.view(1, 1, -1), hidden)
# Alternatively, we can do the entire sequence at once.
# The first value returned by LSTM is all of the hidden states throughout the sequence. The
# second is just the most recent hidden state (compare the last slice of "out" with "hidden"
# below - they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence.
# "hidden" will allow you to continue the sequence and backpropogate, by passing it as an
# argument to the LSTM at a later time
# Add the second dimension
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3)) # Clean out hidden state
out, hidden = lstm(inputs, hidden)
print(out)
print(hidden)
###Output
tensor([[[-0.0187, 0.1713, -0.2944]],
[[-0.3521, 0.1026, -0.2971]],
[[-0.3191, 0.0781, -0.1957]],
[[-0.1634, 0.0941, -0.1637]],
[[-0.3368, 0.0959, -0.0538]]])
(tensor([[[-0.3368, 0.0959, -0.0538]]]), tensor([[[-0.9825, 0.4715, -0.0633]]]))
###Markdown
 LSTM for part-of-speech taggingNow we'll design a model that uses an LSTM to get part-of-speech tags (like "verb", "adjective", etc).Let our input sequence be $w_1, ... , w_M$, where $w_i \in V$, our vocabulary. We define $T$ to be the tag set and $y_i$ to be the actual tag of a word $w_i$. The predicted tag of $w_i$ is $\hat{y}_i$.This is a structured prediction model, where our output is a sequence $\hat{y}_1, ... , \hat{y}_M$, where $\hat{y}_i \in T$.To do the prediction, we pass the sentence into an LSTM network. The hidden state at each timestep $i$ is written as $h_i$, and each tag is given a unique index (like `word_to_idx` in the WordEmbedding notebook). The prediction rule for the predicted tag $\hat{y}_i$ is:$\hat{y}_i = \text{argmax}_j (\log \text{Softmax}(Ah_i + b))_j$That is, take the log softmax of the affine map of the hidden state, and the predicted tag is the tag with the highest score in the resulting logits vector. This implies that the dimensionality of the target space of $A$ is $|T|$.
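As a hedged illustration of the prediction rule above (dimensions chosen arbitrarily, not tied to the model defined below): the hidden state is passed through an affine map, log softmax turns the scores into log probabilities over the tag set, and argmax picks the predicted tag.
###Code
# Sketch of the decode step: argmax_j (log Softmax(A h_i + b))_j
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_dim, tagset_size, seq_len = 6, 3, 5
h = torch.randn(seq_len, hidden_dim)         # one hidden state per word
A = nn.Linear(hidden_dim, tagset_size)       # the affine map A h_i + b

log_probs = F.log_softmax(A(h), dim=1)       # log probabilities over the tag set T
y_hat = torch.argmax(log_probs, dim=1)       # predicted tag index for each word
print(y_hat)
###Output
_____no_output_____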
###Code
def prepare_sequence(seq, to_idx):
idxs = [to_idx[w] for w in seq]
return torch.tensor(idxs, dtype=torch.long)
training_data = [
("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
word_to_idx = {}
for sentence, tags in training_data:
for word in sentence:
if word not in word_to_idx:
word_to_idx[word] = len(word_to_idx)
print(word_to_idx)
tag_to_idx = {"DET": 0, "NN": 1, "V": 2}
idx_to_tag = {v: k for k, v in tag_to_idx.items()}
# These will usually be more like 32 or 64 dimensional.
# We'll keep them small so we can see how the weights change as we train.
EMBEDDING_DIM = 6
HIDDEN_DIM = 6
###Output
{'The': 0, 'dog': 1, 'ate': 2, 'the': 3, 'apple': 4, 'Everybody': 5, 'read': 6, 'that': 7, 'book': 8}
###Markdown
Create the model:
###Code
class LSTMTagger(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
super(LSTMTagger, self).__init__()
self.hidden_dim = hidden_dim
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
# The LSTM takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
# The linear layer that maps from hidden state space to tag space
self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
self.hidden = self.init_hidden()
def init_hidden(self):
# Before we've done anything, we don't have any hidden state. Refer to the
# PyTorch documentation to see exactly why they have this dimensionality.
# The axes semantics are (num_layers, minibatch_size, hidden_dim)
return (torch.zeros(1, 1, self.hidden_dim),
torch.zeros(1, 1, self.hidden_dim))
def forward(self, sentence):
embeds = self.word_embeddings(sentence)
lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
tag_scores = F.log_softmax(tag_space, dim=1)
return tag_scores
###Output
_____no_output_____
###Markdown
Train the model:
###Code
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_idx), len(tag_to_idx))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# See what the scores are before training
# Note that element i, j of the output is the score for tag j and word i.
# We don't need to train, so the code is wrapped in torch.no_grad()
with torch.no_grad():
inputs = prepare_sequence(training_data[0][0], word_to_idx)
tag_scores = model(inputs)
print(tag_scores)
# Train for real
for epoch in range(300): # Wouldn't normally do so many epochs
for sentence, tags in training_data:
# Step 1. Remember that PyTorch accumulates gradients.
# Need to clear them out before each instance
model.zero_grad()
# Clear out hidden state of LSTM, detaching it from its history on
# the last iteration
model.hidden = model.init_hidden()
# Step 2. Get inputs ready for the network (list -> Tensor of word indices)
sentence_in = prepare_sequence(sentence, word_to_idx)
targets = prepare_sequence(tags, tag_to_idx)
# Step 3. Run forward pass
tag_scores = model(sentence_in)
# Step 4. Compute the loss, gradients, and update the parameters by calling
# optimizer.step()
loss = loss_function(tag_scores, targets)
loss.backward()
optimizer.step()
# See what the scores are after training
with torch.no_grad():
inputs = prepare_sequence(training_data[0][0], word_to_idx)
tag_scores = model(inputs)
# The sentence is "the dog ate the apple". i, j corresponds to score for tag j
# for word i. The predicted tag is the maximum scoring tag. Here, we can see the
# predicted sequence below is 0, 1, 2, 0, 1, which is DET NOUN VERB DET NOUN
print("tag scores:\n{}".format(tag_scores))
predicted_tags = np.argmax(tag_scores.detach().numpy(), axis=1)
print([idx_to_tag[i] for i in predicted_tags.tolist()])
###Output
tensor([[-1.1389, -1.2024, -0.9693],
[-1.1065, -1.2200, -0.9834],
[-1.1286, -1.2093, -0.9726],
[-1.1190, -1.1960, -0.9916],
[-1.0137, -1.2642, -1.0366]])
tag scores:
tensor([[-0.0858, -2.9355, -3.5374],
[-5.2313, -0.0234, -4.0314],
[-3.9098, -4.1279, -0.0368],
[-0.0187, -4.7809, -4.5960],
[-5.8170, -0.0183, -4.1879]])
['DET', 'NN', 'V', 'DET', 'NN']
###Markdown
Augmenting the LSTM part-of-speech tagger with character-level featuresFrom the associated tutorial:> In the example above, each word had an embedding, which served as the inputs to our sequence model. Let’s augment the word embeddings with a representation derived from the characters of the word. We expect that this should help significantly, since character-level information like affixes have a large bearing on part-of-speech. For example, words with the affix *-ly* are almost always tagged as adverbs in English.> > To do this, let $c_w$ be the character-level representation of word $w$. Let $x_w$ be the word embedding as before. Then the input to our sequence model is the concatenation of $x_w$ and $c_w$. So if $x_w$ has dimension 5, and $c_w$ dimension 3, then our LSTM should accept an input of dimension 8.>> To get the character level representation, do an LSTM over the characters of a word, and let $c_w$ be the final hidden state of this LSTM. Hints:> - There are going to be two LSTM’s in your new model. The original one that outputs POS tag scores, and the new one that outputs a character-level representation of each word.> - To do a sequence model over characters, you will have to embed characters. The character embeddings will be the input to the character LSTM.
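As a hedged sketch of the concatenation described above (toy tensors, not the model's real embeddings): a 5-dimensional word embedding $x_w$ and a 3-dimensional character-level representation $c_w$ combine into the 8-dimensional input for the word-level LSTM.
###Code
# Toy demonstration of concatenating word and character representations
import torch

x_w = torch.randn(1, 5)                      # word embedding
c_w = torch.randn(1, 3)                      # final hidden state of the character LSTM
lstm_input = torch.cat([x_w, c_w], dim=1)

print(lstm_input.shape)                      # torch.Size([1, 8])
###Output
_____no_output_____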
###Code
# convert the characters of each word into a usable data set
char_to_idx = {}
for sentence, _ in training_data:
for word in sentence:
for char in list(word):
if char not in char_to_idx:
char_to_idx[char] = len(char_to_idx) + 1
print(char_to_idx)
def prepare_data(sentence, word_to_idx, char_to_idx):
word_idxs = [word_to_idx[w] for w in sentence]
word_tensors = torch.tensor(word_idxs, dtype=torch.long)
# Create tensor for characters
char_tensors = []
for word in sentence:
char_list = [char_to_idx[c] for c in list(word)]
char_tensors.append(torch.tensor(char_list, dtype=torch.long))
    # Sort words by descending length (longest first)
    word_lengths = [-len(t) for t in char_tensors]
    sort_idxs = np.argsort(word_lengths)
    # Create 2D tensor of character indices by padding the length-sorted sequences
    padded_chars = nn.utils.rnn.pad_sequence([char_tensors[i] for i in sort_idxs])
    # Return the inverse permutation so the char-LSTM hidden states can be put back
    # into the original word order inside the model's forward pass
    unsort_idxs = np.argsort(sort_idxs)
    return word_tensors, padded_chars, unsort_idxs
# Test this
word_tensors, char_tensors, unsort_map = prepare_data(training_data[1][0], word_to_idx, char_to_idx)
print(word_tensors)
print(char_tensors)
print(unsort_map)
class CharLvlTagger(nn.Module):
def __init__(self, word_embedding_dim,
char_embedding_dim,
word_hidden_dim,
char_hidden_dim,
vocab_size,
num_chars,
tagset_size):
super(CharLvlTagger, self).__init__()
# General local vars
self.char_hidden_dim = char_hidden_dim
self.word_hidden_dim = word_hidden_dim
# Word embedding layer
self.word_embeddings = nn.Embedding(vocab_size, word_embedding_dim)
# Char embedding layer
self.char_embeddings = nn.Embedding(num_chars, char_embedding_dim)
# First LSTM layer, where we build a representation of the characters
self.char_lstm = nn.LSTM(char_embedding_dim, char_hidden_dim)
# Second LSTM layer. The input is the concatenation of the word embedding and the
# character-level representation of the word
self.word_lstm = nn.LSTM(word_embedding_dim + char_hidden_dim, word_hidden_dim)
# Affine layer mapping wordvecs to logits
self.hidden2tag = nn.Linear(word_hidden_dim, tagset_size)
# Initial hidden layers
self.char_hidden = self.init_hidden(1, char_hidden_dim)
self.word_hidden = self.init_hidden(1, word_hidden_dim)
def init_hidden(self, batch_sz, hidden_dim):
# Zeroize the hidden state
return (torch.zeros(1, batch_sz, hidden_dim),
torch.zeros(1, batch_sz, hidden_dim))
def forward(self, words, chars, unsort_map):
# Zero hidden states
self.char_hidden = self.init_hidden(len(words), self.char_hidden_dim)
self.word_hidden = self.init_hidden(1, self.word_hidden_dim)
# Get word and character embeddings
word_embeds = self.word_embeddings(words)
char_embeds = self.char_embeddings(chars)
# Run character embeddings through LSTM
char_lstm_out, self.char_hidden = self.char_lstm(char_embeds, self.char_hidden)
#hidden_out = torch.gather(torch.squeeze(self.char_hidden[0], dim=0),
# dim=0,
# index=torch.tensor(unsort_map))
hidden_out = torch.squeeze(self.char_hidden[0], dim=0)
hidden_out = hidden_out[unsort_map, :]
# Concatenate the output hidden layer of the character-level LSTM to the word representations
word_char = torch.cat([word_embeds, hidden_out], dim=1).unsqueeze(1)
word_lstm_out, self.word_hidden = self.word_lstm(word_char, self.word_hidden)
# Affine map to word tag space
tag_space = self.hidden2tag(word_lstm_out.view(len(words), -1))
tag_scores = F.log_softmax(tag_space, dim=1)
return tag_scores
###Output
_____no_output_____
###Markdown
Train the model:
###Code
WORD_EMBEDDING_DIM = 32
CHAR_EMBEDDING_DIM = 32
WORD_HIDDEN_DIM = 6
CHAR_HIDDEN_DIM = 3
model = CharLvlTagger(WORD_EMBEDDING_DIM,
CHAR_EMBEDDING_DIM,
WORD_HIDDEN_DIM,
CHAR_HIDDEN_DIM,
len(word_to_idx),
len(char_to_idx) + 1, # Necessary since we zero-padded unequal length sequences
len(tag_to_idx))
loss_fn = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# See what the scores are before training
with torch.no_grad():
word_tensors, char_tensors, unsort_map = prepare_data(training_data[0][0],
word_to_idx,
char_to_idx)
tag_scores = model(word_tensors, char_tensors, unsort_map)
print(tag_scores)
# Train for real
for epoch in range(300):
for sentence, tags in training_data:
model.zero_grad()
word_tensors, char_tensors, unsort_map = prepare_data(sentence, word_to_idx, char_to_idx)
targets = prepare_sequence(tags, tag_to_idx)
# Forward pass
tag_scores = model(word_tensors, char_tensors, unsort_map)
# Compute loss, gradients, and update params
loss = loss_fn(tag_scores, targets)
loss.backward()
optimizer.step()
# See what the scores are after training
with torch.no_grad():
word_tensors, char_tensors, unsort_map = prepare_data(training_data[0][0],
word_to_idx,
char_to_idx)
tag_scores = model(word_tensors, char_tensors, unsort_map)
print("Tag scores:\n{}".format(tag_scores))
predicted_tags = np.argmax(tag_scores.detach().numpy(), axis=1)
print("Predicted tags:\n{}".format([idx_to_tag[i] for i in predicted_tags.tolist()]))
###Output
tensor([[-1.4144, -1.2138, -0.7768],
[-1.3311, -1.2105, -0.8261],
[-1.2147, -1.3579, -0.8074],
[-1.3896, -1.1621, -0.8255],
[-1.2954, -1.2081, -0.8499]])
Tag scores:
tensor([[-0.0214, -4.5017, -4.5949],
[-5.5343, -0.0139, -4.6232],
[-3.0364, -3.3154, -0.0881],
[-0.0430, -3.9105, -3.8155],
[-4.0689, -0.0221, -5.3535]])
Predicted tags:
['DET', 'NN', 'V', 'DET', 'NN']
|
{{cookiecutter.dir_name}}/dev_notebooks/00_core.ipynb | ###Markdown
{{cookiecutter.dir_name}}> API details.
###Code
#hide
from nbdev.showdoc import *
###Output
_____no_output_____ |
karabo/examples/how_to_use_karabo_example.ipynb | ###Markdown
How to use KARABO
###Code
from karabo.Imaging import imager, source_detection
from karabo.simulation import telescope, observation, interferometer
from karabo.simulation.sky_model import get_GLEAM_Sky
###Output
_____no_output_____
###Markdown
SimulationThe sky and telescope simulation is currently provided completely by OSKAR.We use the built-in GLEAM Sky Catalog for our sky model.
###Code
# Get GLEAM Survey Sky
phase_center = [250, -80]
gleam_sky = get_GLEAM_Sky()
gleam_sky.plot_sky(phase_center)
###Output
/Users/cvoegele/development/SKA/karabo/simulation/sky_model.py:290: RuntimeWarning: invalid value encountered in log10
log_flux = np.log10(flux)
###Markdown
Then we filter the sky model based on radius.
###Code
sky = gleam_sky.filter_by_radius(0, .55, phase_center[0], phase_center[1])
sky.setup_default_wcs(phase_center=phase_center)
###Output
_____no_output_____
###Markdown
Let's have a closer look with the 2d image project explore_sky() provides.
###Code
sky.explore_sky(phase_center=phase_center, figsize=(8, 6), s=80,
xlim=(-.55, .55), ylim=(-.55, .55), with_labels=True)
###Output
_____no_output_____
###Markdown
 Telescope ModuleVarious observation parameters and pieces of metadata (`params`) must be passed to OSKAR's telescope module `oskar.Interferometer` as an `oskar.SettingsTree`.
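As a hedged aside, the sketch below shows roughly what such a settings dictionary looks like when handed to OSKAR directly; the keys are assumptions based on OSKAR's documented `oskar_sim_interferometer` settings schema, not Karabo internals:
###Code
# Sketch of passing observation parameters to OSKAR via a SettingsTree (keys are assumptions)
import oskar

params = {
    "observation": {
        "phase_centre_ra_deg": 250.0,
        "phase_centre_dec_deg": -80.0,
        "start_frequency_hz": 100e6,
        "num_channels": 64,
        "num_time_steps": 24,
    }
}

settings = oskar.SettingsTree("oskar_sim_interferometer")
settings.from_dict(params)   # values are validated against the settings schema
# sim = oskar.Interferometer(settings=settings)   # Karabo's InterferometerSimulation wraps this step
###Output
_____no_output_____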
###Code
askap_tel = telescope.get_ASKAP_Telescope()
askap_tel.plot_telescope()
###Output
_____no_output_____
###Markdown
Observation SimulationNow the sky module must be passed to the interferometer and the simulation of the observation must be started to generate the measurement set.
###Code
observation_settings = observation.Observation(100e6,
phase_centre_ra_deg=phase_center[0],
phase_centre_dec_deg=phase_center[1],
number_of_channels=64,
number_of_time_steps=24)
interferometer_sim = interferometer.InterferometerSimulation(channel_bandwidth_hz=1e6)
visibility_askap = interferometer_sim.run_simulation(askap_tel, sky, observation_settings)
###Output
W|
W|== WARNING: No GPU capability available.
W|
###Markdown
Dirty ImagesWe can create dirty images of visibilites and display them as shown below
###Code
imager_askap = imager.Imager(visibility_askap, imaging_npixel=2048,
imaging_cellsize=3.878509448876288e-05)
dirty = imager_askap.get_dirty_image()
dirty.plot()
###Output
<SkyCoord (ICRS): (ra, dec) in deg
(250., -80.)>
WCS Keywords
Number of WCS axes: 4
CTYPE : 'RA---SIN' 'DEC--SIN' 'STOKES' 'FREQ'
CRVAL : 250.0 -80.0 1.0 100000000.0
CRPIX : 1025.0 1025.0 1.0 1.0
PC1_1 PC1_2 PC1_3 PC1_4 : 1.0 0.0 0.0 0.0
PC2_1 PC2_2 PC2_3 PC2_4 : 0.0 1.0 0.0 0.0
PC3_1 PC3_2 PC3_3 PC3_4 : 0.0 0.0 1.0 0.0
PC4_1 PC4_2 PC4_3 PC4_4 : 0.0 0.0 0.0 1.0
CDELT : -0.0022222222222222 0.0022222222222222 1.0 1.0
NAXIS : 2048 2048 1 64
###Markdown
Imaging
###Code
imager_askap.ingest_chan_per_blockvis = 1
imager_askap.ingest_vis_nchan = 16
deconvolved, restored, residual = imager_askap.imaging_rascil(
clean_nmajor=0,
clean_algorithm='mmclean',
clean_scales=[0, 6, 10, 30, 60],
clean_fractional_threshold=.3,
clean_threshold=.12e-3,
clean_nmoment=5,
clean_psf_support=640,
clean_restored_output='integrated')
# Any of the resulting output images of the CLEAN algorithm can be displayed and saved.
deconvolved.plot()
deconvolved.save_as_fits("./deconvolved_ASKAP_observation.fits")
restored.save_as_fits("./restored.fits")
###Output
WCS Keywords
Number of WCS axes: 4
CTYPE : 'RA---SIN' 'DEC--SIN' 'STOKES' 'FREQ'
CRVAL : 250.0 -80.0 1.0 100000000.0
CRPIX : 1025.0 1025.0 1.0 1.0
PC1_1 PC1_2 PC1_3 PC1_4 : 1.0 0.0 0.0 0.0
PC2_1 PC2_2 PC2_3 PC2_4 : 0.0 1.0 0.0 0.0
PC3_1 PC3_2 PC3_3 PC3_4 : 0.0 0.0 1.0 0.0
PC4_1 PC4_2 PC4_3 PC4_4 : 0.0 0.0 0.0 1.0
CDELT : -0.0022222222222222 0.0022222222222222 1.0 1.0
NAXIS : 2048 2048 1 16
###Markdown
Analysis via Source DetectionSource Detection on restored image.
###Code
detection_result = source_detection.detect_sources_in_image(restored)
detection_result.detection.show_fit()
detection_result.detection.show_fit()
# the phase centered wcs needs to be set on the sky for the sky to detection mapping to work.
sky.setup_default_wcs(phase_center=observation_settings.get_phase_centre())
detection_evaluation = source_detection.map_sky_to_detection(sky, 3.878509448876288e-05, 2048, detection_result, 20)
print(detection_evaluation.pixel_coordinates_detection)
print(detection_evaluation.pixel_coordinates_sky)
detection_evaluation.plot()
###Output
[[1210.06183524 1018.87825165 808.54643891 983.80790081 1212.41286288
919.69019978 1045.40593177 920.28541043 873.96228616 1045.16765915
1016.24066627 812.05505397]
[ 928.75137154 925.07241064 931.07041867 843.7716342 930.41913174
859.07125655 1040.97648684 858.12933171 891.59416821 1037.88167324
926.12419057 928.06379355]]
[[ 983.7541719 923.47622896 875.76150634 1214.68252179 1019.4823065
1175.05313524 810.42477501 1046.05954221]
[ 843.99399139 857.57544953 890.71521423 930.8138393 926.59679683
935.95007904 931.27137963 1040.78864429]]
|
fileutils.ipynb | ###Markdown
Copying dataset from s3 to local
###Code
dataset_id = "0805003"
dataset_path = data_export + dataset_id
# `mx` is assumed to be Huawei ModelArts' moxing SDK (imported as `mx`) and `data_export`
# an OBS/S3 export prefix; both are presumably defined in an earlier cell of the notebook.
mx.file.copy_parallel(dataset_path, 'Temp/data_in')
###Output
INFO:root:Listing OBS: 1000
INFO:root:Listing OBS: 2000
INFO:root:Listing OBS: 3000
INFO:root:Listing OBS: 4000
INFO:root:Listing OBS: 5000
INFO:root:pid: None. 1000/5904
INFO:root:pid: None. 2000/5904
INFO:root:pid: None. 3000/5904
INFO:root:pid: None. 4000/5904
INFO:root:pid: None. 5000/5904
###Markdown
Saving YOLOv5 weights to s3
###Code
prefix = "yolo"
yolo_folder = "YoloV5/runs/"
dst = nbt_save + prefix
mx.file.copy_parallel(yolo_folder, dst)
###Output
_____no_output_____ |