# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.039103, "end_time": "2020-12-23T23:31:42.542565", "exception": false, "start_time": "2020-12-23T23:31:42.503462", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# # RadarCOVID-Report
# + [markdown] papermill={"duration": 0.036161, "end_time": "2020-12-23T23:31:42.615267", "exception": false, "start_time": "2020-12-23T23:31:42.579106", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ## Data Extraction
# + papermill={"duration": 1.318649, "end_time": "2020-12-23T23:31:43.970787", "exception": false, "start_time": "2020-12-23T23:31:42.652138", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import pycountry
import retry
import seaborn as sns
# %matplotlib inline
# + papermill={"duration": 0.04571, "end_time": "2020-12-23T23:31:44.054232", "exception": false, "start_time": "2020-12-23T23:31:44.008522", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
current_working_directory = os.environ.get("PWD")
if current_working_directory:
    os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
# + [markdown] papermill={"duration": 0.036441, "end_time": "2020-12-23T23:31:44.127563", "exception": false, "start_time": "2020-12-23T23:31:44.091122", "status": "completed"} tags=[]
# ### Constants
# + papermill={"duration": 0.10719, "end_time": "2020-12-23T23:31:44.271272", "exception": false, "start_time": "2020-12-23T23:31:44.164082", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
# + [markdown] papermill={"duration": 0.036595, "end_time": "2020-12-23T23:31:44.345850", "exception": false, "start_time": "2020-12-23T23:31:44.309255", "status": "completed"} tags=[]
# ### Parameters
# + papermill={"duration": 0.045701, "end_time": "2020-12-23T23:31:44.428330", "exception": false, "start_time": "2020-12-23T23:31:44.382629", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
    report_backend_identifier = environment_backend_identifier
else:
    report_backend_identifier = default_backend_identifier
report_backend_identifier
# + papermill={"duration": 0.043174, "end_time": "2020-12-23T23:31:44.508729", "exception": false, "start_time": "2020-12-23T23:31:44.465555", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_enable_multi_backend_download = \
os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD")
if environment_enable_multi_backend_download:
    report_backend_identifiers = None
else:
    report_backend_identifiers = [report_backend_identifier]
report_backend_identifiers
# + papermill={"duration": 0.044853, "end_time": "2020-12-23T23:31:44.590411", "exception": false, "start_time": "2020-12-23T23:31:44.545558", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_invalid_shared_diagnoses_dates = \
os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES")
if environment_invalid_shared_diagnoses_dates:
    invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",")
else:
    invalid_shared_diagnoses_dates = []
invalid_shared_diagnoses_dates
# + [markdown] papermill={"duration": 0.037381, "end_time": "2020-12-23T23:31:44.665639", "exception": false, "start_time": "2020-12-23T23:31:44.628258", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### COVID-19 Cases
# + papermill={"duration": 0.043445, "end_time": "2020-12-23T23:31:44.746676", "exception": false, "start_time": "2020-12-23T23:31:44.703231", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
report_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=report_backend_identifier)
# + papermill={"duration": 32.205291, "end_time": "2020-12-23T23:32:16.989401", "exception": false, "start_time": "2020-12-23T23:31:44.784110", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe():
    return pd.read_csv(
        "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/owid-covid-data.csv")
confirmed_df_ = download_cases_dataframe()
# + papermill={"duration": 0.490151, "end_time": "2020-12-23T23:32:17.517883", "exception": false, "start_time": "2020-12-23T23:32:17.027732", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_df = confirmed_df_.copy()
confirmed_df = confirmed_df[["date", "new_cases", "iso_code"]]
confirmed_df.rename(
columns={
"date": "sample_date",
"iso_code": "country_code",
},
inplace=True)
def convert_iso_alpha_3_to_alpha_2(x):
    try:
        return pycountry.countries.get(alpha_3=x).alpha_2
    except Exception as e:
        logging.info(f"Error converting country ISO Alpha 3 code '{x}': {repr(e)}")
        return None
confirmed_df["country_code"] = confirmed_df.country_code.apply(convert_iso_alpha_3_to_alpha_2)
confirmed_df.dropna(inplace=True)
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
# + papermill={"duration": 0.052594, "end_time": "2020-12-23T23:32:17.608584", "exception": false, "start_time": "2020-12-23T23:32:17.555990", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_days_df["sample_date_string"] = \
confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_days_df.tail()
# + papermill={"duration": 0.044131, "end_time": "2020-12-23T23:32:17.690599", "exception": false, "start_time": "2020-12-23T23:32:17.646468", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def sort_source_regions_for_display(source_regions: list) -> list:
    if report_backend_identifier in source_regions:
        source_regions = [report_backend_identifier] + \
            list(sorted(set(source_regions).difference([report_backend_identifier])))
    else:
        source_regions = list(sorted(source_regions))
    return source_regions
# + papermill={"duration": 0.045038, "end_time": "2020-12-23T23:32:17.773407", "exception": false, "start_time": "2020-12-23T23:32:17.728369", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
report_source_regions = report_backend_client.source_regions_for_date(
date=extraction_datetime.date())
report_source_regions = sort_source_regions_for_display(
source_regions=report_source_regions)
report_source_regions
# + papermill={"duration": 0.049852, "end_time": "2020-12-23T23:32:17.861265", "exception": false, "start_time": "2020-12-23T23:32:17.811413", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def get_cases_dataframe(source_regions_for_date_function, columns_suffix=None):
    source_regions_at_date_df = confirmed_days_df.copy()
    source_regions_at_date_df["source_regions_at_date"] = \
        source_regions_at_date_df.sample_date.apply(
            lambda x: source_regions_for_date_function(date=x))
    source_regions_at_date_df.sort_values("sample_date", inplace=True)
    source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \
        source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x)))
    source_regions_at_date_df.tail()
    #%%
    source_regions_for_summary_df_ = \
        source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy()
    source_regions_for_summary_df_.rename(columns={"_source_regions_group": "source_regions"}, inplace=True)
    source_regions_for_summary_df_.tail()
    #%%
    confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"]
    confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns)
    for source_regions_group, source_regions_group_series in \
            source_regions_at_date_df.groupby("_source_regions_group"):
        source_regions_set = set(source_regions_group.split(","))
        confirmed_source_regions_set_df = \
            confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \
                .reset_index().sort_values("sample_date")
        confirmed_source_regions_group_df["covid_cases"] = \
            confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round()
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[confirmed_output_columns]
        confirmed_source_regions_group_df.fillna(method="ffill", inplace=True)
        confirmed_source_regions_group_df = \
            confirmed_source_regions_group_df[
                confirmed_source_regions_group_df.sample_date.isin(
                    source_regions_group_series.sample_date_string)]
        confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df)
    result_df = confirmed_output_df.copy()
    result_df.tail()
    #%%
    result_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True)
    result_df = confirmed_days_df[["sample_date_string"]].merge(result_df, how="left")
    result_df.sort_values("sample_date_string", inplace=True)
    result_df.fillna(method="ffill", inplace=True)
    result_df.tail()
    #%%
    result_df[["new_cases", "covid_cases"]].plot()
    if columns_suffix:
        result_df.rename(
            columns={
                "new_cases": "new_cases_" + columns_suffix,
                "covid_cases": "covid_cases_" + columns_suffix},
            inplace=True)
    return result_df, source_regions_for_summary_df_
# + papermill={"duration": 0.514321, "end_time": "2020-12-23T23:32:18.414134", "exception": false, "start_time": "2020-12-23T23:32:17.899813", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_eu_df, source_regions_for_summary_df = get_cases_dataframe(
report_backend_client.source_regions_for_date)
confirmed_es_df, _ = get_cases_dataframe(
lambda date: [spain_region_country_code],
columns_suffix=spain_region_country_code.lower())
# + [markdown] papermill={"duration": 0.042916, "end_time": "2020-12-23T23:32:18.499247", "exception": false, "start_time": "2020-12-23T23:32:18.456331", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Extract API TEKs
# + papermill={"duration": 83.925356, "end_time": "2020-12-23T23:33:42.466502", "exception": false, "start_time": "2020-12-23T23:32:18.541146", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
raw_zip_path_prefix = "Data/TEKs/Raw/"
fail_on_error_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=fail_on_error_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
# + papermill={"duration": 0.296951, "end_time": "2020-12-23T23:33:42.806340", "exception": false, "start_time": "2020-12-23T23:33:42.509389", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
# + papermill={"duration": 0.226899, "end_time": "2020-12-23T23:33:43.077772", "exception": false, "start_time": "2020-12-23T23:33:42.850873", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
# + papermill={"duration": 0.097216, "end_time": "2020-12-23T23:33:43.220759", "exception": false, "start_time": "2020-12-23T23:33:43.123543", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
# + papermill={"duration": 4.284446, "end_time": "2020-12-23T23:33:47.551449", "exception": false, "start_time": "2020-12-23T23:33:43.267003", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
# + papermill={"duration": 4.425797, "end_time": "2020-12-23T23:33:52.023400", "exception": false, "start_time": "2020-12-23T23:33:47.597603", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
# + papermill={"duration": 4.239785, "end_time": "2020-12-23T23:33:56.309243", "exception": false, "start_time": "2020-12-23T23:33:52.069458", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def compute_keys_cross_sharing(x):
    teks_x = x.key_data_x.item()
    common_teks = set(teks_x).intersection(x.key_data_y.item())
    common_teks_fraction = len(common_teks) / len(teks_x)
    return pd.Series(dict(
        common_teks=common_teks,
        common_teks_fraction=common_teks_fraction,
    ))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
    multi_backend_exposure_keys_by_region_combination_df = \
        multi_backend_exposure_keys_by_region_combination_df[
            multi_backend_exposure_keys_by_region_combination_df.region_x !=
            multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
# + papermill={"duration": 1.543524, "end_time": "2020-12-23T23:33:57.899534", "exception": false, "start_time": "2020-12-23T23:33:56.356010", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
# + papermill={"duration": 2.596217, "end_time": "2020-12-23T23:34:00.543613", "exception": false, "start_time": "2020-12-23T23:33:57.947396", "status": "completed"} tags=[]
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
# + [markdown] papermill={"duration": 0.047247, "end_time": "2020-12-23T23:34:00.638720", "exception": false, "start_time": "2020-12-23T23:34:00.591473", "status": "completed"} tags=[]
# ### Dump API TEKs
# + papermill={"duration": 2.867881, "end_time": "2020-12-23T23:34:03.554226", "exception": false, "start_time": "2020-12-23T23:34:00.686345", "status": "completed"} tags=[]
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
    os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_df.head()
# + [markdown] papermill={"duration": 0.04732, "end_time": "2020-12-23T23:34:03.651602", "exception": false, "start_time": "2020-12-23T23:34:03.604282", "status": "completed"} tags=[]
# ### Load TEK Dumps
# + papermill={"duration": 0.055295, "end_time": "2020-12-23T23:34:03.754391", "exception": false, "start_time": "2020-12-23T23:34:03.699096", "status": "completed"} tags=[]
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
    extracted_teks_df = pd.DataFrame(columns=["region"])
    file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
    if limit:
        file_paths = file_paths[:limit]
    for file_path in file_paths:
        logging.info(f"Loading TEKs from '{file_path}'...")
        iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
        extracted_teks_df = extracted_teks_df.append(
            iteration_extracted_teks_df, sort=False)
    extracted_teks_df["region"] = \
        extracted_teks_df.region.fillna(spain_region_country_code).copy()
    if region:
        extracted_teks_df = \
            extracted_teks_df[extracted_teks_df.region == region]
    return extracted_teks_df
# + papermill={"duration": 26.070346, "end_time": "2020-12-23T23:34:29.872592", "exception": false, "start_time": "2020-12-23T23:34:03.802246", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
# + papermill={"duration": 0.083454, "end_time": "2020-12-23T23:34:30.004870", "exception": false, "start_time": "2020-12-23T23:34:29.921416", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
# + [markdown] papermill={"duration": 0.049054, "end_time": "2020-12-23T23:34:30.102994", "exception": false, "start_time": "2020-12-23T23:34:30.053940", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Daily New TEKs
# + papermill={"duration": 2.385974, "end_time": "2020-12-23T23:34:32.538062", "exception": false, "start_time": "2020-12-23T23:34:30.152088", "status": "completed"} tags=[]
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
# + papermill={"duration": 114.914431, "end_time": "2020-12-23T23:36:27.501891", "exception": false, "start_time": "2020-12-23T23:34:32.587460", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def compute_teks_by_generation_and_upload_date(date):
    day_new_teks_set_df = tek_list_df.copy().diff()
    try:
        day_new_teks_set = day_new_teks_set_df[
            day_new_teks_set_df.index == date].tek_list.item()
    except ValueError:
        day_new_teks_set = None
    if pd.isna(day_new_teks_set):
        day_new_teks_set = set()
    day_new_teks_df = daily_extracted_teks_df[
        daily_extracted_teks_df.extraction_date == date].copy()
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.shared_teks.apply(len)
    day_new_teks_df["upload_date"] = date
    day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
    day_new_teks_df = day_new_teks_df[
        ["upload_date", "generation_date", "shared_teks"]]
    day_new_teks_df["generation_to_upload_days"] = \
        (pd.to_datetime(day_new_teks_df.upload_date) -
         pd.to_datetime(day_new_teks_df.generation_date)).dt.days
    day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
    return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
    shared_teks_generation_to_upload_df = \
        shared_teks_generation_to_upload_df.append(
            compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
# + papermill={"duration": 0.062373, "end_time": "2020-12-23T23:36:27.613938", "exception": false, "start_time": "2020-12-23T23:36:27.551565", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
# + papermill={"duration": 1.002883, "end_time": "2020-12-23T23:36:28.667258", "exception": false, "start_time": "2020-12-23T23:36:27.664375", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
if not today_new_teks_df.empty:
    today_new_teks_df.set_index("generation_to_upload_days") \
        .sort_index().shared_teks.plot.bar()
# + papermill={"duration": 0.071131, "end_time": "2020-12-23T23:36:28.789857", "exception": false, "start_time": "2020-12-23T23:36:28.718726", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
# + papermill={"duration": 1.385716, "end_time": "2020-12-23T23:36:30.227111", "exception": false, "start_time": "2020-12-23T23:36:28.841395", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
# + papermill={"duration": 0.064822, "end_time": "2020-12-23T23:36:30.344226", "exception": false, "start_time": "2020-12-23T23:36:30.279404", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
[["upload_date", "shared_teks"]].rename(
columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.head()
# + papermill={"duration": 0.068161, "end_time": "2020-12-23T23:36:30.464959", "exception": false, "start_time": "2020-12-23T23:36:30.396798", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
.groupby(["upload_date"]).shared_teks.max().reset_index() \
.sort_values(["upload_date"], ascending=False) \
.rename(columns={
"upload_date": "sample_date_string",
"shared_teks": "shared_diagnoses",
})
invalid_shared_diagnoses_dates_mask = \
estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
estimated_shared_diagnoses_df[invalid_shared_diagnoses_dates_mask] = 0
estimated_shared_diagnoses_df.head()
# + [markdown] papermill={"duration": 0.052664, "end_time": "2020-12-23T23:36:30.570192", "exception": false, "start_time": "2020-12-23T23:36:30.517528", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Hourly New TEKs
# + papermill={"duration": 14.064289, "end_time": "2020-12-23T23:36:44.687136", "exception": false, "start_time": "2020-12-23T23:36:30.622847", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
# + papermill={"duration": 3.15002, "end_time": "2020-12-23T23:36:47.890137", "exception": false, "start_time": "2020-12-23T23:36:44.740117", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
# + papermill={"duration": 0.068461, "end_time": "2020-12-23T23:36:48.012315", "exception": false, "start_time": "2020-12-23T23:36:47.943854", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
# + [markdown] papermill={"duration": 0.053443, "end_time": "2020-12-23T23:36:48.119274", "exception": false, "start_time": "2020-12-23T23:36:48.065831", "status": "completed"} tags=[]
# ### Official Statistics
# + papermill={"duration": 0.164199, "end_time": "2020-12-23T23:36:48.337271", "exception": false, "start_time": "2020-12-23T23:36:48.173072", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
import requests
import pandas.io.json
official_stats_response = requests.get("https://radarcovidpre.covid19.gob.es/kpi/statistics/basics")
official_stats_response.raise_for_status()
official_stats_df_ = pandas.io.json.json_normalize(official_stats_response.json())
# + papermill={"duration": 0.069092, "end_time": "2020-12-23T23:36:48.459959", "exception": false, "start_time": "2020-12-23T23:36:48.390867", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
official_stats_df = official_stats_df_.copy()
official_stats_df = official_stats_df.append(pd.DataFrame({
"date": ["06/12/2020"],
"applicationsDownloads.totalAcummulated": [5653519],
"communicatedContagions.totalAcummulated": [21925],
}), sort=False)
official_stats_df["date"] = pd.to_datetime(official_stats_df["date"], dayfirst=True)
official_stats_df.head()
# + papermill={"duration": 0.060545, "end_time": "2020-12-23T23:36:48.574679", "exception": false, "start_time": "2020-12-23T23:36:48.514134", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
official_stats_column_map = {
"date": "sample_date",
"applicationsDownloads.totalAcummulated": "app_downloads_es_accumulated",
"communicatedContagions.totalAcummulated": "shared_diagnoses_es_accumulated",
}
accumulated_suffix = "_accumulated"
accumulated_values_columns = \
list(filter(lambda x: x.endswith(accumulated_suffix), official_stats_column_map.values()))
interpolated_values_columns = \
list(map(lambda x: x[:-len(accumulated_suffix)], accumulated_values_columns))
# + papermill={"duration": 0.066665, "end_time": "2020-12-23T23:36:48.695789", "exception": false, "start_time": "2020-12-23T23:36:48.629124", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
official_stats_df = \
official_stats_df[official_stats_column_map.keys()] \
.rename(columns=official_stats_column_map)
official_stats_df.head()
# + papermill={"duration": 0.071804, "end_time": "2020-12-23T23:36:48.822170", "exception": false, "start_time": "2020-12-23T23:36:48.750366", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
official_stats_df = confirmed_days_df.merge(official_stats_df, how="left")
official_stats_df.sort_values("sample_date", ascending=False, inplace=True)
official_stats_df.head()
# + papermill={"duration": 0.099112, "end_time": "2020-12-23T23:36:48.976231", "exception": false, "start_time": "2020-12-23T23:36:48.877119", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
official_stats_df[accumulated_values_columns] = \
official_stats_df[accumulated_values_columns] \
.astype(float).interpolate(limit_area="inside")
official_stats_df[interpolated_values_columns] = \
official_stats_df[accumulated_values_columns].diff(periods=-1)
official_stats_df.drop(columns="sample_date", inplace=True)
official_stats_df.head()
# + [markdown] papermill={"duration": 0.054794, "end_time": "2020-12-23T23:36:49.086744", "exception": false, "start_time": "2020-12-23T23:36:49.031950", "status": "completed"} tags=[]
# ### Data Merge
# + papermill={"duration": 0.071771, "end_time": "2020-12-23T23:36:49.213583", "exception": false, "start_time": "2020-12-23T23:36:49.141812", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.070625, "end_time": "2020-12-23T23:36:49.340107", "exception": false, "start_time": "2020-12-23T23:36:49.269482", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.072381, "end_time": "2020-12-23T23:36:49.468721", "exception": false, "start_time": "2020-12-23T23:36:49.396340", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.074527, "end_time": "2020-12-23T23:36:49.599672", "exception": false, "start_time": "2020-12-23T23:36:49.525145", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = result_summary_df.merge(
official_stats_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.07519, "end_time": "2020-12-23T23:36:49.731762", "exception": false, "start_time": "2020-12-23T23:36:49.656572", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = confirmed_eu_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
# + papermill={"duration": 0.078453, "end_time": "2020-12-23T23:36:49.867496", "exception": false, "start_time": "2020-12-23T23:36:49.789043", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = confirmed_es_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
# + papermill={"duration": 0.086249, "end_time": "2020-12-23T23:36:50.011673", "exception": false, "start_time": "2020-12-23T23:36:49.925424", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
# + papermill={"duration": 0.089071, "end_time": "2020-12-23T23:36:50.159614", "exception": false, "start_time": "2020-12-23T23:36:50.070543", "status": "completed"} tags=[]
with pd.option_context("mode.use_inf_as_na", True):
    result_summary_df = result_summary_df.fillna(0).astype(int)
    result_summary_df["teks_per_shared_diagnosis"] = \
        (result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
    result_summary_df["shared_diagnoses_per_covid_case"] = \
        (result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
    result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (result_summary_df.shared_diagnoses_es / result_summary_df.covid_cases_es).fillna(0)
result_summary_df.head(daily_plot_days)
# + papermill={"duration": 0.087113, "end_time": "2020-12-23T23:36:50.307795", "exception": false, "start_time": "2020-12-23T23:36:50.220682", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
weekly_result_summary_df = result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(7).agg({
"covid_cases": "sum",
"covid_cases_es": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum",
"shared_diagnoses_es": "sum",
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
    weekly_result_summary_df = weekly_result_summary_df.fillna(0).astype(int)
    weekly_result_summary_df["teks_per_shared_diagnosis"] = \
        (weekly_result_summary_df.shared_teks_by_upload_date / weekly_result_summary_df.shared_diagnoses).fillna(0)
    weekly_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (weekly_result_summary_df.shared_diagnoses / weekly_result_summary_df.covid_cases).fillna(0)
    weekly_result_summary_df["shared_diagnoses_per_covid_case_es"] = \
        (weekly_result_summary_df.shared_diagnoses_es / weekly_result_summary_df.covid_cases_es).fillna(0)
weekly_result_summary_df.head()
# + papermill={"duration": 0.069991, "end_time": "2020-12-23T23:36:50.438669", "exception": false, "start_time": "2020-12-23T23:36:50.368678", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
last_7_days_summary = weekly_result_summary_df.to_dict(orient="records")[1]
last_7_days_summary
# + [markdown] papermill={"duration": 0.060774, "end_time": "2020-12-23T23:36:50.560270", "exception": false, "start_time": "2020-12-23T23:36:50.499496", "status": "completed"} tags=[]
# ## Report Results
# + papermill={"duration": 0.06838, "end_time": "2020-12-23T23:36:50.689332", "exception": false, "start_time": "2020-12-23T23:36:50.620952", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"source_regions": "Source Countries",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases (Source Countries)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date (Source Countries)",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date (Source Countries)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date (Source Countries)",
"shared_diagnoses": "Shared Diagnoses (Source Countries – Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis (Source Countries)",
"shared_diagnoses_per_covid_case": "Usage Ratio (Source Countries)",
"covid_cases_es": "COVID-19 Cases (Spain)",
"app_downloads_es": "App Downloads (Spain – Official)",
"shared_diagnoses_es": "Shared Diagnoses (Spain – Official)",
"shared_diagnoses_per_covid_case_es": "Usage Ratio (Spain)",
}
# + papermill={"duration": 0.067167, "end_time": "2020-12-23T23:36:50.817601", "exception": false, "start_time": "2020-12-23T23:36:50.750434", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
"covid_cases_es",
"app_downloads_es",
"shared_diagnoses_es",
"shared_diagnoses_per_covid_case_es",
]
summary_percentage_columns= [
"shared_diagnoses_per_covid_case_es",
"shared_diagnoses_per_covid_case",
]
# + [markdown] papermill={"duration": 0.061054, "end_time": "2020-12-23T23:36:50.939969", "exception": false, "start_time": "2020-12-23T23:36:50.878915", "status": "completed"} tags=[]
# ### Daily Summary Table
# + papermill={"duration": 0.088746, "end_time": "2020-12-23T23:36:51.089721", "exception": false, "start_time": "2020-12-23T23:36:51.000975", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
# + [markdown] papermill={"duration": 0.062099, "end_time": "2020-12-23T23:36:51.214260", "exception": false, "start_time": "2020-12-23T23:36:51.152161", "status": "completed"} tags=[]
# ### Daily Summary Plots
# + papermill={"duration": 4.194402, "end_time": "2020-12-23T23:36:55.471133", "exception": false, "start_time": "2020-12-23T23:36:51.276731", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.droplevel(level=["source_regions"]) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 30), legend=False)
ax_ = summary_ax_list[0]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
for percentage_column in summary_percentage_columns:
    percentage_column_index = summary_columns.index(percentage_column)
    summary_ax_list[percentage_column_index].yaxis \
        .set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
# + [markdown] papermill={"duration": 0.065036, "end_time": "2020-12-23T23:36:55.601663", "exception": false, "start_time": "2020-12-23T23:36:55.536627", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Daily Generation to Upload Period Table
# + papermill={"duration": 0.080383, "end_time": "2020-12-23T23:36:55.746788", "exception": false, "start_time": "2020-12-23T23:36:55.666405", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
# + papermill={"duration": 1.494, "end_time": "2020-12-23T23:36:57.306309", "exception": false, "start_time": "2020-12-23T23:36:55.812309", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
# + [markdown] papermill={"duration": 0.069479, "end_time": "2020-12-23T23:36:57.446024", "exception": false, "start_time": "2020-12-23T23:36:57.376545", "status": "completed"} tags=[]
# ### Hourly Summary Plots
# + papermill={"duration": 0.452085, "end_time": "2020-12-23T23:36:57.968012", "exception": false, "start_time": "2020-12-23T23:36:57.515927", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
# + [markdown] papermill={"duration": 0.070494, "end_time": "2020-12-23T23:36:58.110140", "exception": false, "start_time": "2020-12-23T23:36:58.039646", "status": "completed"} tags=[]
# ### Publish Results
# + papermill={"duration": 0.107254, "end_time": "2020-12-23T23:36:58.288980", "exception": false, "start_time": "2020-12-23T23:36:58.181726", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}" if x != 0 else "",
display_column_name_mapping["shared_diagnoses_per_covid_case_es"]: lambda x: f"{x:.2%}" if x != 0 else "",
}
general_columns = \
list(filter(lambda x: x not in display_formatters, display_column_name_mapping.values()))
general_formatter = lambda x: f"{x}" if x != 0 else ""
display_formatters.update(dict(map(lambda x: (x, general_formatter), general_columns)))
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
    if pd.isna(x):
        return "-"
    elif round(x * 100, 1) == 0:
        return ""
    else:
        return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
    display_brief_source_regions = report_source_regions[0]
else:
    display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"
# + papermill={"duration": 0.078267, "end_time": "2020-12-23T23:36:58.438793", "exception": false, "start_time": "2020-12-23T23:36:58.360526", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def get_temporary_image_path() -> str:
    return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
    if isinstance(ax, np.ndarray):
        ax = ax[0]
    media_path = get_temporary_image_path()
    ax.get_figure().savefig(media_path)
    return media_path
def save_temporary_dataframe_image(df):
    import dataframe_image as dfi
    df = df.copy()
    df_styler = df.style.format(display_formatters)
    media_path = get_temporary_image_path()
    dfi.export(df_styler, media_path)
    return media_path
# + papermill={"duration": 8.271203, "end_time": "2020-12-23T23:37:06.781160", "exception": false, "start_time": "2020-12-23T23:36:58.509957", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
# + [markdown] papermill={"duration": 0.071923, "end_time": "2020-12-23T23:37:06.925861", "exception": false, "start_time": "2020-12-23T23:37:06.853938", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Save Results
# + papermill={"duration": 0.110165, "end_time": "2020-12-23T23:37:07.108052", "exception": false, "start_time": "2020-12-23T23:37:06.997887", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
# + [markdown] papermill={"duration": 0.071539, "end_time": "2020-12-23T23:37:07.251901", "exception": false, "start_time": "2020-12-23T23:37:07.180362", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish Results as JSON
# + papermill={"duration": 0.095969, "end_time": "2020-12-23T23:37:07.419404", "exception": false, "start_time": "2020-12-23T23:37:07.323435", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def generate_summary_api_results(df: pd.DataFrame) -> list:
    api_df = df.reset_index().copy()
    api_df["sample_date_string"] = \
        api_df["sample_date"].dt.strftime("%Y-%m-%d")
    api_df["source_regions"] = \
        api_df["source_regions"].apply(lambda x: x.split(","))
    return api_df.to_dict(orient="records")
summary_api_results = \
generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
generate_summary_api_results(df=extraction_date_result_summary_df)[0]
summary_results = dict(
backend_identifier=report_backend_identifier,
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=0,
),
today=today_summary_api_results,
last_7_days=last_7_days_summary,
daily_results=summary_api_results)
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
    json.dump(summary_results, f, indent=4)
# + [markdown] papermill={"duration": 0.072252, "end_time": "2020-12-23T23:37:07.564139", "exception": false, "start_time": "2020-12-23T23:37:07.491887", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish on README
# + papermill={"duration": 0.07985, "end_time": "2020-12-23T23:37:07.716980", "exception": false, "start_time": "2020-12-23T23:37:07.637130", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
with open("Data/Templates/README.md", "r") as f:
    readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
    f.write(readme_contents)
# + [markdown] papermill={"duration": 0.072169, "end_time": "2020-12-23T23:37:07.861600", "exception": false, "start_time": "2020-12-23T23:37:07.789431", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish on Twitter
# + papermill={"duration": 6.255353, "end_time": "2020-12-23T23:37:14.189825", "exception": false, "start_time": "2020-12-23T23:37:07.934472", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule" and \
        (shared_teks_by_upload_date_last_hour or not are_today_results_partial):
    import tweepy
    twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
    twitter_api_auth_keys = twitter_api_auth_keys.split(":")
    auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
    auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
    api = tweepy.API(auth)
    summary_plots_media = api.media_upload(summary_plots_image_path)
    summary_table_media = api.media_upload(summary_table_image_path)
    generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
    media_ids = [
        summary_plots_media.media_id,
        summary_table_media.media_id,
        generation_to_upload_period_pivot_table_image_media.media_id,
    ]
    if are_today_results_partial:
        today_addendum = " (Partial)"
    else:
        today_addendum = ""
    status = textwrap.dedent(f"""
        #RadarCOVID – {extraction_date_with_hour}
        Source Countries: {display_brief_source_regions}
        Today{today_addendum}:
        - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
        - Shared Diagnoses: ≤{shared_diagnoses:.0f}
        - Usage Ratio: ≤{shared_diagnoses_per_covid_case:.2%}
        Last 7 Days:
        - Shared Diagnoses: ≤{last_7_days_summary["shared_diagnoses"]:.0f}
        - Usage Ratio: ≤{last_7_days_summary["shared_diagnoses_per_covid_case"]:.2%}
        Info: {github_project_base_url}#documentation
        """)
    status = status.encode(encoding="utf-8")
    api.update_status(status=status, media_ids=media_ids)
# Source: Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2020-12-23.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mathematical Functions, Strings, and Objects
# ## This chapter introduces Python functions for performing common mathematical operations
# - A function is a group of statements that performs a specific task; you can think of it as a small, reusable piece of functionality. In practice, a function should preferably not be longer than one screen.
# - Python's built-in functions can be used without an import
# <img src="../Photo/15.png"></img>
max('kim')
max(1,5,65,89)
# ## Try out Python's built-in functions
# ## Python's math module provides many mathematical functions
# <img src="../Photo/16.png"></img>
# <img src="../Photo/17.png"></img>
#
import math
math.log(10,10)
math.sqrt(2)
math.radians(90)
# ## The two mathematical constants pi and e can be accessed as math.pi and math.e
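# - For example (the printed values are approximations):
import math
print(math.pi)  # 3.141592653589793
print(math.e)   # 2.718281828459045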
# ## EP:
# - Using the math library, write a program that reads three vertices (x, y) from the user and returns the three angles of the triangle
# - Note: Python's trigonometric functions work in radians, so the results must be converted to degrees
# <img src="../Photo/18.png">
import math
x1,y1 = eval(input('x1,y1'))
x2,y2 = eval(input('x2,y2'))
x3,y3 = eval(input('x3,y3'))
# First attempt: the side lengths are hard-coded (a right isosceles triangle), so the vertex inputs above are not actually used; the next cell computes them from the coordinates
a = 1
b = 1
c = math.sqrt(2)
part_1 = -2 * b * c
part_2 = -2 * a * c
part_3 = -2 * b * a
A = math.acos((math.pow(a,2)-math.pow(b,2)-math.pow(c,2))/part_1)
B = math.acos((math.pow(b,2)-math.pow(a,2)-math.pow(c,2))/part_2)
C = math.acos((math.pow(c,2)-math.pow(b,2)-math.pow(a,2))/part_3)
print(math.degrees(A))
print(math.degrees(B))
print(math.degrees(C))
x1,y1 = eval(input('x1,y1'))
x2,y2 = eval(input('x2,y2'))
x3,y3 = eval(input('x3,y3'))
a = math.sqrt(math.pow(x2-x3,2)+math.pow(y2-y3,2))
b = math.sqrt(math.pow(x1-x3,2)+math.pow(y1-y3,2))
c = math.sqrt(math.pow(x2-x1,2)+math.pow(y2-y1,2))
part_1 = -2 * b * c
part_2 = -2 * a * c
part_3 = -2 * b * a
A = math.acos((math.pow(a,2)-math.pow(b,2)-math.pow(c,2))/part_1)
B = math.acos((math.pow(b,2)-math.pow(a,2)-math.pow(c,2))/part_2)
C = math.acos((math.pow(c,2)-math.pow(b,2)-math.pow(a,2))/part_3)
print(math.degrees(A))
print(math.degrees(B))
print(math.degrees(C))
# ## Strings and Characters
# - In Python, a string must be enclosed in single or double quotes; for strings that span multiple lines you can use triple quotes (""")
# - When a triple-quoted block is assigned to a variable it becomes a string; otherwise it acts as a multi-line comment
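# - For example, a triple-quoted string assigned to a variable keeps its line breaks:
s = """first line
second line"""
print(s)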
import random
random.random()  # returns a value between 0 and 1
# ## ASCII and Unicode
# - <img src="../Photo/19.png"></img>
# - <img src="../Photo/20.png"></img>
# - <img src="../Photo/21.png"></img>
# ## The functions ord and chr
# - ord returns the ASCII code value of a character
# - chr returns the character for a given code value
ord('a')
chr((ord('H')+5)*8)
# ## EP:
# - Use ord and chr for simple e-mail encryption
youxiang = input('Email: ')
mima = ''.join(chr(ord(ch) - 9) for ch in youxiang)  # shift each character to "encrypt" it
print(mima)
a = 'kim is a nice girl'
c = ''
for i in a:
b = chr(ord(i)-9)
c +=b
print(c)
# ## Escape sequences \
# - a = "He said, "John's program is easy to read"" fails, because the inner double quotes close the string early
# - escaping a character strips it of its original (special) meaning
# - in general, escaping is only needed when a character clashes with the surrounding syntax
# 1. The differences between triple quotes and single/double quotes:
# - a triple-quoted string may contain line breaks; single/double-quoted strings may not
# - a triple-quoted block that is not assigned to a variable acts as a (multi-line) comment
#
# 2. A single/double-quoted string cannot directly contain the same kind of quote, but it can contain the other kind.
#
# 3. If you do need the same kind of quote inside, use the backslash escape character "\" to strip it of its special meaning.
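# For example, the sentence above can be written with escaped inner quotes:
a = "He said, \"John's program is easy to read\""
print(a)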
# ## More about print
# - the end parameter controls what is printed at the end of the output
# - by default, print ends with a newline
import time
for i in range(100):
if i % 5 == 0:
print('#',end="")
time.sleep(0.5)
print('kim',end = '\\')
print('nice')
print('lalala')
# ## The str function
# - casts a value to the string type
# - other conversions will come up later (list, set, tuple, ...)
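# For example, str lets a number be concatenated with text:
print(str(100) + ' is an int converted to a string')
print('pi is about ' + str(3.14))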
# ## String concatenation
# - use "+" directly
# - the join() function
a = 'http://op.hanhande.net/shtml/op_wz/list_2602_'
for i in range(1,28):
url= a + str(i) + '.shtml'
print(url)
a = 'http://op.hanhande.net/shtml/op_wz/list_2602_'
for i in range(1,28):
url = "".join((a,str(i)))
URL = "".join((url,'shtml'))
print(URL)
# ## EP:
# - Concatenate "Welcome", "to" and "Python"
# - Concatenate the int 100 with "joker is a bad man"
# - Read a string from the console
# > Read a name and print a compliment for that person
name = input('Enter a name: ')
print(name, 'is very handsome')
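# For example, the first two concatenations above:
print('Welcome' + ' ' + 'to' + ' ' + 'Python')
print(str(100) + ' joker is a bad man')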
# ## Case study: minimum number of coins
# - Develop a program that asks the user for a total amount, a floating-point value in dollars and cents, and reports the number of dollars, quarters, dimes, nickels and pennies
# <img src="../Photo/22.png"></img>
# - A weak point of Python: plain floats are handled imprecisely, which is why NumPy types are used when processing data
# <img src="../Photo/23.png"></img>
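# One possible sketch, working in cents to sidestep the floating-point issue mentioned above:
amount = eval(input('Enter an amount, e.g. 11.56: '))
remaining = int(round(amount * 100))   # convert to cents
dollars = remaining // 100
remaining = remaining % 100
quarters = remaining // 25
remaining = remaining % 25
dimes = remaining // 10
remaining = remaining % 10
nickels = remaining // 5
pennies = remaining % 5
print(dollars, 'dollars,', quarters, 'quarters,', dimes, 'dimes,', nickels, 'nickels,', pennies, 'pennies')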
# ## id and type
# - id shows an object's memory address; it will come up again in conditional statements
# - type shows an element's type
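# For example:
x = 10
print(id(x))     # memory address (varies between runs)
print(type(x))   # <class 'int'>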
# ## Other formatting statements are covered in the book
# # Homework
# - 1
# <img src="../Photo/24.png"><img>
# <img src="../Photo/25.png"><img>
import math
r = eval(input('Distance from a vertex to the center: '))
s = 2 * r * math.sin(3.1415926/5)
part_1 = 5 * s * s
part_2 = 4 * math.tan(3.1415926/5)
S= part_1/part_2
print('The area is: ',S)
# - 2
# <img src="../Photo/26.png"><img>
import math
import numpy
x1,y1 = eval(input('x1,y1: '))
x2,y2 = eval(input('x2,y2: '))
x1, y1, x2, y2 = math.radians(x1), math.radians(y1), math.radians(x2), math.radians(y2)  # convert degrees to radians first
d = 6371.01 * numpy.arccos(math.sin(x1) * math.sin(x2) + math.cos(x1) * math.cos(x2) * math.cos(y1-y2))
print('The distance is: ', d, 'km')
import math
import numpy
x1,y1 = eval(input('x1,y1:'))
x2,y2 = eval(input('x2,y2:'))
a = math.radians(x1)
b = math.radians(y1)
c = math.radians(x2)
e = math.radians(y2)
d = 6371.01*numpy.arccos(math.sin(a) * math.sin(c)+math.cos(a)*math.cos(c)*math.cos(b-e))
print('The distance between the two points is',d,'km')
# - 3
# <img src="../Photo/27.png"><img>
s = eval(input('Enter the side length s: '))
part_1 = 5 * s ** 2
part_2 = 4 * math.tan(math.pi/5)
S= part_1/part_2
print('The area is: ',S)
# - 4
# <img src="../Photo/28.png"><img>
import math
s = eval(input('Enter the side length s: '))
n = eval(input('Enter the number of sides n: '))
part_1 = n * s ** 2
part_2 = 4 * math.tan(3.1415926/n)
S = part_1/part_2
print("The area is:",S)
# - 5
# <img src="../Photo/29.png"><img>
# <img src="../Photo/30.png"><img>
ord('j')
chr(106)
a = eval(input('Enter an ASCII code: '))
b = chr(a)
print("The character is : ",b)
# - 6
# <img src="../Photo/31.png"><img>
a = input("Employee name: ")
b = eval(input("Hours worked in a week: "))
c = eval(input("Hourly pay rate: "))
d = eval(input("Federal tax withholding rate: "))
e = eval(input("State tax withholding rate: "))
f = b * c       # gross pay
g = f * d       # federal withholding
h = f * e       # state withholding
i = g + h       # total deduction
j = f - g - h   # net pay
print('Employee Name: ',a)
print('Hours Worked: ',b)
print('Pay Rate: ',c)
print('Gross Pay: ',f)
print('Deductions:')
print('Federal Withholding ({:.1%}): '.format(d), g)
print('State Withholding ({:.1%}): '.format(e), h)
print('Total Deduction: ',i)
print('Net Pay: ',j)
# - 7
# <img src="../Photo/32.png"><img>
a = eval(input("Enter an integer: "))
res = 0
while a :
b = a%10
a=a//10
res = res*10 + b
print(res)
# - 8 Advanced:
# > Encrypt a piece of text and save the result to a local file
wenben = input('Enter the text: ')
mima = ''.join(chr(ord(ch) + 3) for ch in wenben)   # shift each character to encrypt it
print(mima)
with open('mima.txt', 'w') as f:                    # example filename for the saved ciphertext
    f.write(mima)
| 7.17.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scrape_mars import scrapebot
import pandas as pd
s=scrapebot.scrape()
s['mars_facts']
s['news_title'],s['news_p']
s['featured_image_url']
s['hemisphere_image_urls']
s['mars_weather']
| verify scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project 5 : Classification
# ## Instructions
#
# ### Description
#
# Practice classification on the Titanic dataset.
#
# ### Grading
#
# For grading purposes, we will clear all outputs from all your cells and then run them all from the top. Please test your notebook in the same fashion before turning it in.
#
# ### Submitting Your Solution
#
# To submit your notebook, first clear all the cells (this won't matter too much this time, but for larger data sets in the future, it will make the file smaller). Then use the File->Download As->Notebook to obtain the notebook file. Finally, submit the notebook file on Canvas.
#
# +
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -
# ### Introduction
#
# On April 15, 1912, the largest passenger liner ever made collided with an iceberg during her maiden voyage. When the Titanic sank it killed 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships. One of the reasons that the shipwreck resulted in such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others.
#
# Intro Videos:
# https://www.youtube.com/watch?v=3lyiZMeTKIo
# and
# https://www.youtube.com/watch?v=ItjXTieWKyI
#
# The `titanic_data.csv` file contains data for `887` of the real Titanic passengers. Each row represents one person. The columns describe different attributes about the person including whether they survived (`0=No`), their age, their passenger-class (`1=1st Class, Upper`), gender, and the fare they paid (£s*). For more on the currency: http://www.statisticalconsultants.co.nz/blog/titanic-fare-data.html
#
# We are going to try to see if there are correlations between the feature data provided (find a best subset of features) and passenger survival.
# ### Problem 1: Load and understand the data (35 points)
#
# #### Your task (some of this is the work you completed for L14 - be sure to copy that work into here as needed)
# Conduct some preprocessing steps to explore the following and provide code/answers in the below cells:
# 1. Load the `titanic_data.csv` file into a pandas dataframe
# 2. Explore the data provided (e.g., looking at statistics using describe(), value_counts(), histograms, scatter plots of various features, etc.)
# 3. What are the names of feature columns that appear to be usable for learning?
# 4. What is the name of the column that appears to represent our target?
# 5. Formulate a hypothesis about the relationship between given feature data and the target
# 6. How did Pclass affect passengers' chances of survival?
# 7. What is the age distribution of survivors?
# Step 1. Load the `titanic_data.csv` file into a pandas dataframe
boat = pd.read_csv("titanic_data.csv")
# +
# Step 2. Explore the data provided (e.g., looking at statistics using describe(), value_counts(), histograms, scatter plots of various features, etc.)
print("Headers: " + str(list(boat)))
print("\nTotal Number of Survivors: " + str(boat["Survived"].value_counts()[1]))
print("Survival Rate: " + str(342 / 887))
children = boat[boat["Age"] < 18]
print("\nNumber of Children: " + str(len(children["Age"])))
print("Surviving Children: " + str(children["Survived"].value_counts()[1]))
print("Survival Rate: " + str(65 / 130 * 100) + "%")
print("\nNumber of adults: " + str(887 - len(children["Age"])))
print("Surviving Adults: " + str(boat["Survived"].value_counts()[1] - children["Survived"].value_counts()[1]))
print("Survival Rate: " + str(277 / 757 * 100) + "%")
boat.describe()
print('\nSurvived Col:')
for i in range(10):
print(boat['Survived'][i])
# -
# ---
#
# **Edit this cell to provide answers to the following steps:**
#
# ---
#
# Step 3. What are the names of feature columns that appear to be usable for learning?
#
# Pclass, Sex, Age, Fare, Parents/Children Aboard
#
# Step 4. What is the name of the column that appears to represent our target?
#
# Survived (Binary true false classifications)
#
# Step 5. Formulate a hypothesis about the relationship between given feature data and the target
#
# General survival rate will be higher in children
# +
#Step 6. How did Pclass affect passengers' chances of survival?
#Show your work with a bar plot, dataframe selection, or visual of your choice.
classSurvived = [0,0,0,0]
classCounts = [0,0,0,0]
for x in range(len(boat)):
    classCounts[boat['Pclass'][x]] += 1
    classSurvived[boat['Pclass'][x]] += boat['Survived'][x]
for x in range(1,len(classCounts)):
    print(f"Class {x} had a survival rate of: {classSurvived[x] / classCounts[x]}")
# +
#Step 7. What is the age distribution of survivors?
#Show your work with a dataframe operation and/or histogram plot.
ages = []
for x in range(len(boat)):
if boat['Survived'][x]:
ages.extend([boat['Age'][x]])
plt.title('Survivor Age Distribution')
plt.xlabel("Age")
plt.hist(ages)
plt.show()
plt.title('Overall Age Distribution')
plt.xlabel("Age")
plt.hist(boat['Age'])
plt.show()
# -
# ### Problem 2: transform the data (10 points)
# The `Sex` column is categorical, meaning its data are separable into groups, but not numerical. To be able to work with this data, we need numbers, so you task is to transform the `Sex` column into numerical data with pandas' `get_dummies` feature and remove the original categorical `Sex` column.
boat['Sex'] = pd.get_dummies(boat['Sex'])['male']   # 1 = male, 0 = female; replaces the original categorical column
# ### Problem 3: Classification (30 points)
# Now that the data is transformed, we want to run various classification experiments on it. The first is `K Nearest Neighbors`, which you will conduct by:
#
# 1. Define input and target data by creating lists of dataframe columns (e.g., inputs = ['Pclass', etc.)
# 2. Split the data into training and testing sets with `train_test_split()`
# 3. Create a `KNeighborsClassifier` using `5` neighbors at first (you can experiment with this parameter)
# 4. Train your model by passing the training dataset to `fit()`
# 5. Calculate predicted target values(y_hat) by passing the testing dataset to `predict()`
# 6. Print the accuracy of the model with `score()`
#
# ** Note: If you get a python warning as you use the Y, trainY, or testY vector in some of the function calls about "DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, )", you can look up how to use trainY.values.ravel() or trainY.values.flatten() or another function, etc.
# +
#inputs = Pclass, Age, Sex, Fare
#inputs = pd.concat([boat["Pclass"], boat["Age"], boat["Sex"], boat["Fare"]], axis=1)
#target = Surviaval
#target = boat["Survived"]
#inputs.describe()
# -
from sklearn.model_selection import train_test_split
train , test =train_test_split(boat)
train.describe()
# +
from sklearn.neighbors import KNeighborsClassifier
k = 5
model = KNeighborsClassifier(k)
model.fit(train[["Pclass", "Age", "Sex", "Fare"]], train["Survived"])
yhat = model.predict(train[["Pclass", "Age", "Sex", "Fare"]])
model.score(test[["Pclass", "Age", "Sex", "Fare"]], test['Survived'])
# -
# ### Problem 4: Cross validation, classification report (15 points)
# - Using the concepts from the 17-model_selection slides and the [`cross_val_score`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) function from scikit-learn, estimate the f-score ([`f1-score`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score) (you can use however many folds you wish). To get `cross_val_score` to use `f1-score` rather than the default accuracy measure, you will need to set the `scoring` parameter and use a scorer object created via [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer). Since this has a few parts to it, let me just give you that parameter: ```scorerVar = make_scorer(f1_score, pos_label=1)```
#
# - Using the concepts from the end of the 14-classification slides, output a confusion matrix.
#
# - Also, output a classification report [`classification_report`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html) from sklearn.metrics showing more of the metrics: precision, recall, f1-score for both of our classes.
# +
from sklearn import metrics
from sklearn.metrics import confusion_matrix, f1_score, classification_report, make_scorer
from sklearn.model_selection import cross_val_score
scorerVar = make_scorer(f1_score, pos_label=1)
scores = cross_val_score(model, boat[["Pclass", "Age", "Sex", "Fare"]], boat["Survived"], cv = 5, scoring = scorerVar)
print(scores.mean())
print(confusion_matrix(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
print(classification_report(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
# -
# ### Problem 5: Logistic Regression (15 points)
#
# Now, repeat the above experiment using the [`LogisticRegression`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) model in scikit-learn, and output:
#
# - The fit accuracy (using the `score` method of the model)
# - The f-score (using the [`cross_val_score`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) function)
# - The confusion matrix
# - The precision, recall, and f-measure for the 1 class (you can just print the results of the [`classification_report`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html) function from sklearn.metrics)
# +
from sklearn.linear_model import LogisticRegression
#create a model object
model = LogisticRegression()
#train our model
model.fit(train[["Pclass", "Age", "Sex", "Fare"]], train["Survived"])
#evaluate the model
yhat = model.predict(train[["Pclass", "Age", "Sex", "Fare"]])
score = model.score(test[["Pclass", "Age", "Sex", "Fare"]], test['Survived'])
print(f"model score: {score}")
#setup to get f-score and cv
scorerVar = make_scorer(f1_score, pos_label=1)
scores = cross_val_score(model, boat[["Pclass", "Age", "Sex", "Fare"]], boat["Survived"], cv = 5, scoring = scorerVar)
print(f"Cross Validation f1_score: {scores.mean()}")
#confusion matrix
print("Confusion Matrix")
print(confusion_matrix(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
#classification report
print("\nClassification Report")
print(classification_report(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
# -
# ### Problem 6: Support Vector Machines (15 points)
# Now, repeat the above experiment using the using a Support Vector classifier [`SVC`](http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html) with default parameters (RBF kernel) model in scikit-learn, and output:
#
# - The fit accuracy (using the `score` method of the model)
# - The f-score (using the [`cross_val_score`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) function)
# - The confusion matrix
# - The precision, recall, and f-measure for the 1 class (you can just print the results of the [`classification_report`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html) function from sklearn.metrics)
# +
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
#create a model object
model = SVC()
#train our model
model.fit(train[["Pclass", "Age", "Sex", "Fare"]], train["Survived"])
#evaluate the model
yhat = model.predict(train[["Pclass", "Age", "Sex", "Fare"]])
score = model.score(test[["Pclass", "Age", "Sex", "Fare"]], test['Survived'])
print(f"model score: {score}")
#setup to get f-score and cv
scorerVar = make_scorer(f1_score, pos_label=1)
scores = cross_val_score(model, boat[["Pclass", "Age", "Sex", "Fare"]], boat["Survived"], cv = 5, scoring = scorerVar)
print(f"Cross Validation f1_score: {scores.mean()}")
#confusion matrix
print("Confusion Matrix")
print(confusion_matrix(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
#classification report
print("\nClassification Report")
print(classification_report(test['Survived'], model.predict(test[["Pclass", "Age", "Sex", "Fare"]])))
# -
# ### Problem 7: Comparision and Discussion (5 points)
# Edit this cell to provide a brief discussion (3-5 sentances at most):
# 1. What was the model/algorithm that performed best for you?
#
# Logistic Regression performed the best
#
# 2. What feaures and parameters were used to achieve that performance?
#
# Tweaking the SVM to use a linear kernel also worked just as well as Logistic Regression
#
# 3. What insights did you gain from your experimentation about the predictive power of this dataset and did it match your original hypothesis about the relationship between given feature data and the target?
#
# Age really did not affect survival rates. I was really surprised by this!
# ### Questionnaire
# 1) How long did you spend on this assignment?
# <br>~2hrs<br>
# 2) What did you like about it? What did you not like about it?
# <br>The breadth of classification measures<br>
# 3) Did you find any errors or is there anything you would like changed?
# <br>Nope<br>
| python/examples/05-classify-new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # SVI Part III: ELBO Gradient Estimators
#
# ## Setup
#
# We've defined a Pyro model with observations ${\bf x}$ and latents ${\bf z}$ of the form $p_{\theta}({\bf x}, {\bf z}) = p_{\theta}({\bf x}|{\bf z}) p_{\theta}({\bf z})$. We've also defined a Pyro guide (i.e. a variational distribution) of the form $q_{\phi}({\bf z})$. Here ${\theta}$ and $\phi$ are variational parameters for the model and guide, respectively. (In particular these are _not_ random variables that call for a Bayesian treatment).
#
# We'd like to maximize the log evidence $\log p_{\theta}({\bf x})$ by maximizing the ELBO (the evidence lower bound) given by
#
# $${\rm ELBO} \equiv \mathbb{E}_{q_{\phi}({\bf z})} \left [
# \log p_{\theta}({\bf x}, {\bf z}) - \log q_{\phi}({\bf z})
# \right]$$
#
# To do this we're going to take (stochastic) gradient steps on the ELBO in the parameter space $\{ \theta, \phi \}$ (see references [1,2] for early work on this approach). So we need to be able to compute unbiased estimates of
#
# $$\nabla_{\theta,\phi} {\rm ELBO} = \nabla_{\theta,\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
# \log p_{\theta}({\bf x}, {\bf z}) - \log q_{\phi}({\bf z})
# \right]$$
#
# How do we do this for general stochastic functions `model()` and `guide()`? To simplify notation let's generalize our discussion a bit and ask how we can compute gradients of expectations of an arbitrary cost function $f({\bf z})$. Let's also drop any distinction between $\theta$ and $\phi$. So we want to compute
#
# $$\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
# f_{\phi}({\bf z}) \right]$$
#
# Let's start with the easiest case.
#
# ## Easy Case: Reparameterizable Random Variables
#
# Suppose that we can reparameterize things such that
#
# $$\mathbb{E}_{q_{\phi}({\bf z})} \left [f_{\phi}({\bf z}) \right]
# =\mathbb{E}_{q({\bf \epsilon})} \left [f_{\phi}(g_{\phi}({\bf \epsilon})) \right]$$
#
# Crucially we've moved all the $\phi$ dependence inside of the expectation; $q({\bf \epsilon})$ is a fixed distribution with no dependence on $\phi$. This kind of reparameterization can be done for many distributions (e.g. the normal distribution); see reference [3] for a discussion. In this case we can pass the gradient straight through the expectation to get
#
# $$\nabla_{\phi}\mathbb{E}_{q({\bf \epsilon})} \left [f_{\phi}(g_{\phi}({\bf \epsilon})) \right]=
# \mathbb{E}_{q({\bf \epsilon})} \left [\nabla_{\phi}f_{\phi}(g_{\phi}({\bf \epsilon})) \right]$$
#
# Assuming $f(\cdot)$ and $g(\cdot)$ are sufficiently smooth, we can now get unbiased estimates of the gradient of interest by taking a Monte Carlo estimate of this expectation.
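#
# As a minimal illustration in plain PyTorch (outside of Pyro, and using current tensor syntax with `requires_grad` rather than the older `Variable` API that appears later in this tutorial), take $q_{\phi}({\bf z})$ to be a unit-variance normal with mean $\phi$ and $f({\bf z}) = {\bf z}^2$, so that the exact gradient is $2\phi$:
#
# ```python
# import torch
#
# phi = torch.tensor(1.0, requires_grad=True)
# eps = torch.randn(10000)   # eps ~ q(eps) = N(0, 1), with no dependence on phi
# z = phi + eps              # reparameterization: z = g_phi(eps)
# f = (z ** 2).mean()        # Monte Carlo estimate of E_q[f(z)]
# f.backward()
# print(phi.grad)            # close to the exact gradient 2 * phi = 2.0
# ```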
#
# ## Tricky Case: Non-reparameterizable Random Variables
#
# What if we can't do the above reparameterization? Unfortunately this is the case for many distributions of interest, for example all discrete distributions. In this case our estimator takes a bit more complicated form.
#
# We begin by expanding the gradient of interest as
#
# $$\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
# f_{\phi}({\bf z}) \right]=
# \nabla_{\phi} \int d{\bf z} \; q_{\phi}({\bf z}) f_{\phi}({\bf z})$$
#
# and use the chain rule to write this as
#
# $$ \int d{\bf z} \; \left \{ (\nabla_{\phi} q_{\phi}({\bf z})) f_{\phi}({\bf z}) + q_{\phi}({\bf z})(\nabla_{\phi} f_{\phi}({\bf z}))\right \} $$
#
# At this point we run into a problem. We know how to generate samples from $q(\cdot)$—we just run the guide forward—but $\nabla_{\phi} q_{\phi}({\bf z})$ isn't even a valid probability density. So we need to massage this formula so that it's in the form of an expectation w.r.t. $q(\cdot)$. This is easily done using the identity
#
# $$ \nabla_{\phi} q_{\phi}({\bf z}) =
# q_{\phi}({\bf z})\nabla_{\phi} \log q_{\phi}({\bf z})$$
#
# which allows us to rewrite the gradient of interest as
#
# $$\mathbb{E}_{q_{\phi}({\bf z})} \left [
# (\nabla_{\phi} \log q_{\phi}({\bf z})) f_{\phi}({\bf z}) + \nabla_{\phi} f_{\phi}({\bf z})\right]$$
#
# This form of the gradient estimator—variously known as the REINFORCE estimator or the score function estimator or the likelihood ratio estimator—is amenable to simple Monte Carlo estimation.
#
# Note that one way to package this result (which is convenient for implementation) is to introduce a surrogate loss function
#
# $${\rm surrogate \;loss} \equiv
# \log q_{\phi}({\bf z}) \overline{f_{\phi}({\bf z})} + f_{\phi}({\bf z})$$
#
# Here the bar indicates that the term is held constant (i.e. it is not to be differentiated w.r.t. $\phi$). To get a (single-sample) Monte Carlo gradient estimate, we sample the latent random variables, compute the surrogate loss, and differentiate. The result is an unbiased estimate of $\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
# f_{\phi}({\bf z}) \right]$. In equations:
#
# $$\nabla_{\phi} {\rm ELBO} = \mathbb{E}_{q_{\phi}({\bf z})} \left [
# \nabla_{\phi} ({\rm surrogate \; loss}) \right]$$
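#
# To make this concrete, here is a minimal sketch of the score function estimator in plain PyTorch (not Pyro, and assuming the `torch.distributions` module from a recent PyTorch release), with $q_{\phi}$ a Bernoulli with success probability $\phi$ and cost $f({\bf z}) = {\bf z}$, whose exact gradient is $1$:
#
# ```python
# import torch
#
# phi = torch.tensor(0.3, requires_grad=True)
# q = torch.distributions.Bernoulli(probs=phi)
# z = q.sample((10000,))     # non-reparameterizable samples from q
# f = z                      # the cost f(z); the "bar" above corresponds to detach()
# surrogate = (q.log_prob(z) * f.detach() + f).mean()
# surrogate.backward()
# print(phi.grad)            # noisy but unbiased estimate of 1.0
# ```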
#
# ## Variance or Why I Wish I Was Doing MLE Deep Learning
#
# We now have a general recipe for an unbiased gradient estimator of expectations of cost functions. Unfortunately, in the more general case where our $q(\cdot)$ includes non-reparameterizable random variables, this estimator tends to have high variance. Indeed in many cases of interest the variance is so high that the estimator is effectively unusable. So we need strategies to reduce variance (for a discussion see reference [4]). We're going to pursue two strategies. The first strategy takes advantage of the particular structure of the cost function $f(\cdot)$. The second strategy effectively introduces a way to reduce variance by using information from previous estimates of
# $\mathbb{E}_{q_{\phi}({\bf z})} [ f_{\phi}({\bf z})]$. As such it is somewhat analogous to using momentum in stochastic gradient descent.
#
# ### Reducing Variance via Dependency Structure
#
# In the above discussion we stuck to a general cost function $f_{\phi}({\bf z})$. We could continue in this vein (the approach we're about to discuss is applicable in the general case) but for concreteness let's zoom back in. In the case of stochastic variational inference, we're interested in a particular cost function of the form <br/><br/>
#
# $$\log p_{\theta}({\bf x} | {\rm Pa}_p ({\bf x})) +
# \sum_i \log p_{\theta}({\bf z}_i | {\rm Pa}_p ({\bf z}_i))
# - \sum_i \log q_{\phi}({\bf z}_i | {\rm Pa}_q ({\bf z}_i))$$
#
# where we've broken the log ratio $\log p_{\theta}({\bf x}, {\bf z})/q_{\phi}({\bf z})$ into an observation log likelihood piece and a sum over the different latent random variables $\{{\bf z}_i \}$. We've also introduced the notation
# ${\rm Pa}_p (\cdot)$ and ${\rm Pa}_q (\cdot)$ to denote the parents of a given random variable in the model and in the guide, respectively. (The reader might worry what the appropriate notion of dependency would be in the case of general stochastic functions; here we simply mean regular ol' dependency within a single execution trace). The point is that different terms in the cost function have different dependencies on the random variables $\{ {\bf z}_i \}$ and this is something we can leverage.
#
# To make a long story short, for any non-reparameterizable latent random variable ${\bf z}_i$ the surrogate loss is going to have a term
#
# $$\log q_{\phi}({\bf z}_i) \overline{f_{\phi}({\bf z})} $$
#
# It turns out that we can remove some of the terms in $\overline{f_{\phi}({\bf z})}$ and still get an unbiased gradient estimator; furthermore, doing so will generally decrease the variance. In particular (see reference [4] for details) we can remove any terms in $\overline{f_{\phi}({\bf z})}$ that are not downstream of the latent variable ${\bf z}_i$ (downstream w.r.t. to the dependency structure of the guide).
#
# In Pyro, all of this logic is taken care of automatically by the `SVI` class. In particular as long as we switch on `trace_graph=True`, Pyro will keep track of the dependency structure within the execution traces of the model and guide and construct a surrogate loss that has all the unnecessary terms removed:
#
# ```python
# svi = SVI(model, guide, optimizer, "ELBO", trace_graph=True)
# ```
#
# Note that leveraging this dependency information takes extra computations, so `trace_graph=True` should only be invoked in the case where your model has non-reparameterizable random variables.
#
#
# ### Aside: Dependency tracking in Pyro
#
# Finally, a word about dependency tracking. Tracking dependency within a stochastic function that includes arbitrary Python code is a bit tricky. The approach currently implemented in Pyro is analogous to the one used in WebPPL (cf. reference [5]). Briefly, a conservative notion of dependency is used that relies on sequential ordering. If random variable ${\bf z}_2$ follows ${\bf z}_1$ in a given stochastic function then ${\bf z}_2$ _may be_ dependent on ${\bf z}_1$ and therefore _is_ assumed to be dependent. To mitigate the overly coarse conclusions that can be drawn by this kind of dependency tracking, Pyro includes constructs for declaring things as independent, namely `irange` and `iarange` ([see the previous tutorial](svi_part_ii.html)). For use cases with non-reparameterizable variables, it is therefore important for the user to make use of these constructs (when applicable) to take full advantage of the variance reduction provided by `SVI`. In some cases it may also pay to consider reordering random variables within a stochastic function (if possible). It's also worth noting that we expect to add finer notions of dependency tracking in a future version of Pyro.
#
# ### Reducing Variance with Data-Dependent Baselines
#
# The second strategy for reducing variance in our ELBO gradient estimator goes under the name of baselines (see e.g. reference [6]). It actually makes use of the same bit of math that underlies the variance reduction strategy discussed above, except now instead of removing terms we're going to add terms. Basically, instead of removing terms with zero expectation that tend to _contribute_ to the variance, we're going to add specially chosen terms with zero expectation that work to _reduce_ the variance. As such, this is a control variate strategy.
#
# In more detail, the idea is to take advantage of the fact that for any constant $b$, the following identity holds
#
# $$\mathbb{E}_{q_{\phi}({\bf z})} \left [\nabla_{\phi}
# (\log q_{\phi}({\bf z}) \times b) \right]=0$$
#
# This follows since $q(\cdot)$ is normalized:
#
# $$\mathbb{E}_{q_{\phi}({\bf z})} \left [\nabla_{\phi}
# \log q_{\phi}({\bf z}) \right]=
# \int \!d{\bf z} \; q_{\phi}({\bf z}) \nabla_{\phi}
# \log q_{\phi}({\bf z})=
# \int \! d{\bf z} \; \nabla_{\phi} q_{\phi}({\bf z})=
# \nabla_{\phi} \int \! d{\bf z} \; q_{\phi}({\bf z})=\nabla_{\phi} 1 = 0$$
#
# What this means is that we can replace any term
#
# $$\log q_{\phi}({\bf z}_i) \overline{f_{\phi}({\bf z})} $$
#
# in our surrogate loss with
#
# $$\log q_{\phi}({\bf z}_i) \left(\overline{f_{\phi}({\bf z})}-b\right) $$
#
# Doing so doesn't affect the mean of our gradient estimator but it does affect the variance. If we choose $b$ wisely, we can hope to reduce the variance. In fact, $b$ need not be a constant: it can depend on any of the random choices upstream (or sidestream) of ${\bf z}_i$.
#
# #### Baselines in Pyro
#
# There are several ways the user can instruct Pyro to use baselines in the context of stochastic variational inference. Since baselines can be attached to any non-reparameterizable random variable, the current baseline interface is at the level of the `pyro.sample` statement. In particular the baseline interface makes use of an argument `baseline`, which is a dictionary that specifies baseline options. Note that it only makes sense to specify baselines for sample statements within the guide (and not in the model).
#
# ##### Decaying Average Baseline
#
# The simplest baseline is constructed from a running average of recent samples of $\overline{f_{\phi}({\bf z})}$. In Pyro this kind of baseline can be invoked as follows
#
# ```python
# z = pyro.sample("z", dist.bernoulli, ...,
# baseline={'use_decaying_avg_baseline': True,
# 'baseline_beta': 0.95})
# ```
#
# The optional argument `baseline_beta` specifies the decay rate of the decaying average (default value: `0.90`).
#
# #### Neural Baselines
#
# In some cases a decaying average baseline works well. In others using a baseline that depends on upstream randomness is crucial for getting good variance reduction. A powerful approach for constructing such a baseline is to use a neural network that can be adapted during the course of learning. Pyro provides two ways to specify such a baseline (for an extended example see the [AIR tutorial](air.html)).
#
# First the user needs to decide what inputs the baseline is going to consume (e.g. the current datapoint under consideration or the previously sampled random variable). Then the user needs to construct a `nn.Module` that encapsulates the baseline computation. This might look something like
#
# ```python
# class BaselineNN(nn.Module):
# def __init__(self, dim_input, dim_hidden):
# super(BaselineNN, self).__init__()
# self.linear = nn.Linear(dim_input, dim_hidden)
# # ... finish initialization ...
#
# def forward(self, x):
# hidden = self.linear(x)
# # ... do more computations ...
# return baseline
# ```
#
# Then, assuming the BaselineNN object `baseline_module` has been initialized somewhere else, in the guide we'll have something like
#
# ```python
# def guide(x): # here x is the current mini-batch of data
# pyro.module("my_baseline", baseline_module, tags="baseline")
# # ... other computations ...
# z = pyro.sample("z", dist.bernoulli, ...,
# baseline={'nn_baseline': baseline_module,
# 'nn_baseline_input': x})
# ```
#
# Here the argument `nn_baseline` tells Pyro which `nn.Module` to use to construct the baseline. On the backend the argument `nn_baseline_input` is fed into the forward method of the module to compute the baseline $b$. Note that the baseline module needs to be registered with Pyro with a `pyro.module` call so that Pyro is aware of the trainable parameters within the module.
#
# Under the hood Pyro constructs a loss of the form
#
# $${\rm baseline\; loss} \equiv\left(\overline{f_{\phi}({\bf z})} - b \right)^2$$
#
# which is used to adapt the parameters of the neural network. There's no theorem that suggests this is the optimal loss function to use in this context (it's not), but in practice it can work pretty well. Just as for the decaying average baseline, the idea is that a baseline that can track the mean $\overline{f_{\phi}({\bf z})}$ will help reduce the variance. Under the hood `SVI` takes one step on the baseline loss in conjunction with a step on the ELBO.
#
# Note that the module `baseline_module` has been tagged with the string `"baseline"` above; this has the effect of tagging all parameters inside of `baseline_module` with the parameter tag `"baseline"`. This gives the user a convenient handle for controlling how the baseline parameters are optimized. For example, if the user wants the baseline parameters to have a larger learning rate (usually a good idea) an appropriate optimizer could be constructed as follows:
#
# ```python
# def per_param_args(module_name, param_name, tags):
# if 'baseline' in tags:
# return {"lr": 0.010}
# else:
# return {"lr": 0.001}
#
# optimizer = optim.Adam(per_param_args)
# ```
#
# Note that in order for the overall procedure to be correct the baseline parameters should only be optimized through the baseline loss. Similarly the model and guide parameters should only be optimized through the ELBO. To ensure that this is the case under the hood `SVI` detaches the baseline $b$ that enters the ELBO from the autograd graph. Also, since the inputs to the neural baseline may depend on the parameters of the model and guide, the inputs are also detached from the autograd graph before they are fed into the neural network.
#
# Finally, there is an alternate way for the user to specify a neural baseline. Simply use the argument `baseline_value`:
#
# ```python
# b = # do baseline computation
# z = pyro.sample("z", dist.bernoulli, ...,
# baseline={'baseline_value': b})
# ```
#
# This works as above, except in this case it's the user's responsibility to make sure that any autograd tape connecting $b$ to the parameters of the model and guide has been cut. Or to say the same thing in language more familiar to PyTorch users, any inputs to $b$ that depend on $\theta$ or $\phi$ need to be detached from the autograd graph with `detach()` statements.
#
# #### A complete example with baselines
#
# Recall that in the [first SVI tutorial](svi_part_i.html) we considered a bernoulli-beta model for coin flips. Because the beta random variable is non-reparameterizable, the corresponding ELBO gradients are quite noisy. In that context we dealt with this problem by dialing up the number of Monte Carlo samples used to form the estimator. This isn't necessarily a bad approach, but it can be an expensive one.
# Here we showcase how a simple decaying average baseline can reduce the variance. While we're at it, we also use `iarange` to write our model in a fully vectorized manner.
#
# Instead of directly comparing gradient variances, we're going to see how many steps it takes for SVI to converge. Recall that for this particular model (because of conjugacy) we can compute the exact posterior. So to assess the utility of baselines in this context, we setup the following simple experiment. We initialize the guide at a specified set of variational parameters. We then do SVI until the variational parameters have gotten to within a fixed tolerance of the parameters of the exact posterior. We do this both with and without the decaying average baseline. We then compare the number of gradient steps we needed in the two cases. Here's the complete code:
#
# (_Since apart from the use of_ `iarange` _and_ `use_decaying_avg_baseline`, _this code is very similar to the code in parts I and II of the SVI tutorial, we're not going to go through the code line by line._)
# +
from __future__ import print_function
import numpy as np
import torch
from torch.autograd import Variable
import pyro
import pyro.distributions as dist
import pyro.optim as optim
from pyro.infer import SVI
import sys
def param_abs_error(name, target):
return torch.sum(torch.abs(target - pyro.param(name))).data.numpy()[0]
class BernoulliBetaExample(object):
def __init__(self):
# the two hyperparameters for the beta prior
self.alpha0 = Variable(torch.Tensor([10.0]))
self.beta0 = Variable(torch.Tensor([10.0]))
# the dataset consists of six 1s and four 0s
self.data = Variable(torch.zeros(10,1))
self.data[0:6, 0].data = torch.ones(6)
self.n_data = self.data.size(0)
# compute the alpha parameter of the exact beta posterior
self.alpha_n = self.alpha0 + self.data.sum()
# compute the beta parameter of the exact beta posterior
self.beta_n = self.beta0 - self.data.sum() + Variable(torch.Tensor([self.n_data]))
# for convenience compute the logs
self.log_alpha_n = torch.log(self.alpha_n)
self.log_beta_n = torch.log(self.beta_n)
def setup(self):
# initialize values of the two variational parameters
# set to be quite close to the true values
# so that the experiment doesn't take too long
self.log_alpha_q_0 = Variable(torch.Tensor([np.log(15.0)]), requires_grad=True)
self.log_beta_q_0 = Variable(torch.Tensor([np.log(15.0)]), requires_grad=True)
def model(self, use_decaying_avg_baseline):
# sample `latent_fairness` from the beta prior
f = pyro.sample("latent_fairness", dist.beta, self.alpha0, self.beta0)
# use iarange to indicate that the observations are
# conditionally independent given f and get vectorization
with pyro.iarange("data_iarange"):
# observe all ten datapoints using the bernoulli likelihood
pyro.observe("obs", dist.bernoulli, self.data, f)
def guide(self, use_decaying_avg_baseline):
# register the two variational parameters with pyro
log_alpha_q = pyro.param("log_alpha_q", self.log_alpha_q_0)
log_beta_q = pyro.param("log_beta_q", self.log_beta_q_0)
alpha_q, beta_q = torch.exp(log_alpha_q), torch.exp(log_beta_q)
# sample f from the beta variational distribution
baseline_dict = {'use_decaying_avg_baseline': use_decaying_avg_baseline,
'baseline_beta': 0.90}
# note that the baseline_dict specifies whether we're using
# decaying average baselines or not
pyro.sample("latent_fairness", dist.beta, alpha_q, beta_q,
baseline=baseline_dict)
def do_inference(self, use_decaying_avg_baseline, tolerance=0.05):
# clear the param store in case we're in a REPL
pyro.clear_param_store()
# initialize the variational parameters for this run
self.setup()
# setup the optimizer and the inference algorithm
optimizer = optim.Adam({"lr": .0008, "betas": (0.93, 0.999)})
svi = SVI(self.model, self.guide, optimizer, loss="ELBO", trace_graph=True)
print("Doing inference with use_decaying_avg_baseline=%s" % use_decaying_avg_baseline)
# do up to 10000 steps of inference
for k in range(10000):
svi.step(use_decaying_avg_baseline)
if k % 100 == 0:
print('.', end='')
sys.stdout.flush()
# compute the distance to the parameters of the true posterior
alpha_error = param_abs_error("log_alpha_q", self.log_alpha_n)
beta_error = param_abs_error("log_beta_q", self.log_beta_n)
# stop inference early if we're close to the true posterior
if alpha_error < tolerance and beta_error < tolerance:
break
print("\nDid %d steps of inference." % k)
print(("Final absolute errors for the two variational parameters " +
"(in log space) were %.4f & %.4f") % (alpha_error, beta_error))
# do the experiment
bbe = BernoulliBetaExample()
bbe.do_inference(use_decaying_avg_baseline=True)
bbe.do_inference(use_decaying_avg_baseline=False)
# -
# **Sample output:**
# ```
# Doing inference with use_decaying_avg_baseline=True
# ...........
# Did 2070 steps of inference.
# Final absolute errors for the two variational parameters (in log space) were 0.0500 & 0.0443
# Doing inference with use_decaying_avg_baseline=False
# .....................
# Did 4159 steps of inference.
# Final absolute errors for the two variational parameters (in log space) were 0.0500 & 0.0306
# ```
# For this particular run we can see that baselines roughly halved the number of steps of SVI we needed to do. The results are stochastic and will vary from run to run, but this is an encouraging result. For certain model and guide pairs, baselines can provide an even bigger win.
# ## References
#
# [1] `Automated Variational Inference in Probabilistic Programming`,
# <br/>
# <NAME>, <NAME>
#
# [2] `Black Box Variational Inference`,<br/>
# <NAME>, <NAME>, <NAME>
#
# [3] `Auto-Encoding Variational Bayes`,<br/>
# <NAME>, <NAME>
#
# [4] `Gradient Estimation Using Stochastic Computation Graphs`,
# <br/>
# <NAME>, <NAME>, <NAME>, <NAME>
#
# [5] `Deep Amortized Inference for Probabilistic Programs`
# <br/>
# <NAME>, <NAME>, <NAME>
#
# [6] `Neural Variational Inference and Learning in Belief Networks`
# <br/>
# <NAME>, <NAME>
| tutorial/source/svi_part_iii.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Portfolio Exercise: Starbucks
# <br>
#
# <img src="https://opj.ca/wp-content/uploads/2018/02/New-Starbucks-Logo-1200x969.jpg" width="200" height="200">
# <br>
# <br>
#
# #### Background Information
#
# The dataset you will be provided in this portfolio exercise was originally used as a take-home assignment provided by Starbucks for their job candidates. The data for this exercise consists of about 120,000 data points split in a 2:1 ratio among training and test files. In the experiment simulated by the data, an advertising promotion was tested to see if it would bring more customers to purchase a specific product priced at $10. Since it costs the company 0.15 to send out each promotion, it would be best to limit that promotion only to those that are most receptive to the promotion. Each data point includes one column indicating whether or not an individual was sent a promotion for the product, and one column indicating whether or not that individual eventually purchased that product. Each individual also has seven additional features associated with them, which are provided abstractly as V1-V7.
#
# #### Optimization Strategy
#
# Your task is to use the training data to understand what patterns in V1-V7 to indicate that a promotion should be provided to a user. Specifically, your goal is to maximize the following metrics:
#
# * **Incremental Response Rate (IRR)**
#
# IRR depicts how many more customers purchased the product with the promotion, as compared to if they didn't receive the promotion. Mathematically, it's the ratio of the number of purchasers in the promotion group to the total number of customers in that group (_treatment_), minus the ratio of the number of purchasers in the non-promotion group to the total number of customers in that group (_control_).
#
# $$ IRR = \frac{purch_{treat}}{cust_{treat}} - \frac{purch_{ctrl}}{cust_{ctrl}} $$
#
#
# * **Net Incremental Revenue (NIR)**
#
# NIR depicts how much is made (or lost) by sending out the promotion. Mathematically, this is 10 times the total number of purchasers that received the promotion minus 0.15 times the number of promotions sent out, minus 10 times the number of purchasers who were not given the promotion.
#
# $$ NIR = (10\cdot purch_{treat} - 0.15 \cdot cust_{treat}) - 10 \cdot purch_{ctrl}$$
#
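# As a quick sanity check of the two formulas with made-up counts (for illustration only; the actual values are computed from the training data later in this notebook):
purch_treat, cust_treat = 500, 40000   # hypothetical treatment-group counts
purch_ctrl, cust_ctrl = 300, 40000     # hypothetical control-group counts
print((purch_treat / cust_treat) - (purch_ctrl / cust_ctrl))      # IRR = 0.005
print(10 * purch_treat - 0.15 * cust_treat - 10 * purch_ctrl)     # NIR = -4000.0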
# For a full description of what Starbucks provides to candidates see the [instructions available here](https://drive.google.com/open?id=18klca9Sef1Rs6q8DW4l7o349r8B70qXM).
#
# Below you can find the training data provided. Explore the data and different optimization strategies.
#
# #### How To Test Your Strategy?
#
# When you feel like you have an optimization strategy, complete the `promotion_strategy` function to pass to the `test_results` function.
# From past data, we know there are four possible outcomes:
#
# Table of actual promotion vs. predicted promotion customers:
#
# <table>
# <tr><th></th><th colspan = '2'>Actual</th></tr>
# <tr><th>Predicted</th><th>Yes</th><th>No</th></tr>
# <tr><th>Yes</th><td>I</td><td>II</td></tr>
# <tr><th>No</th><td>III</td><td>IV</td></tr>
# </table>
#
# The metrics are only being compared for the individuals we predict should obtain the promotion – that is, quadrants I and II. Since the first set of individuals that receive the promotion (in the training set) receive it randomly, we can expect that quadrants I and II will have approximately equivalent participants.
#
# Comparing quadrant I to II then gives an idea of how well your promotion strategy will work in the future.
#
# Get started by reading in the data below. See how each variable or combination of variables along with a promotion influences the chance of purchasing. When you feel like you have a strategy for who should receive a promotion, test your strategy against the test dataset used in the final `test_results` function.
# +
# Load in packages
from itertools import combinations
from test_results import test_results, score
import numpy as np
import pandas as pd
import scipy as sp
import seaborn as sns
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# Load in the train data and inspecting the first few rows
train_data = pd.read_csv('./training.csv')
train_data.head()
# -
# Inspecting the data
train_data.info()
# Checking for any null-values
train_data.isnull().sum()
# Checking distribution of promotion
prom_dist = train_data.groupby('Promotion')['purchase'].value_counts()
prom_dist
# +
# Calculating and printing group counts
customer_total = train_data.shape[0]
customer_control = train_data.query('Promotion == "No"').shape[0]
customer_treatment = train_data.query('Promotion == "Yes"').shape[0]
purchase_total = train_data.query('purchase == 1').shape[0]
purchase_control = train_data.query('Promotion == "No" and purchase == 1').shape[0]
purchase_treatment = train_data.query('Promotion == "Yes" and purchase == 1').shape[0]
print('Customer count:', customer_total)
print('Control group count:', customer_control)
print('Treatment group count:', customer_treatment)
print('Total purchase count:', purchase_total)
print('Control purchase count:', purchase_control)
print('Treatment purchase count:', purchase_treatment)
# -
# Calculating Incremental Response Rate (IRR)
irr = (purchase_treatment / customer_treatment) - (purchase_control / customer_control)
print('IRR:',irr)
# Calculating Net Incremental Revenue (NIR)
nir = 10*purchase_treatment - 0.15*customer_treatment - 10*purchase_control
print('NIR:', nir)
# ### Hypothesis test for IRR value
#
# Null Hypothesis (H0): IRR <= 0;
# Alternate Hypothesis (H1): IRR > 0
#
# alpha = 0.05
#
# Bonferroni Correction = alpha / number of measures = 0.025
#
#
# +
# Checking IRR, simulate outcomes under null and compare to observed outcome
n_trials = 200000
p_null = train_data['purchase'].mean()
sim_control = np.random.binomial(customer_control, p_null, n_trials)
sim_treatment = np.random.binomial(customer_treatment, p_null, n_trials)
samples = (sim_treatment / customer_treatment) - (sim_control / customer_control)
p_val = (samples >= irr).mean()
# Conclusion of the experiment
print('The p-value for the test on IRR is {}. Therefore we reject the null hypothesis that IRR = 0.' .format(p_val))
# -
# ### Hypothesis test for NIR value
#
# H0: NIR <= 0;
# H1: NIR > 0
#
# alpha = 0.05
#
# Bonferroni Correction = alpha / number of measures = 0.025
#
# +
# Checking NIR, simulate outcomes under null and compare to observed outcome
n_trials = 200000
p_null = train_data['purchase'].mean()
sim_control = np.random.binomial(customer_control, p_null, n_trials)
sim_treatment = np.random.binomial(customer_treatment, p_null, n_trials)
samples = 10*sim_treatment - 0.15*customer_treatment - 10*sim_control
p_val = (samples >= nir).mean()
# Conclusion of the experiment
print('The p-value for the test on NIR is {}. Therefore we reject the null hypothesis that NIR = 0.' .format(p_val))
# -
# ### Building promotion strategy model
# +
# Creating X and y variables
X = train_data[['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7']]
y = train_data['purchase'].values
# Scaling X
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Perform train test split in 2:1 ratio
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.67, test_size=0.33, random_state=55)
# +
# Build a pipeline, using n_jobs = -1 to improve processing speeds
pipeline = Pipeline([('clf', RandomForestClassifier(n_jobs=-1, class_weight='balanced'))])
# Checking pipeline parameters
pipeline.get_params().keys()
# -
# Hyperparameter tuning, using precision as scoring method
parameters = {'clf__n_estimators': [50,100,200],
'clf__max_depth': [3,4,5]}
# +
# Noted it costs the company 0.15 to send out each promotion and it would be best to limit
# that promotion only to those that are most receptive to the promotion.
# Therefore we want to minimise false positives (i.e. we are seeking higher precision, which will be the metric used).
# Also note that the higher the purchase_treatment (true positives), the higher the IRR and NIR.
# passing grid search object
cv = GridSearchCV(pipeline, param_grid = parameters, scoring ='precision')
# +
# Training grid search model
cv.fit(X_train, y_train)
# Predict on test data
y_pred = cv.predict(X_test)
# -
# Evaluating the model
class_report = classification_report(y_test, y_pred)
# +
# Confusion matrix
conf_matrix = confusion_matrix(y_test,y_pred)
index = ['No','Yes']
columns = ['No','Yes']
cm_df = pd.DataFrame(conf_matrix, columns, index)
sns.heatmap(cm_df,annot=True, cmap='Blues',fmt='g')
plt.plot()
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.title('Confusion Matrix');
# -
# Printing confusion matrix to check the above chart
confusion_matrix(y_test,y_pred)
def promotion_strategy(df):
'''
INPUT
df - a dataframe with *only* the columns V1 - V7 (same as train_data)
OUTPUT
promotion_df - np.array with the values
'Yes' or 'No' related to whether or not an
                   individual should receive a promotion
should be the length of df.shape[0]
Ex:
INPUT: df
V1 V2 V3 V4 V5 V6 V7
2 30 -1.1 1 1 3 2
3 32 -0.6 2 3 2 2
2 30 0.13 1 1 4 2
OUTPUT: promotion
array(['Yes', 'Yes', 'No'])
    indicating the first two users would receive the promotion and
the last should not.
'''
# Scaling dataframe using the above scaler
df = scaler.transform(df)
# Predict on the data frame
purchases = cv.predict(df)
promotion = np.where(purchases == 1, 'Yes','No')
return promotion
# +
# This will test your results, and provide you back some information
# on how well your promotion_strategy will work in practice
test_results(promotion_strategy)
| Starbucks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Environment and RL Agent Controller for a Thermostat
#
# ```
# Author: <NAME>
# Github: mpettis
# Twitter: @mtpettis
# Date: 2020-04-27
# ```
#
# This is a toy example of a room with a heater. When the heater is off, the temperature will decay to 0.0, and when it is on, it will rise to 1.0. The decay and rise is not instantaneous, but has exponential decay behavior in time given by the following formula:
#
# temperature[i + 1] = heater[i] + (temperature[i] - heater[i]) * exp(-1/tau)
#
# Where:
#
# temperature[i] is the temperature at timestep i (between 0 and 1).
# heater[i] is the applied heater, 0 when not applied, 1 when applied.
# tau is the characteristic heat decay constant.
#
# So, when the heater is off, the temperature will decay towards 0, and when the heater is on, it will rise towards 1. When the heater is toggled on/off, it will drift towards 1/0.
#
# Here is a sample plot of what the temperature response looks like when the heater is on for a while, then off for a while. You will see the characteristic rise and decay of the temperature to the response.
# +
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
## Compute the response for a given action and current temperature
def respond(action, current_temp, tau):
return action + (current_temp - action) * math.exp(-1.0/tau)
## Actions of a series of on, then off
sAction = pd.Series(np.array([1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0]))
sResponse = np.zeros(sAction.size)
## Update the response with the response to the action
for i in range(sAction.size):
## Get last response
if i == 0:
last_response = 0
else:
last_response = sResponse[i - 1]
sResponse[i] = respond(sAction[i], last_response, 3.0)
## Assemble and plot
df = pd.DataFrame(list(zip(sAction, sResponse)), columns=['action', 'response'])
df.plot()
# -
# ## Goal and Reward
# The goal here is to make an agent that will take actions that will keep the temperature between 0.4 and 0.6.
#
# We make a reward function to reflect our goal. When the temperature is between 0.4 and 0.6, we set the reward as 0.0. When the temperature is outside of this band, we set the reward to be the negative distance the temperature is from its closest band. So if the temperature is 0.1, then the reward is -(0.4 - 0.1) = -0.3, and if it is 0.8, then the reward is -(0.8 - 0.6) = -0.2.
#
# Let's chart the reward vs. temperature to show what is meant:
# +
def reward(temp):
delta = abs(temp - 0.5)
if delta < 0.1:
return 0.0
else:
return -delta + 0.1
temps = [x * 0.01 for x in range(100)]
rewards = [reward(x) for x in temps]
fig=plt.figure(figsize=(12, 4))
plt.scatter(temps, rewards)
plt.xlabel('Temperature')
plt.ylabel('Reward')
plt.title('Reward vs. Temperature')
# -
# # Environment Setup
#
# The environment responds to actions. It is what keeps track of the temperature state of the room, returns the reward for being in that temperature state, and tells you if the episode is over or not (in this case, we just set a max episode length that can happen).
#
# Here is the gist of the flow:
#
# - Create an environment by calling `Environment.create()`, see below, telling it to use the class you created for this (here, the ThermostatEnvironment) and the max timesteps per episode. The environment is assigned to the name `environment`.
# - Initialize the environment `environment` by calling `environment.reset()`. This will do stuff, most importantly, it will initialize the `timestep` attribute to 0.
# - When you want to take an action on the current state of the environment, you will call `environment.execute(<action-value>)`. If you want to have the heater off, you call `environment.execute(0)`, and if you want to have the heater on, you call `environment.execute(1)`.
# - What the `execute()` call returns is a tuple with 3 entries:
# - __state__. In this case, the state is the current temperature that results from taking the action. If you turn on the heater, the temperature will rise from the previous state, and if the heater was turned off, the temperature will fall from the previous state. This should be kept as a numpy array, even though it seems like overkill with a single value for the state coming back. For more complex examples beyond this thermostat, there will be more than 1 component to the state.
# - __terminal__. This is a True/False value. It is True if the episode terminated. In this case, that will happen once you exceed the max number of steps you have set. Otherwise, it will be False, which lets the agent know that it can take further steps.
# - __reward__. This is the reward for taking the action you took.
#
# Below, to train the agent, you will have the agent take actions on the environment, and the environment will return these signals so that the agent can self-train to optimize its reward.
# +
###-----------------------------------------------------------------------------
## Imports
from tensorforce.environments import Environment
from tensorforce.agents import Agent
###-----------------------------------------------------------------------------
### Environment definition
class ThermostatEnvironment(Environment):
"""This class defines a simple thermostat environment. It is a room with
a heater, and when the heater is on, the room temperature will approach
the max heater temperature (usually 1.0), and when off, the room will
decay to a temperature of 0.0. The exponential constant that determines
how fast it approaches these temperatures over timesteps is tau.
"""
def __init__(self):
## Some initializations. Will eventually parameterize this in the constructor.
self.tau = 3.0
self.current_temp = np.random.random(size=(1,))
super().__init__()
def states(self):
return dict(type='float', shape=(1,))
def actions(self):
"""Action 0 means no heater, temperature approaches 0.0. Action 1 means
the heater is on and the room temperature approaches 1.0.
"""
return dict(type='int', num_values=2)
# Optional, should only be defined if environment has a natural maximum
# episode length
def max_episode_timesteps(self):
return super().max_episode_timesteps()
# Optional
def close(self):
super().close()
def reset(self):
"""Reset state.
"""
# state = np.random.random(size=(1,))
self.timestep = 0
self.current_temp = np.random.random(size=(1,))
return self.current_temp
def response(self, action):
"""Respond to an action. When the action is 1, the temperature
exponentially decays approaches 1.0. When the action is 0,
the current temperature decays towards 0.0.
"""
return action + (self.current_temp - action) * math.exp(-1.0 / self.tau)
def reward_compute(self):
""" The reward here is 0 if the current temp is between 0.4 and 0.6,
else it is distance the temp is away from the 0.4 or 0.6 boundary.
Return the value within the numpy array, not the numpy array.
"""
delta = abs(self.current_temp - 0.5)
if delta < 0.1:
return 0.0
else:
return -delta[0] + 0.1
def execute(self, actions):
## Check the action is either 0 or 1 -- heater on or off.
assert actions == 0 or actions == 1
        ## Increment timestep
self.timestep += 1
## Update the current_temp
self.current_temp = self.response(actions)
## Compute the reward
reward = self.reward_compute()
        ## The only way to go terminal is to exceed max_episode_timesteps().
## terminal == False means episode is not done
## terminal == True means it is done.
terminal = False
if self.timestep > self.max_episode_timesteps():
terminal = True
return self.current_temp, terminal, reward
###-----------------------------------------------------------------------------
### Create the environment
### - Tell it the environment class
### - Set the max timesteps that can happen per episode
environment = Environment.create(
environment=ThermostatEnvironment,
max_episode_timesteps=100)
# -
# # Agent setup
#
# Here we configure a type of agent to learn against this environment. There are many agent configurations to choose from, and we will not discuss how to pick one here -- we will just take a basic agent to train.
agent = Agent.create(
agent='tensorforce', environment=environment, update=64,
objective='policy_gradient', reward_estimation=dict(horizon=1)
)
# # Check: Untrained Agent Performance
#
# Let's see how the untrained agent performs on the environment. The red horizontal lines are the target bands for the temperature.
#
# The untrained agent doesn't take actions that drive the temperature into the band. Its randomly initialized policy typically keeps the heater either always off or always on.
# +
### Initialize
environment.reset()
## Creation of the environment via Environment.create() creates
## a wrapper class around the original Environment defined here.
## That wrapper mainly keeps track of the number of timesteps.
## In order to alter the attributes of your instance of the original
## class, like to set the initial temp to a custom value, like here,
## you need to access the `environment` member of this wrapped class.
## That is why you see the way to set the current_temp like below.
environment.environment.current_temp = np.array([0.5])
states = environment.environment.current_temp
internals = agent.initial_internals()
terminal = False
### Run an episode
temp = [environment.environment.current_temp[0]]
while not terminal:
actions, internals = agent.act(states=states, internals=internals, evaluation=True)
states, terminal, reward = environment.execute(actions=actions)
temp += [states[0]]
### Plot the run
plt.figure(figsize=(12, 4))
ax=plt.subplot()
ax.set_ylim([0.0, 1.0])
plt.plot(range(len(temp)), temp)
plt.hlines(y=0.4, xmin=0, xmax=99, color='r')
plt.hlines(y=0.6, xmin=0, xmax=99, color='r')
plt.xlabel('Timestep')
plt.ylabel('Temperature')
plt.title('Temperature vs. Timestep')
plt.show()
# -
# # Train the agent
#
# Here we train the agent against episodes of interacting with the environment.
# Train for 200 episodes
for _ in range(200):
states = environment.reset()
terminal = False
while not terminal:
actions = agent.act(states=states)
states, terminal, reward = environment.execute(actions=actions)
agent.observe(terminal=terminal, reward=reward)
# # Check: Trained Agent Performance
#
# You can plainly see that this is toggling the heater on/off to keep the temperature within the target band!
# +
### Initialize
environment.reset()
## Creation of the environment via Environment.create() creates
## a wrapper class around the original Environment defined here.
## That wrapper mainly keeps track of the number of timesteps.
## In order to alter the attributes of your instance of the original
## class, like to set the initial temp to a custom value, like here,
## you need to access the `environment` member of this wrapped class.
## That is why you see the way to set the current_temp like below.
environment.environment.current_temp = np.array([1.0])
states = environment.environment.current_temp
internals = agent.initial_internals()
terminal = False
### Run an episode
temp = [environment.environment.current_temp[0]]
while not terminal:
actions, internals = agent.act(states=states, internals=internals, evaluation=True)
states, terminal, reward = environment.execute(actions=actions)
temp += [states[0]]
### Plot the run
plt.figure(figsize=(12, 4))
ax=plt.subplot()
ax.set_ylim([0.0, 1.0])
plt.plot(range(len(temp)), temp)
plt.hlines(y=0.4, xmin=0, xmax=99, color='r')
plt.hlines(y=0.6, xmin=0, xmax=99, color='r')
plt.xlabel('Timestep')
plt.ylabel('Temperature')
plt.title('Temperature vs. Timestep')
plt.show()
| examples/temperature-controller.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: computationalPhysics
# language: python
# name: conda-env-computationalPhysics-py
# ---
# <h1>A Simple Model of a Balloon in a Fluid of Uniform Density</h1>
# <h2><NAME></h2>
# <h3>Introduction</h3>
# Here I present two simple models of a balloon in a confined space. In the first, the balloon is acted upon by gravity and a buoyant force. Additionally, an effectively infinite-in-magnitude and infinitesimal-in-time normal force is applied to the balloon at the boundaries of some user-defined volume. The volume contains two fluids, each with a different (but uniform) density. The second model is similar to the first; however, it may contain much more complex density perturbations throughout, and an additional "wind" force is included in all three spatial dimensions. This model demonstrates how density perturbations may be used as approximations of soft constraint boundaries.
# +
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from mplEasyAnimate import animation
from tqdm import tqdm
import time
import integrators as intgs
from helper import display_video, make_animation
from IPython.display import HTML
plt.rc('text', usetex = True)
plt.rc('font', family='serif')
# -
g = 9.8 # m s^-2
# <h3>Balloon & Helper Objects</h3>
# Here I define a balloon class which will store the balloon's position and velocity. This object will also lose energy when reflected off of a boundary. The function
# $$
# c_{r}(P) = -40e^{-4.5P} + 0.86
# $$
# empirically determined <a href=http://isjos.org/JoP/vol3iss2/Papers/JoPv3i2-2COR.pdf>here</a>, is used to calculate the coefficient of restitution for the balloon in one atmosphere of pressure. At a boundary with axis $i$ the new velocity along axis $i$ is then given as
# $$
# v_{i, f} = -c_{r}(P) \cdot v_{i, 0}
# $$
class balloonObj:
def __init__(self, radius, rho=0.164, r0=[0, 0, 0], v0=[0, 0, 0], a1=1, a2=1, a3=1):
self.radius = radius
self.rho = rho # kg m^-3
self.volume = ((4/3)*np.pi*self.radius**3)/1000 # m^3
self.ppos = None
self.pos = r0 # m
self.mass = self.volume*self.rho # kg
self.velocity = v0
P = 1 #atm
self.a1 = a1
self.a2 = a2
self.a3 = a3
self.cr = -40*np.exp(-4.5*P)+0.86 #http://isjos.org/JoP/vol3iss2/Papers/JoPv3i2-2COR.pdf
def reflect(self, axis):
self.velocity[axis] = -self.cr * self.velocity[axis]
# Next we define a "helper object" -- confinment -- which simply is used to store the bounds of the rectangular cuboid the balloon is confined within. This confinment object is responsible for reporting if a collision has happened, and what axis that collison is along.
class confinment:
def __init__(self, bounds):
# [[xmin, xmax], [ymin, ymax], [zmin, zmax]]
self.bounds = bounds
def check_x(self, x):
if self.bounds[0][0] < x < self.bounds[0][1]:
return False
else:
return True
def check_y(self, y):
if self.bounds[1][0] < y < self.bounds[1][1]:
return False
else:
return True
def check_z(self, z):
if self.bounds[2][0] < z < self.bounds[2][1]:
return False
else:
return True
def check_for_collision(self, pos):
if self.check_x(pos[0]) and self.check_y(pos[1]) and self.check_z(pos[2]):
return True
else:
return False
# Finally we define an object to coordinate the integration. The `wordIntegrator` takes some model, some balloon, some confiner, and an integration scheme to use. It will then allow the user to step the system through time.
class wordIntegrator:
def __init__(self, confiner, obj, model, method=intgs.rk4, upper_density=1.18, lower_density=1.18):
self.object = obj
self.method = method
self.model = model
self.confiner = confiner
self.clock = 0
self.step = 0
self.upper_density = upper_density
self.lower_density = lower_density
def get_rho(self, ypos):
if ypos <= self.confiner.bounds[1][1]/2:
return self.lower_density # kg m^-3
else:
return self.upper_density # kg m^-3
def get_args(self):
args = dict()
args['m'] = self.object.mass
args['V'] = self.object.volume
args['a1'] = self.object.a1
args['a2'] = self.object.a2
args['a3'] = self.object.a3
args['rho_air'] = self.get_rho(self.object.pos[1])
return args
def timeEvolve(self, dt):
"""
Desc:
            Increment the system by time step dt.
"""
cx = self.confiner.check_x(self.object.pos[0])
cy = self.confiner.check_y(self.object.pos[1])
cz = self.confiner.check_z(self.object.pos[2])
if cx:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(0)
if cy:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(1)
if cz:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(2)
cI = list(self.object.pos) + list(self.object.velocity)
nI = self.method(self.model, cI, self.clock, dt, self.get_args())
self.object.ppos = self.object.pos
self.object.pos = nI[:3]
self.object.velocity = nI[3:]
self.step += 1
self.clock += dt
# <h3>Model</h3>
# We develop a three-dimensional model to describe the system; the model is given as
# $$
# \frac{dx}{dt} = v_{x} \\
# \frac{dy}{dt} = v_{y}\\
# \frac{dz}{dt} = v_{z} \\
# $$
# with the velocity components being given by
# $$
# \frac{dv_{x}}{dt} = 0 \\
# \frac{dv_{y}}{dt} = -mg+gV\rho_{c} \\
# \frac{dv_{z}}{dt} = 0 \\
# $$
# Initially we had hoped to include quadratic drag in three dimensions in this model; however, this proved infeasible at this stage of the project. Future work will aim to include quadratic drag in the model.
#
# The force in the $y$ direction is given as the sum of the weight of the balloon and the weight of the displaced fluid. This model of buoyancy assumes the density of the fluid over the height of the object is constant. A more complex, and more physically representative, manner of estimating the buoyant force may be desirable in future, given that the balloon traverses a density boundary. However, the method presented here acts as an effective first-order estimate.
def bouyModel(I, t, args):
# 0 1 2 3 4 5
# [x, y, z, vx, vy, vz]
dIdt = np.zeros(6)
dIdt[0] = I[3]
dIdt[1] = I[4]
dIdt[2] = I[5]
# Weight # Boyant Force
dIdt[4] = (-args['m']*g) + g*args['V']*(args['rho_air'])
return dIdt
# <h3>Integration</h3>
# I integrate the model with a balloon density of 1 kg m$^{-3}$ over 1000 seconds, with a time step of 0.01 seconds. I have set up the densities of the volume such that the balloon is more dense than the top half of the volume, and less dense than the bottom half. This should result in a soft boundary at the intersection of the two regions, near which the balloon tends to stay.
# +
balloon = balloonObj(0.31, v0=[1.5, 0, 0], r0=[1, 4.5, 1], rho=1)
confiner = confinment([[-5, 5], [0, 10], [-5, 5]])
world = wordIntegrator(confiner, balloon, bouyModel, upper_density=0.5, lower_density=2)
pos = list()
vel = list()
dt = 0.01
time_vals = np.arange(0, 1000, dt)
for t in time_vals:
world.timeEvolve(dt)
pos.append(world.object.pos)
vel.append(world.object.velocity)
pos = np.array(pos)
vel = np.array(vel)
# -
# <h3>Data Visualization</h3>
# I defined a helper function to set the style of all plots in a consistent manner
def setup_plot(xBounds=False, yBounds=False, yBV = [0, 10], xBV = [-5, 5]):
fig, ax = plt.subplots(1, 1, figsize=(10, 7))
if yBounds:
ax.axhline(y=yBV[0], color='gray', alpha=0.5)
ax.axhline(y=yBV[1], color='gray', alpha=0.5)
if xBounds:
ax.axvline(x=xBV[0], color='gray', alpha=0.5)
ax.axvline(x=xBV[1], color='gray', alpha=0.5)
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.tick_params(which='both', labelsize=17, direction='in', top=True, right=True)
ax.tick_params(which='major', length=10, width=1)
ax.tick_params(which='minor', length=5, width=1)
return fig, ax
# First we investigate the x-y position of the balloon from the integration above. Note how the balloon loses energy on impact with the wall (related to its velocity before impact through the calculated coefficient of restitution). However, also note that the balloon reverses velocity in the y direction without interacting with the hard boundary. This demonstrates that the pressure difference may act as a soft boundary (i.e. the balloon can pass through it but will eventually be forced back the way it came).
#
# Because of the energy lost to reflection off the x bounds, the ''wavelength'' of the oscillation shortens with time; this can be seen more clearly in the animation presented below this cell.
# +
fig, ax = setup_plot(xBounds=True, yBounds=True)
ax.plot(pos[:, 0], pos[:, 1], 'k')
ax.set_xlabel('$x$ [m]', fontsize=20)
ax.set_ylabel('$y$ [m]', fontsize=20)
plt.show()
# -
make_animation(pos, 'BallBouncing.mp4', plt, AutoMinorLocator, step=500)
# +
import io
import base64
from IPython.display import HTML
import os
if not os.path.exists('BallBouncing.mp4'):
raise IOError('ERROR! Animation has not been generated to the local directory yet!')
video = io.open('BallBouncing.mp4', 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
# -
# Looking at just the x position vs. time, we see linear sections connected by discontinuities at the bounds, as we would expect given that there are no forces acting on the balloon in the x direction.
# +
fig, ax = setup_plot(yBounds=True, yBV=[-5, 5])
ax.plot(time_vals, pos[:, 0], 'k')
ax.set_xlabel('Time [s]', fontsize=20)
ax.set_ylabel('$x$ [m]', fontsize=20)
plt.show()
# -
# We likewise see what we might expect in the y direction: the balloon oscillating around the pressure boundary.
# +
fig, ax = setup_plot(yBounds=True)
ax.plot(time_vals, pos[:, 1], 'k')
ax.set_xlabel('Time [s]', fontsize=20)
ax.set_ylabel('$y$ [m]', fontsize=20)
plt.show()
# +
fig, ax = setup_plot()
ax.plot(time_vals, vel[:, 0], 'k')
ax.set_xlabel('Time [s]', fontsize=20)
ax.set_ylabel(r'$v_{x}$ [m s$^{-1}$]', fontsize=20)
plt.show()
# +
fig, ax = setup_plot()
ax.plot(time_vals, vel[:, 1], 'k')
ax.set_xlabel('Time [s]', fontsize=20)
ax.set_ylabel(r'$v_{y}$ [m s$^{-1}$]', fontsize=20)
plt.show()
# -
# <h3>Wind & 3 Dimensions</h3>
#
# The model I have presented so far is relatively boring in all but the y direction. It is possible to plot all three spatial dimensions here; however, given there are no forces in either the x or z direction, that does not hold much interest beyond the 2D situations I have presented. Below I present an updated model containing an extra "wind" force along both the x and z axes. It should be noted that this is a contrived force; however, because of the implementation, it may model a somewhat realistic situation.
#
# This more complex model also describes the density based on a function.
def bouyModel_wind(I, t, args):
# 0 1 2 3 4 5
# [x, y, z, vx, vy, vz]
dIdt = np.zeros(6)
dIdt[0] = I[3]
dIdt[1] = I[4]
dIdt[2] = I[5]
dIdt[3] = args['wind'][0](I, t)
# Weight # Boyant Force # Wind Force
dIdt[4] = (-args['m']*g) + g*args['V']*(args['rho_air']) + args['wind'][1](I, t)
dIdt[5] = args['wind'][2](I, t)
return dIdt
class wordIntegrator_wind:
def __init__(self, confiner, obj, model, method=intgs.rk4,
density_func=lambda y, ty: 1.18, wind_0=lambda x, t: 0,
wind_1 = lambda y, t: 0, wind_2=lambda z, t: 0):
self.object = obj
self.method = method
self.model = model
self.confiner = confiner
self.clock = 0
self.step = 0
self.get_rho = density_func
self.wind = (wind_0, wind_1, wind_2)
def get_args(self):
args = dict()
args['m'] = self.object.mass
args['V'] = self.object.volume
args['a1'] = self.object.a1
args['a2'] = self.object.a2
args['a3'] = self.object.a3
args['rho_air'] = self.get_rho(self.object.pos[1], self.confiner.bounds[1][1])
args['wind'] = self.wind
return args
def timeEvolve(self, dt):
cx = self.confiner.check_x(self.object.pos[0])
cy = self.confiner.check_y(self.object.pos[1])
cz = self.confiner.check_z(self.object.pos[2])
if cx:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(0)
if cy:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(1)
if cz:
if self.object.ppos is not None:
self.object.pos = self.object.ppos
self.object.reflect(2)
cI = list(self.object.pos) + list(self.object.velocity)
nI = self.method(self.model, cI, self.clock, dt, self.get_args())
self.object.ppos = self.object.pos
self.object.pos = nI[:3]
self.object.velocity = nI[3:]
self.step += 1
self.clock += dt
# I define both a density function
# $$
# \rho_{air}(y) = 5\sin(\ln(y^{5}))
# $$
# and a function describing wind in the x-direction
# $$
# F_{w,x}(x, t) = \frac{0.01\sin(x)}{0.005961t+0.01}
# $$
# These are then passed into the new wind-enabled integrator. The effects of the wind pushing the balloon to one side are clear.
def density(y, ty):
return 5*np.sin(np.log(y**5))
def xwind(I, t):
return 0.01*np.sin(I[0])/(0.005960*t+0.01)
balloon = balloonObj(0.31, v0=[1.5, 0, 0], r0=[1, 4.5, 1], rho=1)
confiner = confinment([[-5, 5], [0, 10], [-5, 5]])
world = wordIntegrator_wind(confiner, balloon, bouyModel_wind, density_func=density, wind_0=xwind)
pos = list()
vel = list()
dt = 0.01
time_vals = np.arange(0, 1000, dt)
for t in time_vals:
world.timeEvolve(dt)
pos.append(world.object.pos)
vel.append(world.object.velocity)
pos = np.array(pos)
vel = np.array(vel)
# +
fig, ax = setup_plot(xBounds=True, yBounds=True)
ax.plot(pos[:, 0], pos[:, 1], 'k')
ax.set_xlabel('$x$ [m]', fontsize=20)
ax.set_ylabel('$y$ [m]', fontsize=20)
plt.show()
# -
# Finally we will look at 3D. I define the same initial conditions for the integration as above, except I also give the balloon an initial z velocity of
# $$
# v_{z} = -1 \text{ m s}^{-1}
# $$
# I then plot this in 3D below. If one changes the z velocity so that it approaches 0, it is clear how the motion collapses into one plane.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
balloon = balloonObj(0.31, v0=[1.5, 0, -1], r0=[1, 4.5, 1], rho=1)
confiner = confinment([[-5, 5], [0, 10], [-5, 5]])
world = wordIntegrator_wind(confiner, balloon, bouyModel_wind, density_func=density, wind_0=xwind)
pos = list()
vel = list()
dt = 0.1
time_vals = np.arange(0, 1000, dt)
for t in time_vals:
world.timeEvolve(dt)
pos.append(world.object.pos)
vel.append(world.object.velocity)
pos = np.array(pos)
vel = np.array(vel)
# +
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111, projection='3d')
ax.plot(pos[:, 0], pos[:, 1], pos[:, 2], 'k')
ax.set_zlim(-5, 5)
ax.set_xlim(-5, 5)
ax.set_ylim(0, 10)
ax.set_xlabel('$x$ [m]', fontsize=20)
ax.set_ylabel('$y$ [m]', fontsize=20)
ax.set_zlabel('$z$ [m]', fontsize=20)
plt.show()
| BallonInAFluid/BoudreauxNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sakasa/notebooks/blob/master/tokyo_covid19_3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="B9I0otUoM-I_"
import requests
import json
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from pytz import timezone
import numpy as np
import seaborn as sns
# %matplotlib inline
# + [markdown] id="-KCogvsSM-JD"
# ### Tokyo COVID19 Data
# https://github.com/tokyo-metropolitan-gov/covid19
# + id="UAkqklMlM-JD"
url = 'https://raw.githubusercontent.com/tokyo-metropolitan-gov/covid19/development/data/data.json'
# + colab={"base_uri": "https://localhost:8080/"} id="Pegky7yeM-JF" outputId="dddcfa3d-ea20-4098-d355-d341009d40d5"
response = requests.get(url)
print(response)
j_res = json.loads(response.text) # convert the JSON string to a dict
print(j_res.keys())
j_res['patients_summary']['data'][-15:]
# + id="kxbUYb_HB25G"
def utc_datestr_to_jst_date(datestr):
#return datetime.datetime.fromisoformat(datestr.replace('Z', '+00:00')).astimezone(timezone('Asia/Tokyo'))
# for ~Python3.6
return datetime.datetime.strptime(datestr.split('.')[0].replace('-', '').replace(':', ''), '%Y%m%dT%H%M%S').astimezone(timezone('Asia/Tokyo'))
# + id="PtSjr0JLKm8L"
keys = ['contacts', 'querents', 'patients_summary']
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="hl0JfBS5HyAG" outputId="50797b39-2f44-4e0e-e428-fbb828d74135"
d = j_res
df = pd.DataFrame()
for key in keys:
k = f'{key.split("_")[0]}_count'
for v in d[key].values():
if type(v) is list:
index = [_v['日付'] for _v in v]
tmp_df = pd.DataFrame(_v for _v in v)
tmp_df.index = [utc_datestr_to_jst_date(dt) for dt in tmp_df['日付']]
tmp_df[k] = tmp_df['小計']
df = pd.concat([df, tmp_df[k]], axis=1)
df = df.fillna(0).astype('int64')
df.index = [i for i in df.index]
df['datetime'] = df.index
df
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="0bDKk8pzH3I4" outputId="ae46b23a-9b4e-45b2-b80b-ae4c01d6e991"
# 7-day moving average
df1 = df
for key in keys:
k = key.split('_')[0]
df1[f'{k}_7d_mov_ave'] = df[f'{k}_count'].rolling('7D').mean()
# df1[f'{k}_7d_mov_center_ave'] = df[f'{k}_count'].rolling(7, center=True).mean()
df1[f'{k}_7d_mov_sum'] = df[f'{k}_count'].rolling('7D').sum()
df1.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 824} id="29XdYL6bH4JN" outputId="96507186-ca50-40ad-f644-6552faeec260"
df2 = df1
# Set a week-of-year column for each date, formatted as '{yy}w{week num}'
df2['week_num'] = [f'\'{d.strftime("%y")}w{d.strftime("%V")}' for d in df1.index]
# Set a day-of-week column
df2['week_day'] = [d.strftime('%a') for d in df1.index]
df2.tail(15)
# + id="M2Ud9-WfIgct"
# Aggregate by week
groupby_week_num = df2.groupby('week_num', as_index=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="5sr8B_HwIgEW" outputId="4cf887c1-1c28-44aa-9b3c-6ebb119c7801"
# Take the weekly mean and sum from the aggregation and put them into a DataFrame
patients_week_num_df = pd.DataFrame({
# 'patients_week_num': df2['week_num'].unique(),
'patients_week_mean': [m for m in groupby_week_num.agg({'patients_count': 'mean'})['patients_count']],
'patients_week_sum': [s for s in groupby_week_num.agg({'patients_count': 'sum'})['patients_count']]
}, index=[d for d in groupby_week_num.agg({'datetime': 'max'})['datetime']])
patients_week_num_df.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 844} id="fuHCAMAwH3mX" outputId="78252288-b0ab-4944-a6a6-ed5df103785f"
df3 = pd.concat([
df2,
patients_week_num_df.loc[:, ['patients_week_mean', 'patients_week_sum']]
], axis=1)
df3.tail(15)
# + [markdown] id="YYYiYt6sM-JZ"
# ### Plotting the results
# + id="DKYNzasF4Hgv"
sns.set()
sns.set_style('whitegrid')
# + id="EXiY2UjgM-Ja"
### Daily counts
x = df3.index
y1_1 = df3['patients_count']
y1_2 = df3['patients_7d_mov_ave']
#y1_3 = df3['patients_7d_mov_center_ave']
# y1_4 = df3['patients_week_mean']
# + id="EaWNEkjWM-Jc"
### Weekly counts
# y2_1 = df3['patients_7d_mov_sum']
# y2_2 = df3['patients_week_sum']
# + id="Rq5oJbZsbhW6"
# + colab={"base_uri": "https://localhost:8080/", "height": 581} id="tJ4d547xM-Je" outputId="5b4a61ad-1415-42fa-e7ec-b81b416a8904"
fig, ax = plt.subplots(1,1, figsize=(20, 8))
### Daily counts
ax.set_title('daily count')
ax.set_xlabel('date')
ax.set_ylabel('sum')
ax.bar(x, y1_1, label='patients_count')
ax.plot(x, y1_2, label='patients_7d_mov_ave')
#ax[0].plot(x, y1_3, label='patients_7d_mov_center_ave')
# ax[0].bar(x, y1_4, label='patients_week_mean')
ax.grid(linestyle=':')
ax.legend()
# ### Weekly totals
# ax[1].set_title('weekly count')
# ax[1].set_xlabel('date')
# ax[1].set_ylabel('sum')
# ax[1].plot(x, y2_1, label='patients_7d_mov_sum')
# ax[1].bar(x, y2_2, label='patients_week_sum')
# ax[1].grid(linestyle=':')
# ax[1].legend()
plt.tight_layout()
# + [markdown] id="KoRfqEaewYtQ"
# ---
# + id="zQq1tD9PK8-j"
# + id="XTdSG_f3XB9d"
def get_date(d: datetime) -> datetime:
"""時間が入った `datetime` を `%Y-%m-%d 00:00:00` の `datetime` に変換します"""
return datetime.datetime.strptime(d.strftime('%Y-%m-%d'), '%Y-%m-%d')
# + id="lUBuDLEGVwKo"
em_start = datetime.datetime(2020, 4, 7)
em_end = datetime.datetime(2020, 5, 25)
def em_term() -> list:
"""緊急事態宣言期間の日付のリスト"""
em_term = []
for i in range((em_end - em_start).days + 1):
em_term.append(em_start + datetime.timedelta(i))
return em_term
# + id="LP9aknBzVBXP"
def emergency(d: datetime) -> int:
"""緊急事態宣言期間中か"""
return int(get_date(d) in em_term())
# + id="U__BOyP8Wxna"
def em_passed(d: datetime) -> int:
"""緊急事態宣言から経過日数"""
d = get_date(d)
return (d - em_start).days if em_start < d else 0
# + colab={"base_uri": "https://localhost:8080/", "height": 584} id="HFNgCNh6SrwI" outputId="b2d6941e-bfb2-4925-905b-02b1dfc5a804"
ds_df = pd.DataFrame({
    'timestamp': [d.timestamp() for d in df3.index], # timestamp
    'year': np.array(df3.index.strftime('%Y'), dtype=np.int64), # year
    'month': np.array(df3.index.strftime('%m'), dtype=np.int64), # month
    'day': np.array(df3.index.strftime('%d'), dtype=np.int64), # day
    'week_day': df3['week_day'], # day of week
})
# One-hot encode the day of week (dummy variables)
ds_df = pd.concat([ds_df, pd.get_dummies(ds_df['week_day'])], axis=1)
ds_df = ds_df.drop(columns=['week_day'])
# Days elapsed since the first record
# ds_df['passed'] = [(d - df3['datetime'][0]).days for d in df3['datetime']]
ds_df['passed'] = [(d - df3.index[0]).days for d in df3.index]
# Whether the date is within the state-of-emergency period
ds_df['emergency'] = [emergency(d) for d in df3.index]
# Days elapsed since the state of emergency began
ds_df['em_passed'] = [em_passed(d) for d in df3.index]
for key in keys:
k = key.split('_')[0]
    # Daily counts
ds_df[f'{k}_count'] = df3[f'{k}_count']
    # 7-day moving average
ds_df[f'{k}_7d_mov_ave'] = df3[f'{k}_7d_mov_ave']
#ds_df[f'{k}_7d_mov_center_ave'] = df3[f'{k}_7d_mov_center_ave']
    # 7-day moving sum
ds_df[f'{k}_7d_mov_sum'] = df3[f'{k}_7d_mov_sum']
# Daily counts
# ds_df['patients_count'] = df3['patients_count']
# 7-day moving average
# ds_df['patients_7d_mov_ave'] = df3['patients_7d_mov_ave']
#ds_df['7d_mov_center_ave'] = df3['patients_7d_mov_center_ave']
# 7-day moving sum
# ds_df['patients_7d_mov_sum'] = df3['patients_7d_mov_sum']
ds_df = ds_df.reset_index(drop=True)
ds_df.tail(15)
# + colab={"base_uri": "https://localhost:8080/"} id="QYKqB8CSm3_0" outputId="7741722a-263d-4c17-f646-e72f70a58a3b"
ds_df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="aTUpPtDM97uY" outputId="84cd9ffd-405f-4db4-df16-bc2c0beec3fa"
ds_df.describe()
# + id="Na_sHq871CpJ"
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ugaJ-BeYlqzB" outputId="296ead23-a7c5-4cad-83a2-4806a6ca6226"
ds_df.corr()
# + [markdown] id="F2MMo2XSZtgu"
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="EmMjAKb3Z0fJ" outputId="dc5e2c49-29a8-44cd-bead-b48ef3cf04d4"
ds_df.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="E7NmtA3QdTgj" outputId="c4bf60e4-fe93-4ee0-fd94-5daf349765a2"
_df = ds_df.copy()
_df = _df.drop(['year', 'month', 'day',
'contacts_7d_mov_ave', 'contacts_7d_mov_sum',
'querents_7d_mov_ave', 'querents_7d_mov_sum',
], axis=1)
_df
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="IX6EtS1trQFD" outputId="8e1a1a19-e1a7-44f8-9e7f-f398d03ceb3b"
_df2 = _df[['timestamp', 'passed', 'emergency', 'patients_count', 'patients_7d_mov_ave', 'patients_7d_mov_sum']]
_df2
# + id="PBG2UIHpZldo"
sns.set(style="ticks", color_codes=True)
#sns.pairplot(data=_df)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="GvL38TUTbjZA" outputId="06e9a25c-04ec-451e-e8ce-7b9881e609b6"
sns.pairplot(data=_df2)
# + colab={"base_uri": "https://localhost:8080/"} id="i-Wi3OuJmeyE" outputId="1ec55aba-fab2-41b9-f935-d799dc7b6d5e"
# !python -V
# + id="i88Cw7JEL8pG"
| tokyo_covid19_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SciPy
# SciPy is a collection of mathematical algorithms and convenience functions. In this notebook there are just a few examples of the features that are most important to us. But if you want to see all that SciPy has to offer, have a look at the [official documentation](https://docs.scipy.org/doc/scipy/reference/).
#
# Since SciPy has several sublibraries, it is common practice to import just the one we are going to use, as you'll see in the following examples.
import numpy as np
import matplotlib as mpl # ignore this for now
import matplotlib.pyplot as plt # ignore this for now
# # Interpolation
# There are several general interpolation facilities available in SciPy, for data in 1, 2, and higher dimensions. First, let's generate some sample data.
# +
x = np.linspace(0, 10, num=11, endpoint=True)
y = np.cos(-x**2/9.0)
plt.scatter(x,y)
# -
# The `interp1d` function takes data points and **returns a *function***. The default interpolation method is linear interpolation, but there are several to choose from.
# +
from scipy.interpolate import interp1d
f1 = interp1d(x, y) # linear is the default
f2 = interp1d(x, y, kind='cubic') # cubic splines
f3 = interp1d(x, y, kind='nearest') # grab the nearest value
f4 = interp1d(x, y, kind='previous') # hold last value
f5 = interp1d(x, y, kind='next') # grab the next value
# +
print(f1(4))
print(f2(4))
print(f1(4.6))
print(f2(4.6))
# -
# Now that we have the interpolated functions, let's generate a tighter grid on the x axis and plot the results of the different interpolation methods.
xnew = np.linspace(0, 10, num=101, endpoint=True)
xnew
plt.plot(x, y, 'o', xnew, f1(xnew), '-', xnew, f2(xnew), '--', xnew, f3(xnew), '-.')
plt.legend(['data', 'linear', 'cubic', 'nearest'], loc='best')
plt.show()
# The `interpolate` sublibrary also has interpolation methods for multivariate data and has **integration with pandas**. Have a look at the documentation.
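# As a quick, minimal sketch of the multivariate case (not covered in detail here), `scipy.interpolate.griddata` interpolates scattered 2-D samples onto a regular grid. The sample surface and grid below are arbitrary choices made purely for illustration.
# +
from scipy.interpolate import griddata

np.random.seed(0)
points = np.random.rand(200, 2)                                 # 200 scattered (x, y) sample locations
values = np.sin(points[:, 0] * 6) * np.cos(points[:, 1] * 6)    # sampled values of an arbitrary surface
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:100j]                   # regular grid to interpolate onto
grid_z = griddata(points, values, (grid_x, grid_y), method='cubic')
plt.imshow(grid_z.T, extent=(0, 1, 0, 1), origin='lower')
plt.title('griddata (cubic) on scattered samples')
plt.show()
# -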
# # Definite Integrals
# The function `quad` is provided to integrate a function of one variable between two points. This function has 2 outputs: the first one is the computed integral value and the second is an estimate of the absolute error.
# +
import scipy.integrate as integrate
def my_func(x):
return x**2
integrate.quad(my_func, 0, 2)
# -
# The `quad` function also allows for infinite limits.
#
# $$
# \int_{-\infty}^{\infty} e^{-x^{2}}dx
# $$
# +
def my_func(x):
return np.exp(-x**2)
integrate.quad(my_func, -np.inf, np.inf)
# -
# SciPy's `integrate` library also has functions for double and triple integrals. Check them out in the documentation.
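# As a minimal sketch, `dblquad` works much like `quad` but takes the outer (x) limits as numbers and the inner (y) limits as functions of x, with the integrand called as `f(y, x)`. The integrand below is an arbitrary choice: $\int_0^2 \int_0^1 xy \, dy \, dx = 1$.
# +
from scipy import integrate

# expected value: (2**2 / 2) * (1**2 / 2) = 1.0, returned together with an error estimate
integrate.dblquad(lambda y, x: x * y, 0, 2, lambda x: 0, lambda x: 1)
# -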
# # Optimization
# The `scipy.optimize` package provides several commonly used optimization algorithms. Here we are going to use just one to illustrate.
#
# Consider that you have 3 assets available. Their expected returns, risks (standard deviations) and betas are in the table below, and $\rho$ is the correlation matrix of the returns.
#
# | Asset | Return | Risk | Beta |
# |-------|--------|------|------|
# |A |3% | 10% | 0.5 |
# |B |3.5% | 11% | 1.2 |
# |C |5% | 15% | 1.8 |
#
# $$
# \rho =
# \begin{bmatrix}
# 1 & 0.3 & -0.6 \\
# 0.3 & 1 & 0 \\
# -0.6 & 0 & 1
# \end{bmatrix}
# $$
#
# Use the `minimize` function to find the weights of each asset that maximize the portfolio's Sharpe index.
# +
retu = np.array([0.03, 0.035, 0.05])
risk = np.array([0.10, 0.11, 0.15])
beta = np.array([0.5, 1.2, 1.8])
corr = np.array([[1, 0.3, -0.6],
[0.3, 1, 0],
[-0.6, 0, 1]])
def port_return(w):
return retu.dot(w)
def port_risk(w):
covar = np.diag(risk).dot(corr).dot(np.diag(risk))
return (w.dot(covar).dot(w))**0.5
def port_sharpe(w):
return -1*(port_return(w) / port_risk(w)) # The -1 is because we want to MINIMIZE the negative of the Sharpe
def port_weight(w):
return w.sum()
# -
# When declaring an optimization problem with constraints in SciPy, inequality constraints take the form $g(w) \geq 0$ and equality constraints the form $h(w) = 0$ (the latter is what the budget constraint below uses):
#
# $$
# \begin{align*}
# \min_{w} & f\left(w\right)\\
# s.t. & g\left(w\right)\geq0\\
#  & h\left(w\right)=0
# \end{align*}
# $$
# +
from scipy.optimize import minimize
eq_cons = {'type': 'eq',
'fun' : lambda w: port_weight(w) - 1}
w0 = np.array([1, 0, 0])
res = minimize(port_sharpe, w0, method='SLSQP', constraints=eq_cons, options={'ftol': 1e-9, 'disp': True})
# -
res.x
res.x.sum()
-1*res.fun
# # Linear Algebra (again)
# `scipy.linalg` contains all the functions in `numpy.linalg` plus some more advanced ones.
# +
from scipy import linalg as la
A = np.array([[1,3,5],[2,5,1],[2,3,8]])
la.inv(A)
# -
# Matrix and vector **norms** can also be computed with SciPy. A wide range of norm definitions are available using different parameters to the order argument of `linalg.norm`.
A = np.array([[1, 2], [3, 4]])
print(la.norm(A)) # frobenius norm is the default.
print(la.norm(A, 1)) # L1 norm (max column sum)
print(la.norm(A, np.inf)) # L inf norm (max row sum)
# Some more advanced matrix decompositions are also available, like the **Schur Decomposition**
la.schur(A)
# Some notable matrices can also be created, like block **diagonal matrices**.
# +
A = np.array([[1, 0],
[0, 1]])
B = np.array([[3, 4, 5],
[6, 7, 8]])
C = np.array([[7]])
la.block_diag(A, B, C)
# -
# # Solving Linear Systems
#
#
# $$
# \begin{align}
# x+3y+5z & =10\\
# 2x+5y+z & =8\\
# 2x+3y+8z & =3
# \end{align}
# $$
#
# The system above can be written in matrix notation as $AX=B$ and we know we can find the solution by doing $X=A^{-1}B$, but inverting a matrix is computationally expensive. When solving big linear systems it is advised to use the `solve` method.
A = np.array([[1, 3, 5], [2, 5, 1], [2, 3, 8]])
B = np.array([[10], [8], [3]])
# Let's check the time it takes to solve the system both ways...
la.inv(A).dot(B)
la.solve(A, B)
# let's try with a bigger matrix
import numpy.random as rnd
A = rnd.random((1000, 1000))
B = rnd.random((1000, 1))
# %%timeit
la.inv(A).dot(B)
# %%timeit
la.solve(A, B)
| fhnotebooks/Introduction to Python/Section 03 - SciPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="Logo.png" width="100" align="left"/>
#
# # <center> Preparatory Unit project:</center>
#
#
#
# Congratulations on finishing the lessons content for this preparatory unit!!
#
# At this stage it's important to test your theoretical concepts from a practical side, and that's exactly the goal of this project.
#
# ## Some guidelines:
# 1. To run a cell you can use the shortcut: Shift + Enter
#
# 2. Only sections marked as To-Do are the places where you should put in your own code; other than that, we do not recommend changing the provided code.
#
# 3. You will be graded on the readability of your code, so make sure you respect the correct indentation and that your code contains suitable variable names.
#
# 4. This notebook is designed in a sequential way, so if you work on your project across different days make sure to run the previous cells before running the one you want.
#
# 5. Teaching assistants in the Slack space remain available to answer any questions you might have.
#
# >Best of luck !
# ## Project Sections:
# In this project you will have a chance to practice most of the important aspects we saw throughout The Preparatory Unit.
# This project is divided into 5 sections:
#
#
# 1. [Setting the environement](#set_env)
#
# 2. [Importing necessary tools](#importing)
#
# 3. [SQLite section](#sql)
#
# 4. [Data types section](#datatypes)
#
# 5. [Linear Algebra section](#algebra)
#
# ### 1. Setting the environement: <a id='set_env'></a>
# Make sure you have virtualenv installed
# !pip install --user virtualenv
# To-Do: create a virtual environment called myenv
# !virtualenv myenv
# Activate the environment
# !myenv\Scripts\activate
# Add this virtual environment to Jupyter notebook
# !pip install --user ipykernel
# !python -m ipykernel install --user --name=myenv
# Install the necessary dependencies
# !pip install scipy
# !pip install numpy
# > Please check if you have SQLite installed on your device. For more information head to the SQL lesson.
# ### 2. Importing necessary tools:<a id='importing'></a>
from data import database_manager as dm
import utils
from utils import convert_to_floats
from matplotlib import pyplot
from linear_algebra import curve_fitting as cf
# ### 3. SQLite section : <a id='sql'></a>
# create a connection to the database
connexion = dm.create_connection("longley.db")
# To-Do : retrieve rows of the table
rows = dm.select_all(connexion)
# > Since at this stage we already retrieved our data it's more memory efficient to close the connection to our database.
connexion.close()
# ### 4. Data types section : <a id='datatypes'></a>
#
# Let's check the datatypes of the retrieved rows
rows
# > This is a list containing multiple tuples, each tuple is a row in the Table with each element within this tuple being a string.
# We will be executing mathematical operations on these values and hence we need them in a numerical format. Each value contains decimal fractions, which means the suitable type to convert to is either double or float. In this case we need to convert these values to a float format. Head over to the "utils.py" file and complete the function convert_to_floats so it can do so.
# +
# To-Do convert to an ndarray of floats by calling the function convert_to_floats from the utils file
# make sure to set some requirements in that function before you call it here
data = convert_to_floats(rows)
# -
import numpy as np
# let's check the shape
np.shape(data)
# Let's see the format
data
# ### 5. Linear Algebra section: <a id='algebra'></a>
# Let's check if the two variables GNP.deflator and year are correlated
x, y = data[:,5],data[:, 0]
pyplot.scatter(x,y)
pyplot.xlabel("Year")
pyplot.ylabel("GNP.deflactor")
pyplot.show()
# > You can clearly see that the two variables, GNP.deflator (y axis) and year (x axis), are correlated. In other words, the GNP.deflator is increasing throughout the years.
# Given this trend, it makes sense that we can fit a line to these data points, a line that describes the trend. And this is our task for this section.
# #### Explanation:
# Curve fitting aims to find the best curve equation for a number of correlated variables. In our example we aim to find the equation of the line that best fits these points. Such a line should be at minimum distance from all points on average.
#
# Because we are dealing with two variables only, the line's equation should be of the form y = a*x + b, which is a typical linear equation.
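# As a generic, standalone illustration of this idea on made-up data (the project's own set_objective and get_result still belong in linear_algebra/curve_fitting.py and are left for you to write), scipy.optimize.curve_fit can recover the slope and intercept of a noisy line:
# +
# Illustration only: synthetic data, independent of the project files
from scipy.optimize import curve_fit
import numpy as np

def straight_line(x, a, b):
    return a * x + b

x_demo = np.linspace(0, 10, 50)
y_demo = 3.0 * x_demo + 1.5 + np.random.normal(0, 0.5, size=x_demo.size)  # noisy y = 3x + 1.5
popt, pcov = curve_fit(straight_line, x_demo, y_demo)
print("estimated a, b:", popt)
# -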
#
# To achieve this you will have to:
#
# 1. Head to the linear_algebra/curve_fitting.py file.
#
# 2. Set the objective function's code (function set_objective); the objective function is the one that returns the typical shape of our desired linear equation (a*x + b). Please delete the "pass" statement and write your code.
#
# 3. Here in this notebook, in the cell below, call the function get_result, pass x and y to it, and get back the optimal values of "a" and "b".
#
#
# To-Do get the values of a and b using the get_result function
a, b = cf.get_result(x, y)
# +
# plotting the result
from numpy import arange
pyplot.scatter(x, y)
# define a sequence of inputs between the smallest and largest known inputs
x_line = arange(min(x), max(x), 1)
# calculate the output for the range
y_line = cf.set_objective(x_line, a, b)
# create a line plot for the mapping function
pyplot.plot(x_line, y_line, '--', color='red')
pyplot.show()
# -
# > yohooo ! It's indeed working!!!
# # Final thoughts :
# This curve fitting process can have many use cases within the machine learning workflow.
#
# Curve fitting can be used as a way to fill in missing values. Datasets aren't always clean; in fact, in 90% of the cases we need to do some pre-processing and cleaning of the data before using it in any analysis. This cleaning can include filling in missing values: if some data points are missing values for some features, and we have a "model" (a curve that describes the trend or correlation between two of our existing features), we can use it to infer those missing values. As a result, curve fitting can be used in the data-cleaning step of the workflow.
#
# Another use case is when the curve fitting itself is our end goal: we are cleaning and modeling because the objective is to obtain such an equation. In this case the curve fitting is the heart of the machine learning project.
| experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
import numpy as np
import pandas as pd
import tensorflow as tf
# + deletable=true editable=true
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv("test.csv")
# + deletable=true editable=true
def get_chunk(train_data, chunkSize=100):
dummies = pd.get_dummies(train_data['label'], prefix='label', drop_first=False)
samples = train_data.drop('label', axis=1)
labels = dummies
datas = pd.concat([labels, samples], axis=1)
random_batch = datas.loc[np.random.randint(42000, size=chunkSize)]
images = random_batch.iloc[:, 10:].values
images = images.astype(np.float)
images = np.multiply(images, 1.0 / 255.0)
labels = random_batch.iloc[:, 0:10].values
return images, labels
# Define a fully connected neural network
class Fcn():
def __init__(self, in_units=784, h1_units=300):
self.in_units = in_units
self.h1_units = h1_units
self.graph = tf.Graph()
self.train_images = None
self.train_labels = None
self.test_images = None
self.test_labels = None
def define_graph(self):
with self.graph.as_default():
            # Define the graph's variables here
self.train_images = tf.placeholder(
tf.float32, shape=(None, self.in_units)
)
self.train_labels = tf.placeholder(
tf.float32, shape=(None, 10)
)
self.test_images = tf.placeholder(
tf.float32, shape=(None, self.in_units)
)
self.w1 = tf.Variable(tf.truncated_normal([self.in_units, self.h1_units], stddev = .1))
self.b1 = tf.Variable(tf.zeros([self.h1_units]))
self.w2 = tf.Variable(tf.truncated_normal([self.h1_units, 10], stddev = .1))
self.b2 = tf.Variable(tf.zeros([10]))
self.x = tf.placeholder(tf.float32, [None, self.in_units])
self.keep_prob = tf.placeholder(tf.float32)
def model(x):
hidden1= tf.nn.relu(tf.matmul(x, self.w1) + self.b1)
hidden1_drop = tf.nn.dropout(hidden1, self.keep_prob)
y = tf.nn.softmax(tf.matmul(hidden1_drop, self.w2) + self.b2)
return y
            # Training computation.
logits = model(self.train_images)
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.train_labels)
)
            # Optimizer.
self.optimizer = tf.train.AdagradOptimizer(0.01).minimize(self.loss)
            # Predictions for the training, validation, and test data.
self.train_prediction = tf.nn.softmax(logits)
self.test_prediction = tf.nn.softmax(model(self.test_images))
def run(self):
self.session = tf.Session(graph=self.graph)
with self.session as sess:
tf.global_variables_initializer().run()
print("训练开始")
for i in range(1000):
samples, labels = get_chunk(train_data)
                sess.run([self.optimizer, self.loss], feed_dict={
self.train_images: samples,
self.train_labels: labels,
self.keep_prob: 0.75
})
if(i % 50 == 0):
                    print('{} step'.format(i))
print("训练结束")
# + deletable=true editable=true
f = Fcn()
f.define_graph()
f.run()
# + deletable=true editable=true
# + deletable=true editable=true
| digits_reco/digits_reco_bad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
# -
# # Fit only one SED
from pyGRBz.pyGRBz import GRB_photoZ
# Load module
photoz = GRB_photoZ(output_dir='/results/Tuto/SED/')
# Load the GRB SED stored in data/sed/
photoz.load_data(data_dir='/data/sed/',data_name=['GRB050904'])
# Format the data in order to apply galactic extinction and compute the flux in Jansky for each observation
photoz.formatting()
# +
# Extract the SED at a given time.
# First the data are fitted either with a single power law (SPL) or a broken power law (BPL)
# Secondly the time at which to extract the SED can be either 'fixed' (needs to be given through time_SED in seconds) or
# computed to be the time at which the flux is maximum in the reddest band ('ReddestBand')
# In case the input data is already a SED, this function still has to run in order to have the right
# formatting for the following computations
photoz.extract_sed(model='SPL',method='ReddestBand')
#a.extract_sed(model='BPL',method='fixed',time_SED=70)
# -
# Create flat priors
priors=dict(z=[0,11],Av=[0,2],beta=[0,2],norm=[0,10])
# +
# Run the MCMC algorithm.
# Select the extinction law to use: 'smc', 'lmc', 'mw', 'nodust'
# Nthreads: number of threads to use in case of parallelisation
# nwalkers: number of walkers
# Nsteps1: number of steps for the first burn-in phase
# Nsteps2: number of steps for the second burn-in phase
# Nsteps3: number of steps for the production run
# Select to add dust, gas in host and our galaxy
# Select IGM transmission method: 'Madau' or 'Meiksin'
photoz.fit(ext_law='smc',Nthreads=4,sampler_type='ensemble',nwalkers=30,Nsteps1=300,Nsteps2=1000,nburn=300,
Host_dust=True,Host_gas=False,MW_dust=False,MW_gas=False,DLA=False,igm_att='Meiksin',
clean_data=False,plot_all=False,plot_deleted=False,priors=priors)
# -
# # Fit only one Light Curve
from pyGRBz.pyGRBz import GRB_photoZ
# Load module
photoz = GRB_photoZ(output_dir='/results/Tuto/LC/')
# Load the same GRB but with its light curve stored in data/lc/
photoz.load_data(data_dir='/data/lc/',data_name=['GRB050904'])
# Format the data in order to apply galactic extinction and compute the flux in Jansky for each observation
photoz.formatting()
# +
# Extract the SED at a given time.
# First the data are fitted either with a single power law (SPL) or a broken power law (BPL)
# Secondly the time at which to extract the SED can be either 'fixed' (needs to be given through time_SED in seconds) or
# computed to be the time at which the flux is maximum in the reddest band ('ReddestBand')
# In case the input data is already a SED, this function still has to run in order to have the right
# formatting for the following computations
photoz.extract_sed(model='SPL',method='ReddestBand')
#a.extract_sed(model='BPL',method='fixed',time_SED=70)
# -
# Create flat priors
priors=dict(z=[0,11],Av=[0,2],beta=[0,2],norm=[0,10])
# +
# Run the MCMC algorithm.
# Select the extinction law to use: 'smc', 'lmc', 'mw', 'nodust'
# Nthreads: number of threads to use in case of parallelisation
# nwalkers: number of walkers
# Nsteps1: number of steps for the first burn-in phase
# Nsteps2: number of steps for the second burn-in phase
# Nsteps3: number of steps for the production run
# Select to add dust, gas in host and our galaxy
# Select IGM transmission method: 'Madau' or 'Meiksin'
photoz.fit(ext_law='smc',Nthreads=4,sampler_type='ensemble', nwalkers=30,Nsteps1=300,Nsteps2=1000,nburn=300,
Host_dust=True,Host_gas=False,MW_dust=False,MW_gas=False,DLA=False,igm_att='Meiksin',
clean_data=False,plot_all=False,plot_deleted=False,priors=priors)
# -
# # Run code for multiple Targets
from pyGRBz.pyGRBz import GRB_photoZ
# Load module
photoz = GRB_photoZ(output_dir='/results/Tuto/MultipleTargets/')
# Load as many targets as you want. It can be a mix of SEDs and light curves
photoz.load_data(data_dir='/data/sed/',
data_name=['GRB050904','GRB080825B','GRB080906'])
#data_name=['GRB050904','GRB080825B','GRB080906','GRB080913','GRB080916C','GRB081228','GRB090423',
# 'GRB090429B','GRB090516','GRB100518A','GRB110721A','GRB120712A','GRB120922A','GRB130215A',
# 'GRB130327A','GRB130408A','GRB130514A','GRB130606A','GRB130907A','GRB130925A','GRB131117A',
# 'GRB140419A','GRB140515A','GRB140518A','GRB140614A','GRB141109A','GRB150120B','GRB150910A',
# 'GRB151027B','GRB160203A','GRB160327A','GRB160625B'])
# Format the data in order to apply galactic extinction and compute the flux in Jansky for each observation
photoz.formatting()
# +
# Extract the SED at a given time.
# First the data are fitted either with a single power law (SPL) or a broken power law (BPL)
# Secondly the time at which to extract the SED can be either 'fixed' (needs to be given through time_SED in seconds) or
# computed to be the time at which the flux is maximum in the reddest band ('ReddestBand')
# In case the input data is already a SED, this function still has to run in order to have the right
# formatting for the following computations
photoz.extract_sed(model='SPL',method='ReddestBand')
#a.extract_sed(model='BPL',method='fixed',time_SED=70)
# -
# Create flat priors
priors=dict(z=[0,11],Av=[0,2],beta=[0,2],norm=[0,10])
# +
# Run the MCMC algorithm.
# Select the extinction law to use: 'smc', 'lmc', 'mw', 'nodust'
# Nthreads: number of threads to use in case of parallelisation
# nwalkers: number of walkers
# Nsteps1: number of steps for the first burn-in phase
# Nsteps2: number of steps for the second burn-in phase
# Nsteps3: number of steps for the production run
# Select to add dust, gas in host and our galaxy
# Select IGM transmission method: 'Madau' or 'Meiksin'
photoz.fit(ext_law='smc',Nthreads=4,sampler_type= 'ensemble',nwalkers=30,Nsteps1=300,Nsteps2=1000,nburn=300,
Host_dust=True,Host_gas=False,MW_dust=False,MW_gas=False,DLA=False,igm_att='Meiksin',
clean_data=False,plot_all=False,plot_deleted=False,priors=priors)
# -
# Plot all redshift estimations together
photoz.plot_zsim_zphot(input_file='best_fits_all_smc',output_suffix='_smc_1sig',sigma=1,
input_dir='/results/Tuto/MultipleTargets/',output_dir='/results/Tuto/MultipleTargets/')
| notebooks/Tuto_photoZ.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="color:#777777;background-color:#ffffff;font-size:12px;text-align:right;">
# prepared by <NAME> (QuSoft@Riga) | November 07, 2018
# </div>
# <table><tr><td><i> I have some macros here. If there is a problem with displaying mathematical formulas, please run me to load these macros.</i></td></td></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\inner}[2]{\langle #1,#2\rangle} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2>Quantum State</h2>
#
# The overall probability must be 1 when we observe a quantum system.
#
# For example, the following vectors <u>cannot</u> be valid quantum states:
#
# $$
# \myvector{ \frac{1}{2} \\ \frac{1}{2} }
# \mbox{ and }
# \myvector{ \frac{\sqrt{3}}{2} \\ \frac{1}{\sqrt{2}} }.
# $$
#
# For the first vector: the probabilities of observing the states $\ket{0} $ and $ \ket{1} $ are $ \frac{1}{4} $.
#
# So, the overall probability of getting a result is $ \frac{1}{4} + \frac{1}{4} = \frac{1}{2} $, which is less than 1.
#
# For the second vector: the probabilities of observing the states $\ket{0} $ and $ \ket{1} $ are respectively $ \frac{3}{4} $ and $ \frac{1}{2} $.
#
# So, the overall probability of getting a result is $ \frac{3}{4} + \frac{1}{2} = \frac{5}{4} $, which is greater than 1.
# <font color="blue"><b>The summation of amplitude squares must be 1 for a valid quantum state.</b></font>
# <font color="blue"><b>In other words, a quantum state can be represented by a vector having length 1, and vice versa.</b></font>
#
# <i>The summation of amplitude squares gives the square of the length of the vector.
#
# But this summation is 1, and its square root is also 1. So, we directly use the term <u>length</u> in the definition.</i>
#
# We represent a quantum state as $ \ket{u} $ instead of $ u $.
#
# Remember the relation between the length and inner product: $ \norm{u} = \sqrt{\inner{u}{u}} $.
#
# In quantum computation, we use almost the same notation for the inner product: $ \braket{u}{u}$.
#
# $ \norm{ \ket{u} } = \sqrt{ \braket{u}{u} } = 1 $, or equivalently $ \braket{u}{u} = 1 $.
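# As a quick numerical check of this rule (simply redoing the arithmetic above in code), we can sum the squared amplitudes of the two example vectors and confirm that neither sums to 1:
# +
# sum of squared amplitudes for the two example vectors above
first_vector = [1/2, 1/2]
second_vector = [(3**0.5)/2, 1/(2**0.5)]
print(sum(a**2 for a in first_vector))   # 0.5  -> not a valid quantum state
print(sum(a**2 for a in second_vector))  # 1.25 -> not a valid quantum state
# -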
# <h3> Task 1 </h3>
#
# Let $a$ and $b$ be real numbers.
#
# If the following vectors are valid quantum states, then what can be the values of $a$ and $b$?
#
# $$
# \ket{v} = \myrvector{a \\ -0.1 \\ -0.3 \\ 0.4 \\ 0.5}
# ~~~~~ \mbox{and} ~~~~~
# \ket{u} = \myrvector{ \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{b}} \\ -\frac{1}{\sqrt{3}} }.
# $$
#
# your code is here or you may find the values by hand (in mind)
#
# <a href="..\bronze-solutions\B46_Quantum_State_Solutions.ipynb#task1">click for our solution</a>
# <h3> Quantum Operators </h3>
#
# Once the quantum state is defined, the definition of quantum operator is very easy.
#
# <font color="blue"><b>Any length preserving matrix is a quantum operator, and vice versa.</b></font>
# <h3> Task 2</h3>
#
# Remember Hadamard operator:
#
# $$
# H = \hadamard.
# $$
#
# Let's randomly create a 2-dimensional quantum state, and test whether the Hadamard operator preserves the length or not.
#
# Write a function that returns a randomly created 2-dimensional quantum state:
# <ul>
# <li> Pick a random value between 0 and 100 </li>
# <li> Divide it by 100</li>
# <li> Take the square root of it</li>
# <li> Randomly determine its sign ($+$ or $-$)</li>
# <li> This is the first entry of the vector </li>
# <li> Find an appropriate value for the second entry </li>
# <li> Randomly determine its sign ($+$ or $-$)</li>
# </ul>
#
# Write a function that determines whether a given vector is a valid quantum state or not.
#
# (Due to precision problems, the summation of squares may not be exactly 1 but very close to 1, e.g., 0.9999999999999998.)
#
# Repeat 10 times:
# <ul>
# <li> Randomly create a quantum state </li>
# <li> Multiply Hadamard matrix with the randomly created quantum state </li>
# <li> Check whether the resulting quantum state is valid </li>
# </ul>
#
# Your solution goes here (a sketch of one possible solution is given below).
#
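# A minimal sketch of one possible solution (our own; the official solution may differ).
# +
from random import randrange
from math import sqrt

def random_quantum_state():
    first = sqrt(randrange(101) / 100)        # pick a random value in [0,100], divide by 100, take the square root
    if randrange(2) == 0: first = -first      # random sign for the first entry
    second = sqrt(1 - first**2)               # the squared entries must sum to 1
    if randrange(2) == 0: second = -second    # random sign for the second entry
    return [first, second]

def is_quantum_state(v):
    # allow for tiny floating point errors around 1
    return abs(sum(x * x for x in v) - 1) < 1e-9

h = 1 / sqrt(2)
H = [[h, h], [h, -h]]                         # the Hadamard matrix

for _ in range(10):
    v = random_quantum_state()
    Hv = [H[0][0] * v[0] + H[0][1] * v[1], H[1][0] * v[0] + H[1][1] * v[1]]
    print(v, "->", Hv, "is valid:", is_quantum_state(Hv))
# -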
# <a href="..\bronze-solutions\B46_Quantum_State_Solutions.ipynb#task2">click for our solution</a>
| community/awards/teach_me_quantum_2018/bronze/bronze/B46_Quantum_State.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 9 - Intro to Encrypted Programs
#
# Believe it or not, it is possible to compute with encrypted data. In other words, it's possible to run a program where **ALL of the variables** in the program are **encrypted**!
#
# In this tutorial, we're going to walk through very basic tools of encrypted computation. In particular, we're going to focus on one popular approach called Secure Multi-Party Computation. In this lesson, we'll learn how to build an encrypted calculator which can perform calculations on encrypted numbers.
#
# Authors:
# - <NAME> - Twitter: [@iamtrask](https://twitter.com/iamtrask)
# - <NAME> - GitHub: [@LaRiffle](https://github.com/LaRiffle)
#
# References:
# - <NAME> - [Blog](https://mortendahl.github.io) - Twitter: [@mortendahlcs](https://twitter.com/mortendahlcs)
# # Step 1: Encryption Using Secure Multi-Party Computation
#
# SMPC is at first glance a rather strange form of "encryption". Instead of using a public/private key to encrypt a variable, each value is split into multiple `shares`, each of which operates like a private key. Typically, these `shares` will be distributed amongst 2 or more _owners_. Thus, in order to decrypt the variable, all owners must agree to allow the decryption. In essence, everyone has a private key.
#
# ### Encrypt()
#
# So, let's say we wanted to "encrypt" a variable `x`, we could do so in the following way.
#
# > Encryption doesn't use floats or real numbers but happens in a mathematical space called an [integer quotient ring](http://mathworld.wolfram.com/QuotientRing.html), which is basically the integers between `0` and `Q-1`, where `Q` is prime and "big enough" so that the space can contain all the numbers that we use in our experiments. In practice, given an integer value `x`, we compute `x % Q` to fit it into the ring (that's why we avoid using numbers `x > Q`).
Q = 1234567891011
x = 25
# +
import random
def encrypt(x):
share_a = random.randint(-Q,Q)
share_b = random.randint(-Q,Q)
share_c = (x - share_a - share_b) % Q
return (share_a, share_b, share_c)
# -
encrypt(x)
# As you can see here, we have split our variable `x` into 3 different shares, which could be sent to 3 different owners.
#
# ### Decrypt()
#
# If we wanted to decrypt these 3 shares, we could simply sum them together and take the modulus of the result (mod Q)
def decrypt(*shares):
return sum(shares) % Q
a,b,c = encrypt(25)
decrypt(a, b, c)
# Importantly, notice that if we try to decrypt with only two shares, the decryption does not work!
decrypt(a, b)
# Thus, we need all of the owners to participate in order to decrypt the value. It is in this way that the `shares` act like private keys, all of which must be present in order to decrypt a value.
# # Step 2: Basic Arithmetic Using SMPC
#
# However, the truly extraordinary property of Secure Multi-Party Computation is the ability to perform computation **while the variables are still encrypted**. Let's demonstrate simple addition below.
x = encrypt(25)
y = encrypt(5)
def add(x, y):
z = list()
# the first worker adds their shares together
z.append((x[0] + y[0]) % Q)
# the second worker adds their shares together
z.append((x[1] + y[1]) % Q)
# the third worker adds their shares together
z.append((x[2] + y[2]) % Q)
return z
decrypt(*add(x,y))
# ### Success!!!
#
# And there you have it! If each worker (separately) adds their shares together, then the resulting shares will decrypt to the correct value (25 + 5 == 30).
#
# As it turns out, SMPC protocols exist which can allow this encrypted computation for the following operations:
# - addition (which we've just seen)
# - multiplication
# - comparison
#
# and using these basic underlying primitives, we can perform arbitrary computation!!!
#
# In the next section, we're going to learn how to use the PySyft library to perform these operations!
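# Before moving on to PySyft, here is a rough, simplified sketch (our addition, not part of the original tutorial) of how multiplication of two additively shared values can be carried out with a so-called "Beaver triple" supplied by a trusted third party. PySyft's crypto provider, introduced below, plays a similar role. For simplicity this sketch uses only 2 shares per value.
# +
import random

Q = 1234567891011

def share(x):
    share_a = random.randint(0, Q - 1)
    share_b = (x - share_a) % Q
    return [share_a, share_b]

def reconstruct(shares):
    return sum(shares) % Q

def mul(x_shares, y_shares):
    # the "crypto provider" generates a random triple (a, b, c) with c = a * b and shares it
    a, b = random.randint(0, Q - 1), random.randint(0, Q - 1)
    c = (a * b) % Q
    a_sh, b_sh, c_sh = share(a), share(b), share(c)
    # the parties jointly open epsilon = x - a and delta = y - b (these reveal nothing about x or y)
    epsilon = reconstruct([(x_shares[i] - a_sh[i]) % Q for i in range(2)])
    delta = reconstruct([(y_shares[i] - b_sh[i]) % Q for i in range(2)])
    # each party computes its share of x*y = c + epsilon*b + delta*a + epsilon*delta
    z_shares = [(c_sh[i] + epsilon * b_sh[i] + delta * a_sh[i]) % Q for i in range(2)]
    z_shares[0] = (z_shares[0] + epsilon * delta) % Q  # the public constant term is added by one party only
    return z_shares

print(reconstruct(mul(share(25), share(5))))  # should print 125
# -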
# # Step 3: SMPC Using PySyft
#
# In the previous sections, we outlined some basic intuitions around how SMPC is supposed to work. However, in practice we don't want to have to hand-write all of the primitive operations ourselves when writing our encrypted programs. So, in this section we're going to walk through the basics of how to do encrypted computation using PySyft. In particular, we're going to focus on the 3 primitives previously mentioned: addition, multiplication, and comparison.
#
# First, we need to create a few Virtual Workers (which hopefully you're now familiar with given our previous tutorials).
# +
import torch
import syft as sy
hook = sy.TorchHook(torch)
bob = sy.VirtualWorker(hook, id="bob")
alice = sy.VirtualWorker(hook, id="alice")
bill = sy.VirtualWorker(hook, id="bill")
# -
# ### Basic Encryption/Decryption
#
# Encryption is as simple as taking any PySyft tensor and calling .share(). Decryption is as simple as calling .get() on the shared variable
x = torch.tensor([25])
x
encrypted_x = x.share(bob, alice, bill)
encrypted_x.get()
# ### Introspecting the Encrypted Values
#
# If we look closer at Bob, Alice, and Bill's workers, we can see the shares that get created!
list(bob._tensors.values())
x = torch.tensor([25]).share(bob, alice, bill)
# Bob's share
bobs_share = list(bob._tensors.values())[0]
bobs_share
# Alice's share
alices_share = list(alice._tensors.values())[0]
alices_share
# Bill's share
bills_share = list(bill._tensors.values())[0]
bills_share
# And if we wanted to, we could decrypt these values using the SAME approach we talked about earlier!!!
(bobs_share + alices_share + bills_share)
# As you can see, when we called `.share()` it simply split the value into 3 shares and sent one share to each of the parties!
# # Encrypted Arithmetic
#
# And now you see that we can perform arithmetic on the underlying values! The API is constructed so that we can simply perform arithmetic just like we would with normal PyTorch tensors.
x = torch.tensor([25]).share(bob,alice)
y = torch.tensor([5]).share(bob,alice)
z = x + y
z.get()
z = x - y
z.get()
# # Encrypted Multiplication
#
# For multiplication we need an additional party who is responsible for consistently generating random numbers (and not colluding with any of the other parties). We call this person a "crypto provider". For all intents and purposes, the crypto provider is just an additional VirtualWorker, but it's important to acknowledge that the crypto provider is not an "owner" in that he/she doesn't hold shares, but is someone who needs to be trusted not to collude with any of the existing shareholders.
crypto_provider = sy.VirtualWorker(hook, id="crypto_provider")
x = torch.tensor([25]).share(bob,alice, crypto_provider=crypto_provider)
y = torch.tensor([5]).share(bob,alice, crypto_provider=crypto_provider)
# +
# multiplication
z = x * y
z.get()
# -
# You can also do matrix multiplication
x = torch.tensor([[1, 2],[3,4]]).share(bob,alice, crypto_provider=crypto_provider)
y = torch.tensor([[2, 0],[0,2]]).share(bob,alice, crypto_provider=crypto_provider)
# +
# matrix multiplication
z = x.mm(y)
z.get()
# -
# # Encrypted comparison
# It is also possible to perform private comparisons between private values. We rely here on the SecureNN protocol, the details of which can be found [here](https://eprint.iacr.org/2018/442.pdf). The result of the comparison is also a private shared tensor.
x = torch.tensor([25]).share(bob,alice, crypto_provider=crypto_provider)
y = torch.tensor([5]).share(bob,alice, crypto_provider=crypto_provider)
z = x > y
z.get()
z = x <= y
z.get()
z = x == y
z.get()
z = x == y + 20
z.get()
# You can also perform max operations
x = torch.tensor([2, 3, 4, 1]).share(bob,alice, crypto_provider=crypto_provider)
x.max().get()
x = torch.tensor([[2, 3], [4, 1]]).share(bob,alice, crypto_provider=crypto_provider)
max_values = x.max(dim=0)
max_values.get()
# # Congratulations!!! - Time to Join the Community!
#
# Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
#
# ### Star PySyft on GitHub
#
# The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### Join our Slack!
#
# The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
#
# ### Join a Code Project!
#
# The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue".
#
# - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
# - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
#
# ### Donate
#
# If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
#
# [OpenMined's Open Collective Page](https://opencollective.com/openmined)
| examples/tutorials/Part 09 - Intro to Encrypted Programs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="title_ID"></a>
# # JWST Pipeline Validation Testing Notebook: Calwebb_Spec3, Resample step
#
# <span style="color:red"> **Instruments Affected**</span>: MIRI
#
# Tested on MIRI Simulated data
#
# ### Table of Contents
# <div style="text-align: left">
#
# <br> [Introduction](#intro_ID) <br> [Imports](#imports_ID) <br> [Run JWST Pipeline](#pipeline_ID) <br> [About This Notebook](#about_ID) <br>
#
#
# </div>
# <a id="intro_ID"></a>
# # Introduction
#
#
# This test is designed to test the resample step in the calwebb_spec3 pipeline. This step takes the dither positions in an association table and combines them into 1 output product. Resample applies the distortion correction using the drizzling algorithm (as defined in the DrizzlePac handbook) and combines the listed files. For more information on the pipeline step visit the links below.
#
# Step description: https://jwst-pipeline.readthedocs.io/en/latest/jwst/resample/main.html
#
# Pipeline code: https://github.com/spacetelescope/jwst/tree/master/jwst/resample
#
# The data for this test were created with the MIRI Data Simulator, and the documentation for that code can be found here: http://miri.ster.kuleuven.be/bin/view/Public/MIRISim_Public
#
#
# ### Calibration WG Requested Algorithm:
#
# A short description and link to the page: https://outerspace.stsci.edu/display/JWSTCC/Vanilla+Resample+Slit+Spectra
#
#
# ### Defining Terms
# Definition of terms or acronymns.
#
# JWST: James Webb Space Telescope
#
# MIRI: Mid-Infrared Instrument
#
# MIRISim: MIRI Data Simulator
#
#
#
# [Top of Page](#title_ID)
# Create a temporary directory to hold notebook output, and change the working directory to that directory.
from tempfile import TemporaryDirectory
import os
data_dir = TemporaryDirectory()
os.chdir(data_dir.name)
print(data_dir)
#
# <a id="imports_ID"></a>
# # Imports
# The following packages will need to be imported for the scripts to work.
#
#
# * astropy.io for opening files
# * ci_watson.artifactory_helpers to read in data from artifactory
# * jwst.datamodels for opening files as a JWST Datamodel
# * jwst.pipeline to run the pipeline step/module
# * jwst.associations to create association table
# * numpy for calculations
# * matplotlib.pyplot.plt to generate plot
# * os for path information
# * photutils for star finding and aperture photometry
# * regtest to retrieve data from artifactory needed to run notebook
#
#
# [Top of Page](#title_ID)
# + nbpresent={"id": "45177853-942e-4949-9e30-f544d70ef5f4"}
from astropy.io import ascii, fits
from astropy.modeling import models, fitting
from ci_watson.artifactory_helpers import get_bigdata
import glob
import jwst
from jwst.pipeline import Detector1Pipeline, Spec2Pipeline, Spec3Pipeline
from jwst import associations, datamodels
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from jwst.associations.lib.rules_level2_base import DMSLevel2bBase
from jwst.associations.asn_from_list import asn_from_list
from jwst.regtest.regtestdata import RegtestData
import matplotlib.pyplot as plt
import numpy as np
import os
# -
# ## Read in uncal data from artifactory
#
# +
print("Downloading input files")
#This readnoise file is needed for use with simulated data which has higher readnoise than actual data.
readnoise = get_bigdata('jwst_validation_notebooks',
'validation_data',
'jump',
'jump_miri_test',
'jwst_mirisim_readnoise.fits')
Slitfile1 = get_bigdata('jwst_validation_notebooks',
'validation_data',
'calwebb_spec2',
'spec2_miri_test',
'miri_lrs_slit_pt_nod1_v2.3.fits')
Slitfile2 = get_bigdata('jwst_validation_notebooks',
'validation_data',
'calwebb_spec2',
'spec2_miri_test',
'miri_lrs_slit_pt_nod2_v2.3.fits')
files = [Slitfile1, Slitfile2]
print("Finished Downloads")
# -
# <a id="pipeline_ID"></a>
# # Run JWST Pipeline
#
# First we run the data through the Detector1() pipeline to convert the raw counts into slopes. This should use the calwebb_detector1.cfg file. The output of this stage will then be run through the Spec2Pipeline. Extract_1d is the final step of this pipeline stage, so we will just run through the whole pipeline.
#
# [Top of Page](#title_ID)
#
# ### Detector 1 Pipeline
# +
# Run the calwebb_detector1 pipeline
# set up pipeline parameters
rej_thresh=10.0 # rejection threshold for jump step
det1 = []
# Run pipeline on both files
for ff in files:
d1 = Detector1Pipeline.call(ff, steps={'jump':{'rejection_threshold':rej_thresh, 'override_readnoise': readnoise},
'ramp_fit': {'override_readnoise': readnoise}, 'refpix': {'skip': True}},
save_results=True)
det1.append(d1)
print(det1)
# -
print(jwst.__version__)
# ### Spec2 Pipeline
#
# For the Spec2Pipeline we have to produce an association telling the pipeline that the nods should be used as each other's background. Then we call the pipeline with default parameters & settings.
# +
asn_files = [det1[0].meta.filename, det1[1].meta.filename]
asn = asn_from_list(asn_files, rule=DMSLevel2bBase, meta={'program':'test', 'target':'bd60', 'asn_pool':'test'})
# now add the opposite nod as background exposure:
asn['products'][0]['members'].append({'expname': 'miri_lrs_slit_pt_nod2_v2.3_rate.fits', 'exptype':'background'})
asn['products'][1]['members'].append({'expname': 'miri_lrs_slit_pt_nod1_v2.3_rate.fits', 'exptype':'background'})
# write this out to a json file
with open('lrs-slit-test_spec2asn.json', 'w') as fp:
fp.write(asn.dump()[1])
# -
sp2 = Spec2Pipeline.call('lrs-slit-test_spec2asn.json', save_results=True)
# +
x1ds = glob.glob('*_x1d.fits')
fig = plt.figure(figsize=[10,5])
for xx in x1ds:
sp = datamodels.open(xx)
lab = sp.meta.filename
plt.plot(sp.spec[0].spec_table['WAVELENGTH'], sp.spec[0].spec_table['FLUX'], label=lab)
plt.title('Extracted spectra', fontsize='x-large')
plt.xlabel('wavelength (micron)', fontsize='large')
plt.legend()
plt.ylabel('flux (Jy)', fontsize='large')
fig.show()
# -
# ### Spec3 Pipeline
#
# Next we run the Spec3 Pipeline. This also takes an association file as input, which lists the associated science files that will need to be combined into a single product.
#
# [Top of Page](#title_ID)
#
# +
# use asn_from_list to create association table
calfiles = glob.glob('*_cal.fits')
asn = asn_from_list(calfiles, rule=DMS_Level3_Base, product_name='lrs_slit_pipetest_combined.fits')
# dump association table to a .json file for use in image3
with open('lrs_slit_pipetest_stage3.json', 'w') as fp:
fp.write(asn.dump()[1])
print(asn)
# -
sp3 = Spec3Pipeline.call('lrs_slit_pipetest_stage3.json', save_results=True)
# ## Plots & tests
#
# We produce some plots below to check the output of the resample step. What are we looking for?
#
# * The combined 2D image should be 387 (rows) x 62 (columns) in size.
# * The combined resampled image (the positive trace) should be centred in the image, flanked by 2 negative traces which correspond to the subtracted complementary traces from the individual nod images. We check in the plot below that the trace is centred.
#
# * In the extracted products, the wavelength calibration for all 3 spectra should match - there should be no systematic offset.
#
# There aren't numerical tests for all these checks so they do require some visual inspection.
# +
s2d3_file = glob.glob('*pipetest*_s2d.fits')
s2d3 = datamodels.open(s2d3_file[0])
x1d3_file = glob.glob('*pipetest*_x1d.fits')
x1d3 = datamodels.open(x1d3_file[0])
assert np.shape(s2d3.data) == (387, 62), "Spec3 output does not have the expected shape. Its shape is {0}".format(np.shape(s2d3.data))
# +
# the spec2 resampled files
s2d2_file = glob.glob('*nod*_s2d.fits')
print(s2d2_file)
fig1, ax = plt.subplots(ncols=3, nrows=1, figsize=[15,8])
for ii, ff in enumerate(s2d2_file):
s2d2 = datamodels.open(ff)
ax[ii].imshow(s2d2.data, origin='lower', interpolation='None')
ax[ii].set_title(s2d2.meta.filename)
ax[2].imshow(s2d3.data, origin='lower', aspect='auto', interpolation='None')
ax[2].set_title('Combined resampled image (Spec3)')
# +
extracted_files = glob.glob('*_x1d.fits')
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=[10,6])
for ef in extracted_files:
x1d = datamodels.open(ef)
ax.plot(x1d.spec[0].spec_table['WAVELENGTH'], x1d.spec[0].spec_table['FLUX'], label=x1d.meta.filename)
ax.legend()
ax.set_title('Comparison of extracted spectra (spec 2 and spec 3)')
# -
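# As a rough numerical aid to the visual comparison above (our addition; the notebook's official check here is visual), we print the wavelength range covered by each extracted spectrum and confirm that they agree.
# +
for ef in extracted_files:
    x1d = datamodels.open(ef)
    wave = x1d.spec[0].spec_table['WAVELENGTH']
    print(x1d.meta.filename, 'wavelength range: {:.3f} - {:.3f} micron'.format(wave.min(), wave.max()))
# -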
# <a id="about_ID"></a>
# ## About this Notebook
# **Author:** <NAME>, <EMAIL>, ESA and INS/STScI
# <br>**Updated On:** May 28th 2021 (first version)
# [Top of Page](#title_ID)
# <img style="float: right;" src="./stsci_pri_combo_mark_horizonal_white_bkgd.png" alt="stsci_pri_combo_mark_horizonal_white_bkgd" width="200px"/>
| jwst_validation_notebooks/resample/jwst_resample_miri_test/jwst_resample_miri_lrs_slit.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell
-- language: haskell
-- name: haskell
-- ---
-- +
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE DeriveGeneric #-}
import Data.ByteString.Lazy (readFile, writeFile)
import Lens.Micro
import Data.Default.Class
import Data.Aeson
import Data.Aeson.Encode.Pretty
import Lens.Micro.Aeson
import Data.Monoid ((<>))
import Data.Text (Text, pack, unpack)
import GHC.Generics
import Network.HTTP.Req
import Prelude hiding (readFile, writeFile)
import System.Process
-- +
data Project = Project
{ owner :: Text
, repo :: Text
, rev :: Text
, sha256 :: Text
} deriving (Show, Generic)
instance FromJSON Project
instance ToJSON Project
-- -
extractProject :: Value -> Text -> Maybe Project
extractProject versions name = do
prj <- versions ^? key name
case fromJSON prj of
Error _ -> Nothing
Success p -> Just p
r :: (MonadHttp m, FromJSON a) => Project -> Text -> m (JsonResponse a)
r project branch = req GET
( https "api.github.com"
/: "repos"
/: owner project
/: repo project
/: "branches"
/: branch
) NoReqBody jsonResponse (header "User-Agent" "vaibhavsagar")
getRev :: Project -> Text -> IO (Maybe Text)
getRev project branch = do
res <- responseBody <$> runReq def (r project branch) :: IO Value
return $ res ^? key "commit" . key "sha" . _String
buildURL :: Project -> Text
buildURL project
= "https://github.com/"
<> owner project <> "/"
<> repo project <> "/"
<> "archive" <> "/"
<> rev project <> ".tar.gz"
getSha256 :: Text -> Bool -> IO Text
getSha256 url doUnpack = let
option = if doUnpack then ["--unpack"] else []
in pack . init <$> readProcess "nix-prefetch-url" (option ++ [unpack url]) ""
update :: FilePath -> Text -> Text -> Bool -> IO ()
update filename projectName branchName doUnpack = do
Just versions <- decode <$> readFile filename :: IO (Maybe Value)
let (Just project) = extractProject versions projectName
Just latestRev <- getRev project branchName
latestSha256 <- getSha256 (buildURL project { rev = latestRev }) doUnpack
let project' = project { rev = latestRev, sha256 = latestSha256 }
let versions' = versions & key projectName .~ toJSON project'
writeFile filename (encodePretty' defConfig { confIndent = Spaces 2, confCompare = compare } versions')
update "versions.json" "nixpkgs" "nixos-18.03" False
| hs-updater/Updater.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import imageio
import cv2
import subprocess
import librosa
import librosa.display
import soundfile as sf
import os
import torch
from torchvision import transforms
from inference.Inferencer import Inferencer
from models.PasticheModel import PasticheModel
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Audio
from PIL import Image
from pathlib import Path
import tqdm.notebook as tq
import pandas as pd
pd.set_option('display.max_rows', 500)
from IPython.core.display import HTML, display
def rm_out_padding(): display(HTML("<style>div.output_subarea { padding:unset;}</style>"))
rm_out_padding()
# -
audio_data = 'audio/disclosure.wav'
z, sr = librosa.load(audio_data, offset = 100, duration = 20)
harm, perc = librosa.effects.hpss(z)
hop_length = 735
Nfft = 2048
n_mels = 100
z = librosa.feature.melspectrogram(perc, sr=sr, n_fft=Nfft, hop_length=hop_length, n_mels=n_mels)
z= librosa.power_to_db(z)
z = (z - z.min()) / (z.max() - z.min())
def mel_grams(z):
plt.figure(figsize=(15, 25))
librosa.display.specshow(z,
x_axis="time",
y_axis="mel",
sr=sr,
hop_length=hop_length)
plt.colorbar(format="%+2.f dB")
plt.show()
mel_grams(z)
from scipy.ndimage import gaussian_filter
filt = gaussian_filter(z, sigma=[0, 2])
s = filt.std(axis=1)
print(s.argsort()[::-1][:5])
filt.shape
def rolling_window(a, window):
    # pad the array (reflect mode): the last axis by window-1 at the start, the other axes by 1
    pad = np.ones(len(a.shape), dtype=np.int32)
    pad[-1] = window-1
    pad = list(zip(pad, np.zeros(len(a.shape), dtype=np.int32)))
    a = np.pad(a, pad, mode='reflect')
    # build a strided view whose last axis is a sliding window over the (padded) last axis
    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
# rolling var along last axis
rol_std = np.var(rolling_window(filt, 150), axis=-1)
rol_std.shape
amax = np.argmax(rol_std, axis=0)
print(amax)
print(amax.shape)
beat = [z[a, i] for i, a in enumerate(amax)]
plt.plot(beat)
plt.show()
mel_grams(filt)
| prototype/test_extract_best_freqs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
print('Hello, world!')
a = 'Hello, world'
print(a)
b = "Hello,"
c = 'world!'
d = b + " " + c
print(d)
e = d*2
print(e)
e[0:-1:1]
print(e, e[:], e[::], e[0:-1:1])
print(e[0:], e[0:-1], e[::2])
| notes/introduction/01_strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import math
# +
N_st = [12, 13, 14, 19, 22, 22, 24, 29, 32, 38, 44, 51, 61, 72, 82, 90, 101, 110, 122, 147, 167, 182, 199, 220, 243, 270, 305, 344, 379, 435, 487, 546, 594, 680, 759, 854, 947, 1065, 1201, 1322, 1505, 1683, 1887, 2143, 2405, 2672, 2958, 3328, 3758, 4181, 4658, 5215, 5800, 6502, 7268, 8099, 9110, 10215, 11464, 12754, 14250, 15904, 17732, 19753, 22070, 24583, 27386, 30471, 33949, 37804]
A = 48.8
N_FOV_ls = []
for i in range(0, len(N_st)):
N_fov = N_st[i] * ((1-np.cos(A/2))/2)
N_FOV_ls.append(N_fov)
E_cent = 1. # picture quality is not enough for a better determination
N_pix = 1472
# -
# ## Error vs. different number of stars for different magnitude
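#
# The error model implemented in the cells below (our summary of the code, stated here for clarity) is
#
# $$ E_{cb} = \frac{A \, E_{cent}}{N_{pix}\sqrt{N_{FOV}}}, \qquad E_{r} = \frac{1}{\sqrt{N_{FOV}}}\arctan\!\left(\frac{E_{cent}}{0.3825\,N_{pix}}\right), $$
#
# where $E_{cent}$ is the centroiding error in pixels, $N_{pix}$ the detector resolution, $A$ the field of view and $N_{FOV}$ the number of stars in the field of view. $E_{cb}$ is converted to arcseconds and $E_r$ to arcminutes before plotting.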
# +
Mer = []
for i in range(0, len(N_FOV_ls)):
    E_r = np.arctan(E_cent / (0.3825 * N_pix)) * ( 1 / (np.sqrt(N_FOV_ls[i])))
Mer.append(float(np.rad2deg(E_r)*60))
Mecb = []
for k in range (0, len(N_FOV_ls)):
E_cb = (A * E_cent) / (N_pix * np.sqrt(N_FOV_ls[k]))
Mecb.append(E_cb*3600)
# +
fig1, ax1 = plt.subplots()
ax1.plot(N_FOV_ls, Mecb)
ax1.set_ylabel("Error [arcsec]")
ax1.set_xlabel("Star number")
ax1.set_title("Cross-boresight error vs. number of stars regarding Mv")
plt.ylim([0,35])
plt.xticks(np.arange(1,8,0.1))
plt.plot([518, 518], [0,35], marker="o", color="red")
plt.plot([546, 546], [0,35], marker="o", color="green")
fig2, ax2 = plt.subplots()
ax2.plot(N_FOV_ls, Mer)
ax2.set_ylabel("Error [arcmin]")
ax2.set_xlabel("Star number")
ax2.set_title("Roll error vs. number of stars regarding Mv")
plt.ylim([0,1000])
plt.plot([518, 518], [0,1000], marker="o", color="red")
plt.plot([546, 546], [0,1000], marker="o", color="green")
# -
# ## Error of position vs. number of pixels
N_m4 = 546 #for magnitude 4.1
A = 48.8
N_fov = N_m4 * ( (1-np.cos(A/2)) / 2 )
E_cent = 1 # picture quality is not enough for a better determination
N_pix = list(range(480, 2800, 16))
# +
Mer = []
for i in range(0, len(N_pix)):
E_r = np.arctan(E_cent / (0.3825* (N_pix[i]))) * ( 1 / (np.sqrt(N_fov)))
Mer.append(float(np.rad2deg(E_r)*60))
Mecb = []
for k in range (0, len(N_pix)):
E_cb = (A * E_cent) / (N_pix[k] * np.sqrt(N_fov))
Mecb.append(E_cb*3600)
# +
fig1, ax1 = plt.subplots()
ax1.plot(N_pix, Mecb)
ax1.set_ylabel("Error [arcsec]")
ax1.set_xlabel("Pixel number")
ax1.set_title("Cross boresight error vs number of pixels")
plt.plot([1024,1024], [0,45], marker="o", color="red")
plt.plot([1472, 1472], [0,45], marker="o", color="green")
fig2, ax2 = plt.subplots()
ax2.plot(N_pix, Mer)
ax2.set_ylabel("Error [arcmin]")
ax2.set_xlabel("Pixel number")
ax2.set_title("Roll error vs. number of pixel")
plt.plot([1024,1024], [0,8], marker="o", color="red")
plt.plot([1472, 1472], [0,8], marker="o", color="green")
# -
# ## Error vs field of view
# +
N_st = 854 #for magnitude 4.5
A = list(range(10,60)) #FOV from 10 to 60°
N_FOV_ls = []
for i in range(0, len(A)):
N_fov = N_st * ((1-np.cos(A[i]/2))/2)
N_FOV_ls.append(N_fov)
E_cent = 1 # picture quality is not enough for a better determination
N_pix = 1472
# +
Mer = []
for i in range(0, len(N_FOV_ls)):
    E_r = np.arctan(E_cent / (0.3825 * N_pix)) * ( 1 / (np.sqrt(N_FOV_ls[i])))
Mer.append(float(np.rad2deg(E_r)*60))
Mecb = []
for k in range (0, len(N_FOV_ls)):
E_cb = (A[k] * E_cent) / (N_pix * np.sqrt(N_FOV_ls[k]))
Mecb.append(E_cb*3600)
# +
fig1, ax1 = plt.subplots()
ax1.plot(A, Mecb)
ax1.set_ylabel("Error [arcsec]")
ax1.set_xlabel("Field of view [deg]")
ax1.set_title("Cross boresight error vs. field of view")
fig2, ax2 = plt.subplots()
ax2.plot(A, Mer)
ax2.set_ylabel("Error [arcmin]")
ax2.set_xlabel("Field of view [deg]")
ax2.set_title("Roll error vs. field of view")
# -
# ## Error function of both number of stars and pixels
# +
N_st = [12, 13, 14, 19, 22, 22, 24, 29, 32, 38, 44, 51, 61, 72, 82, 90, 101, 110, 122, 147, 167, 182, 199, 220, 243, 270, 305, 344, 379, 435, 487, 546, 594, 680, 759, 854, 947, 1065, 1201, 1322, 1505, 1683, 1887, 2143, 2405, 2672, 2958, 3328, 3758, 4181, 4658, 5215, 5800, 6502, 7268, 8099, 9110, 10215, 11464, 12754, 14250, 15904, 17732, 19753, 22070, 24583, 27386, 30471, 33949, 37804]
A = 48.8
N_FOV_ls = []
for i in range(0, len(N_st)):
N_fov = N_st[i] * ((1-np.cos(A/2))/2)
N_FOV_ls.append(N_fov)
E_cent = 1 #picture quality not enough fo better determination
N_pix = list(range(480, 2800, 216))
# -
Mecb = np.ones((len(N_pix), len(N_FOV_ls)))
Mer = np.ones((len(N_pix), len(N_FOV_ls)))
for i in range(0, len(N_pix)) :
for j in range(0, len(N_FOV_ls)) :
E_cb = (A * E_cent) / ( N_pix[i] * np.sqrt(N_FOV_ls[j]))
E_r = np.arctan(E_cent / (0.3825 * N_pix[i])) * ( 1 / (np.sqrt(N_FOV_ls[j])))
Mecb[i][j] = float(E_cb*3600)
Mer[i][j] = float(np.rad2deg(E_r)*60)
# +
fig3, ax3 = plt.subplots()
for i in range( 0, len(N_pix)) :
ax3.plot(N_FOV_ls, Mecb[i], label = 'RES_{0}'.format(N_pix[i]))
ax3.set_ylabel("Error [arcsec]")
ax3.set_xlabel("Star number")
ax3.set_title("Cross boresight error vs. number of stars regarding Mv vs. Resolution")
ax3.legend()
plt.ylim([0,45])
fig4, ax4 = plt.subplots()
for i in range( 0, len(N_pix)) :
ax4.plot(N_FOV_ls, Mer[i], label = 'RES_{0}'.format(N_pix[i]))
ax4.set_ylabel("Error [arcmin]")
ax4.set_xlabel("Star number")
ax4.set_title("Roll error vs. number of stars regarding Mv vs. Resolution")
ax4.legend()
plt.ylim([0, 4])
# -
A= 48.8
N_st = 546
N_fov = N_st * ((1 - np.cos(A/2)) / 2)
E_r = np.arctan(1 / (0.3825 * 1472)) * ( 1 / (np.sqrt(N_fov)))
np.rad2deg(E_r)*3600
| T_CAM_ICRS/Error_of_ position/Error.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Week 4 - Using Natural Language Processing
# This week follows chapter 3 from the text.
#
# ## Page 118
# +
import requests
data = requests.get('http://www.gutenberg.org/cache/epub/8001/pg8001.html')
content = data.content
# the text that prints is a little different because of book version differences
print(content[1163:2200], '\n')
# -
# ## Pages 118-119
# +
import re
from bs4 import BeautifulSoup
def strip_html_tags(text):
soup = BeautifulSoup(text, 'html.parser')
[s.extract() for s in soup(['iframe', 'script'])]
stripped_text = soup.get_text()
stripped_text = re.sub(r'[\r|\n|\r\n]+', '\n', stripped_text)
return stripped_text
clean_content = strip_html_tags(content)
print(clean_content[1163:2045], '\n')
# -
# ## Tokenizer
# A shorter sample text than in the book, but this is the same core code, which comes after showing some data about the Alice corpus.
# +
import nltk
#nltk.download('gutenberg')
from nltk.corpus import gutenberg
from pprint import pprint
import numpy as np
## SENTENCE TOKENIZATION
# loading text corpora
alice = gutenberg.raw(fileids='carroll-alice.txt')
sample_text = 'We will discuss briefly about the basic syntax,\
structure and design philosophies. \
There is a defined hierarchical syntax for Python code which you should remember \
when writing code! Python is a really powerful programming language!'
print('Sample text: ', sample_text, '\n')
# -
# ### Output a bit of Alice in Wonderland
# Total characters in Alice in Wonderland
print('Length of alice: ', len(alice))
# First 100 characters in the corpus
print('First 100 chars of alice: ', alice[0:100], '\n')
#
# ## Default Sentence Tokenizer
default_st = nltk.sent_tokenize
alice_sentences = default_st(text=alice)
sample_sentences = default_st(text=sample_text)
print('Default sentence tokenizer')
print('Total sentences in sample_text:', len(sample_sentences))
print('Sample text sentences :-')
pprint(sample_sentences)
print('\nTotal sentences in alice:', len(alice_sentences))
print('First 5 sentences in alice:-')
pprint(alice_sentences[0:5])
# ## Other Languages Sentence Tokenization
# +
#nltk.download('europarl_raw')
from nltk.corpus import europarl_raw
german_text = europarl_raw.german.raw(fileids='ep-00-01-17.de')
print('Other language tokenization')
# Total characters in the corpus
print(len(german_text))
# First 100 characters in the corpus
print(german_text[0:100])
german_sentences_def = default_st(text=german_text, language='german')
# loading german text tokenizer into a PunktSentenceTokenizer instance
german_tokenizer = nltk.data.load(resource_url='tokenizers/punkt/german.pickle')
german_sentences = german_tokenizer.tokenize(german_text)
# verify the type of german_tokenizer
# should be PunktSentenceTokenizer
print('German tokenizer type:', type(german_tokenizer))
# check if results of both tokenizers match
# should be True
print(german_sentences_def == german_sentences)
# print first 5 sentences of the corpus
for sent in german_sentences[0:5]:
print(sent)
print('\n')
# -
# ## Using Punkt Tokenizer for Sentence Tokenization
print('Punkt tokenizer')
punkt_st = nltk.tokenize.PunktSentenceTokenizer()
sample_sentences = punkt_st.tokenize(sample_text)
pprint(np.array(sample_sentences))
print('\n')
#
# ## Using RegexpTokenizer for Sentence Tokenization
print('Regex tokenizer')
SENTENCE_TOKENS_PATTERN = r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z]\.)(?<=\.|\?|\!)\s'
regex_st = nltk.tokenize.RegexpTokenizer(
pattern=SENTENCE_TOKENS_PATTERN,
gaps=True)
sample_sentences = regex_st.tokenize(sample_text)
# again, the output is different because the sample sentence is different
pprint(sample_sentences)
print('\n')
# ## Word Tokenization
sentence = "The brown fox wasn't that quick and he couldn't win the race"
# default word tokenizer
print('Word tokenizer')
default_wt = nltk.word_tokenize
words = default_wt(sentence)
print(words, '\n')
# +
print('Treebank tokenizer')
treebank_wt = nltk.TreebankWordTokenizer()
words = treebank_wt.tokenize(sentence)
print(words, '\n')
# toktok tokenizer
print('TokTok tokenizer')
from nltk.tokenize.toktok import ToktokTokenizer
tokenizer = ToktokTokenizer()
words = tokenizer.tokenize(sample_text)
print(np.array(words), '\n')
# regex word tokenizer
print('RegEx word tokenizer')
TOKEN_PATTERN = r'\w+'
regex_wt = nltk.RegexpTokenizer(pattern=TOKEN_PATTERN,
gaps=False)
words = regex_wt.tokenize(sentence)
print(words)
GAP_PATTERN = r'\s+'
regex_wt = nltk.RegexpTokenizer(pattern=GAP_PATTERN,
gaps=True)
words = regex_wt.tokenize(sentence)
print(words)
word_indices = list(regex_wt.span_tokenize(sentence))
print(word_indices)
print([sentence[start:end] for start, end in word_indices], '\n')
# derived regex tokenizers
print("Derived RegEx tokenizers")
wordpunkt_wt = nltk.WordPunctTokenizer()
words = wordpunkt_wt.tokenize(sentence)
print(words, '\n')
# whitespace tokenizer
print('Whitespace Tokenizer')
whitespace_wt = nltk.WhitespaceTokenizer()
words = whitespace_wt.tokenize(sentence)
print(words, '\n')
# -
# ## Pages 132 - 134
# +
print('Robust tokenizer - NLTK')
def tokenize_text(text):
sentences = nltk.sent_tokenize(text)
word_tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
return word_tokens
sents = tokenize_text(sample_text)
print(np.array(sents),'\n')
words = [word for sentence in sents for word in sentence]
print(np.array(words), '\n')
print('spaCy...')
import spacy
nlp = spacy.load('en_core_web_sm', parse=True, tag=True, entity=True)
text_spacy = nlp(sample_text)
print(np.array(list(text_spacy.sents)), '\n')
sent_words = [[word for word in sent] for sent in sents]
print(np.array(sent_words), '\n')
# in spacy documentation, this is usually written as [token for token in doc]
words = [word for word in text_spacy]
print(np.array(words), '\n')
# -
# ## Page 135
# +
import unicodedata
def remove_accented_chars(text):
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore').decode('utf-8', 'ignore')
return text
print(remove_accented_chars('Sòme Åccentềd cliché façades'))
# -
# # Expanding Contractions
# Starting on page 136
# +
import nltk
from contractions import CONTRACTION_MAP
import re
def expand_contractions(sentence, contraction_mapping=CONTRACTION_MAP):
contractions_pattern = re.compile('({})'.format('|'.join(contraction_mapping.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contraction_mapping.get(match)\
if contraction_mapping.get(match)\
else contraction_mapping.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
    expanded_text = contractions_pattern.sub(expand_match, sentence)
    expanded_text = re.sub("'", "", expanded_text)
return expanded_text
print('Expanding contractions:')
print(expand_contractions("Y'all can't expand contractions I'd think"), '\n')
# -
# # Removing special characters, page 138
# +
def remove_special_characters(text, remove_digits =False):
pattern = r'[^a-zA-Z0-9\s]' if not remove_digits else r'[^a-zA-Z\s]'
text = re.sub(pattern, '', text)
return text
print('Remove special characters:')
print(remove_special_characters('Well this was fun! What do you think? 123#@!', remove_digits=True), '\n')
# -
# ## Case Conversion
print('Case conversions:')
# lowercase
text = 'The quick brown fox jumped over The Big Dog'
print(text.lower())
# uppercase
print(text.upper())
# title case
print(text.title(), '\n')
# ## Correcting repeating characters - pages 139-140
old_word = 'finalllyyy'
repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
match_substitution = r'\1\2\3'
step = 1
while True:
    # remove one repeated character
new_word = repeat_pattern.sub(match_substitution, old_word)
if new_word != old_word:
print('Step: {} Word: {}'.format(step, new_word))
step += 1 #update step
# update old word to last substituted state
old_word = new_word
continue
else:
print('Final word: ', new_word, '\n')
break
# ## Pages 140-141 - Wordnet
print('Wordnet:')
from nltk.corpus import wordnet
old_word = 'finalllyyy'
repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
match_substitution = r'\1\2\3'
step = 1
while True:
# check for semantically correct words
if wordnet.synsets(old_word):
print('Final correct word: ', old_word, '\n')
break
    # remove one repeated character
new_word = repeat_pattern.sub(match_substitution, old_word)
if new_word != old_word:
print('Step: {} Word: {}'.format(step, new_word))
step += 1 # update step
# update old word to last substituted state
old_word = new_word
continue
else:
print('Final word: ', new_word, '\n')
break
# ## Pages 141-142 - Remove repeated characters
# +
def remove_repeated_characters(tokens):
repeat_pattern = re.compile(r'(\w*)(\w)\2(\w*)')
match_substitution = r'\1\2\3'
def replace(old_word):
if wordnet.synsets(old_word):
return old_word
new_word = repeat_pattern.sub(match_substitution, old_word)
return replace(new_word) if new_word != old_word else new_word
correct_tokens = [replace(word) for word in tokens]
return correct_tokens
sample_sentence = 'My schooool is realllllyyy amaaazingggg'
correct_tokens = remove_repeated_characters(nltk.word_tokenize(sample_sentence))
print(' '.join(correct_tokens), '\n')
# -
# ## Spelling Corrector Part 1 - Starting on Page 143
# + tags=["outputPrepend"]
import re, collections
def tokens(text):
"""
Get all words from the corpus
"""
return re.findall('[a-z]+', text.lower())
WORDS = tokens(open('big.txt').read())
WORD_COUNTS = collections.Counter(WORDS)
# top 10 words in corpus
print('Top 10 words in corpus:')
print(WORD_COUNTS.most_common(10), '\n')
def edits0(word):
"""
Return all strings that are zero edits away
from the input word (i.e., the word itself).
"""
return {word}
def edits1(word):
"""
Return all strings that are one edit away
from the input word.
"""
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def splits(word):
"""
Return a list of all possible (first, rest) pairs
that the input word is made of.
"""
return [(word[:i], word[i:])
for i in range(len(word)+1)]
pairs = splits(word)
deletes = [a+b[1:] for (a, b) in pairs if b]
transposes = [a+b[1]+b[0]+b[2:] for (a, b) in pairs if len(b) > 1]
replaces = [a+c+b[1:] for (a, b) in pairs for c in alphabet if b]
inserts = [a+c+b for (a, b) in pairs for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"""Return all strings that are two edits away
from the input word.
"""
return {e2 for e1 in edits1(word) for e2 in edits1(e1)}
def known(words):
"""
Return the subset of words that are actually
in our WORD_COUNTS dictionary.
"""
return {w for w in words if w in WORD_COUNTS}
print('Input words:')
# input word
word = 'fianlly'
# zero edit distance from input word
print(edits0(word))
# returns null set since it is not a valid word
print(known(edits0(word)))
# one edit distance from input word
print(edits1(word))
# get correct words from above set
print(known(edits1(word)))
# two edit distances from input word
print(edits2(word))
# get correct words from above set
print(known(edits2(word)))
candidates = (known(edits0(word)) or known(edits1(word)) or known(edits2(word)) or [word])
print(candidates, '\n')
# -
# ## Spelling Correction Part 2
# +
def correct(word):
'''
Get the best correct spelling for the input word
:param word: the input word
:return: best correct spelling
'''
# priority is for edit distance 0, then 1, then 2
    # else defaults to the input word itself.
candidates = (known(edits0(word)) or known(edits1(word)) or known(edits2(word)) or [word])
return max(candidates, key=WORD_COUNTS.get)
print(correct('fianlly'))
print(correct('FIANLLY'), '\n')
def correct_match(match):
'''
Spell-correct word in match, and preserve proper upper/lower/title case.
:param match: word to be corrected
:return: corrected word
'''
word = match.group()
def case_of(text):
'''
Return the case-function appropriate for text: upper/lower/title/as-is
:param text: The text to be acted on
:return: Correct text
'''
return (str.upper if text.isupper() else
str.lower if text.islower() else
str.title if text.istitle() else
str)
return case_of(word)(correct(word.lower()))
def correct_text_generic(text):
'''
Correct all the words within a text, returning the corrected text
:param text: Text to be corrected
:return: Corrected text
'''
return re.sub('[a-zA-Z]+', correct_match, text)
print(correct_text_generic('fianlly'))
print(correct_text_generic('FIANLLY'), '\n')
print('TextBlob way (you may need to use pip to install textblob):')
from textblob import Word
w = Word('fianlly')
print(w.correct())
# check suggestions
print(w.spellcheck())
# another example
w = Word('flaot')
print(w.spellcheck())
# -
# ## Stem and Lem
# +
# porter stemmer
from nltk.stem import PorterStemmer
ps = PorterStemmer()
print('Porter stemmer:')
print(ps.stem('jumping'), ps.stem('jumps'), ps.stem('jumped'))
print(ps.stem('lying'))
print(ps.stem('strange'), '\n')
# lancaster stemmer
print('Lancaster stemmer:')
from nltk.stem import LancasterStemmer
ls = LancasterStemmer()
print(ls.stem('jumping'), ls.stem('jumps'), ls.stem('jumped'))
print(ls.stem('lying'))
print(ls.stem('strange'), '\n')
# regex stemmer
print('Regex stemmer:')
from nltk.stem import RegexpStemmer
rs = RegexpStemmer('ing$|s$|ed$', min=4)
print(rs.stem('jumping'), rs.stem('jumps'), rs.stem('jumped'))
print(rs.stem('lying'))
print(rs.stem('strange'), '\n')
# snowball stemmer
print('Snowball stemmer:')
from nltk.stem import SnowballStemmer
ss = SnowballStemmer("german")
print('Supported Languages:', SnowballStemmer.languages)
# autobahnen -> cars
# autobahn -> car
print(ss.stem('autobahnen'))
# springen -> jumping
# spring -> jump
print(ss.stem('springen'), '\n')
# lemmatization
print('WordNet lemmatization:')
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
# lemmatize nouns
print(wnl.lemmatize('cars', 'n'))
print(wnl.lemmatize('men', 'n'))
# lemmatize verbs
print(wnl.lemmatize('running', 'v'))
print(wnl.lemmatize('ate', 'v'))
# lemmatize adjectives
print(wnl.lemmatize('saddest', 'a'))
print(wnl.lemmatize('fancier', 'a'))
# ineffective lemmatization
print(wnl.lemmatize('ate', 'n'))
print(wnl.lemmatize('fancier', 'v'), '\n')
print('spaCy:')
import spacy
nlp = spacy.load('en_core_web_sm')
text = 'My system keeps crashing! his crashed yesterday, ours crashes daily'
def lemmatize_text(text):
text = nlp(text)
text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-'
else word.text for word in text])
return text
print(lemmatize_text(text))
# -
# ## Stop Words
# +
import re
import nltk
from nltk.tokenize.toktok import ToktokTokenizer
tokenizer = ToktokTokenizer()
stopword_list = nltk.corpus.stopwords.words('english')
# removing stopwords
def remove_stopwords(text, is_lower_case=False):
tokens = tokenizer.tokenize(text)
tokens = [token.strip() for token in tokens]
if is_lower_case:
filtered_tokens = [token for token in tokens if token not in stopword_list]
else:
filtered_tokens = [token for token in tokens if token.lower() not in stopword_list]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
print(remove_stopwords('The, and, if are stopwords, computer is not'))
# -
# ## POS Tagging - Starting on Page 166
# +
sentence = 'US unveils world\'s most powerful supercomputer, beats China.'
import pandas as pd
import spacy
from pprint import pprint
print('spaCy:')
nlp = spacy.load('en_core_web_sm')
sentence_nlp = nlp(sentence)
# POS tagging with spaCy
spacy_pos_tagged = [(word, word.tag_, word.pos_) for word in sentence_nlp]
# the .T in the book transposes rows and columns, but it's harder to read
pprint(pd.DataFrame(spacy_pos_tagged, columns=['Word', 'POS tag', 'Tag type']))
# POS tagging with nltk
print('\n', 'NLTK')
import nltk
# only need the following two lines one time
#nltk.download('averaged_perceptron_tagger')
#nltk.download('universal_tagset')
nltk_pos_tagged = nltk.pos_tag(nltk.word_tokenize(sentence), tagset='universal')
pprint(pd.DataFrame(nltk_pos_tagged, columns=['Word', 'POS tag']))
print('\n', 'Treebank:')
# you only need the next line once
# nltk.download('treebank')
from nltk.corpus import treebank
data = treebank.tagged_sents()
train_data = data[:3500]
test_data = data[3500:]
print(train_data[0])
print('\n', 'Default tagger:')
# default tagger
from nltk.tag import DefaultTagger
dt = DefaultTagger('NN')
# accuracy on test data
print(dt.evaluate(test_data))
# tagging our sample headline
print(dt.tag(nltk.word_tokenize(sentence)))
print('\n', 'Regex tagger')
# regex tagger
from nltk.tag import RegexpTagger
# define regex tag patterns
patterns = [
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # simple past
(r'.*es$', 'VBZ'), # 3rd singular present
(r'.*ould$', 'MD'), # modals
(r'.*\'s$', 'NN$'), # possessive nouns
(r'.*s$', 'NNS'), # plural nouns
(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'.*', 'NN') # nouns (default) ...
]
rt = RegexpTagger(patterns)
# accuracy on test data
print(rt.evaluate(test_data))
# tagging our sample headline
print(rt.tag(nltk.word_tokenize(sentence)))
print('\n', 'N Gram taggers')
## N gram taggers
from nltk.tag import UnigramTagger
from nltk.tag import BigramTagger
from nltk.tag import TrigramTagger
ut = UnigramTagger(train_data)
bt = BigramTagger(train_data)
tt = TrigramTagger(train_data)
# testing performance on unigram tagger
print(ut.evaluate(test_data))
print(ut.tag(nltk.word_tokenize(sentence)))
# testing performance of bigram tagger
print(bt.evaluate(test_data))
print(bt.tag(nltk.word_tokenize(sentence)))
# testing performance of trigram tagger
print(tt.evaluate(test_data))
print(tt.tag(nltk.word_tokenize(sentence)))
def combined_tagger(train_data, taggers, backoff=None):
for tagger in taggers:
backoff = tagger(train_data, backoff=backoff)
return backoff
ct = combined_tagger(train_data=train_data,
taggers=[UnigramTagger, BigramTagger, TrigramTagger],
backoff=rt)
print(ct.evaluate(test_data))
print(ct.tag(nltk.word_tokenize(sentence)))
print('\n', 'Naive Bayes and Maxent')
from nltk.classify import NaiveBayesClassifier, MaxentClassifier
from nltk.tag.sequential import ClassifierBasedPOSTagger
nbt = ClassifierBasedPOSTagger(train=train_data,
classifier_builder=NaiveBayesClassifier.train)
print(nbt.evaluate(test_data))
print(nbt.tag(nltk.word_tokenize(sentence)), '\n')
# the following takes a LONG time to run - run if you have time
'''
met = ClassifierBasedPOSTagger(train=train_data,
classifier_builder=MaxentClassifier.train)
print(met.evaluate(test_data))
print(met.tag(nltk.word_tokenize(sentence)))
'''
# -
# ## Shallow Parsing - Starting on Page 173
#
# This can take a bit of time to run at the end...
# +
print('Treebank:')
from nltk.corpus import treebank_chunk
data = treebank_chunk.chunked_sents()
train_data = data[:3500]
test_data = data[3500:]
print(train_data[7], '\n')
print('Regext parser:')
simple_sentence = 'US unveils world\'s most powerful supercomputer, beats China.'
from nltk.chunk import RegexpParser
import nltk
from pattern.en import tag
# get POS tagged sentence
tagged_simple_sent = nltk.pos_tag(nltk.word_tokenize(simple_sentence))
print('POS Tags:', tagged_simple_sent)
chunk_grammar = """
NP: {<DT>?<JJ>*<NN.*>}
"""
rc = RegexpParser(chunk_grammar)
c = rc.parse(tagged_simple_sent)
print(c, '\n')
print('Chinking:')
chink_grammar = """
NP: {<.*>+} # chunk everything as NP
}<VBD|IN>+{
"""
rc = RegexpParser(chink_grammar)
c = rc.parse(tagged_simple_sent)
# print and view chunked sentence using chinking
print(c, '\n')
# create a more generic shallow parser
print('More generic shallow parser:')
grammar = """
NP: {<DT>?<JJ>?<NN.*>}
ADJP: {<JJ>}
ADVP: {<RB.*>}
PP: {<IN>}
VP: {<MD>?<VB.*>+}
"""
rc = RegexpParser(grammar)
c = rc.parse(tagged_simple_sent)
# print and view shallow parsed simple sentence
print(c)
# Evaluate parser performance on test data
print(rc.evaluate(test_data), '\n')
print('Chunked and treebank:')
from nltk.chunk.util import tree2conlltags, conlltags2tree
train_sent = train_data[7]
print(train_sent)
# get the (word, POS tag, Chunk tag) triples for each token
wtc = tree2conlltags(train_sent)
print(wtc)
# get shallow parsed tree back from the WTC triples
tree = conlltags2tree(wtc)
print(tree, '\n')
print('NGramTagChunker:')
def conll_tag_chunks(chunk_sents):
tagged_sents = [tree2conlltags(tree) for tree in chunk_sents]
return [[(t, c) for (w, t, c) in sent] for sent in tagged_sents]
def combined_tagger(train_data, taggers, backoff=None):
for tagger in taggers:
backoff = tagger(train_data, backoff=backoff)
return backoff
from nltk.tag import UnigramTagger, BigramTagger
from nltk.chunk import ChunkParserI
class NGramTagChunker(ChunkParserI):
def __init__(self, train_sentences,
tagger_classes=[UnigramTagger, BigramTagger]):
train_sent_tags = conll_tag_chunks(train_sentences)
self.chunk_tagger = combined_tagger(train_sent_tags, tagger_classes)
def parse(self, tagged_sentence):
if not tagged_sentence:
return None
pos_tags = [tag for word, tag in tagged_sentence]
chunk_pos_tags = self.chunk_tagger.tag(pos_tags)
chunk_tags = [chunk_tag for (pos_tag, chunk_tag) in chunk_pos_tags]
wpc_tags = [(word, pos_tag, chunk_tag) for ((word, pos_tag), chunk_tag)
in zip(tagged_sentence, chunk_tags)]
return conlltags2tree(wpc_tags)
# train the shallow parser
ntc = NGramTagChunker(train_data)
# test parser performance on test data
print(ntc.evaluate(test_data))
# the next 2 lines don't belong and have been commented out
# sentence_nlp = nlp(sentence)
# tagged_sentence = [(word.text, word.tag_) for word in sentence_nlp]
# parse our sample sentence
print('Parsing NTC...')
tree = ntc.parse(tagged_simple_sent)
print(tree)
tree.draw()
print('Wall Street Journal (cut to just 1000):')
# only need the next line once
#nltk.download('conll2000')
from nltk.corpus import conll2000
wsj_data = conll2000.chunked_sents()
train_wsj_data = wsj_data[:1000]
test_wsj_data = wsj_data[1000:]
print(train_wsj_data[10])
# train the shallow parser
tc = NGramTagChunker(train_wsj_data)
# test performance on test data
print(tc.evaluate(test_wsj_data))
# there's code on the start of page 183 that's a repeat of the code on 181
# I didn't even write it - no need
# -
# ## Dependency Parsing
# +
sentence = 'US unveils world\'s most powerful supercomputer, beats China.'
import spacy
nlp = spacy.load('en_core_web_sm')
sentence_nlp = nlp(sentence)
dependency_pattern = '{left}<---{word}[{w_type}]--->{right}\n--------'
for token in sentence_nlp:
print(dependency_pattern.format(word=token.orth_, w_type=token.dep_,
left=[t.orth_ for t in token.lefts],
right=[t.orth_ for t in token.rights]))
from spacy import displacy
displacy.render(sentence_nlp, jupyter=True, style='dep',
options={'distance': 100,
'arrow_stroke': 2,
'arrow_width': 8})
# -
# ## NOTE
# The book goes into the Stanford parser at the bottom of page 187. This Stanford parser is deprecated and requires a local server (too complicated for this). Therefore, I commented all the code out - it's just another parser and does the same thing as the rest of the code without the hassle.
# ## Constituency Parsing - Starting on Page 195
# +
sentence = 'US unveils world\'s most powerful supercomputer, beats China.'
import nltk
from nltk.grammar import Nonterminal
from nltk.corpus import treebank
training_set = treebank.parsed_sents()
print(training_set[1], '\n')
# extract the productions for all annotated training sentences
treebank_productions = list(
set(production
for sent in training_set
for production in sent.productions()
)
)
# view some production rules
print(treebank_productions[0:10])
# add productions for each word, POS tag
for word, tag in treebank.tagged_words():
t = nltk.Tree.fromstring( "("+ tag + " " + word + ")")
for production in t.productions():
treebank_productions.append(production)
# build the PCFG based grammar
treebank_grammar = nltk.grammar.induce_pcfg(Nonterminal('S'),
treebank_productions)
# build the parser
viterbi_parser = nltk.ViterbiParser(treebank_grammar)
# get sample sentence tokens
tokens = nltk.word_tokenize(sentence)
# get parse tree for sample sentence
# the next (commented-out) line throws an error (see the text on page 197)
# result = list(viterbi_parser.parse(tokens))
# get tokens and their POS tags and check it
tagged_sent = nltk.pos_tag(nltk.word_tokenize(sentence))
print(tagged_sent, '\n')
# extend productions for sample sentence tokens
for word, tag in tagged_sent:
t = nltk.Tree.fromstring("("+ tag + " " + word +")")
for production in t.productions():
treebank_productions.append(production)
# rebuild grammar
treebank_grammar = nltk.grammar.induce_pcfg(Nonterminal('S'), treebank_productions)
# rebuild parser
viterbi_parser = nltk.ViterbiParser(treebank_grammar)
# get parse tree for sample sentence
result = list(viterbi_parser.parse(tokens))
#print parse tree
print(result[0])
# visualize parse tree
result[0].draw()
# -
| 12 Week/week_4/week_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:snowflakes]
# language: python
# name: conda-env-snowflakes-py
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from bs4 import BeautifulSoup
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import GaussianNB, BernoulliNB
# The forum data was scraped from https://forum.metacartel.org/ using the Scrape_MC_forum.py script.
# The funded projects list is from https://airtable.com/appWsfebn79bII12n/tblA2NXylnlVE9eIW/viw5H4Aaxv3XsyI0e?blocks=hide in the tab called "MC Grants Proposal Portfolio".
forum_data = pd.read_csv('scraped_forum_data.csv')
mc_funded_data = pd.read_csv('mc_funded_projects.csv')[0:19]
# In order to merge these datasets, I tried to match the names of the projects to titles of forum posts. Four projects had no matching forum post:
# +
matching_names = []
projects_with_no_match_in_forum = []
for i, project in mc_funded_data.iterrows():
if pd.isna(project['Project Name']):
matching_names.append(np.nan)
continue
matching_titles = [title for title in forum_data['title'] if project['Project Name'] in title]
if len(matching_titles) > 0:
matching_names.append(matching_titles[0])
else:
matching_names.append(np.nan)
projects_with_no_match_in_forum.append(project['Project Name'])
print('No matching forum post:')
print(projects_with_no_match_in_forum)
# -
# Merging the datasets and cleaning them up:
# +
mc_funded_data['Matching Title'] = matching_names
forum_data = forum_data.merge(mc_funded_data, how='left', left_on='title', right_on='Matching Title')
forum_data = forum_data.drop_duplicates(subset=['description'])
forum_data['was_funded'] = np.where(pd.isnull(forum_data['$ Amount']), 0, 1)
forum_data['Date'] = pd.to_datetime(forum_data['time_posted'])
forum_data['Date'] = forum_data['Date'].apply(lambda x: x.date())
forum_data['description'] = forum_data['description'].apply(lambda x: str(x).lower())
forum_data = forum_data[['title', 'author', 'description', 'comments', 'Person Contact',
'Description', '$ Amount', 'Date', 'was_funded']]
# +
forum_data['Num comments'] = forum_data['comments'].apply(lambda x: len(x))
forum_data['Len description'] = forum_data['description'].apply(lambda x: len(str(x)))
sns.lmplot(x='Len description', y='was_funded', data=forum_data)
plt.show()
sns.lmplot(x='Num comments', y='was_funded', data=forum_data)
plt.show()
# +
def remove_punctuation(text):
punctuationfree="".join([i for i in text if i not in '!"#$&\'()*+,-./:;<=>?@[\\]^_`{|}~'])
return punctuationfree
import re  # needed by remove_html_tags below
def remove_html_tags(raw_html):
    cleantext = re.sub(re.compile('<.*?>'), '', raw_html)
    return cleantext
def clean_html(text):
soup = BeautifulSoup(text, 'html.parser')
return soup.get_text()
# -
forum_data['Description_cleaned'] = forum_data['description'].apply(lambda x:clean_html(x))
forum_data['Description_cleaned'] = forum_data['Description_cleaned'].apply(lambda x:remove_punctuation(x))
forum_data[['title', 'author', 'Description_cleaned', 'comments', 'was_funded', '$ Amount']].head()
# +
#Xtrain, Xtest, ytrain, ytest = train_test_split(forum_data[['Description_cleaned']], forum_data['was_funded'])
tfv = TfidfVectorizer(stop_words = 'english')
tfv.fit(forum_data['Description_cleaned'])
X_tfv = tfv.transform(forum_data['Description_cleaned'])
clf = BernoulliNB()
clf.fit(X_tfv.toarray(), forum_data['was_funded'])
neg_class_prob = clf.feature_log_prob_[0, :]
pos_class_prob = clf.feature_log_prob_[1, :]
zipped = list(zip(tfv.get_feature_names_out(), pos_class_prob - neg_class_prob))
zipped = sorted(zipped, key=lambda t: t[1], reverse=False)
copy_forum_data = forum_data.copy()
common_words = []
for word, score in zipped:
copy_forum_data[word] = copy_forum_data['Description_cleaned'].apply(lambda x: ' '+word+' ' in str(x))
if copy_forum_data[word].sum() > 5:
common_words.append((word,score))
# -
top_words = sorted(common_words, key=lambda t: t[1], reverse=False)
for word, score in top_words[1:200]:
print(word)
def plot_word(forum_data, word):
copy_forum_data = forum_data.copy()
copy_forum_data[word] = copy_forum_data['Description_cleaned'].apply(lambda x: ' '+word+' ' in str(x))
print(f'Number of forum posts with keyword \'{word}\':', copy_forum_data[word].sum())
sns.barplot(x=word, y='was_funded', data=copy_forum_data, errwidth=0.5)
plt.ylabel('Was funded')
plt.show()
# +
colors = ["#c7def4", "#b1de5f"]
sns.set_palette(colors)
for word, score in top_words[0:10]:
plot_word(forum_data, word)
# +
word = 'art'
copy_forum_data = forum_data.copy()
copy_forum_data[word] = copy_forum_data['Description_cleaned'].apply(lambda x: ' '+word+' ' in str(x))
print(copy_forum_data[copy_forum_data[word] == 1])
print(copy_forum_data[word].sum())
copy_forum_data[copy_forum_data[word] == 1].iloc[10]['Description_cleaned']
# -
plot_word(forum_data, 'artists')
len(forum_data)
| MC grants analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from pymaid import rmaid
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import pymaid
# # %run startup_py3.py
# # %run load_pn_metadata_v1.py
# pn_skids = load_json(local_path + "data/skids/pn")
# pn_skids = cc.get_skids_from_annos(fafb_c, [['right_calyx_PN'], ['has_bouton']], ["multiglomerular PN"])
# pns_ms = neurogenesis.init_from_skid_list(fafb_c, pn_skids)
# nl = [pymaid.get_neuron([str(i) for i in j]) for j in [pn_skids[:40], pn_skids[40:80], pn_skids[80:]]]
# pns_pm = nl[0] + nl[1] + nl [2]
# ca = pymaid.get_volume('MB_CA_R')
local_path = "/Users/zhengz11/myscripts/git_clone/pn_kc/"
path = local_path + "data/pn_bouton_clusters/"
# with open(path + "pns_pm.pkl", 'rb') as f:
# pns_pm = pickle.load(f)
df = pd.read_excel(local_path + 'data/180613-pn_subtypes.xlsx')
# +
# to run this file please download binary files from
# https://drive.google.com/drive/folders/15a3r4LUPB0nSdteCtE-4QAci4dYoVqha?usp=sharing
# save them in pn_kc/data/pn_bouton_clusters
# then you can load them from below
import pickle
with open(path + "pns_ms.pkl", 'rb') as f:
pns_ms = pickle.load(f)
with open(path + "ca.pkl", 'rb') as f:
ca = pickle.load(f)
path = local_path + "data/pn_bouton_clusters/"
nl_t1 = []
for i in pn_skids:
with open(path + "pns_pm/" + "{}.pkl".format(i), 'rb') as f:
n = pickle.load(f)
nl_t1.append(n)
pns_pm = pymaid.CatmaidNeuronList(nl_t1)
# +
## clustering of calyx skeleton of PNs
pns_ca_sk = pns_pm.copy()
pns_btn = {j: sum(pns_ms[j].segments.nodes_id,[]) for j in pns_ms.keys()}
pns_ca_btn = []
for i in pns_ca_sk.skeleton_id:
t1 = pns_btn[int(i)]
pns_ca_btn.append(pymaid.subset_neuron(pns_ca_sk.skid[i], pns_btn[int(i)]))
pns_ca_btn = pymaid.CatmaidNeuronList(pns_ca_btn)
pns_ca_btn.resample(100, inplace=True)
gloms = pd.unique(df.gloms)
glom_skids = {i: list(df.query('gloms==@i').skids) for i in gloms}
super_pns = []
for glom in gloms:
sks = [i for i in glom_skids[glom] if i in pn_skids]
if len(sks) > 0:
if len(sks) > 1:
this_super = pymaid.stitch_neurons(*[pns_ca_btn.skid[i] for i in sks], method='NONE')
else:
this_super = pns_ca_btn.skid[sks[0]].copy()
this_super.neuron_name = glom
super_pns.append(this_super)
super_pns_nl = pymaid.CatmaidNeuronList(super_pns)
nblast_clust = rmaid.nblast_allbyall(super_pns_nl)
# +
# Cluster using Ward's
nblast_clust.cluster(method='ward')
from scipy.cluster import hierarchy
hierarchy.set_link_color_palette(['r', 'b', 'orange', 'm'])
# Plot dendrogram
fig = nblast_clust.plot_dendrogram(color_threshold=1.5)
plt.tight_layout()
fig.set_size_inches(12,4)
# plt.savefig('PN_bouton_cluster_190807.pdf')
plt.show()
# +
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats, cluster
import matplotlib as mpl
clust_result = nblast_clust # needs to be a pymaid.clust_results object
clust_object = super_pns_nl # needs to be a CatmaidNeuronList
cluster_names = nblast_clust.get_clusters(k=4, criterion='maxclust', return_type='labels')
cluster_skids = nblast_clust.get_clusters(k=4, criterion='maxclust', return_type='columns')
f, ax = plt.subplots(len(cluster_skids),2,figsize=(12, len(cluster_skids)*8))
sns.set_style("white")
# Palettes will repeat after 4!
poss_palettes = ['Reds','Blues','Oranges', 'Purples'] * 10
# Collect synapse positions
for i,cl in enumerate(cluster_skids):
# Collect synapses
this_cl = pymaid.CatmaidNeuronList( [ n for n in clust_object if n.skeleton_id in cl ] )
this_cn = this_cl.nodes
cm = mpl.cm.get_cmap(poss_palettes[i])
    norm = mpl.colors.Normalize(vmin=0, vmax=3.5e-9)
    # create a scalarmappable from the colormap
    sm = mpl.cm.ScalarMappable(cmap=cm, norm=norm)
sm.set_array([])
ls = np.linspace(0, 3e-9, 7)
# Draw XY density plots (make sure to invert Y coordinates!)
_ax1 = sns.kdeplot(this_cn.x.values, this_cn.y.values*-1,
cmap=cm, shade=True, shade_lowest=False, ax=ax[i][0], vmin=0, vmax=3.5e-9, levels=ls, cbar=True)
# plt.colorbar(m, boundaries=np.linspace(0, 2.5e-9, 6), ax=_ax1)
# plt.colorbar()
# Draw XZ density plots
_ax2 = sns.kdeplot(this_cn.x.values, this_cn.z.values,
cmap=cm, shade=True, shade_lowest=False, ax=ax[i][1], vmax=3.5e-9, cbar=True, levels=ls)
# plt.colorbar(m, boundaries=np.linspace(0, 2.5e-9, 6), ax=_ax2)
ax[i][0].set_aspect("equal")
ax[i][1].set_aspect("equal")
ax[i][0].set_ylabel( ','.join(cluster_names[i]) )
ax[i][0].text( 375000, -140000, ','.join(cluster_names[i]), fontsize=12,
rotation=90, verticalalignment='center', horizontalalignment='center')
ax[i][0].set_axis_off()
ax[i][1].set_axis_off()
ax[i][0].set_xlim((375000, 480000))
ax[i][0].set_ylim((-185000, -100000))
ax[i][1].set_xlim((375000, 480000))
ax[i][1].set_ylim((150000, 235000))
# Draw MB calyx outlines (invert y coordinates)
vpatch_xy = mpatches.Polygon(ca.to_2d(view='xy', invert_y=True), closed=True, lw=1, fill=False, ec='grey', alpha=1)
ax[i][0].add_patch(vpatch_xy)
vpatch_xz = mpatches.Polygon(ca.to_2d(view='xz', invert_y=True), closed=True, lw=1, fill=False, ec='grey', alpha=1)
ax[i][1].add_patch(vpatch_xz)
ax[0][0].set_title('XY view')
ax[0][1].set_title('XZ view')
plt.tight_layout()
# plt.savefig('KDE_PN_bouton_4clusters_18070_v1.pdf')
# plt.savefig('KDE_PN_bouton_4clusters_w_cbar_190808_v3_wColorBar.pdf')
# plt.savefig('KDE_PN_bouton_4clusters_w_cbar_190809_wColorBar_resampled.pdf')
plt.show()
# -
df_lookup('id', comm_ids, 'glom', glom_id_table)
import matplotlib
matplotlib.__version__
| connectivity/Fig4H_PN_bouton_cluster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### 1. Structured Query Language (SQL) is used to (check all that apply)
# ##### Ans:
# <ul>
# <li>Insert data</li>
# <li>Delete data</li>
# <li>Create a table</li>
# </ul>
# #### 2. Which of these is the right syntax to make a new table?
# ##### Ans: CREATE TABLE people;
# #### 3. Which SQL command is used to insert a new row into a table?
# ##### Ans: INSERT INTO
# #### 4. Which command is used to retrieve all records from a table?
# ##### Ans: SELECT * FROM Users
# #### 5. Which keyword will cause the results of the query to be displayed in sorted order?
# ##### Ans: ORDER BY
# #### 6. In database terminology, another word for table is
# ##### Ans: relation
# #### 7. In a typical online production environment, who has direct access to the production database?
# ##### Ans: Database Administrator
# #### 8. Which of the following is the database software used in this class?
# ##### Ans: SQLite
# #### 9. What happens if a DELETE command is run on a table without a WHERE clause?
# ##### Ans: All the rows in the table are deleted
# #### 10. Which of the following commands would update a column named "name" in a table named "Users"?
# ##### Ans: UPDATE Users SET name='new name' WHERE ...
# #### 11. What does this SQL command do?
# + active=""
# SELECT COUNT(*) FROM Users
# -
# ##### Ans: It counts the rows in the table Users
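# As a quick illustration of the commands covered above (table and column names here are made up for the example), the same statements can be run end-to-end with Python's built-in `sqlite3` module, which wraps the SQLite engine used in this class:
# +
import sqlite3
# in-memory database, so the example leaves nothing on disk
conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE Users (name TEXT, email TEXT)')               # create a table
cur.execute("INSERT INTO Users (name, email) VALUES ('Ana', 'ana@x')")  # insert rows
cur.execute("INSERT INTO Users (name, email) VALUES ('Bob', 'bob@x')")
cur.execute("UPDATE Users SET name='new name' WHERE email='bob@x'")     # update a column
print(cur.execute('SELECT * FROM Users ORDER BY name').fetchall())      # retrieve, sorted
print(cur.execute('SELECT COUNT(*) FROM Users').fetchone())             # count rows
cur.execute('DELETE FROM Users')                                        # no WHERE clause: deletes every row
conn.close()
# -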
| Coursera/Using Databases with Python/Week-2/Quiz/Single-Table-SQL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import sklearn.preprocessing as skl
def read_file(file_name, spliter=' '):
f = open(file_name)
line = f.readline()
    data = []  # 2D list: [number of samples, 720 values per sample]
while line:
trainRead = line.split(spliter)
for i in range(len(trainRead)):
trainRead[i] = float(trainRead[i])
data.append(trainRead)
line = f.readline()
return data
# +
# AE Model
num_hidden_1 = 256 # 1st layer num features
num_hidden_2 = 128 # 2nd layer num features (the latent dim)
num_hidden_3 = 128
num_input = 720 # img shape: 60*12
AE_steps = 30000
display_step = AE_steps / 10
# combine data
sampleDataSet = [] # [7,32,720]
sampleDataSet.append(read_file("data/imgs_sample_1.txt"))
sampleDataSet.append(read_file("data/imgs_sample_2.txt"))
sampleDataSet.append(read_file("data/imgs_sample_3.txt"))
sampleDataSet.append(read_file("data/imgs_sample_4.txt"))
sampleDataSet.append(read_file("data/imgs_sample_5.txt"))
sampleDataSet.append(read_file("data/imgs_sample_6.txt"))
sampleDataSet.append(read_file("data/imgs_sample_7.txt"))
# FingerPrint
FPDataSet = read_file("data/ECFPs.txt", ' ') # [32,128]
# normalize
for i in range(len(sampleDataSet)):
sampleDataSet[i] = skl.normalize(sampleDataSet[i], axis=1)
# print(sampleDataSet[0][0])
# +
# Drop samples that contain only zeros
trainAE = [] # data for training
for i in range(len(sampleDataSet)):
for j in range(len(sampleDataSet[i])):
if np.sum(sampleDataSet[i][j] != 0):
trainAE.append(sampleDataSet[i][j]) # shape [7, 32, 720] --> [212, 720]
# print("trainDataSetAE: ", len(trainDataSetAE)) # 212
# data for training the regression model: for each of the 32 rows, collect its non-zero samples across the 7 files
trainNN = []
for i in range(32): #
tmpArray = []
for j in range(7): #
if np.sum(sampleDataSet[j][i] != 0):
tmpArray.append(sampleDataSet[j][i])
trainNN.append(tmpArray)
print("trainDataSetReg: ", len(trainNN)) # 224, 7, 720
# +
# AutoEncoder model
img_input = tf.placeholder('float32', [None, num_input])
weights = {
'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])),
# 'encoder_h3': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3])),
'decoder_h1': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input])),
# 'decoder_h3': tf.Variable(tf.random_normal([num_hidden_1, num_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])),
# 'encoder_b3': tf.Variable(tf.random_normal([num_hidden_3])),
'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([num_input])),
# 'decoder_b3': tf.Variable(tf.random_normal([num_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
# layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))
return layer_2
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
# layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))
return layer_2
# +
# model
encoder_op = encoder(img_input)
decoder_op = decoder(encoder_op)
y_pred = decoder_op # reconstructed image
y_true = img_input # original image
learning_rate = 0.01
learning_rate2 = learning_rate / 10
learning_rate3 = learning_rate2 / 10
loss = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
optimizer2 = tf.train.AdamOptimizer(learning_rate2).minimize(loss)
optimizer3 = tf.train.AdamOptimizer(learning_rate3).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Training
batch_x = trainAE # 212
# print("batch_x: ", len(batch_x))
_, l = sess.run([optimizer, loss], feed_dict={img_input: batch_x})
for j in range(AE_steps):
if j <= AE_steps * 1 / 3:
_, l = sess.run([optimizer, loss], feed_dict={img_input: batch_x})
elif j <= AE_steps * 2 / 3:
_, l = sess.run([optimizer2, loss], feed_dict={img_input: batch_x})
else:
_, l = sess.run([optimizer3, loss], feed_dict={img_input: batch_x})
if j % display_step == 0:
print('Step {0}: Minibatch Loss: {1:<6f}'.format(j, l))
# +
# test performance of AE
n = 1
m = 20
canvas_orig = np.empty((60, 12 * m))
canvas_recon = np.empty((60, 12 * m))
for i in range(n):
batch_x = trainAE
# Encode and decode the digit image
g = sess.run(decoder_op, feed_dict={img_input: batch_x})
# Display original images
for j in range(m):
# Draw the original digits
# canvas_orig[i * 60:(i + 1) * 60, j * 12:(j + 1) * 12] = batch_x[j].reshape([60, 12])
canvas_orig[0:60, j * 12:(j + 1) * 12] = batch_x[j + 24].reshape([60, 12])
# Display reconstructed images
for j in range(m):
# Draw the reconstructed digits
# canvas_recon[i * 60:(i + 1) * 60, j * 12:(j + 1) * 12] = g[j].reshape([60, 12])
canvas_recon[0:60, j * 12:(j + 1) * 12] = g[j + 24].reshape([60, 12])
print("Original Images")
plt.figure(figsize=(m, m))
plt.imshow(canvas_orig, origin="upper", cmap="gray")
plt.show()
print("Reconstructed Images")
plt.figure(figsize=(m, m))
plt.imshow(canvas_recon, origin="upper", cmap="gray")
plt.show()
# +
# Classification
result_y = []
for i in range(len(trainNN)):
batch_x = trainNN[i] # [7,720]
batch_y = sess.run(encoder_op, feed_dict={img_input: batch_x}) # [7,128]
result_y.append(batch_y)
def random_choose(x):
randInt = np.random.randint(low=0, high=32)
while x == randInt:
randInt = np.random.randint(low=0, high=32)
return randInt
# neg / pos set
negative_ratio = 3
batch_data = [] # [None, 256]
batch_label = [] # [None, 2]
for i in range(len(trainNN)):
data_x = trainNN[i] # [7,720]
data_y = sess.run(encoder_op, feed_dict={img_input: data_x}) # [7,128]
data_fpT = [] # [7,128]
data_fpF = [] # [3,7,128]
for j in range(len(data_y)):
data_fpT.append(FPDataSet[j])
for j in range(negative_ratio):
tmp = []
for k in range(len(data_y)):
tmp.append(FPDataSet[random_choose(i)])
data_fpF.append(tmp)
# Combine new data
sample_T = np.append(data_y, data_fpT, axis=1) # [7,256]
sample_F = np.append(data_y, data_fpF[0], axis=1) # [21,256]
for j in range(1, negative_ratio):
sample_F = np.append(sample_F, np.append(data_y, data_fpF[j], axis=1), axis=0)
if batch_data == []:
batch_data = np.append(sample_T, sample_F, axis=0)
else:
batch_data = np.append(batch_data, sample_T, axis=0)
batch_data = np.append(batch_data, sample_F, axis=0)
for j in range(len(sample_T)):
batch_label.append([0, 1]) # T
for j in range(len(sample_F)):
batch_label.append([1, 0]) # F
# +
# training model
data_size = 256
l1_size = 64
l2_size = 32
l3_size = 8
out_size = 2
# placeholders and basic training variables
data_input = tf.placeholder(tf.float32, [None, data_size])
label_input = tf.placeholder(tf.int64, [None, 2])
weightsNN = {
'layer_w1': tf.Variable(tf.random_normal([data_size, l1_size])),
'layer_w2': tf.Variable(tf.random_normal([l1_size, l2_size])),
'layer_w3': tf.Variable(tf.random_normal([l2_size, l3_size])),
'layer_wo': tf.Variable(tf.random_normal([l3_size, out_size])),
}
biasesNN = {
'layer_b1': tf.Variable(tf.random_normal([l1_size])),
'layer_b2': tf.Variable(tf.random_normal([l2_size])),
'layer_b3': tf.Variable(tf.random_normal([l3_size])),
'layer_bo': tf.Variable(tf.random_normal([out_size])),
}
def network(x):
layer_1 = tf.add(tf.matmul(x, weightsNN['layer_w1']), biasesNN['layer_b1'])
layer_2 = tf.add(tf.matmul(layer_1, weightsNN['layer_w2']), biasesNN['layer_b2'])
layer_3 = tf.add(tf.matmul(layer_2, weightsNN['layer_w3']), biasesNN['layer_b3'])
layer_o = tf.add(tf.matmul(layer_3, weightsNN['layer_wo']), biasesNN['layer_bo'])
return layer_o
y_conv = network(data_input)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=label_input, logits=y_conv)
optimizerNN = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(label_input, 1), tf.argmax(y_conv, 1))
test_out = tf.argmax(y_conv, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sessNN = tf.Session()
sessNN.run(tf.global_variables_initializer())
# training
train_step = 10000
print_step = train_step / 20
for i in range(train_step):
sessNN.run(optimizerNN, feed_dict={data_input: batch_data, label_input: batch_label})
if i % print_step == 0:
acc = sessNN.run(accuracy, feed_dict={data_input: batch_data, label_input: batch_label})
print("step:", i, "training accuracy:", acc)
# print("test_out: ", test_out)
acc = sessNN.run(accuracy, feed_dict={data_input: batch_data, label_input: batch_label})
print("Final accuracy:", acc)
# -
| Autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## imSitu regular models
# ### Training
#
# CUDA_VISIBLE_DEVICES=3 python train.py --save_dir origin_0 --log_dir origin_0 --batch_size 32 --num_epochs 15 --learning_rate 0.0001
#
#
# CUDA_VISIBLE_DEVICES=6 python train.py --save_dir ratio_1 --log_dir ratio_1 --batch_size 32 --num_epochs 15 --learning_rate 0.0001 --balanced --ratio 1
#
#
# CUDA_VISIBLE_DEVICES=2 python train.py --save_dir blackout_box_0 --log_dir blackout_box_0 --batch_size 32 --num_epochs 15 --learning_rate 0.0001 --blackout_box
#
#
#
# ### Attacker
# CUDA_VISIBLE_DEVICES=3 python attacker.py --exp_id origin_0 --num_rounds 5 --num_epochs 100 --learning_rate 0.00005 --batch_size 128
#
# CUDA_VISIBLE_DEVICES=4 python attacker.py --exp_id ratio_1 --num_rounds 5 --num_epochs 100 --learning_rate 0.00005 --balanced --ratio 1 --batch_size 128
#
# CUDA_VISIBLE_DEVICES=1 python attacker.py --exp_id blackout_box_0 --num_rounds 5 --num_epochs 100 --learning_rate 0.00005 --batch_size 128 --blackout_box
#
#
# ### Dataset leakage
# CUDA_VISIBLE_DEVICES=0 python dataset_leakage.py --num_epochs 50 --learning_rate 0.00005 --batch_size 128 --num_rounds 5 --balanced --ratio 1
#
#
# CUDA_VISIBLE_DEVICES=3 python dataset_leakage.py --num_epochs 100 --learning_rate 0.00005 --batch_size 128 --num_rounds 5
#
#
# ### Natural leakage
# CUDA_VISIBLE_DEVICES=5 python natural_leakage.py --num_rounds 5 --num_epochs 100 --learning_rate 0.00005 --batch_size 128
#
#
#
# ### Random baseline:
# CUDA_VISIBLE_DEVICES=3 python attacker.py --exp_id origin_5 --num_rounds 5 --num_epochs 100 --learning_rate 0.00005 --batch_size 128 --noise --noise_scale 0.2
#
# ## imSitu adv models
# ### Attacker
#
# CUDA_VISIBLE_DEVICES=2 python adv_attacker.py --exp_id conv4_300_1.0_0.2_imSitu_linear_0 --adv_on --layer conv4 --no_avgpool --adv_capacity 300 --adv_lambda 1 --learning_rate 0.00005 --num_epochs 100 --batch_size 128
#
# CUDA_VISIBLE_DEVICES=2 python adv_attacker.py --exp_id conv5_300_1.0_0.2_imSitu_linear_0 --adv_on --layer conv5 --no_avgpool --adv_capacity 300 --adv_lambda 1 --learning_rate 0.00005 --num_epochs 100 --batch_size 128
# ## imSitu adv+autoencoder
# ### Attacker
# CUDA_VISIBLE_DEVICES=3 python ae_adv_attacker.py --exp_id generated_image_10.0_1.0_0 --adv_on --layer generated_image --adv_capacity 300 --adv_lambda 1 --learning_rate 0.00005 --num_epochs 100 --batch_size 128
#
| dlfairness/original_code/Balanced-Datasets-Are-Not-Enough/verb_classification/commands.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Neural Networks: Step by Step
#
# Welcome to Course 4's first assignment! In this assignment, you will implement convolutional (CONV) and pooling (POOL) layers in numpy, including both forward propagation and (optionally) backward propagation.
#
# **Notation**:
# - Superscript $[l]$ denotes an object of the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
#
# - Superscript $(i)$ denotes an object from the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
#
# - Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$, assuming this is a fully connected (FC) layer.
#
#
# - $n_H$, $n_W$ and $n_C$ denote respectively the height, width and number of channels of a given layer. If you want to reference a specific layer $l$, you can also write $n_H^{[l]}$, $n_W^{[l]}$, $n_C^{[l]}$.
# - $n_{H_{prev}}$, $n_{W_{prev}}$ and $n_{C_{prev}}$ denote respectively the height, width and number of channels of the previous layer. If referencing a specific layer $l$, this could also be denoted $n_H^{[l-1]}$, $n_W^{[l-1]}$, $n_C^{[l-1]}$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# ## 1 - Packages
#
# Let's first import all the packages that you will need during this assignment.
# - [numpy](http://www.numpy.org) is the fundamental package for scientific computing with Python.
# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
# +
import numpy as np
import h5py
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
np.random.seed(1)
# -
# ## 2 - Outline of the Assignment
#
# You will be implementing the building blocks of a convolutional neural network! Each function you will implement will have detailed instructions that will walk you through the steps needed:
#
# - Convolution functions, including:
# - Zero Padding
# - Convolve window
# - Convolution forward
# - Convolution backward (optional)
# - Pooling functions, including:
# - Pooling forward
# - Create mask
# - Distribute value
# - Pooling backward (optional)
#
# This notebook will ask you to implement these functions from scratch in `numpy`. In the next notebook, you will use the TensorFlow equivalents of these functions to build the following model:
#
# <img src="images/model.png" style="width:800px;height:300px;">
#
# **Note** that for every forward function, there is its corresponding backward equivalent. Hence, at every step of your forward module you will store some parameters in a cache. These parameters are used to compute gradients during backpropagation.
# ## 3 - Convolutional Neural Networks
#
# Although programming frameworks make convolutions easy to use, they remain one of the hardest concepts to understand in Deep Learning. A convolution layer transforms an input volume into an output volume of different size, as shown below.
#
# <img src="images/conv_nn.png" style="width:350px;height:200px;">
#
# In this part, you will build every step of the convolution layer. You will first implement two helper functions: one for zero padding and the other for computing the convolution function itself.
# ### 3.1 - Zero-Padding
#
# Zero-padding adds zeros around the border of an image:
#
# <img src="images/PAD.png" style="width:600px;height:400px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **Zero-Padding**<br> Image (3 channels, RGB) with a padding of 2. </center></caption>
#
# The main benefits of padding are the following:
#
# - It allows you to use a CONV layer without necessarily shrinking the height and width of the volumes. This is important for building deeper networks, since otherwise the height/width would shrink as you go to deeper layers. An important special case is the "same" convolution, in which the height/width is exactly preserved after one layer.
#
# - It helps us keep more of the information at the border of an image. Without padding, very few values at the next layer would be affected by pixels at the edges of an image.
#
# **Exercise**: Implement the following function, which pads all the images of a batch of examples X with zeros. [Use np.pad](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html). Note if you want to pad the array "a" of shape $(5,5,5,5,5)$ with `pad = 1` for the 2nd dimension, `pad = 3` for the 4th dimension and `pad = 0` for the rest, you would do:
# ```python
# a = np.pad(a, ((0,0), (1,1), (0,0), (3,3), (0,0)), 'constant', constant_values = (..,..))
# ```
# +
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
"""
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
"""
### START CODE HERE ### (≈ 1 line)
X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)
### END CODE HERE ###
return X_pad
# +
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1, 1] =", x[1, 1])
print ("x_pad[1, 1] =", x_pad[1, 1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
# -
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **x.shape**:
# </td>
# <td>
# (4, 3, 3, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x_pad.shape**:
# </td>
# <td>
# (4, 7, 7, 2)
# </td>
# </tr>
# <tr>
# <td>
# **x[1,1]**:
# </td>
# <td>
# [[ 0.90085595 -0.68372786]
# [-0.12289023 -0.93576943]
# [-0.26788808 0.53035547]]
# </td>
# </tr>
# <tr>
# <td>
# **x_pad[1,1]**:
# </td>
# <td>
# [[ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]
# [ 0. 0.]]
# </td>
# </tr>
#
# </table>
# ### 3.2 - Single step of convolution
#
# In this part, implement a single step of convolution, in which you apply the filter to a single position of the input. This will be used to build a convolutional unit, which:
#
# - Takes an input volume
# - Applies a filter at every position of the input
# - Outputs another volume (usually of different size)
#
# <img src="images/Convolution_schematic.gif" style="width:500px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **Convolution operation**<br> with a filter of 2x2 and a stride of 1 (stride = amount you move the window each time you slide) </center></caption>
#
# In a computer vision application, each value in the matrix on the left corresponds to a single pixel value, and we convolve a 3x3 filter with the image by multiplying its values element-wise with the original matrix, then summing them up. In this first step of the exercise, you will implement a single step of convolution, corresponding to applying a filter to just one of the positions to get a single real-valued output.
#
# Later in this notebook, you'll apply this function to multiple positions of the input to implement the full convolutional operation.
#
# **Exercise**: Implement conv_single_step(). [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.sum.html).
#
# +
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Add bias.
s = np.multiply(a_slice_prev, W) + b
# Sum over all entries of the volume s
Z = np.sum(s)
### END CODE HERE ###
return Z
# +
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
# -
# **Expected Output**:
# <table>
# <tr>
# <td>
# **Z**
# </td>
# <td>
# -23.1602122025
# </td>
# </tr>
#
# </table>
# ### 3.3 - Convolutional Neural Networks - Forward pass
#
# In the forward pass, you will take many filters and convolve them on the input. Each 'convolution' gives you a 2D matrix output. You will then stack these outputs to get a 3D volume:
#
# <center>
# <video width="620" height="440" src="images/conv_kiank.mp4" type="video/mp4" controls>
# </video>
# </center>
#
# **Exercise**: Implement the function below to convolve the filters W on an input activation A_prev. This function takes as input A_prev, the activations output by the previous layer (for a batch of m inputs), F filters/weights denoted by W, and a bias vector denoted by b, where each filter has its own (single) bias. Finally you also have access to the hyperparameters dictionary which contains the stride and the padding.
#
# **Hint**:
# 1. To select a 2x2 slice at the upper left corner of a matrix "a_prev" (shape (5,5,3)), you would do:
# ```python
# a_slice_prev = a_prev[0:2,0:2,:]
# ```
# This will be useful when you will define `a_slice_prev` below, using the `start/end` indexes you will define.
# 2. To define a_slice you will need to first define its corners `vert_start`, `vert_end`, `horiz_start` and `horiz_end`. This figure may be helpful for you to find how each of the corner can be defined using h, w, f and s in the code below.
#
# <img src="images/vert_horiz_kiank.png" style="width:400px;height:300px;">
# <caption><center> <u> <font color='purple'> **Figure 3** </u><font color='purple'> : **Definition of a slice using vertical and horizontal start/end (with a 2x2 filter)** <br> This figure shows only a single channel. </center></caption>
#
#
# **Reminder**:
# The formulas relating the output shape of the convolution to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f + 2 \times pad}{stride} \rfloor +1 $$
# $$ n_C = \text{number of filters used in the convolution}$$
#
# For this exercise, we won't worry about vectorization, and will just implement everything with for-loops.
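# As a quick sanity check of these formulas (the numbers below simply mirror the test cell further down: a 4x4 input, f = 2, pad = 2, stride = 1), the output should be 7x7:
# +
# Illustrative only: plug the test values into the output-shape formulas above
n_H_prev, n_W_prev, f, pad, stride = 4, 4, 2, 2, 1
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1
print(n_H, n_W)  # 7 7 -> Z will have shape (m, 7, 7, number of filters)
# -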
# +
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
"""
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
"""
### START CODE HERE ###
# Retrieve dimensions from A_prev's shape (≈1 line)
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape (≈1 line)
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters" (≈2 lines)
stride = hparameters['stride']
pad = hparameters['pad']
# Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)
n_H = int((n_H_prev - f + 2 * pad) / stride) + 1
n_W = int((n_W_prev - f + 2 * pad) / stride) + 1
# Initialize the output volume Z with zeros. (≈1 line)
Z = np.zeros((m, n_H, n_W, n_C))
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over channels (= #filters) of the output volume
# Find the corners of the current "slice" (≈4 lines)
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
# Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
a_slice_prev = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
# Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)
Z[i, h, w, c] = conv_single_step(a_slice_prev, W[...,c], b[...,c])
### END CODE HERE ###
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
# +
np.random.seed(1)
A_prev = np.random.randn(10, 4, 4, 3)
W = np.random.randn(2, 2, 3, 8)
b = np.random.randn(1, 1, 1, 8)
hparameters = {"pad" : 2,
"stride": 1}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
# -
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Z's mean**
# </td>
# <td>
# 0.155859324889
# </td>
# </tr>
# <tr>
# <td>
# **cache_conv[0][1][2][3]**
# </td>
# <td>
# [-0.20075807 0.18656139 0.41005165]
# </td>
# </tr>
#
# </table>
#
# Finally, CONV layer should also contain an activation, in which case we would add the following line of code:
#
# ```python
# # Convolve the window to get back one output neuron
# Z[i, h, w, c] = ...
# # Apply activation
# A[i, h, w, c] = activation(Z[i, h, w, c])
# ```
#
# You don't need to do it here.
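# Purely as an illustration (not part of the graded exercises), applying a ReLU activation to the `Z` computed in the test cell above would look like this:
# +
# Sketch only: element-wise ReLU on the conv output from the previous test cell
A = np.maximum(0, Z)
print("A's mean =", np.mean(A))
# -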
#
# ## 4 - Pooling layer
#
# The pooling (POOL) layer reduces the height and width of the input. It helps reduce computation, as well as helps make feature detectors more invariant to its position in the input. The two types of pooling layers are:
#
# - Max-pooling layer: slides an ($f, f$) window over the input and stores the max value of the window in the output.
#
# - Average-pooling layer: slides an ($f, f$) window over the input and stores the average value of the window in the output.
#
# <table>
# <td>
# <img src="images/max_pool1.png" style="width:500px;height:300px;">
# <td>
#
# <td>
# <img src="images/a_pool.png" style="width:500px;height:300px;">
# <td>
# </table>
#
# These pooling layers have no parameters for backpropagation to train. However, they have hyperparameters such as the window size $f$. This specifies the height and width of the fxf window you would compute a max or average over.
#
# ### 4.1 - Forward Pooling
# Now, you are going to implement MAX-POOL and AVG-POOL, in the same function.
#
# **Exercise**: Implement the forward pass of the pooling layer. Follow the hints in the comments below.
#
# **Reminder**:
# As there's no padding, the formulas binding the output shape of the pooling to the input shape is:
# $$ n_H = \lfloor \frac{n_{H_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_W = \lfloor \frac{n_{W_{prev}} - f}{stride} \rfloor +1 $$
# $$ n_C = n_{C_{prev}}$$
# +
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
"""
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
"""
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
### START CODE HERE ###
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
for w in range(n_W): # loop on the horizontal axis of the output volume
for c in range (n_C): # loop over the channels of the output volume
# Find the corners of the current "slice" (≈4 lines)
vert_start = h * stride
vert_end = vert_start + f
horiz_start = w * stride
horiz_end = horiz_start + f
# Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
                    # Compute the pooling operation on the slice. Use an if statement to differentiate the modes. Use np.max/np.mean.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.mean(a_prev_slice)
### END CODE HERE ###
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
# +
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 1, "f": 4}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
# -
# **Expected Output:**
# <table>
#
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[ 1.74481176 1.6924546 2.10025514]]] <br/>
#
#
# [[[ 1.19891788 1.51981682 2.18557541]]]]
#
# </td>
# </tr>
# <tr>
# <td>
# A =
# </td>
# <td>
# [[[[-0.09498456 0.11180064 -0.14263511]]] <br/>
#
#
# [[[-0.09525108 0.28325018 0.33035185]]]]
#
# </td>
# </tr>
#
# </table>
#
# Congratulations! You have now implemented the forward passes of all the layers of a convolutional network.
#
# The remainder of this notebook is optional, and will not be graded.
#
# ## 5 - Backpropagation in convolutional neural networks (OPTIONAL / UNGRADED)
#
# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers don't need to bother with the details of the backward pass. The backward pass for convolutional networks is complicated. If you wish however, you can work through this optional portion of the notebook to get a sense of what backprop in a convolutional network looks like.
#
# When in an earlier course you implemented a simple (fully connected) neural network, you used backpropagation to compute the derivatives with respect to the cost to update the parameters. Similarly, in convolutional neural networks you need to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are not trivial and we did not derive them in lecture, but we briefly presented them below.
#
# ### 5.1 - Convolutional layer backward pass
#
# Let's start by implementing the backward pass for a CONV layer.
#
# #### 5.1.1 - Computing dA:
# This is the formula for computing $dA$ with respect to the cost for a certain filter $W_c$ and a given training example:
#
# $$ dA += \sum _{h=0} ^{n_H} \sum_{w=0} ^{n_W} W_c \times dZ_{hw} \tag{1}$$
#
# Where $W_c$ is a filter and $dZ_{hw}$ is a scalar corresponding to the gradient of the cost with respect to the output of the conv layer Z at the hth row and wth column (corresponding to the dot product taken at the ith stride left and jth stride down). Note that each time, we multiply the same filter $W_c$ by a different dZ when updating dA. We do so mainly because when computing the forward propagation, each filter is dotted and summed by a different a_slice. Therefore when computing the backprop for dA, we are just adding the gradients of all the a_slices.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
# ```
#
# #### 5.1.2 - Computing dW:
# This is the formula for computing $dW_c$ ($dW_c$ is the derivative of one filter) with respect to the loss:
#
# $$ dW_c += \sum _{h=0} ^{n_H} \sum_{w=0} ^ {n_W} a_{slice} \times dZ_{hw} \tag{2}$$
#
# Where $a_{slice}$ corresponds to the slice which was used to generate the activation $Z_{ij}$. Hence, this ends up giving us the gradient for $W$ with respect to that slice. Since it is the same $W$, we will just add up all such gradients to get $dW$.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
# ```
#
# #### 5.1.3 - Computing db:
#
# This is the formula for computing $db$ with respect to the cost for a certain filter $W_c$:
#
# $$ db = \sum_h \sum_w dZ_{hw} \tag{3}$$
#
# As you have previously seen in basic neural networks, db is computed by summing $dZ$. In this case, you are just summing over all the gradients of the conv output (Z) with respect to the cost.
#
# In code, inside the appropriate for-loops, this formula translates into:
# ```python
# db[:,:,:,c] += dZ[i, h, w, c]
# ```
#
# **Exercise**: Implement the `conv_backward` function below. You should sum over all the training examples, filters, heights, and widths. You should then compute the derivatives using formulas 1, 2 and 3 above.
def conv_backward(dZ, cache):
"""
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
"""
### START CODE HERE ###
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = cache
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters["stride"]
pad = hparameters["pad"]
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = dZ.shape
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = np.zeros((m, n_H_prev, n_W_prev, n_C_prev))
dW = np.zeros((f, f, n_C_prev, n_C))
db = np.zeros((1, 1, 1, n_C))
# Pad A_prev and dA_prev
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = A_prev_pad[i]
da_prev_pad = dA_prev_pad[i]
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over the channels of the output volume
                    # Find the corners of the current "slice" (account for the stride)
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
# Use the corners to define the slice from a_prev_pad
a_slice = a_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :]
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
### END CODE HERE ###
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
np.random.seed(1)
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
# print(dA.shape)
# ** Expected Output: **
# <table>
# <tr>
# <td>
# **dA_mean**
# </td>
# <td>
# 9.60899067587
# </td>
# </tr>
# <tr>
# <td>
# **dW_mean**
# </td>
# <td>
# 10.5817412755
# </td>
# </tr>
# <tr>
# <td>
# **db_mean**
# </td>
# <td>
# 76.3710691956
# </td>
# </tr>
#
# </table>
#
# ## 5.2 Pooling layer - backward pass
#
# Next, let's implement the backward pass for the pooling layer, starting with the MAX-POOL layer. Even though a pooling layer has no parameters for backprop to update, you still need to backpropagate the gradient through the pooling layer in order to compute gradients for layers that came before the pooling layer.
#
# ### 5.2.1 Max pooling - backward pass
#
# Before jumping into the backpropagation of the pooling layer, you are going to build a helper function called `create_mask_from_window()` which does the following:
#
# $$ X = \begin{bmatrix}
# 1 && 3 \\
# 4 && 2
# \end{bmatrix} \quad \rightarrow \quad M =\begin{bmatrix}
# 0 && 0 \\
# 1 && 0
# \end{bmatrix}\tag{4}$$
#
# As you can see, this function creates a "mask" matrix which keeps track of where the maximum of the matrix is. True (1) indicates the position of the maximum in X, the other entries are False (0). You'll see later that the backward pass for average pooling will be similar to this but using a different mask.
#
# **Exercise**: Implement `create_mask_from_window()`. This function will be helpful for pooling backward.
# Hints:
# - [np.max()]() may be helpful. It computes the maximum of an array.
# - If you have a matrix X and a scalar x: `A = (X == x)` will return a matrix A of the same size as X such that:
# ```
# A[i,j] = True if X[i,j] = x
# A[i,j] = False if X[i,j] != x
# ```
# - Here, you don't need to consider cases where there are several maxima in a matrix.
def create_mask_from_window(x):
"""
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
"""
### START CODE HERE ### (≈1 line)
mask = x == np.max(x)
### END CODE HERE ###
return mask
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
# **Expected Output:**
#
# <table>
# <tr>
# <td>
#
# **x =**
# </td>
#
# <td>
#
# [[ 1.62434536 -0.61175641 -0.52817175] <br>
# [-1.07296862 0.86540763 -2.3015387 ]]
#
# </td>
# </tr>
#
# <tr>
# <td>
# **mask =**
# </td>
# <td>
# [[ True False False] <br>
# [False False False]]
# </td>
# </tr>
#
#
# </table>
# Why do we keep track of the position of the max? It's because this is the input value that ultimately influenced the output, and therefore the cost. Backprop is computing gradients with respect to the cost, so anything that influences the ultimate cost should have a non-zero gradient. So, backprop will "propagate" the gradient back to this particular input value that had influenced the cost.
# ### 5.2.2 - Average pooling - backward pass
#
# In max pooling, for each input window, all the "influence" on the output came from a single input value--the max. In average pooling, every element of the input window has equal influence on the output. So to implement backprop, you will now implement a helper function that reflects this.
#
# For example if we did average pooling in the forward pass using a 2x2 filter, then the mask you'll use for the backward pass will look like:
# $$ dZ = 1 \quad \rightarrow \quad dZ =\begin{bmatrix}
# 1/4 && 1/4 \\
# 1/4 && 1/4
# \end{bmatrix}\tag{5}$$
#
# This implies that each position in the $dZ$ matrix contributes equally to the output because in the forward pass, we took an average.
#
# **Exercise**: Implement the function below to equally distribute a value dz through a matrix of dimension shape. [Hint](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ones.html)
def distribute_value(dz, shape):
"""
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
"""
### START CODE HERE ###
# Retrieve dimensions from shape (≈1 line)
(n_H, n_W) = shape
# Compute the value to distribute on the matrix (≈1 line)
average = dz / (n_H * n_W)
# Create a matrix where every entry is the "average" value (≈1 line)
a = np.ones(shape) * average
### END CODE HERE ###
return a
a = distribute_value(2, (2,2))
print('distributed value =', a)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# distributed_value =
# </td>
# <td>
# [[ 0.5 0.5]
# <br/>
# [ 0.5 0.5]]
# </td>
# </tr>
# </table>
# ### 5.2.3 Putting it together: Pooling backward
#
# You now have everything you need to compute backward propagation on a pooling layer.
#
# **Exercise**: Implement the `pool_backward` function in both modes (`"max"` and `"average"`). You will once again use 4 for-loops (iterating over training examples, height, width, and channels). You should use an `if/elif` statement to see if the mode is equal to `'max'` or `'average'`. If it is equal to 'average' you should use the `distribute_value()` function you implemented above to create a matrix of the same shape as `a_slice`. Otherwise, the mode is equal to '`max`', and you will create a mask with `create_mask_from_window()` and multiply it by the corresponding value of dZ.
# +
def pool_backward(dA, cache, mode = "max"):
"""
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
"""
### START CODE HERE ###
# Retrieve information from cache (≈1 line)
(A_prev, hparameters) = cache
# Retrieve hyperparameters from "hparameters" (≈2 lines)
stride = hparameters["stride"]
f = hparameters["f"]
# Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
# Initialize dA_prev with zeros (≈1 line)
dA_prev = np.zeros(A_prev.shape)
for i in range(m): # loop over the training examples
# select training example from A_prev (≈1 line)
a_prev = A_prev[i]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
                    # Find the corners of the current "slice" (account for the stride) (≈4 lines)
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
# Compute the backward propagation in both modes.
if mode == "max":
# Use the corners and "c" to define the current slice from a_prev (≈1 line)
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
# Create the mask from a_prev_slice (≈1 line)
mask = create_mask_from_window(a_prev_slice)
# Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += np.multiply(mask, dA[i, h, w, c])
elif mode == "average":
# Get the value a from dA (≈1 line)
da = dA[i, h, w, c]
# Define the shape of the filter as fxf (≈1 line)
shape = (f, f)
# Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)
dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, c] += distribute_value(da, shape)
### END CODE ###
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
# +
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
# -
# **Expected Output**:
#
# mode = max:
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0. 0. ] <br>
# [ 5.05844394 -1.68282702] <br>
# [ 0. 0. ]]
# </td>
# </tr>
# </table>
#
# mode = average
# <table>
# <tr>
# <td>
#
# **mean of dA =**
# </td>
#
# <td>
#
# 0.145713902729
#
# </td>
# </tr>
#
# <tr>
# <td>
# **dA_prev[1,1] =**
# </td>
# <td>
# [[ 0.08485462 0.2787552 ] <br>
# [ 1.26461098 -0.25749373] <br>
# [ 1.17975636 -0.53624893]]
# </td>
# </tr>
# </table>
# ### Congratulations !
#
# Congratulation on completing this assignment. You now understand how convolutional neural networks work. You have implemented all the building blocks of a neural network. In the next assignment you will implement a ConvNet using TensorFlow.
| Convolutional Neural Networks/Convolution model - Step by Step - v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# +
from deep_dss.utils import *
import numpy as np
import healpy as hp
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
# -
# %matplotlib inline
# +
sigma8 = np.linspace(0.5, 1.2, num=201)
test_sig8 = np.random.choice(sigma8, 21, replace=False)
rsig8 = np.setdiff1d(sigma8, test_sig8)
train1_sig8 = np.random.choice(rsig8, 45, replace=False)
rsig8 = np.setdiff1d(rsig8, train1_sig8)
train2_sig8 = np.random.choice(rsig8, 45, replace=False)
rsig8 = np.setdiff1d(rsig8, train2_sig8)
train3_sig8 = np.random.choice(rsig8, 45, replace=False)
rsig8 = np.setdiff1d(rsig8, train3_sig8)
train4_sig8 = np.random.choice(rsig8, 45, replace=False)
rsig8 = np.setdiff1d(rsig8, train4_sig8)
# -
sigma8
test_sig8
train1_sig8
train2_sig8
train3_sig8
train4_sig8
# + active=""
#
| notebooks/.ipynb_checkpoints/FLASK v2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Multi-Layer Perceptrons
#
# To solve more complicated problems, we need to add additional layers. There are several immediate questions to consider:
# - How many perceptrons do I need?
# - How many layers is sufficient?
# - Should the hidden layers be larger or smaller than the input layer?
#
# This notebook will shed light on some of these questions through examples.
# ## XOR Problem
#
# Let's start out by creating the data representing the XOR problem.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib notebook
def calc_decision_boundary(weights):
    # weights = [bias, w1, w2] defines the line bias + w1*x + w2*y = 0
    x = -weights[0] / weights[1]  # x-intercept
    y = -weights[0] / weights[2]  # y-intercept
    m = -y / x                    # slope of the line through the two intercepts
    return np.array([m, y])       # [slope, y-intercept]
def gen_boundary_points(weights, m, b):
# If the slope is undefined, it is vertical.
if weights[2] != 0:
x = np.linspace(-5, 5, 100)
y = m * x + b
else:
x = np.zeros(100)
y = np.linspace(-5, 5, 100) + b
return x, y
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def dot(w, x):
x_bias = np.concatenate((np.ones((x.shape[0], 1)), x), axis=1)
return w @ x_bias.T
# +
# Define XOR inputs -- a constant of 1 is prepended inside dot() for the bias multiplication
samples = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])
targets = np.array([0, 1, 1, 0])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(samples[:, 0], samples[:, 1], c=targets)
# -
# What was the result of using a single perceptron to solve this problem?
#
# The best possible outcome is 75\% accuracy: a single linear decision boundary can classify at most 3 of the 4 XOR points correctly (the quick check after the next cell confirms this).
# +
# Classifier Parameters
weights = np.array([-0.5, 1, 1])
# For visualizing the line
m, b = calc_decision_boundary(weights)
x, y = gen_boundary_points(weights, m, b)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y, c='g')
ax.scatter(samples[:, 0], samples[:, 1], c=targets)
ax.set_xlim([-0.2, 1.2])
ax.set_ylim([-0.2, 1.2])
# -
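# As a quick check of that 75\% figure, here is a minimal sketch that reuses the `dot` and `sigmoid` helpers and the `weights` from the cell above: thresholding the sigmoid output at 0.5 classifies three of the four XOR points correctly.
# +
single_out = sigmoid(dot(weights, samples))    # raw activations, shape (4,)
single_pred = (single_out > 0.5).astype(int)   # threshold at 0.5
single_acc = np.mean(single_pred == targets)
print("Predictions:", single_pred)             # [0 1 1 1]: the (1, 1) point is misclassified
print("Accuracy: {:.0%}".format(single_acc))   # 75%
# -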
# ## Adding another perceptron
#
# We begin by adding a hidden layer with a single perceptron having a sigmoidal, nonlinear activation function.
#
# 
#
# If the hidden layer has only a single unit that produces a scalar output, then the initialization of our output perceptron changes. The weight matrix defining the output perceptron must have a weight for each incoming input. Since the hidden layer output is of size 1, the output perceptron only has a single weight.
#
# ## Forward Pass
#
# To compute the forward pass with a hidden layer, we must first transform the input into the hidden layer space before transforming the intermediate result into the output space.
#
# $$y = \sigma(\mathbf{w}_o \cdot \sigma(\mathbf{w}_h \cdot \mathbf{x}))$$
#
# We can write this in algorithmic form as
#
# layer_out = input
#
# for layer in layers:
# layer_out = layer(layer_out)
#
# return layer_out
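# Here is a minimal runnable sketch of that loop (an illustration rather than part of the original exercise). It reuses the `dot` and `sigmoid` helpers defined above and treats each layer as a single weight array; the weights below are the toy values used for the single-hidden-unit network in the next cell.
# +
def forward_pass(layer_weights, x):
    """Apply each layer in turn: affine transform (bias handled by dot) then sigmoid."""
    layer_out = x
    for w in layer_weights:
        z = dot(np.atleast_2d(w), layer_out)   # shape: (n_units, n_samples)
        layer_out = sigmoid(z).T               # back to (n_samples, n_units)
    return layer_out

toy_layers = [np.array([-0.5, 1, 1]),  # hidden layer: one unit, [bias, w1, w2]
              np.array([0, 1])]        # output layer: [bias, w]
print(forward_pass(toy_layers, samples).ravel())
# -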
# +
hidden_weights = np.array([-0.5, 1, 1])
out_weights = np.array([0, 1])
# For visualizing the line
hidden_m, hidden_b = calc_decision_boundary(hidden_weights)
hidden_x, hidden_y = gen_boundary_points(hidden_weights, hidden_m, hidden_b)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(hidden_x, hidden_y, c='g')
ax.scatter(samples[:, 0], samples[:, 1], c=targets)
ax.set_xlim([-0.2, 1.2])
ax.set_ylim([-0.2, 1.2])
# -
# A single perceptron in the hidden layer means that we still only have a single decision boundary. It seems intuitive at this point that adding another neuron would give us 2 different decision boundaries.
#
# 
# +
# hidden_weights = np.random.uniform(-1, 1, size=(2, 3))
# out_weights = np.random.uniform(-1, 1, size=(3,))
hidden_weights = np.array([[-0.5, 1, 1], [-1.5, 1, 1]])
out_weights = np.array([-0.22, 1.0, -1.0])
# For visualizing the line
hidden_m0, hidden_b0 = calc_decision_boundary(hidden_weights[0])
hidden_x0, hidden_y0 = gen_boundary_points(hidden_weights[0], hidden_m0, hidden_b0)
hidden_m1, hidden_b1 = calc_decision_boundary(hidden_weights[1])
hidden_x1, hidden_y1 = gen_boundary_points(hidden_weights[1], hidden_m1, hidden_b1)
out_m, out_b = calc_decision_boundary(out_weights)
out_x, out_y = gen_boundary_points(out_weights, out_m, out_b)
# Forward propagation
hidden_out = dot(hidden_weights, samples)
hidden_act = sigmoid(hidden_out)
print("Hidden layer BEFORE non-linearity")
print(hidden_out)
print("Hidden layer AFTER non-linearity")
print(hidden_act)
c = hidden_act.mean(1)
h_min = hidden_act.min(1)
h_max = hidden_act.max(1)
b = np.abs(h_max - h_min).max()
# Visualize hidden layer space
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.set_title("Hidden Layer Space")
ax1.plot(out_x, out_y, c='g')
ax1.scatter(hidden_act[0, :], hidden_act[1, :], c=targets)
ax1.set_xlim([c[0] - b, c[0] + b])
ax1.set_ylim([c[1] - b, c[1] + b])
# Forward pass finishing with final neuron
out = dot(out_weights, hidden_act.T)
print("Output BEFORE non-linearity")
print(out)
out_act = sigmoid(out)
print("Output AFTER non-linearity")
print(out_act)
# Visualize input space
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.set_title("Input Space")
ax2.plot(hidden_x0, hidden_y0, c='g')
ax2.plot(hidden_x1, hidden_y1, c='g')
ax2.scatter(samples[:, 0], samples[:, 1], c=targets)
ax2.set_xlim([-0.2, 1.2])
ax2.set_ylim([-0.2, 1.2])
| neural_networks/mlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:plotting_intro]
# language: python
# name: conda-env-plotting_intro-py
# ---
# # Python Plotting - An Introduction
# ## Introduction
# This notebook takes you through many different types of plot you'll come across in the atmospheric sciences. We'll use real climate data and some model output where appropriate.
#
# You'll need to download the BEST dataset - on a Linux machine this can be done straightforwardly by running `wget http://berkeleyearth.lbl.gov/auto/Global/Gridded/Land_and_Ocean_LatLong1.nc` in the `data` folder.
#
# Please send any comments or suggestions to dcw32.wade - at - gmail.com.
#
#Import all the packages we need now! This will take a while
import cartopy.crs as ccrs
import numpy as np
import matplotlib.pylab as plt
import math as m
import os
from netCDF4 import Dataset
import pandas as pd
#Specific packages
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import scipy.ndimage as ndimage
# ## Scatter plots and errorbars with Farman et al 1985
# In this section we will plot the October mean ozone from 1957 to 1984. This long-term record of column ozone allowed for the detection of the ozone hole over Antarctica. The strong springtime depletion supported the role of heterogeneous chemistry.
#Read in all the files
#These have been digitised from the original figure
loc='data/'
farman1=np.genfromtxt(loc+'farman_o32.csv',delimiter=',',skip_header=1)
farman2=np.genfromtxt(loc+'farman_f11.csv',delimiter=',',skip_header=1)
farman3=np.genfromtxt(loc+'farman_f12.csv',delimiter=',',skip_header=1)
#Take an example to print
print(farman1)
print(farman1.shape)
#Ozone data
o3_t=farman1[:,0]
o3_mu=farman1[:,1] #DU
o3_up=farman1[:,2] #DU
o3_lo=farman1[:,3] #DU
#F-11 data
f11_t=farman2[:,0]
f11_val=farman2[:,1] #pptv
#F-12 data
f12_t=farman3[:,0]
f12_val=farman3[:,1] #pptv
#Rough and ready plot
plt.scatter(o3_t,o3_mu,marker='x',c='k')
plt.show()
#Now we want to include the upper and lower values on our plot
fig,ax=plt.subplots()
#better to create an axis object, then plot to that - makes things
#easier when you want to plot multiple things on the same graph!
ax.errorbar(o3_t,o3_mu,yerr=[o3_mu-o3_lo,o3_up-o3_mu],fmt='_',c='k',capthick=0)
#Same ticks as the Farman plot:
#Sets major xticks to given values
ax.set_xticks([1960,1970,1980])
#Sets minor xticks every 2 years
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.set_yticks([200,300])
#Sets ylabel
ax.set_ylabel('Ozone Column / DU')
ax.yaxis.set_minor_locator(ticker.MultipleLocator(20))
plt.show()
#def make_patch_spines_invisible(ax):
# ax.set_frame_on(True)
# ax.patch.set_visible(False)
# for sp in ax.spines.values():
# sp.set_visible(False)
# To include the F-11, F-12 values, we need to do it slightly differently:
#ax = host_subplot(111, axes_class=AA.Axes)
fig,ax=plt.subplots(figsize=(5,6))
#Now want to create a second axis
ax1 = ax.twinx() #Share x axis with the ozone
#
#Plot as before
ax.errorbar(o3_t,o3_mu,yerr=[o3_mu-o3_lo,o3_up-o3_mu],fmt='_',c='k',capthick=0)
#Now plot the scatter data
ax1.scatter(f11_t,f11_val,c='k',marker='o')
ax1.scatter(f12_t,f12_val/2.,facecolors='none', edgecolors='k',marker='o')
#
ax.set_xticks([1960,1970,1980])
ax.xaxis.set_minor_locator(ticker.MultipleLocator(2))
ax.set_yticks([200,300])
ax.yaxis.set_minor_locator(ticker.MultipleLocator(20))
#Note that matm cm in the original paper is identical to the Dobson unit
ax.set_ylabel('Column Ozone / DU',fontsize=12)
#Xlims
ax.set_xlim(1956,1986)
ax.set_ylim(170.,350.)
#Reverse y axis
ax1.set_ylim(300,-60)
ax1.set_yticks([-60,0,100,200])
ax1.set_yticks([50,150],minor=True)
ax1.set_yticklabels(["F11".center(5)+"F12".center(5),
"0".center(7)+"0".center(7),
"100".center(5)+"200".center(5),
"200".center(5)+"400".center(5)
])
#Write October on the plot in the bottom left corner
ax.annotate('October',xy=(1960,200),horizontalalignment='center',fontsize=12)
plt.savefig('/homes/dcw32/figures/farman.png',bbox_inches='tight',dpi=200)
plt.show()
# + language="bash"
# echo "hello from $BASH"
# -
# ## Line and bar charts with the NAO index
#
#Extract the NAO data
nao_data=np.genfromtxt('data/nao.dat',skip_header=4)[:192,:] #No 2017 as incomplete
print(nao_data.shape)
print(nao_data[:,0]) #Calendar years
#
#For the NAO index we want the DJF (December, January, February averages)
#Remove the first year (as only taking December) using [1:,0] meaning index 1 onwards
years=nao_data[1:,0]
#
#Initialize
nao_djf=np.zeros(len(years))
# Take the December of the previous year [i] then the January and February of the current year [i+1] and average
# Note that `years` doesn't include the first year, hence the offset of i and i+1 (would otherwise be i-1 and i)
for i in range(len(years)):
nao_djf[i]=np.mean([nao_data[i,12],nao_data[i+1,1],nao_data[i+1,2]])
#def running_mean(x, N):
# cumsum = np.cumsum(np.insert(x, 0, 0))
# return (cumsum[N:] - cumsum[:-N]) / N
# +
#nao_running=running_mean(nao_djf,11)
#print nao_running.shape
#print years[2:-3].shape
# -
fig,ax=plt.subplots(figsize=(6,4))
#Barchart - all negative values in blue
ax.bar(years[nao_djf<0],nao_djf[nao_djf<0],color='#0018A8',edgecolor='#0018A8')
#Barchart - all positive values in red
ax.bar(years[nao_djf>0],nao_djf[nao_djf>0],color='#ED2939',edgecolor='#ED2939')
#Plot the smoothed field - use a Gaussian filter
ax.plot(years,ndimage.filters.gaussian_filter(nao_djf,2.),c='k',linewidth=4)
#Set limits
ax.set_xlim([np.min(years),np.max(years)])
ax.set_ylim([-3.5,3.5])
#Plot the zero line
ax.axhline(0.,c='k')
#Decrease label pad to make it closer to the axis
ax.set_ylabel('NAO index',labelpad=-3,fontsize=14)
plt.savefig('/homes/dcw32/figures/nao.png',bbox_inches='tight',dpi=200)
plt.show()
# ## Plot of the Berkeley Earth data
sat_file=Dataset('data/Land_and_Ocean_LatLong1.nc')
#This will raise a warning due to the missing data for early points
sata=sat_file.variables['temperature'][:]
sat_clim=sat_file.variables['climatology'][:]
times=sat_file.variables['time'][:]
lons=sat_file.variables['longitude'][:]
print(lons.shape)
lats=sat_file.variables['latitude'][:]
print(lats.shape)
print(sata.shape)
sata=sata[np.logical_and(times>1950,times<2017),:,:]
times=times[np.logical_and(times>1950,times<2017)]
print(sata.shape)
best_sata=np.reshape(sata,[12,sata.shape[0]//12,180,360])
nyrs=len(times)//12
print(nyrs)
yrs=np.zeros(nyrs)
annual_data=np.zeros([nyrs,len(lats),len(lons)])
for i in range(nyrs):
annual_data[i,:,:]=np.mean(sata[12*i:12*i+12,:,:],axis=0)
yrs[i]=np.mean(times[12*i:12*i+12])
yrs=yrs-0.5
zonal_annual=np.mean(annual_data,axis=2)
def gbox_areas(x,y):
# lats x lons
area=np.zeros([x,y])
R=6.371E6
for j in range(x):
area[j,:]=(R**2)*m.radians(360./y)*(m.sin(m.radians(90.-(j-0.5)*180./(x-1)))-m.sin(m.radians(90.-(180./(x-1))*(j+0.5))))
return area
areas=gbox_areas(len(lats),len(lons))
gmst=np.zeros(nyrs)
for i in range(nyrs):
gmst[i]=np.average(annual_data[i,:,:],weights=areas)
# +
fig,ax=plt.subplots(figsize=(6,4))
ax.fill_between(yrs, 0., gmst,where=gmst>=0,facecolor='#ED2939',interpolate=True)
ax.fill_between(yrs, 0., gmst,where=gmst<0,facecolor='#0018A8',interpolate=True)
#Remove the right and top axes and make the ticks come out of the plot
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(axis='y', direction='out')
ax.tick_params(axis='x', direction='out')
#
ax.set_xlim([np.min(yrs),np.max(yrs)])
ax.set_ylim([-0.2,1.0])
ax.set_ylabel(r'GMST Anomaly / $\degree$C')
#ax.plot(yrs,gmst,c='k',linewidth=2)
plt.show()
# -
#Contour plot
#This function shifts a colormap with uneven levels
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.linspace(start, stop, 257)
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap
fig=plt.figure()
ax1=fig.add_subplot(111)
cmap=plt.get_cmap('RdBu_r')
levs=[-0.9,-0.3,0.3,0.9,1.5,2.1]
cmap=shiftedColorMap(cmap,0.30)
cf1=ax1.contourf(yrs,lats,np.transpose(zonal_annual),levs,cmap=cmap,extend='both')
ax1.set_yticks([-90,-45,0,45,90])
ax1.set_yticklabels(["90S","45S","EQ","45N","90N"])
fig=plt.figure()
ax2=fig.add_subplot(111)
cf2=ax2.contourf(yrs,np.sin(np.pi*lats/180.),np.transpose(zonal_annual),levs,cmap=cmap,extend='both')
ax2.set_yticks([-1.0,-0.5,0.0,0.5,1.0])
ax2.set_yticklabels(['90S','30S','EQ','30N','90N'])
cbaxes=fig.add_axes([0.15, 0.00, 0.7, 0.03])
cbar=plt.colorbar(cf1,cax=cbaxes,orientation="horizontal")
#cbar=plt.colorbar(cf2,orientation='horizontal',pad=0.15)
cbar.set_label('Surface Air Temperature Anomaly (1951-1980) / $\degree$C',fontsize=10)
plt.show()
#Note that the top plot is equal in latitude
#while the bottom plot is equal in area
#The high latitude warming is more accentuated in the top plot
#If your interest is global mean, the bottom plot is more appropriate
#If you want to highlight the high latitudes, the top plot is more appropriate
# ### Global map projections and regional plots with Cartopy
#
gs=gridspec.GridSpec(2,1)
gs.update(left=0.05, right=0.95, hspace=-0.2)
levs=[10.,20.,30.,40.,50.] # These are the plotting levels
extend='both' # Extend the colorbar above/below? Options are 'max','min','neither','both'
colmap='RdBu_r' # colorscales, google "matplotlib colormaps" for other options
colmap=plt.cm.get_cmap(colmap)
colmap=shiftedColorMap(colmap,0.30)
levs=[-1.0,-0.2,0.2,1.0,1.8,2.6,3.4]
# Want to extract the SST for 2016
sst_2016=annual_data[np.where(yrs==2016)[0][0],:,:]
#Create new figure
fig=plt.figure(figsize=(5,8))
#Use a Robinson projection, draw coastlines
im0=fig.add_subplot(gs[0],projection=ccrs.Robinson(central_longitude=0))
#im0=plt.axes(projection=ccrs.Robinson(central_longitude=0))
im0.coastlines()
im0.set_global()
#im1 is a reduced plot
im1=fig.add_subplot(gs[1],projection=ccrs.PlateCarree())
im1.set_extent([-25,40,30,70])
im1.coastlines()
#
#Trickery to get the colormap to append for the 'both' extension - insert levels above and below
levs2=np.insert(levs,0,levs[0]-1)
levs2=np.append(levs2,levs2[len(levs2)-1]+1)
# This normalises the levels so that if there are large differences between the sizes
# of bins that the colors are uniform
norm=colors.BoundaryNorm(levs2, ncolors=cmap.N, clip=True)
# Filled contour at defined levels
cay=im0.contourf(lons,lats,sst_2016,levs,transform=ccrs.PlateCarree(),cmap=colmap,extend=extend,norm=norm)
caz=im1.contourf(lons,lats,sst_2016,levs,transform=ccrs.PlateCarree(),cmap=colmap,extend=extend,norm=norm)
#Add colorbar, this is a more 'precise' way to add the colorbar by defining a new axis
cbaxes=fig.add_axes([0.05, 0.1, 0.9, 0.03])
cbar=plt.colorbar(cay,cax=cbaxes,orientation="horizontal")
cbar.set_label('2016 SAT Anomaly (1951-1980 Climatology) / $\degree$C')
#plt.suptitle('2016 Surface Temperature Anomaly (from 1951-1980)')
plt.savefig('/homes/dcw32/figures/best.png',bbox_inches='tight',dpi=200)
plt.show()
# ### Central England Temperature record vs BEST
# Extract the Met Office Central England Temperature record
#
cet_data=np.genfromtxt('data/cetml1659on.dat',skip_header=7)
fig=plt.figure(figsize=(4,4))
#1950-->2016
nyrs=2017-1950
sdate=np.where(cet_data[:,0]==1950)[0][0]
cet=np.zeros([12,nyrs])
for i in range(nyrs):
cet[:,i]=cet_data[sdate+i,1:13]
print(cet.shape)
#
# Assume that the CET can be represented by the grid boxes at 52.5N, 0.5W and 1.5W
x=np.where(lats==52.5)[0][0]
y=np.where(lons==-1.5)[0][0]
best_cet=np.mean(best_sata[:,:,x,y:y+2],axis=2)
for i in range(nyrs):
best_cet[:,i]=best_cet[:,i]+np.mean(sat_clim[:,x,y:y+2],axis=1)
print(best_cet.shape)
#
# Now plot
xmin=-4.
xmax=22.
plt.scatter(cet,best_cet,marker='.',c='darkred')
plt.plot(np.linspace(xmin,xmax,100),np.linspace(xmin,xmax,100),c='k',linestyle='--')
plt.xlabel(r'CET Monthly Mean Temperature / $\degree$C')
plt.xlim(xmin,xmax)
plt.ylim(xmin,xmax)
plt.ylabel(r'BEST Monthly Mean Temperature / $\degree$C')
plt.show()
# +
# Set names to plot and number of months
scenarios = ['Obs', 'Model']
months = list(range(1, 13))
# Make some random data:
var_obs = pd.DataFrame() # Start with empty dataframes
var_model = pd.DataFrame()
N_data = nyrs
# Loop through months of years, feeding with random distributions
for month in months:
var_obs[month] = cet[month-1,:]
var_model[month] = best_cet[month-1,:]
# Set plotting settings
scen_colours = {'Obs': 'black', 'Model': 'red'}
scen_lstyle = {'Obs': '-', 'Model': '-.'}
scen_marker = {'Obs': 'o', 'Model': 'v'}
scen_flier = {'Obs': '+', 'Model': 'x'}
labels = {'Obs': 'CET Record', 'Model': 'BEST Reconstruction'}
labelsxy = {'Obs': [0.05,0.9], 'Model': [0.05,0.85]}
linewidth = 2.5
# Combine data into dict
var_all = {'Obs': var_obs, 'Model': var_model}
# Set plotting options for each scenario
displace_vals = [-.2, 0.2]
widths = 0.3
markersize = 3
# Set percentiles for whiskers
whis_perc = [5, 95]
showfliers = True
showmeans = True
# Open figure
fig = plt.figure(1, figsize=[8.5,4.5])
ax = fig.add_axes([0.15, 0.15, 0.65, 0.75])
# Loop over months and scenarios
for month in months:
for iscen, scen in enumerate(scenarios):
# Load data
data = var_all[scen][month]
# Make plotting option dicts for boxplot function
meanprops = dict(marker=scen_marker[scen],
markerfacecolor=scen_colours[scen],
markeredgecolor=scen_colours[scen]
)
boxprops = dict(linestyle=scen_lstyle[scen],
linewidth=linewidth,
color=scen_colours[scen]
)
medianprops = dict(linestyle=scen_lstyle[scen],
linewidth=linewidth,
color=scen_colours[scen]
)
whiskerprops = dict(linestyle=scen_lstyle[scen],
linewidth=linewidth,
color=scen_colours[scen]
)
capprops = dict(linestyle=scen_lstyle[scen],
linewidth=linewidth,
color=scen_colours[scen]
)
flierprops = dict(marker=scen_flier[scen],
markerfacecolor=scen_colours[scen],
markeredgecolor=scen_colours[scen]
)
# Plot data for this month and scenario
plt.boxplot(data, positions=[month+displace_vals[iscen]],
showmeans=showmeans, whis=whis_perc,
showfliers=showfliers, flierprops=flierprops,
meanprops=meanprops, medianprops=medianprops,
boxprops=boxprops, whiskerprops=whiskerprops,
capprops=capprops, widths=widths
)
ax.annotate(labels[scen],xy=labelsxy[scen],xycoords='axes fraction',color=scen_colours[scen])
# Set axis labels
ax.set_title('Central England Temperature')
ax.set_xlim([months[0]-1, months[-1]+1])
ax.set_xticks(months)
ax.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'], fontsize=12)
#ax.set_xlabel('Month of Year')
# ax.set_ylim(ymin,ymax)
ax.set_ylabel(r'Monthly Mean Temperature / $\degree$C')
plt.savefig('/homes/dcw32/figures/best_boxwhisker.png',transparent=True,bbox_inches='tight',dpi=200)
plt.show()
# -
# ## Surface Ozone - Trends and Spectral Decomposition
# To come!
| plotting_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import print_function, division
import uproot
import numpy as np
#Make sure to install both old awkward0 and new awkward1(referred to now as awkward)
import awkward1 as ak
import awkward0 as ak0
from coffea.nanoevents import NanoAODSchema,NanoEventsFactory
from uproot3_methods import TLorentzVectorArray
import uproot3_methods
import numpy as np
import coffea.hist as hist
import matplotlib.pyplot as plt
import awkward
class HackSchema(NanoAODSchema):
def __init__(self, base_form):
base_form["contents"].pop("Muon_fsrPhotonIdx", None)
base_form["contents"].pop("Electron_photonIdx", None)
super().__init__(base_form)
def m3_recon(tree):
comb= ak.combinations(tree,n=3,axis=1,fields=['j1','j2','j3'])
trijets= comb.j1+comb.j2+comb.j3
recon =ak.max(trijets,axis=1)
reconfinal=np.sqrt(recon.t*recon.t-recon.x*recon.x-recon.y*recon.y-recon.z*recon.z)
list1= ak.to_numpy(reconfinal)
return list1
files ="TTbarPowheg_Semilept_Skim_NanoAOD_1of21.root"
import coffea.processor as processor
from pprint import pprint
file=uproot.open(files)
nEvents=file['hEvents'].values[0]+file['hEvents'].values[2]
from pprint import pprint
events =NanoEventsFactory.from_root(files,schemaclass=HackSchema).events()
test = abs(events.GenPart.pdgId)==6
gevents=events.GenPart[test]
comb= ak.combinations(gevents,n=2,axis=1,fields=['j1','j2'])
deltar= comb['j1'].delta_r(comb['j2'])
deltar_min=ak.min(deltar,axis=1)
["data"]
# Bin edges from 0.00 to 0.99 in steps of 0.01
bins_l = [0.01 * x for x in range(100)]
import matplotlib.pyplot as plt
plt.hist(deltar_min,bins=bins_l)
#Introducing Delta_r cuts to M3 Recon
tight_jets=events.Jet
print(tight_jets)
jetSel = ak.num(tight_jets[((tight_jets.pt>30)&(tight_jets.eta<2.4)&(tight_jets.eta>-2.4))],axis=1)>=3
jetSelection=(jetSel&(ak.num(tight_jets.btagCSVV2>.4184)>=1))
tight_muons = events.Muon
muonsel=ak.num(tight_muons[((tight_muons.pt>30)&(abs(tight_muons.eta)<2.4))],axis=1)==1
tight_electrons= events.Electron
electronsel=ak.num(tight_electrons[((tight_electrons.pt>35)&(abs(tight_electrons.eta)<2.4))],axis=1)==1
leptonsel=(muonsel|electronsel)
print(leptonsel)
jetlepselmask = (jetSelection&leptonsel)
print((jetlepselmask))
print(events[jetlepselmask])
final=events[jetlepselmask]
postcuts_m3=m3_recon(events[jetlepselmask].Jet)
test_data1=[]
test_data2=[]
for j in final.GenJet[4:5]:
j0=j[0]
test_data1.append(j0.delta_r(j[1]))
test_data2.append(j0.delta_r(j[2]))
final.GenJet.partonFlavour[4]
final.GenJet[4][3].delta_r(final.GenJet[4][-2])
NonTag=final[final.GenJet.partonFlavour==0]
print(NonTag)
NonTag[4][1].delta_r(NonTag[4][2])
plt.hist(test_data1)
plt.hist(test_data2)
| .ipynb_checkpoints/GenParticlesAnalysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import qttk
import denali
# %run denali
# +
# Options Analysis
import os
import json
import pandas as pd
import numpy as np
import seaborn as sea
import matplotlib.pyplot as plt
# Load Data
def read_json(filename: str):
    # The context manager closes the file automatically
    with open(filename, "r") as f:
        data = json.load(f)
    return data
f1 = '$SPX.X-2021-05-23-price-hist.json'
f2 = '$SPX.X-2021-05-23-opt-chain.json'
price = read_json(f1)
opt_chain = read_json(f2)
# -
price.keys()
price_df = pd.DataFrame.from_dict(price['candles'])
epoch = price_df.iloc[:,5]
price_df.iloc[:,5] = pd.to_datetime(epoch, unit='ms')
price_df = price_df.set_index(price_df.iloc[:,5])
plt.figure(figsize=(25,5))
sea.lineplot(data=price_df[["close"]])
# # Processing Option Chain
# Column names: 'exp', 'price', then the per-contract field names (taken from the first contract in the call map)
option_keys = [['exp','price',*nodes] for exp, info in opt_chain["callExpDateMap"].items() for values in info.values() for nodes in values][0]
# Flatten the nested expiry -> strike -> [contracts] maps into one row per contract
call_option_data = [[exp, price, *nodes.values()] for exp, info in opt_chain["callExpDateMap"].items() for price, values in info.items() for nodes in values]
put_option_data = [[exp, price, *nodes.values()] for exp, info in opt_chain["putExpDateMap"].items() for price, values in info.items() for nodes in values]
last_price = price_df.tail(1)['close'][0]
#opt_chain['callExpDateMap']['2021-05-24:2']['410.0'][0]
option_df = pd.DataFrame(data=put_option_data, columns=option_keys)
# DataFrame.append was removed in recent pandas; concatenate the call rows instead
option_df = pd.concat([option_df, pd.DataFrame(data=call_option_data, columns=option_keys)])
option_df['price'] = pd.to_numeric(option_df['price'])
option_df['mark'] = pd.to_numeric(option_df['mark'])
option_df['last'] = pd.to_numeric(option_df['last'])
option_df[option_df['putCall'] =='PUT'].head()
option_df[option_df['putCall'] =='CALL'].head()
# # Plotting Call Skew
# +
strike_range = 150
call_filter = (option_df['putCall'] == "CALL")
call_skew_filter = call_filter & (option_df['price'] > last_price-strike_range) & (option_df['price'] < last_price+strike_range)
# option_df['exp'] == "2021-05-24:1") &
plt.figure(figsize=(20,10))
plt.axvline(x=last_price, ymin=0, ymax=1, label='Last Price', color='black', linestyle='--')
sea.lineplot(data=option_df[call_skew_filter],x='price',y='mark', hue='exp')
# -
# # Plotting Put Skew
# +
put_filter = (option_df['putCall'] == "PUT")
put_skew_filter = put_filter & (option_df['price'] > last_price-strike_range) & (option_df['price'] < last_price+strike_range)
# option_df['exp'] == "2021-05-24:1") &
plt.figure(figsize=(20,10))
plt.axvline(x=last_price, ymin=0, ymax=1, label='Last Price', color='black', linestyle='--')
sea.lineplot(data=option_df[put_skew_filter],x='price',y='mark', hue='exp')
# -
# # Finding Mispriced Options (Profit Opportunity!)
strike = 3475
strike_filter = (option_df['price'] == strike)
exp_filter = (option_df['exp'] == '2021-05-24:1' )
option_df[ put_filter & strike_filter & exp_filter]
# +
# We want to compare reciprocal prices for options CALL vs PUT to find gaps.
strike = 4120
strike_filter = (option_df['price'] == strike)
exp_filter = (option_df['exp'] == '2021-05-24:1' )
# Parity Formula: C + PV(x) = P + S
# Call price + (Strike * interest) = Put Price + Current Price
#parity_df = pd.Dataframe(columns=['exp', 'strike','call_price', 'put_price', 'parity'])
parity_list = []
call_price = 0.0
put_price = 0.0
for exp in option_df['exp'].unique():
exp_filter = (option_df['exp'] == exp )
for strike in option_df[exp_filter]['price'].unique():
strike_filter = (option_df['price'] == strike)
call_price = option_df[call_filter & strike_filter & exp_filter]['mark'].apply(lambda x: x.item() if isinstance(x,pd.Series) else x)
put_price = option_df[put_filter & strike_filter & exp_filter]['mark'].apply(lambda x: x.item() if isinstance(x,pd.Series) else x)
#print(f"exp: {exp} strike:{strike} type:{option_df[put_filter & strike_filter & exp_filter]}")
#print(option_df[put_filter & strike_filter & exp_filter].values)
#print(call_price + strike - put_price + last_price)
parity = (call_price.item() + strike.item()) - (put_price.item() + last_price)
parity_item = {'exp': exp, 'strike':strike,'call_price':call_price.item(), 'put_price':put_price.item() , 'last_price':last_price, 'parity':parity }
parity_list.append(parity_item)
parity_df = pd.DataFrame(parity_list)
# -
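# As a worked example of the parity calculation above (the numbers are made up for illustration and are not taken from the loaded chain), with interest ignored so that PV(strike) is approximately the strike, matching the simplification in the loop:
# +
example_call, example_put = 45.0, 12.0         # hypothetical mid prices
example_strike, example_spot = 4120.0, 4150.0  # hypothetical strike and last price
example_parity = (example_call + example_strike) - (example_put + example_spot)
print(example_parity)  # 3.0 -> the call side looks about 3 points rich relative to put-call parity
# -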
plt.figure(figsize=(25,5))
sea.lineplot(data=parity_df, x='strike',y='parity',hue='exp')
| tdameritrade/SPX-options.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="icthLzm_fm7I" colab_type="text"
# # [Intracranial hemorrhage detection](http://kaggle.com/c/rsna-intracranial-hemorrhage-detection)
#
# Intracranial hemorrhage, bleeding that occurs inside the cranium, is a serious health problem requiring rapid and often intensive medical treatment. For example, intracranial hemorrhages account for approximately 10% of strokes in the U.S., where stroke is the fifth-leading cause of death. Identifying the location and type of any hemorrhage present is a critical step in treating the patient.
#
# Diagnosis requires an urgent procedure. When a patient shows acute neurological symptoms such as severe headache or loss of consciousness, highly trained specialists review medical images of the patient’s cranium to look for the presence, location and type of hemorrhage. The process is complicated and often time consuming.
#
# The goal of this notebook is to build an algorithm to detect acute intracranial hemorrhage and [its subtypes](https://www.kaggle.com/c/rsna-intracranial-hemorrhage-detection/overview/hemorrhage-types).
#
# ## [Launch this notebook in Google CoLab](https://colab.research.google.com/github/ihais-official/Intracranial_Hemorrhage_Detection/blob/master/notebook/Intracranial_hemorrhage_detection.ipynb)
#
#
# + [markdown] id="9FFTOaeayrpa" colab_type="text"
# # Step 00 -- Setting-up
# + id="g-cPm97Affbs" colab_type="code" colab={}
training_mode = True
setup = True
colab_mode = True
verbose = True
kaggle_kernelMode = False
download_rawData = True
# + id="UqjL3ZmRhtLR" colab_type="code" colab={}
MODEL_ID = 'seresnext50' #'seresnext101' #'EfficientNet-UNet'
EPOCHS = 5
BATCH_SIZE = 2
CLASS_SIZE = 4
IMG_HEIGHT = 256
IMG_WIDTH = 1600 if MODEL_ID == 'seresnext101' else 512
ORIG_IMG_HEIGHT = 256
ORIG_IMG_WIDTH = 1600
NUM_CHANNELS = 3
FILTER_IMG_HEIGHT = 256
FILTER_IMG_WIDTH = 256
FILTER_THRESHOLD = 0.5
FILTER_MODEL_ID = 'DenseNet-BC-201'
DATASET_ID = 'intracranial_hemorrhage_2019'
NOTEBOOK_ID = '{}_mask_{}'.format(DATASET_ID, MODEL_ID)
BACKBONE = MODEL_ID
HISTORY_FILE = 'history.json'
GOOGLE_DRIVE = '/content/drive/'
GOOGLE_DRIVE_DIR = '{}/My\ Drive/'.format(GOOGLE_DRIVE)
DATA_DIR = '/content/' if colab_mode else '../input/rsna-intracranial-hemorrhage-detection/' if kaggle_kernelMode else './'
OUTPUT_DIR = DATA_DIR if colab_mode else './'
WEIGHTS_DIR = DATA_DIR
PRETRAINED_WEIGHTS_DIR = DATA_DIR
CLASSIFIER_MODEL_DIR = PRETRAINED_WEIGHTS_DIR
TRAIN_DATA_FILENAME = '{}_stage_1_train.zip'.format(DATASET_ID)
TRAIN_DATA_LABEL_FILENAME = '{}_stage_1_train.csv.zip'.format(DATASET_ID)
SAMPLE_SUBMISSION_FILENAME = '{}_stage_1_sample_submission.csv.zip'.format(DATASET_ID)
TRAIN_DATA_DIR = '../input/rsna-intracranial-hemorrhage-detection/train_images' if kaggle_kernelMode else '{}/train/'.format(DATA_DIR)
TRAIN_DIR = '../tmp/train/' if kaggle_kernelMode else '/tmp/train/'
TEST_DATA_DIR = 'test_images' if kaggle_kernelMode else 'test'
TEST_DIR = '{}/{}/'.format(DATA_DIR,
TEST_DATA_DIR)
BACKBONE_WEIGHTS_FILENAME = 'efficientnet-b4_imagenet_1000_notop.h5'
BACKBONE_WEIGHTS = ('{}/{}'.format(BACKBONE_WEIGHTS_DIR,
BACKBONE_WEIGHTS_FILENAME)) if kaggle_kernelMode else 'ImageNet'
PRETRAINED_WEIGHTS_FILENAME = '{}_mask_{}_weights.h5'.format(DATASET_ID,
MODEL_ID)
PRETRAINED_WEIGHTS = '{}/{}'.format(PRETRAINED_WEIGHTS_DIR,
PRETRAINED_WEIGHTS_FILENAME)
CLASSIFIER_MODEL_FILENAME = '{}_missing_mask_predictions_{}_checkpoint.h5'.format(DATASET_ID,
FILTER_MODEL_ID) #'severstal_steel_defect_missing_mask_predictions_DesneNet-BC-169_checkpoint.h5'
CLASSIFIER_MODEL_WEIGHTS = '{}/{}'.format(CLASSIFIER_MODEL_DIR,
CLASSIFIER_MODEL_FILENAME)
CHECKPOINT_FILENAME = '{}_checkpoint.h5'.format(NOTEBOOK_ID)
CHECKPOINT_FILE = '{}/{}'.format(PRETRAINED_WEIGHTS_DIR,
CHECKPOINT_FILENAME)
SAMPLE_SUBMISSION_FILENAME = 'sample_submission.csv' if kaggle_kernelMode else '{}_sample_submission.csv'.format(DATASET_ID)
SUBMISSION_FILENAME = 'submission.csv' if kaggle_kernelMode else '{}_submission.csv'.format(NOTEBOOK_ID)
SUBMISSION_FILE = '{}/{}'.format(DATA_DIR,
SUBMISSION_FILENAME)
# + id="FoNW3RUyht-J" colab_type="code" outputId="771dcda3-ef8d-4dc3-c661-6b6d07241f75" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 74}
if setup and colab_mode and download_rawData:
from google.colab import files
files.upload()
# + id="r3r99UJmkMFw" colab_type="code" colab={}
if setup and colab_mode:
from google.colab import drive
drive.mount(GOOGLE_DRIVE)
# + id="7k_yxOk_iR64" colab_type="code" colab={}
if setup and colab_mode and download_rawData:
# ! mkdir /root/.kaggle/
# ! mv /content/kaggle.json /root/.kaggle/
# + id="O0UHn3HjiwOf" colab_type="code" colab={}
if setup and download_rawData and not kaggle_kernelMode:
# ! pip install --upgrade kaggle
# ! kaggle competitions download -p {GOOGLE_DRIVE_DIR} -c rsna-intracranial-hemorrhage-detection
# + id="3beT_ChbkodF" colab_type="code" outputId="b73f2ab1-87d2-4c7a-8347-9249df102bd2" colab={"base_uri": "https://localhost:8080/", "height": 85}
import time
if setup and download_rawData and not kaggle_kernelMode:
print ('Loading raw data using Kaggle API into Google Drive...')
start = time.time()
# ! mv /{DATA_DIR}/rsna-intracranial-hemorrhage-detection.zip {DATA_DIR}/{TRAIN_DATA_FILENAME}
# ! cp {DATA_DIR}/{TRAIN_DATA_FILENAME} /{GOOGLE_DRIVE_DIR}/
# ! cp /{DATA_DIR}/{CHECKPOINT_FILENAME} /{GOOGLE_DRIVE_DIR}/
print ('Loaded data into Google Drive in: {} seconds ...'.format(round(time.time()-start),2))
elif setup and colab_mode:
print ('Loading data and saved weights from Google drive ...')
start = time.time()
# ! cp {GOOGLE_DRIVE_DIR}/{TRAIN_DATA_FILENAME} {DATA_DIR}/
# ! cp {GOOGLE_DRIVE_DIR}/{CHECKPOINT_FILENAME} {DATA_DIR}/
# ! cp {GOOGLE_DRIVE_DIR}/{CLASSIFIER_MODEL_FILENAME} {DATA_DIR}/
print ('Loaded data and saved weights from Google drive ...')
print ('Loaded data from Google Drive in: {} seconds ...'.format(round(time.time()-start),2))
# + id="ropKkPaYCHOL" colab_type="code" colab={}
if setup:
# ! mkdir /{TRAIN_DATA_DIR}
# ! unzip -q {DATA_DIR}/{TRAIN_DATA_FILENAME} -d /{TRAIN_DATA_DIR}
# + id="yWfJrhGzH3X8" colab_type="code" colab={}
# ! mv {DATA_DIR}/{TRAIN_DATA_FILENAME} /{GOOGLE_DRIVE_DIR}
# + id="ITILwlr4OmPa" colab_type="code" colab={}
| notebook/Intracranial_hemorrhage_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8
# language: python
# name: python3
# ---
# <center>
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%204/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# # Hierarchical Clustering
#
# Estimated time needed: **25** minutes
#
# ## Objectives
#
# After completing this lab you will be able to:
#
# * Use scikit-learn to do Hierarchical clustering
# * Create dendrograms to visualize the clustering
#
# <h1>Table of contents</h1>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
# <li><a href="https://#hierarchical_agglomerative">Hierarchical Clustering - Agglomerative</a></li>
# <ol>
# <li><a href="https://#generating_data">Generating Random Data</a></li>
# <li><a href="https://#agglomerative_clustering">Agglomerative Clustering</a></li>
# <li><a href="https://#dendrogram">Dendrogram Associated for the Agglomerative Hierarchical Clustering</a></li>
# </ol>
# <li><a href="https://#clustering_vehicle_dataset">Clustering on the Vehicle Dataset</a></li>
# <ol>
# <li><a href="https://#data_cleaning">Data Cleaning</a></li>
# <li><a href="https://#clustering_using_scipy">Clustering Using Scipy</a></li>
# <li><a href="https://#clustering_using_skl">Clustering using scikit-learn</a></li>
# </ol>
# </ol>
# </div>
# <br>
# <hr>
#
# <h1 id="hierarchical_agglomerative">Hierarchical Clustering - Agglomerative</h1>
#
# We will be looking at a clustering technique, which is <b>Agglomerative Hierarchical Clustering</b>. Remember that agglomerative is the bottom up approach. <br> <br>
# In this lab, we will be looking at Agglomerative clustering, which is more popular than Divisive clustering. <br> <br>
# We will also be using Complete Linkage as the Linkage Criteria. <br> <b> <i> NOTE: You can also try using Average Linkage wherever Complete Linkage would be used to see the difference! </i> </b>
#
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.cluster import hierarchy
from scipy.spatial import distance_matrix
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import make_blobs
# %matplotlib inline
# <hr>
# <h3 id="generating_data">Generating Random Data</h3>
# We will be generating a set of data using the <b>make_blobs</b> class. <br> <br>
# Input these parameters into make_blobs:
# <ul>
# <li> <b>n_samples</b>: The total number of points equally divided among clusters. </li>
# <ul> <li> Choose a number from 10-1500 </li> </ul>
# <li> <b>centers</b>: The number of centers to generate, or the fixed center locations. </li>
# <ul> <li> Choose arrays of x,y coordinates for generating the centers. Have 1-10 centers (ex. centers=[[1,1], [2,5]]) </li> </ul>
# <li> <b>cluster_std</b>: The standard deviation of the clusters. The larger the number, the further apart the clusters</li>
# <ul> <li> Choose a number between 0.5-1.5 </li> </ul>
# </ul> <br>
# Save the result to <b>X1</b> and <b>y1</b>.
#
X1, y1 = make_blobs(n_samples=50, centers=[[4,4], [-2, -1], [1, 1], [10,4]], cluster_std=0.9)
# Plot the scatter plot of the randomly generated data.
#
plt.scatter(X1[:, 0], X1[:, 1], marker='o')
# <hr>
# <h3 id="agglomerative_clustering">Agglomerative Clustering</h3>
#
# We will start by clustering the random data points we just created.
#
# The <b> Agglomerative Clustering </b> class will require two inputs:
#
# <ul>
# <li> <b>n_clusters</b>: The number of clusters to form as well as the number of centroids to generate. </li>
# <ul> <li> Value will be: 4 </li> </ul>
# <li> <b>linkage</b>: Which linkage criterion to use. The linkage criterion determines which distance to use between sets of observation. The algorithm will merge the pairs of cluster that minimize this criterion. </li>
# <ul>
# <li> Value will be: 'complete' </li>
# <li> <b>Note</b>: It is recommended you try everything with 'average' as well </li>
# </ul>
# </ul> <br>
# Save the result to a variable called <b> agglom </b>.
#
agglom = AgglomerativeClustering(n_clusters = 4, linkage = 'average')
# Fit the model with <b> X1 </b> and <b> y1 </b> from the generated data above.
#
agglom.fit(X1,y1)
# Run the following code to show the clustering! <br>
# Remember to read the code and comments to gain more understanding on how the plotting works.
#
# +
# Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(6,4))
# These two steps scale the data points down;
# otherwise the data points will be scattered very far apart.
# Compute the per-feature minimum and maximum of X1.
x_min, x_max = np.min(X1, axis=0), np.max(X1, axis=0)
# Rescale X1 to the [0, 1] range (min-max normalization).
X1 = (X1 - x_min) / (x_max - x_min)
# This loop displays all of the datapoints.
for i in range(X1.shape[0]):
# Replace the data points with their respective cluster value
# (ex. 0) and is color coded with a colormap (plt.cm.spectral)
plt.text(X1[i, 0], X1[i, 1], str(y1[i]),
color=plt.cm.nipy_spectral(agglom.labels_[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
# Remove the x ticks, y ticks, x and y axis
plt.xticks([])
plt.yticks([])
#plt.axis('off')
# Display the plot of the original data before clustering
plt.scatter(X1[:, 0], X1[:, 1], marker='.')
# Display the plot
plt.show()
# -
# <h3 id="dendrogram">Dendrogram Associated for the Agglomerative Hierarchical Clustering</h3>
#
# Remember that a <b>distance matrix</b> contains the <b> distance from each point to every other point of a dataset </b>.
#
# Use the function <b> distance_matrix, </b> which requires <b>two inputs</b>. Use the Feature Matrix, <b> X1 </b> as both inputs and save the distance matrix to a variable called <b> dist_matrix </b> <br> <br>
# Remember that the distance values are symmetric, with a diagonal of 0's. This is one way of making sure your matrix is correct. <br> (print out dist_matrix to make sure it's correct)
#
dist_matrix = distance_matrix(X1,X1)
print(dist_matrix)
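# A quick way to confirm the two properties mentioned above on the matrix we just computed:
print("Symmetric:", np.allclose(dist_matrix, dist_matrix.T))
print("Zero diagonal:", np.allclose(np.diag(dist_matrix), 0))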
# Using the <b> linkage </b> class from hierarchy, pass in the parameters:
#
# <ul>
# <li> The distance matrix </li>
# <li> 'complete' for complete linkage </li>
# </ul> <br>
# Save the result to a variable called <b> Z </b>.
#
Z = hierarchy.linkage(dist_matrix, 'complete')
# A hierarchical clustering is typically visualized as a dendrogram, as shown in the following cell. Each merge is represented by a horizontal line. The y-coordinate of the horizontal line is the similarity of the two clusters that were merged, where the individual data points are viewed as singleton clusters.
# By moving up from the bottom layer to the top node, a dendrogram allows us to reconstruct the history of merges that resulted in the depicted clustering.
#
# Next, we will save the dendrogram to a variable called <b>dendro</b>. In doing this, the dendrogram will also be displayed.
# Using the <b> dendrogram </b> class from hierarchy, pass in the parameter:
#
# <ul> <li> Z </li> </ul>
#
dendro = hierarchy.dendrogram(Z)
# ## Practice
#
# We used **complete** linkage for our case, change it to **average** linkage to see how the dendrogram changes.
#
Z = hierarchy.linkage(dist_matrix, 'average')
dendro = hierarchy.dendrogram(Z)
# <hr>
# <h1 id="clustering_vehicle_dataset">Clustering on Vehicle dataset</h1>
#
# Imagine that an automobile manufacturer has developed prototypes for a new vehicle. Before introducing the new model into its range, the manufacturer wants to determine which existing vehicles on the market are most like the prototypes--that is, how vehicles can be grouped, which group is the most similar with the model, and therefore which models they will be competing against.
#
# Our objective here, is to use clustering methods, to find the most distinctive clusters of vehicles. It will summarize the existing vehicles and help manufacturers to make decision about the supply of new models.
#
# ### Download data
#
# To download the data, we will use **`!wget`** to download it from IBM Object Storage.\
# **Did you know?** When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
#
# !wget -O cars_clus.csv https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ML0101EN-SkillsNetwork/labs/Module%204/data/cars_clus.csv
# ## Read data
#
# Let's read dataset to see what features the manufacturer has collected about the existing models.
#
# +
filename = 'cars_clus.csv'
#Read csv
pdf = pd.read_csv(filename)
print ("Shape of dataset: ", pdf.shape)
pdf.head(5)
# -
# The feature sets include price in thousands (price), engine size (engine_s), horsepower (horsepow), wheelbase (wheelbas), width (width), length (length), curb weight (curb_wgt), fuel capacity (fuel_cap) and fuel efficiency (mpg).
#
# <h2 id="data_cleaning">Data Cleaning</h2>
#
# Let's clean the dataset by dropping the rows that have null value:
#
print ("Shape of dataset before cleaning: ", pdf.size)
pdf[[ 'sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']] = pdf[['sales', 'resale', 'type', 'price', 'engine_s',
'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap',
'mpg', 'lnsales']].apply(pd.to_numeric, errors='coerce')
pdf = pdf.dropna()
pdf = pdf.reset_index(drop=True)
print ("Shape of dataset after cleaning: ", pdf.size)
pdf.head(5)
# ### Feature selection
#
# Let's select our feature set:
#
featureset = pdf[['engine_s', 'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap', 'mpg']]
# ### Normalization
#
# Now we can normalize the feature set. **MinMaxScaler** transforms features by scaling each feature to a given range. It is by default (0, 1). That is, this estimator scales and translates each feature individually such that it is between zero and one.
#
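# Concretely, for each feature column the scaler applies
#
# $$x_{scaled} = \frac{x - x_{min}}{x_{max} - x_{min}}$$
#
# with $x_{min}$ and $x_{max}$ taken per column, so every transformed value lies in [0, 1].
#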
from sklearn.preprocessing import MinMaxScaler
x = featureset.values #returns a numpy array
min_max_scaler = MinMaxScaler()
feature_mtx = min_max_scaler.fit_transform(x)
feature_mtx [0:5]
# <h2 id="clustering_using_scipy">Clustering using Scipy</h2>
#
# In this part we use Scipy package to cluster the dataset.
#
# First, we calculate the distance matrix.
#
import scipy
leng = feature_mtx.shape[0]
D = np.zeros([leng,leng])  # scipy.zeros is gone in modern SciPy; use NumPy
for i in range(leng):
for j in range(leng):
D[i,j] = scipy.spatial.distance.euclidean(feature_mtx[i], feature_mtx[j])
D
# In agglomerative clustering, at each iteration, the algorithm must update the distance matrix to reflect the distance of the newly formed cluster with the remaining clusters in the forest.
# The following methods are supported in Scipy for calculating the distance between the newly formed cluster and each:
# \- single
# \- complete
# \- average
# \- weighted
# \- centroid
#
# We use **complete** for our case, but feel free to change it to see how the results change.
#
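# As a small illustration of what the linkage criterion does (the cluster memberships below are chosen arbitrarily, just for the sketch): the "complete" distance between two clusters is the largest pairwise distance between their members, while "single" uses the smallest.
clusterA_idx, clusterB_idx = [0, 1], [2, 3]
pairwise = D[np.ix_(clusterA_idx, clusterB_idx)]
print("complete-linkage distance:", pairwise.max())
print("single-linkage distance:  ", pairwise.min())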
import pylab
import scipy.cluster.hierarchy
Z = hierarchy.linkage(D, 'complete')
# Essentially, Hierarchical clustering does not require a pre-specified number of clusters. However, in some applications we want a partition of disjoint clusters just as in flat clustering.
# So you can use a cutting line:
#
from scipy.cluster.hierarchy import fcluster
max_d = 3
clusters = fcluster(Z, max_d, criterion='distance')
clusters
# Also, you can determine the number of clusters directly:
#
from scipy.cluster.hierarchy import fcluster
k = 5
clusters = fcluster(Z, k, criterion='maxclust')
clusters
# Now, plot the dendrogram:
#
# +
fig = pylab.figure(figsize=(18,50))
def llf(id):
return '[%s %s %s]' % (pdf['manufact'][id], pdf['model'][id], int(float(pdf['type'][id])) )
dendro = hierarchy.dendrogram(Z, leaf_label_func=llf, leaf_rotation=0, leaf_font_size =12, orientation = 'right')
# -
# <h2 id="clustering_using_skl">Clustering using scikit-learn</h2>
#
# Let's redo it again, but this time using the scikit-learn package:
#
from sklearn.metrics.pairwise import euclidean_distances
dist_matrix = euclidean_distances(feature_mtx,feature_mtx)
print(dist_matrix)
Z_using_dist_matrix = hierarchy.linkage(dist_matrix, 'complete')
# +
fig = pylab.figure(figsize=(18,50))
def llf(id):
return '[%s %s %s]' % (pdf['manufact'][id], pdf['model'][id], int(float(pdf['type'][id])) )
dendro = hierarchy.dendrogram(Z_using_dist_matrix, leaf_label_func=llf, leaf_rotation=0, leaf_font_size =12, orientation = 'right')
# -
# Now, we can use the 'AgglomerativeClustering' function from scikit-learn library to cluster the dataset. The AgglomerativeClustering performs a hierarchical clustering using a bottom up approach. The linkage criteria determines the metric used for the merge strategy:
#
# * Ward minimizes the sum of squared differences within all clusters. It is a variance-minimizing approach and in this sense is similar to the k-means objective function but tackled with an agglomerative hierarchical approach.
# * Maximum or complete linkage minimizes the maximum distance between observations of pairs of clusters.
# * Average linkage minimizes the average of the distances between all observations of pairs of clusters.
#
# +
agglom = AgglomerativeClustering(n_clusters = 6, linkage = 'complete')
agglom.fit(dist_matrix)
agglom.labels_
# -
# We can add a new field to our dataframe to show the cluster of each row:
#
pdf['cluster_'] = agglom.labels_
pdf.head()
# +
import matplotlib.cm as cm
n_clusters = max(agglom.labels_)+1
colors = cm.rainbow(np.linspace(0, 1, n_clusters))
cluster_labels = list(range(0, n_clusters))
# Create a figure of size 6 inches by 4 inches.
plt.figure(figsize=(16,14))
for color, label in zip(colors, cluster_labels):
subset = pdf[pdf.cluster_ == label]
for i in subset.index:
plt.text(subset.horsepow[i], subset.mpg[i],str(subset['model'][i]), rotation=25)
plt.scatter(subset.horsepow, subset.mpg, s= subset.price*10, c=color, label='cluster'+str(label),alpha=0.5)
# plt.scatter(subset.horsepow, subset.mpg)
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')
# -
# As you can see, we are seeing the distribution of each cluster using the scatter plot, but it is not very clear where is the centroid of each cluster. Moreover, there are 2 types of vehicles in our dataset, "truck" (value of 1 in the type column) and "car" (value of 0 in the type column). So, we use them to distinguish the classes, and summarize the cluster. First we count the number of cases in each group:
#
pdf.groupby(['cluster_','type'])['cluster_'].count()
# Now we can look at the characteristics of each cluster:
#
agg_cars = pdf.groupby(['cluster_','type'])[['horsepow','engine_s','mpg','price']].mean()
agg_cars
# It is obvious that we have 3 main clusters with the majority of vehicles in those.
#
# **Cars**:
#
# * Cluster 1: with almost high mpg, and low in horsepower.
#
# * Cluster 2: with good mpg and horsepower, but higher price than average.
#
# * Cluster 3: with low mpg, high horsepower, highest price.
#
# **Trucks**:
#
# * Cluster 1: with almost highest mpg among trucks, and lowest in horsepower and price.
# * Cluster 2: with almost low mpg and medium horsepower, but higher price than average.
# * Cluster 3: with good mpg and horsepower, low price.
#
# Please notice that we did not use **type** or **price** of cars in the clustering process, but hierarchical clustering was still able to form the clusters and discriminate them with quite a high accuracy.
#
plt.figure(figsize=(16,10))
for color, label in zip(colors, cluster_labels):
subset = agg_cars.loc[(label,),]
for i in subset.index:
plt.text(subset.loc[i][0]+5, subset.loc[i][2], 'type='+str(int(i)) + ', price='+str(int(subset.loc[i][3]))+'k')
plt.scatter(subset.horsepow, subset.mpg, s=subset.price*20, c=color, label='cluster'+str(label))
plt.legend()
plt.title('Clusters')
plt.xlabel('horsepow')
plt.ylabel('mpg')
# <h2>Want to learn more?</h2>
#
# IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="https://www.ibm.com/analytics/spss-statistics-software?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01">SPSS Modeler</a>
#
# Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://www.ibm.com/cloud/watson-studio?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01">Watson Studio</a>
#
# ### Thank you for completing this lab!
#
# ## Author
#
# <NAME>
#
# ### Other Contributors
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkML0101ENSkillsNetwork20718538-2021-01-01" target="_blank"><NAME></a>
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | ---------- | --------------------------------------------------- |
# | 2021-01-11 | 2.2 | Lakshmi | Changed distance matrix in agglomerative clustering |
# | 2020-11-03 | 2.1 | Lakshmi | Updated URL |
# | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
#
# ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
#
| 9_Machine Learning with Python/4-2.Hierarchical-Cars.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
from pyspark.sql.types import (StructField,StringType,
IntegerType,StructType)
# Start the Spark session.
spark = SparkSession.builder.appName('Basics').getOrCreate()
# Read from a path relative to the one where the Jupyter program was
# started. Can also read from large distributed files like from HDFS,
# this may involve large datasets in Amazon EC2.
df = spark.read.json('Data/people.json')
# Missing data will be replaced with `null`.
df.show()
# Check the schema of the dataframe. Some type inferences may not be
# proper, like assuming an Integer has Long type.
df.printSchema()
# Check the column names. It's not a function but an attribute, so
# it can be called without ().
df.columns
# Count of elements in the dataframe.
df.count()
# Statistical summary of the dataframe.
df.describe().show()
# Some data types make it easier to infer schema (like tabular formats
# such as csv). Often we have to set the schema ourselves if we are
# dealing with a .read method that doesn't have inferSchema() built-in.
data_schema = [
# Name, type, and whether it can be null.
StructField('age', IntegerType(), True),
StructField('name', StringType(), True)
]
final_struct = StructType(fields=data_schema)
# Read the JSON but using the schema we defined.
df = spark.read.json('Data/people.json', schema=final_struct)
# Now age can be a integer instead of a long. Also by enforcing
# a schema we can guarantee constraints.
df.printSchema()
df.show()
# How do we actually grab data from our dataframe? This only returns a
# column object, we can validate by checking the return type.
type(df['age'])
# This will instead create a new data frame only with the
# column we want.
df.select('age').show()
# The type is a whole dataframe, not a column.
type(df.select('age'))
# This returns a list of row objects
type(df.head(2)[0])
# You can also select multiple columns!
df.select(['age', 'name']).show()
# You can add a new column, for each row it will take the value
# of the 'age' column object and multiply by two. This doesn't
# alter the original object.
df.withColumn('new_age', df['age'] * 2).show()
# This is a command to just rename columns and nothing else.
df.withColumnRenamed('age', 'new_age').show()
# A UDF (User defined function) will be a function that returns
# a value then converted into a Spark type (e.g. StringType)
# to be used in operations. For example if you want to create
# a new column, you can't just put the values in there, it has
# to contain a Column object, which this will return.
concat_udf = f.udf(lambda age, name: "{}.{}".format(age, name), StringType())
# If you want to add a new column and also drop an old one, use the UDF to
# concatenate age and name into a value that will be put into the new
# column.
df.withColumn('info', concat_udf(df.age, df.name)).drop('age')
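# Like withColumn, this returns a new DataFrame rather than modifying df in
# place; call .show() on the result to inspect it:
df.withColumn('info', concat_udf(df.age, df.name)).drop('age').show()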
# Now, let's do some SQL operations! Spark SQL can't reference the Python
# variable holding a DataFrame directly, so we first register it as a
# temporary view under a table name.
df.createOrReplaceTempView('people')
results = spark.sql('''
SELECT *
FROM people
WHERE age >= 25 AND age <= 35
''')
results.show()
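# For reference, the same filter can also be written with the DataFrame API
# instead of SQL (an equivalent sketch):
df.filter((df['age'] >= 25) & (df['age'] <= 35)).show()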
| 02_DataFrame_Basic_Operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 368} id="DGl4uKSenvb5" outputId="82cc603b-ee01-431d-d065-22e3eb168cb5"
#import libraries
import boto3
import csv
import json
import time
# +
# Connection to AWS
folder = '/home/propietari/Documents/claus/' # TODO: change to your local path
# file_name = 'AWS_S3_keys_JordiPlanas_Made_in_game.json' # TODO: Change to your filename
# folder = 'C:/Users/jordi/Documents/claus/' # TODO: change to your local path
file_name = 'AWS_S3_keys_wri.json' # TODO: Change to your filename
file = folder + file_name
with open(file, 'r') as credentials_file:
    credentials = json.load(credentials_file)
KEY = list(credentials)[0]
SECRET = list(credentials.values())[0]
# s3BucketName = "wri-testing"
s3BucketName = "wri-nlp-policy"
# region = 'eu-central-1'
region = "us-east-1"
s3 = boto3.resource(
service_name = 's3',
region_name = region,
aws_access_key_id = KEY,
aws_secret_access_key = SECRET
)
# -
prefix = "english_documents/raw_pdf/"
i = 0
for s3_object in s3.Bucket(s3BucketName).objects.all().filter(Prefix=prefix):
    srcKey = s3_object.key
if ".txt" in srcKey:
print(i)
newKey = srcKey.replace(".txt", ".pdf")
copySource = s3BucketName + '/' + srcKey
s3.Object(s3BucketName, newKey).copy_from(CopySource=copySource)
s3.Object(s3BucketName, srcKey).delete()
i += 1
# +
def startJob(s3BucketName, objectName):
response = None
client = boto3.client('textract',
# service_name = 's3',
region_name = region,
aws_access_key_id = KEY,
aws_secret_access_key = SECRET)
response = client.start_document_text_detection(DocumentLocation={
'S3Object': {'Bucket': s3BucketName,
'Name': objectName
}
})
return response["JobId"]
def isJobComplete(jobId):
# For production use cases, use SNS based notification
# Details at: https://docs.aws.amazon.com/textract/latest/dg/api-async.html
time.sleep(5)
client = boto3.client('textract',
# service_name = 's3',
region_name = region,
aws_access_key_id = KEY,
aws_secret_access_key = SECRET)
response = client.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
# print("Job status: {}".format(status))
while(status == "IN_PROGRESS"):
time.sleep(5)
response = client.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
# print("Job status: {}".format(status))
return status
def getJobResults(jobId):
pages = []
client = boto3.client('textract',
# service_name = 's3',
region_name = region,
aws_access_key_id = KEY,
aws_secret_access_key = SECRET)
response = client.get_document_text_detection(JobId=jobId)
pages.append(response)
# print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
while(nextToken):
response = client.get_document_text_detection(JobId=jobId, NextToken=nextToken)
pages.append(response)
# print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
return pages
def save_txt_file(file, file_name):
    # Write the extracted text to a local file.
    with open(file_name, "w") as f:
        f.write(file)
# -
def AWS_pdf_to_text(key, s3BucketName):
# Document
documentName = key
jobId = startJob(s3BucketName, documentName)
# print("Started job with id: {}".format(jobId))
if(isJobComplete(jobId)):
response = getJobResults(jobId)
#print(response)
# Print detected text
policy = ""
for resultPage in response:
# print(resultPage)
try:
for item in resultPage["Blocks"]:
if item["BlockType"] == "LINE":
policy = policy + item["Text"] + "\n"
# print ('\033[94m' + item["Text"] + '\033[0m')
except:
f = open("../output/log_file.txt", 'a')
f.write(f"The file {key} could not be converted\n")
f.close()
print(f"The file {key} could not be converted")
return policy
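# A single-document sketch of the helper above (left commented out; the key
# below is a hypothetical placeholder, and the next cell runs the same
# helper over a whole prefix):
# example_key = "english_documents/raw_pdf/example.pdf"  # hypothetical key
# print(AWS_pdf_to_text(example_key, s3BucketName)[:500])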
# +
keyword = "0bc17448527c7798e84937eb897bdaa82c3c4a3c.pdf"
prefix = "english_documents/text_files/HSSC/new"
i = 0
for obj in s3.Bucket(s3BucketName).objects.all().filter(Prefix = prefix):
if ".pdf" in obj.key: #and i > 48: lower_limit < i < upper_limit:
print(i, "**", obj.key)
key = obj.key.replace(".pdf", ".txt")#replace("raw_pdf", "text_files").
policy = AWS_pdf_to_text(obj.key, s3BucketName)
if policy != "":
s3.Object(s3BucketName, key).put(Body = policy)
# save_txt_file(policy, "/home/propietari/Documents/S3/" + obj.key[:-4] + ".txt")
i += 1
# -
policy
| tasks/extract_text/notebooks/document_to_text_AWS_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 32-bit
# language: python
# name: python_defaultSpec_1598117966831
# ---
#
# <a href="https://cognitiveclass.ai/">
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/CCLog.png" width="200" align="center">
# </a>
# <h1>Lists in Python</h1>
# <p><strong>Welcome!</strong> This notebook will teach you about lists in the Python programming language. By the end of this lab, you'll know the basic list operations in Python, including indexing, list manipulation and copying/cloning lists.</p>
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li>
# <a href="#dataset">About the Dataset</a>
# </li>
# <li>
# <a href="#list">Lists</a>
# <ul>
#             <li><a href="#index">Indexing</a></li>
#             <li><a href="#content">List Content</a></li>
#             <li><a href="#op">List Operations</a></li>
#             <li><a href="#co">Copy and Clone List</a></li>
# </ul>
# </li>
# <li>
# <a href="#quiz">Quiz on Lists</a>
# </li>
# </ul>
# <p>
# Estimated time needed: <strong>15 min</strong>
# </p>
# </div>
#
# <hr>
# <h2 id="dataset">About the Dataset</h2>
# Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album.
#
# The table has one row for each album and several columns:
#
# - **artist** - Name of the artist
# - **album** - Name of the album
# - **released_year** - Year the album was released
# - **length_min_sec** - Length of the album (hours,minutes,seconds)
# - **genre** - Genre of the album
# - **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
# - **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
# - **date_released** - Date on which the album was released
# - **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N)
# - **rating_of_friends** - Indicates the rating from your friends from 1 to 10
# <br>
# <br>
#
# The dataset can be seen below:
#
# <font size="1">
# <table font-size:xx-small style="width:100%">
# <tr>
# <th>Artist</th>
# <th>Album</th>
# <th>Released</th>
# <th>Length</th>
# <th>Genre</th>
# <th>Music recording sales (millions)</th>
# <th>Claimed sales (millions)</th>
#         <th>Date released</th>
# <th>Soundtrack</th>
# <th>Rating (friends)</th>
# </tr>
# <tr>
# <td><NAME></td>
# <td>Thriller</td>
# <td>1982</td>
# <td>00:42:19</td>
# <td>Pop, rock, R&B</td>
# <td>46</td>
# <td>65</td>
# <td>30-Nov-82</td>
# <td></td>
# <td>10.0</td>
# </tr>
# <tr>
# <td>AC/DC</td>
# <td>Back in Black</td>
# <td>1980</td>
# <td>00:42:11</td>
# <td>Hard rock</td>
# <td>26.1</td>
# <td>50</td>
# <td>25-Jul-80</td>
# <td></td>
# <td>8.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>The Dark Side of the Moon</td>
# <td>1973</td>
# <td>00:42:49</td>
# <td>Progressive rock</td>
# <td>24.2</td>
# <td>45</td>
# <td>01-Mar-73</td>
# <td></td>
# <td>9.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>The Bodyguard</td>
# <td>1992</td>
# <td>00:57:44</td>
# <td>Soundtrack/R&B, soul, pop</td>
# <td>26.1</td>
# <td>50</td>
# <td>25-Jul-80</td>
# <td>Y</td>
# <td>7.0</td>
# </tr>
# <tr>
# <td>Meat Loaf</td>
# <td>Bat Out of Hell</td>
# <td>1977</td>
# <td>00:46:33</td>
# <td>Hard rock, progressive rock</td>
# <td>20.6</td>
# <td>43</td>
# <td>21-Oct-77</td>
# <td></td>
# <td>7.0</td>
# </tr>
# <tr>
# <td>Eagles</td>
# <td>Their Greatest Hits (1971-1975)</td>
# <td>1976</td>
# <td>00:43:08</td>
# <td>Rock, soft rock, folk rock</td>
# <td>32.2</td>
# <td>42</td>
# <td>17-Feb-76</td>
# <td></td>
# <td>9.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>Saturday Night Fever</td>
# <td>1977</td>
# <td>1:15:54</td>
# <td>Disco</td>
# <td>20.6</td>
# <td>40</td>
# <td>15-Nov-77</td>
# <td>Y</td>
# <td>9.0</td>
# </tr>
# <tr>
# <td>Fleetwood Mac</td>
# <td>Rumours</td>
# <td>1977</td>
# <td>00:40:01</td>
# <td>Soft rock</td>
# <td>27.9</td>
# <td>40</td>
# <td>04-Feb-77</td>
# <td></td>
# <td>9.5</td>
# </tr>
# </table></font>
# <hr>
# <h2 id="list">Lists</h2>
# <h3 id="index">Indexing</h3>
# We are going to take a look at lists in Python. A list is a sequenced collection of different objects such as integers, strings, and other lists as well. The address of each element within a list is called an <b>index</b>. An index is used to access and refer to items within a list.
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsIndex.png" width="1000" />
# To create a list, type the list within square brackets <b>[ ]</b>, with your content inside the brackets and separated by commas. Let’s try it!
# + jupyter={"outputs_hidden": false}
# Create a list
L = ["<NAME>", 10.1, 1982]
L
# -
# We can use negative and regular indexing with a list :
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsNeg.png" width="1000" />
# + jupyter={"outputs_hidden": false} tags=[]
# Print the elements on each index
print('the same element using negative and positive indexing:\n Positive:', L[0],
      '\n Negative:', L[-3])
print('the same element using negative and positive indexing:\n Positive:', L[1],
      '\n Negative:', L[-2])
print('the same element using negative and positive indexing:\n Positive:', L[2],
      '\n Negative:', L[-1])
# -
# <h3 id="content">List Content</h3>
# Lists can contain strings, floats, and integers. We can nest other lists, and we can also nest tuples and other data structures. The same indexing conventions apply for nesting:
#
# + jupyter={"outputs_hidden": false}
# Sample List
["<NAME>", 10.1, 1982, [1, 2], ("A", 1)]
# -
# <h3 id="op">List Operations</h3>
# We can also perform slicing in lists. For example, if we want the last two elements, we use the following command:
# + jupyter={"outputs_hidden": false}
# Sample List
L = ["<NAME>", 10.1,1982,"MJ",1]
L
# -
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsSlice.png" width="1000">
# + jupyter={"outputs_hidden": false}
# List slicing
L[3:5]
# -
# We can use the method <code>extend</code> to add new elements to the list:
# + jupyter={"outputs_hidden": false}
# Use extend to add elements to list
L = [ "<NAME>", 10.2]
L.extend(['pop', 10])
L
# -
# Another similar method is <code>append</code>. If we apply <code>append</code> instead of <code>extend</code>, we add one element to the list:
# + jupyter={"outputs_hidden": false}
# Use append to add elements to list
L = [ "<NAME>", 10.2]
L.append(['pop', 10])
L
# -
# Each time we apply a method, the list changes. If we apply <code>extend</code> here, the list <code>L</code> is modified by adding two new elements:
# + jupyter={"outputs_hidden": false}
# Use extend to add elements to list
L = [ "<NAME>", 10.2]
L.extend(['pop', 10])
L
# -
# If we append the list <code>['a','b']</code> we have one new element consisting of a nested list:
# + jupyter={"outputs_hidden": false}
# Use append to add elements to list
L.append(['a','b'])
L
# -
# As lists are mutable, we can change them. For example, we can change the first element as follows:
# + jupyter={"outputs_hidden": false} tags=[]
# Change the element based on the index
A = ["disco", 10, 1.2]
print('Before change:', A)
A[0] = 'hard rock'
print('After change:', A)
# -
# We can also delete an element of a list using the <code>del</code> command:
# + jupyter={"outputs_hidden": false} tags=[]
# Delete the element based on the index
print('Before change:', A)
del(A[0])
print('After change:', A)
# -
# We can convert a string to a list using <code>split</code>. For example, the method <code>split</code> translates every group of characters separated by a space into an element in a list:
# + jupyter={"outputs_hidden": false}
# Split the string, default is by space
'hard rock'.split()
# -
# We can use the split function to separate strings on a specific character. We pass the character we would like to split on into the argument, which in this case is a comma. The result is a list, and each element corresponds to a set of characters that have been separated by a comma:
# + jupyter={"outputs_hidden": false}
# Split the string by comma
'A,B,C,D'.split(',')
# -
# <h3 id="co">Copy and Clone List</h3>
# When we set one variable <b>B</b> equal to <b>A</b>; both <b>A</b> and <b>B</b> are referencing the same list in memory:
# + jupyter={"outputs_hidden": false} tags=[]
# Copy (copy by reference) the list A
A = ["hard rock", 10, 1.2]
B = A
print('A:', A)
print('B:', B)
# -
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRef.png" width="1000" align="center">
# Initially, the value of the first element in <b>B</b> is set as hard rock. If we change the first element in <b>A</b> to <b>banana</b>, we get an unexpected side effect. As <b>A</b> and <b>B</b> are referencing the same list, if we change list <b>A</b>, then list <b>B</b> also changes. If we check the first element of <b>B</b> we get banana instead of hard rock:
# + jupyter={"outputs_hidden": false} tags=[]
# Examine the copy by reference
print('B[0]:', B[0])
A[0] = "banana"
print('B[0]:', B[0])
# -
# This is demonstrated in the following figure:
# <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsRefGif.gif" width="1000" />
# You can clone list **A** by using the following syntax:
# + jupyter={"outputs_hidden": false}
# Clone (clone by value) the list A
B = A[:]
B
# -
# Variable **B** references a new copy or clone of the original list; this is demonstrated in the following figure:
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Chapter%202/Images/ListsVal.gif" width="1000" />
# Now if you change <b>A</b>, <b>B</b> will not change:
# + jupyter={"outputs_hidden": false} tags=[]
print('B[0]:', B[0])
A[0] = "hard rock"
print('B[0]:', B[0])
# -
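# Note (an addition to this lab): slicing with <code>[:]</code> makes a <b>shallow</b> copy. If the list contains a nested list, the clone still references the same inner list:
# + jupyter={"outputs_hidden": false}
# Shallow copy caveat: nested objects are still shared
A = ["hard rock", 10, [1, 2]]
B = A[:]
A[2].append(3)           # mutate the nested list through A
print('B[2]:', B[2])     # B sees the change: [1, 2, 3]
# -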
# <h2 id="quiz">Quiz on List</h2>
# Create a list <code>a_list</code>, with the following elements <code>1</code>, <code>hello</code>, <code>[1,2,3]</code> and <code>True</code>.
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
a_list=[1, 'hello', [1,2,3], True]
# -
# Double-click <b>here</b> for the solution.
#
# <!-- Your answer is below:
# a_list = [1, 'hello', [1, 2, 3] , True]
# a_list
# -->
# Find the value stored at index 1 of <code>a_list</code>.
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
a_list[1]
# -
# Double-click <b>here</b> for the solution.
#
# <!-- Your answer is below:
# a_list[1]
# -->
# Retrieve the elements stored at index 1, 2 and 3 of <code>a_list</code>.
# + jupyter={"outputs_hidden": false}
# Write your code below and press Shift+Enter to execute
a_list[1:4]
# -
# Double-click <b>here</b> for the solution.
#
# <!-- Your answer is below:
# a_list[1:4]
# -->
# Concatenate the following lists <code>A = [1, 'a']</code> and <code>B = [2, 1, 'd']</code>:
# + jupyter={"outputs_hidden": true}
# Write your code below and press Shift+Enter to execute
A=[1,'a']
B=[2,1,'d']
C=A+B
C
# -
# Double-click <b>here</b> for the solution.
#
# <!-- Your answer is below:
# A = [1, 'a']
# B = [2, 1, 'd']
# A + B
# -->
# <hr>
# <h2>The last exercise!</h2>
# <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.</p>
# <hr>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <h2>Get IBM Watson Studio free of charge!</h2>
# <p><a href="https://cocl.us/PY0101EN_edx_add_bbottom"><img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/PY0101EN/Ad/BottomAd.png" width="750" align="center"></a></p>
# </div>
# <h3>About the Authors:</h3>
# <p><a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a> is a Data Scientist at IBM, and holds a PhD in Electrical Engineering. His research focused on using Machine Learning, Signal Processing, and Computer Vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.</p>
# Other contributors: <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a"><NAME></a>
# <hr>
# <p>Copyright © 2018 IBM Developer Skills Network. This notebook and its source code are released under the terms of the <a href="https://cognitiveclass.ai/mit-license/">MIT License</a>.</p>
| Python for Data Science and AI/w2/PY0101EN-2-2-Lists.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://github.com/insaid2018/Term-1/blob/master/Images/INSAID_Full%20Logo.png?raw=true" width="240" height="360" />
#
# # LINEAR REGRESSION
# ## Table of Content
#
# 1. [Problem Statement](#section1)<br>
# 2. [Data Loading and Description](#section2)<br>
# 3. [Exploratory Data Analysis](#section3)<br>
# 4. [Introduction to Linear Regression](#section4)<br>
# - 4.1 [Linear Regression Equation with Errors in consideration](#section401)<br>
# - 4.1.1 [Assumptions of Linear Regression](#sectionassumptions)<br>
# - 4.2 [Preparing X and y using pandas](#section402)<br>
# - 4.3 [Splitting X and y into training and test datasets](#section403)<br>
# - 4.4 [Linear regression in scikit-learn](#section404)<br>
# - 4.5 [Interpreting Model Coefficients](#section405)<br>
#     - 4.6 [Using the Model for Prediction](#section406)<br>
# 5. [Model evaluation](#section5)<br>
# - 5.1 [Model evaluation using metrics](#section501)<br>
# - 5.2 [Model Evaluation using Rsquared value.](#section502)<br>
# 6. [Feature Selection](#section6)<br>
# 7. [Handling Categorical Features](#section7)<br>
# <a id=section1></a>
# ## 1. Problem Statement
#
# __Sales__ (in thousands of units) for a particular product as a __function__ of __advertising budgets__ (in thousands of dollars) for _TV, radio, and newspaper media_. Suppose that in our role as __Data Scientist__ we are asked to suggest, based on this data, how the advertising budget should be spent.
#
# - We want to find a function that given input budgets for TV, radio and newspaper __predicts the output sales__.
#
# - Which media __contribute__ to sales?
#
# - Visualize the __relationship__ between the _features_ and the _response_ using scatter plots.
# <a id=section2></a>
# ## 2. Data Loading and Description
#
# The advertising dataset captures sales revenue generated with respect to advertisement spend across multiple channels like radio, TV and newspaper.
# - TV - Spend on TV Advertisements
# - Radio - Spend on radio Advertisements
# - Newspaper - Spend on newspaper Advertisements
# - Sales - Sales revenue generated
# __Importing Packages__
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
# allow plots to appear directly in the notebook
# %matplotlib inline
# -
# #### Importing the Dataset
data = pd.read_csv('https://raw.githubusercontent.com/insaid2018/Term-2/master/CaseStudy/Advertising.csv', index_col=0)
data.head()
# What are the **features**?
# - TV: advertising dollars spent on TV for a single product in a given market (in thousands of dollars)
# - Radio: advertising dollars spent on Radio
# - Newspaper: advertising dollars spent on Newspaper
#
# What is the **response**?
# - Sales: sales of a single product in a given market (in thousands of widgets)
# <a id=section3></a>
# ## 3. Exploratory Data Analysis
data.shape
data.info()
data.describe(include="all")
# There are 200 **observations**, and thus 200 markets in the dataset.
# __Distribution of Features__
# +
feature_cols = ['TV', 'radio', 'newspaper'] # create a Python list of feature names
X = data[feature_cols]
y=data.sales
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,y,test_size=0.20,random_state=1)
from sklearn.linear_model import LinearRegression
lr=LinearRegression()
linreg=lr.fit(X_train,Y_train)
print(linreg.coef_)
print(linreg.intercept_)
y_pred=linreg.predict(X_train)
from sklearn.metrics import mean_squared_error
mean_squared_error(Y_train,y_pred)
# +
f, axes = plt.subplots(2, 2, figsize=(7, 7), sharex=False) # Set up the matplotlib figure
sns.despine(left=True)
sns.distplot(data.sales, color="b", ax=axes[0, 0])
sns.distplot(data.TV, color="r", ax=axes[0, 1])
sns.distplot(data.radio, color="g", ax=axes[1, 0])
sns.distplot(data.newspaper, color="m", ax=axes[1, 1])
# -
# __Observations__<br/>
# _Sales_ appears to be __normally distributed__. Spending on _newspaper advertisement_ appears __right skewed__: most of the spend on _newspaper_ is __fairly low__, whereas spend on _radio and TV_ looks closer to a __uniform distribution__. Spend on _TV_ is __comparatively higher__ than spend on _radio and newspaper_.
# ### Is there a relationship between sales and spend on various advertising channels?
# +
JG1 = sns.jointplot("newspaper", "sales", data=data, kind='reg')
JG2 = sns.jointplot("radio", "sales", data=data, kind='reg')
JG3 = sns.jointplot("TV", "sales", data=data, kind='reg')
#subplots migration
f = plt.figure()
for J in [JG1, JG2,JG3]:
for A in J.fig.axes:
f._axstack.add(f._make_key(A), A)
# -
# __Observation__<br/>
# _Sales and spend on newspaper_ are __not__ highly correlated, whereas _sales and spend on TV_ are __highly correlated__.
# ### Visualising Pairwise correlation
sns.pairplot(data, size = 2, aspect = 1.5)
sns.pairplot(data, x_vars=['TV', 'radio', 'newspaper'], y_vars='sales', size=5, aspect=1, kind='reg')
# __Observation__
#
# - Strong relationship between TV ads and sales
# - Weak relationship between Radio ads and sales
# - Very weak to no relationship between Newspaper ads and sales
#
#
# ### Calculating and plotting heatmap correlation
data.corr()
sns.heatmap( data.corr(), annot=True );
# __Observation__
#
# - The diagonal of the above matrix shows the auto-correlation of the variables. It is always 1. You can observe that the correlation between __TV and Sales is highest i.e. 0.78__ and then between __sales and radio i.e. 0.576__.
#
# - Correlations can vary from -1 to +1. Closer to +1 means strong positive correlation and closer to -1 means strong negative correlation. Closer to 0 means weak correlation. Variables with __strong correlations__ with the target are the most probable candidates for __model building__.
#
# <a id=section4></a>
# ## 4. Introduction to Linear Regression
#
# __Linear regression__ is a _basic_ and _commonly_ used type of __predictive analysis__. The overall idea of regression is to examine two things:
# - Does a set of __predictor variables__ do a good job in predicting an __outcome__ (dependent) variable?
# - Which variables in particular are __significant predictors__ of the outcome variable, and in what way do they __impact__ the outcome variable?
#
# These regression estimates are used to explain the __relationship between one dependent variable and one or more independent variables__. The simplest form of the regression equation with one dependent and one independent variable is defined by the formula :<br/>
# $y = \beta_0 + \beta_1x$
#
# 
#
# What does each term represent?
# - $y$ is the response
# - $x$ is the feature
# - $\beta_0$ is the intercept
# - $\beta_1$ is the coefficient for x
#
#
# Three major uses for __regression analysis__ are:
# - determining the __strength__ of predictors,
# - Typical questions are what is the strength of __relationship__ between _dose and effect_, _sales and marketing spending_, or _age and income_.
# - __forecasting__ an effect, and
# - how much __additional sales income__ do I get for each additional $1000 spent on marketing?
# - __trend__ forecasting.
# - what will the __price of house__ be in _6 months_?
# <a id=section401></a>
# ### 4.1 Linear Regression Equation with Errors in consideration
#
# While taking errors into consideration the equation of linear regression is:
# 
# Generally speaking, coefficients are estimated using the **least squares criterion**, which means we find the line (mathematically) that minimizes the **sum of squared residuals** (or "sum of squared errors"):
#
# What elements are present in the diagram?
# - The black dots are the **observed values** of x and y.
# - The blue line is our **least squares line**.
# - The red lines are the **residuals**, which are the distances between the observed values and the least squares line.
# 
#
# How do the model coefficients relate to the least squares line?
# - $\beta_0$ is the **intercept** (the value of $y$ when $x$ = 0)
# - $\beta_1$ is the **slope** (the change in $y$ divided by change in $x$)
#
# Here is a graphical depiction of those calculations:
# 
# <a id = sectionassumptions></a>
# #### 4.1.1 Assumptions of Linear Regression
# 1. There should be a linear and additive relationship between dependent (response) variable and independent (predictor) variable(s). A linear relationship suggests that a change in response Y due to one unit change in X¹ is constant, regardless of the value of X¹. An additive relationship suggests that the effect of X¹ on Y is independent of other variables.
# 2. There should be no correlation between the residual (error) terms. Absence of this phenomenon is known as Autocorrelation.
# 3. The independent variables should not be correlated. Absence of this phenomenon is known as multicollinearity.
# 4. The error terms must have constant variance. This phenomenon is known as homoskedasticity. The presence of non-constant variance is referred to as heteroskedasticity (a residuals-vs-fitted sketch below gives a quick visual check).
# 5. The error terms must be normally distributed.
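# A quick visual check for assumptions 2 and 4 (a sketch added here, not part of the original notebook) is to plot residuals against fitted values; a visible pattern or funnel shape hints at autocorrelation or heteroskedasticity.
# +
# Residuals-vs-fitted sketch, assuming the Advertising DataFrame `data`
# loaded above. A patternless horizontal band is what we hope to see.
from sklearn.linear_model import LinearRegression
X_check = data[['TV', 'radio', 'newspaper']]
y_check = data.sales
fitted = LinearRegression().fit(X_check, y_check).predict(X_check)
residuals = y_check - fitted
plt.scatter(fitted, residuals, alpha=0.5)
plt.axhline(0, color='k', linestyle='--')
plt.xlabel('Fitted values')
plt.ylabel('Residuals')
plt.show()
# -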
# <a id=section402></a>
# ### 4.2 Preparing X and y using pandas
# - __Standardization__. <br/>
# Standardize features by removing the _mean_ and scaling to _unit standard deviation_.
sns.distplot(data['TV'])
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(data)
data1 = scaler.transform(data)
data = pd.DataFrame(data1)
data.head()
data.columns = ['TV','radio','newspaper','sales']
data.head()
plt.scatter(data['radio'],data['sales'])
plt.show()
sns.distplot(data['TV'])
feature_cols = ['TV', 'radio', 'newspaper'] # create a Python list of feature names
X = data[feature_cols] # use the list to select a subset of the original DataFrame
# - Checking the type and shape of X.
print(type(X))
print(X.shape)
y = data.sales
y.head()
# - Check the type and shape of y
print(type(y))
print(y.shape)
# <a id=section403></a>
# ### 4.3 Splitting X and y into training and test datasets.
# +
from sklearn.model_selection import train_test_split
def split(X,y):
return train_test_split(X, y, test_size=0.20, random_state=1)
# -
X_train, X_test, y_train, y_test=split(X,y)
print('Train cases as below')
print('X_train shape: ',X_train.shape)
print('y_train shape: ',y_train.shape)
print('\nTest cases as below')
print('X_test shape: ',X_test.shape)
print('y_test shape: ',y_test.shape)
# <a id=section404></a>
# ### 4.4 Linear regression in scikit-learn
# To apply any machine learning algorithm on your dataset, basically there are 4 steps:
# 1. Load the algorithm
# 2. Instantiate and Fit the model to the training dataset
# 3. Prediction on the test set
# 4. Calculating Root mean square error
# The code block given below shows how these steps are carried out:<br/>
#
# ``` from sklearn.linear_model import LinearRegression
#     linreg = LinearRegression()
#     linreg.fit(X_train, y_train)
#     y_pred_test = linreg.predict(X_test)
#     RMSE_test = np.sqrt(metrics.mean_squared_error(y_test, y_pred_test))```
def linear_reg( X, y, gridsearch = False):
X_train, X_test, y_train, y_test = split(X,y)
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
if not(gridsearch):
linreg.fit(X_train, y_train)
else:
from sklearn.model_selection import GridSearchCV
parameters = {'normalize':[True,False], 'copy_X':[True, False]}
linreg = GridSearchCV(linreg,parameters, cv = 10,refit = True)
linreg.fit(X_train, y_train) # fit the model to the training data (learn the coefficients)
print("Mean cross-validated score of the best_estimator : ", linreg.best_score_)
y_pred_test = linreg.predict(X_test) # make predictions on the testing set
RMSE_test = np.sqrt(metrics.mean_squared_error(y_test, y_pred_test)) # compute the RMSE of our predictions
print('RMSE for the test set is {}'.format(RMSE_test))
return linreg
# ### Linear Regression Model without GridSearchCV
# Note: the Linear Regression Model with GridSearchCV is used in the Feature Selection section below.
X = data[feature_cols]
y = data.sales
linreg = linear_reg(X,y)
# <a id=section405></a>
# ### 4.5 Interpreting Model Coefficients
print('Intercept:',linreg.intercept_) # print the intercept
print('Coefficients:',linreg.coef_)
# It's hard to remember the order of the feature names, so we are __zipping__ the features to pair the feature names with the coefficients
feature_cols.insert(0,'Intercept')
coef = linreg.coef_.tolist()
coef.insert(0, linreg.intercept_)
# +
eq1 = zip(feature_cols, coef)
for c1,c2 in eq1:
print(c1,c2)
# -
# __y = 0.00116 + 0.7708 `*` TV + 0.508 `*` radio + 0.010 `*` newspaper__
# How do we interpret the TV coefficient (_0.77081_)?
# - A "unit" increase in TV ad spending is **associated with** a _"0.7708_ unit" increase in Sales.
# - Or more clearly: An additional $1,000 spent on TV ads is **associated with** an increase in sales of 770.8 widgets.
#
# Important Notes:
# - This is a statement of __association__, not __causation__.
# - If an increase in TV ad spending was associated with a __decrease__ in sales, β1 would be __negative.__
# <a id=section406></a>
# ### 4.6 Using the Model for Prediction
y_pred_train = linreg.predict(X_train)
y_pred_test = linreg.predict(X_test) # make predictions on the testing set
# - We need an evaluation metric in order to compare our predictions with the actual values.
# <a id=section5></a>
# ## 5. Model evaluation
# __Error__ is the _deviation_ of the values _predicted_ by the model from the _true_ values.<br/>
# For example, if a model predicts that the price of apple is Rs75/kg, but the actual price of apple is Rs100/kg, then the error in prediction will be Rs25/kg.<br/>
# Below are the types of error we will be calculating for our _linear regression model_:
# - Mean Absolute Error
# - Mean Squared Error
# - Root Mean Squared Error
# <a id=section501></a>
# ### 5.1 Model Evaluation using __metrics.__
# __Mean Absolute Error__ (MAE) is the mean of the absolute value of the errors:
# $$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$
# Computing the MAE for our Sales predictions
MAE_train = metrics.mean_absolute_error(y_train, y_pred_train)
MAE_test = metrics.mean_absolute_error(y_test, y_pred_test)
print('MAE for training set is {}'.format(MAE_train))
print('MAE for test set is {}'.format(MAE_test))
# __Mean Squared Error__ (MSE) is the mean of the squared errors:
# $$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$
#
# Computing the MSE for our Sales predictions
MSE_train = metrics.mean_squared_error(y_train, y_pred_train)
MSE_test = metrics.mean_squared_error(y_test, y_pred_test)
print('MSE for training set is {}'.format(MSE_train))
print('MSE for test set is {}'.format(MSE_test))
# __Root Mean Squared Error__ (RMSE) is the square root of the mean of the squared errors:
#
# $$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$
#
# Computing the RMSE for our Sales predictions
RMSE_train = np.sqrt( metrics.mean_squared_error(y_train, y_pred_train))
RMSE_test = np.sqrt(metrics.mean_squared_error(y_test, y_pred_test))
print('RMSE for training set is {}'.format(RMSE_train))
print('RMSE for test set is {}'.format(RMSE_test))
# Comparing these metrics:
#
# - __MAE__ is the easiest to understand, because it's the __average error.__
# - __MSE__ is more popular than MAE, because MSE "punishes" larger errors.
# - __RMSE__ is even more popular than MSE, because RMSE is _interpretable_ in the "y" units.
# - Easier to put in context as it's the same units as our response variable.
# <a id=section502></a>
# ### 5.2 Model Evaluation using Rsquared value.
# - There is one more method to evaluate linear regression model and that is by using the __Rsquared__ value.<br/>
# - R-squared is the **proportion of variance explained**, meaning the proportion of variance in the observed data that is explained by the model, or the reduction in error over the **null model**. (The null model just predicts the mean of the observed response, and thus it has an intercept and no slope.)
#
# - R-squared is between 0 and 1, and higher is better because it means that more variance is explained by the model. But there is one shortcoming of Rsquare method and that is **R-squared will always increase as you add more features to the model**, even if they are unrelated to the response. Thus, selecting the model with the highest R-squared is not a reliable approach for choosing the best linear model.
#
# There is an alternative to R-squared called **adjusted R-squared** that penalizes model complexity (to control for overfitting).
yhat = linreg.predict(X_train)
SS_Residual = sum((y_train-yhat)**2)
SS_Total = sum((y_train-np.mean(y_train))**2)
r_squared = 1 - (float(SS_Residual))/SS_Total
adjusted_r_squared = 1 - (1-r_squared)*(len(y_train)-1)/(len(y_train)-X_train.shape[1]-1)
print(r_squared, adjusted_r_squared)
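# As a quick cross-check (not part of the original notebook), sklearn's `r2_score` should match the manually computed R-squared on the training set:
from sklearn.metrics import r2_score
print(r2_score(y_train, yhat))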
yhat = linreg.predict(X_test)
SS_Residual = sum((y_test-yhat)**2)
SS_Total = sum((y_test-np.mean(y_test))**2)
r_squared = 1 - (float(SS_Residual))/SS_Total
adjusted_r_squared = 1 - (1-r_squared)*(len(y_test)-1)/(len(y_test)-X_test.shape[1]-1)
print(r_squared, adjusted_r_squared)
# <a id=section6></a>
# ## 6. Feature Selection
#
# At times some features do not contribute much to the accuracy of the model; in that case it's better to discard those features.<br/>
# - Let's check whether __"newspaper"__ improves the quality of our predictions or not.<br/>
# To check this we are going to take all the features other than "newspaper" and see whether the error (RMSE) reduces or not.
# - Also applying the __gridsearch__ method for an exhaustive search over specified parameter values of the estimator.
feature_cols = ['TV','radio'] # create a Python list of feature names
X = data[feature_cols]
y = data.sales
linreg=linear_reg(X,y,gridsearch=True)
# - _Before_ doing feature selection _RMSE_ for the test dataset was __0.271182__.<br/>
# - _After_ discarding 'newspaper' column, RMSE comes to be __0.268675__.<br/>
# - As you can see there is __no significant improvement__ in quality, therefore the 'newspaper' column shouldn't be discarded. But if, in some other case, there is a significant decrease in the RMSE, then you should discard that feature.
# - Give the other __features__ a try and check the RMSE score for each one.
# <a id=section7></a>
# +
features=['TV','newspaper','radio']
X=data[features]
y=data.sales
#linreg=linear_reg(X,y,gridsearch=True)
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,y,test_size=0.2,random_state=1)
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
linreg=lr.fit(X_train,Y_train)
Y_pred=linreg.predict(X_train)
mean_squared_error(Y_train,Y_pred)
#print("MSE = {0}".format(mean_squared_error(Y_train,Y_pred)))
# -
# ## 7. Handling Categorical Features
#
# Let's create a new feature called **Area**, and randomly assign observations to be **rural, suburban, or urban** :
np.random.seed(123456) # set a seed for reproducibility
nums = np.random.rand(len(data))
mask_suburban = (nums > 0.33) & (nums < 0.66) # assign roughly one third of observations to each group
mask_urban = nums > 0.66
data['Area'] = 'rural'
data.loc[mask_suburban, 'Area'] = 'suburban'
data.loc[mask_urban, 'Area'] = 'urban'
data.head()
# We want to represent Area numerically, but we can't simply code it as:<br/>
# - 0 = rural,<br/>
# - 1 = suburban,<br/>
# - 2 = urban<br/>
# Because that would imply an **ordered relationship** between suburban and urban, and thus urban is somehow "twice" the suburban category.<br/> Note that if you do have ordered categories (e.g., strongly disagree, disagree, neutral, agree, strongly agree), you can use a single dummy variable to represent the categories numerically (such as 1, 2, 3, 4, 5), for example via a mapping as sketched below.<br/>
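# For example (a hypothetical illustration added here), an ordered scale could be encoded with a single numeric column via a simple mapping:
# +
import pandas as pd
agreement = pd.Series(['disagree', 'agree', 'neutral', 'strongly agree'])  # hypothetical responses
order = {'strongly disagree': 1, 'disagree': 2, 'neutral': 3, 'agree': 4, 'strongly agree': 5}
agreement.map(order)
# -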
#
# Anyway, our Area feature is unordered, so we have to create **additional dummy variables**. Let's explore how to do this using pandas:
area_dummies = pd.get_dummies(data.Area, prefix='Area') # create three dummy variables using get_dummies
area_dummies.head()
# However, we actually only need **two dummy variables, not three**.
# __Why???__
# Because two dummies capture all the "information" about the Area feature, and implicitly define rural as the "baseline level".
#
# Let's see what that looks like:
area_dummies = pd.get_dummies(data.Area, prefix='Area').iloc[:, 1:]
area_dummies.head()
# Here is how we interpret the coding:
# - **rural** is coded as Area_suburban = 0 and Area_urban = 0
# - **suburban** is coded as Area_suburban = 1 and Area_urban = 0
# - **urban** is coded as Area_suburban = 0 and Area_urban = 1
#
# If this sounds confusing, think in general terms about why we need only __k-1 dummy variables__ if we have a categorical feature with __k "levels"__.
#
# Anyway, let's add these two new dummy variables onto the original DataFrame, and then include them in the linear regression model.
# concatenate the dummy variable columns onto the DataFrame (axis=0 means rows, axis=1 means columns)
data = pd.concat([data, area_dummies], axis=1)
data.head()
feature_cols = ['TV', 'radio', 'newspaper', 'Area_suburban', 'Area_urban'] # create a Python list of feature names
X = data[feature_cols]
y = data.sales
linreg = linear_reg(X,y)
# +
feature_cols.insert(0,'Intercept')
coef = linreg.coef_.tolist()
coef.insert(0, linreg.intercept_)
eq1 = zip(feature_cols, coef)
for c1,c2 in eq1:
print(c1,c2)
# -
# __y = - 0.00218 + 0.7691 `*` TV + 0.505 `*` radio + 0.011 `*` newspaper - 0.0311 `*` Area_suburban + 0.0418 `*` Area_urban__<br/>
# How do we interpret the coefficients?<br/>
# - Holding all other variables fixed, being a **suburban** area is associated with an average **decrease** in Sales of 0.0311 widgets (as compared to the baseline level, which is rural).
# - Being an **urban** area is associated with an average **increase** in Sales of 0.0418 widgets (as compared to rural).
# <a id=section8></a>
| INSAID/Course_Material/Term1-ML/Linear_Regression/LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Initialization
#
# Welcome to the first assignment of "Improving Deep Neural Networks".
#
# Training your neural network requires specifying an initial value of the weights. A well chosen initialization method will help learning.
#
# If you completed the previous course of this specialization, you probably followed our instructions for weight initialization, and it has worked out so far. But how do you choose the initialization for a new neural network? In this notebook, you will see how different initializations lead to different results.
#
# A well chosen initialization can:
# - Speed up the convergence of gradient descent
# - Increase the odds of gradient descent converging to a lower training (and generalization) error
#
# To get started, run the following cell to load the packages and the planar dataset you will try to classify.
# +
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
# %matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
# -
# You would like a classifier to separate the blue dots from the red dots.
# ## 1 - Neural Network model
# You will use a 3-layer neural network (already implemented for you). Here are the initialization methods you will experiment with:
# - *Zeros initialization* -- setting `initialization = "zeros"` in the input argument.
# - *Random initialization* -- setting `initialization = "random"` in the input argument. This initializes the weights to large random values.
# - *He initialization* -- setting `initialization = "he"` in the input argument. This initializes the weights to random values scaled according to a paper by He et al., 2015.
#
# **Instructions**: Please quickly read over the code below, and run it. In the next part you will implement the three initialization methods that this `model()` calls.
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
"""
Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
learning_rate -- learning rate for gradient descent
num_iterations -- number of iterations to run gradient descent
print_cost -- if True, print the cost every 1000 iterations
initialization -- flag to choose which initialization to use ("zeros","random" or "he")
Returns:
parameters -- parameters learnt by the model
"""
grads = {}
costs = [] # to keep track of the loss
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 10, 5, 1]
# Initialize parameters dictionary.
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
a3, cache = forward_propagation(X, parameters)
# Loss
cost = compute_loss(a3, Y)
# Backward propagation.
grads = backward_propagation(X, Y, cache)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 1000 iterations
if print_cost and i % 1000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
costs.append(cost)
# plot the loss
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# ## 2 - Zero initialization
#
# There are two types of parameters to initialize in a neural network:
# - the weight matrices $(W^{[1]}, W^{[2]}, W^{[3]}, ..., W^{[L-1]}, W^{[L]})$
# - the bias vectors $(b^{[1]}, b^{[2]}, b^{[3]}, ..., b^{[L-1]}, b^{[L]})$
#
# **Exercise**: Implement the following function to initialize all parameters to zeros. You'll see later that this does not work well since it fails to "break symmetry", but let's try it anyway and see what happens. Use np.zeros((..,..)) with the correct shapes.
# +
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
L = len(layers_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.zeros((layers_dims[l],layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l],1))
### END CODE HERE ###
return parameters
# -
parameters = initialize_parameters_zeros([3,2,1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[ 0. 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using zeros initialization.
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# The performance is really bad: the cost does not really decrease, and the algorithm performs no better than random guessing. Why? Let's look at the details of the predictions and the decision boundary:
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# The model is predicting 0 for every example.
#
# In general, initializing all the weights to zero results in the network failing to break symmetry. This means that every neuron in each layer will learn the same thing, and you might as well be training a neural network with $n^{[l]}=1$ for every layer, and the network is no more powerful than a linear classifier such as logistic regression.
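# A tiny numeric illustration (added here, not part of the graded assignment): with all-zero weights every hidden unit computes the same pre-activation for any input, so their gradients are identical and the units never become different from one another.
# +
W1_demo = np.zeros((3, 2))            # 3 hidden units, 2 inputs
x_demo = np.array([[0.5], [-1.2]])    # a single training example
print(W1_demo.dot(x_demo))            # all three hidden pre-activations are identical (zero)
# -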
# <font color='blue'>
# **What you should remember**:
# - The weights $W^{[l]}$ should be initialized randomly to break symmetry.
# - It is however okay to initialize the biases $b^{[l]}$ to zeros. Symmetry is still broken so long as $W^{[l]}$ is initialized randomly.
#
# ## 3 - Random initialization
#
# To break symmetry, let's initialize the weights randomly. Following random initialization, each neuron can then proceed to learn a different function of its inputs. In this exercise, you will see what happens if the weights are initialized randomly, but to very large values.
#
# **Exercise**: Implement the following function to initialize your weights to large random values (scaled by \*10) and your biases to zeros. Use `np.random.randn(..,..) * 10` for weights and `np.zeros((.., ..))` for biases. We are using a fixed `np.random.seed(..)` to make sure your "random" weights match ours, so don't worry if running your code several times always gives you the same initial values for the parameters.
# +
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
    np.random.seed(3)               # This seed makes sure your "random" numbers will be the same as ours
parameters = {}
L = len(layers_dims) # integer representing the number of layers
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*10
parameters['b' + str(l)] = np.zeros((layers_dims[l],1))
### END CODE HERE ###
return parameters
# -
parameters = initialize_parameters_random([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 17.88628473 4.36509851 0.96497468]
# [-18.63492703 -2.77388203 -3.54758979]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.82741481 -6.27000677]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using random initialization.
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# If you see "inf" as the cost after iteration 0, this is because of numerical roundoff; a more numerically sophisticated implementation would fix this. But this isn't worth worrying about for our purposes.
#
# Anyway, it looks like you have broken symmetry, and this gives better results than before. The model is no longer outputting all 0s.
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The cost starts very high. This is because with large random-valued weights, the last activation (sigmoid) outputs results that are very close to 0 or 1 for some examples, and when it gets that example wrong it incurs a very high loss for that example. Indeed, when $\log(a^{[3]}) = \log(0)$, the loss goes to infinity.
# - Poor initialization can lead to vanishing/exploding gradients, which also slows down the optimization algorithm.
# - If you train this network longer you will see better results, but initializing with overly large random numbers slows down the optimization.
#
# <font color='blue'>
# **In summary**:
# - Initializing weights to very large random values does not work well.
# - Hopefully initializing with small random values does better. The important question is: how small should these random values be? Let's find out in the next part!
# ## 4 - He initialization
#
# Finally, try "He Initialization"; this is named for the first author of He et al., 2015. (If you have heard of "Xavier initialization", this is similar except Xavier initialization uses a scaling factor for the weights $W^{[l]}$ of `sqrt(1./layers_dims[l-1])` where He initialization would use `sqrt(2./layers_dims[l-1])`.)
#
# **Exercise**: Implement the following function to initialize your parameters with He initialization.
#
# **Hint**: This function is similar to the previous `initialize_parameters_random(...)`. The only difference is that instead of multiplying `np.random.randn(..,..)` by 10, you will multiply it by $\sqrt{\frac{2}{\text{dimension of the previous layer}}}$, which is what He initialization recommends for layers with a ReLU activation.
# +
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
"""
Arguments:
layer_dims -- python array (list) containing the size of each layer.
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layers_dims[1], layers_dims[0])
b1 -- bias vector of shape (layers_dims[1], 1)
...
WL -- weight matrix of shape (layers_dims[L], layers_dims[L-1])
bL -- bias vector of shape (layers_dims[L], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims) - 1 # integer representing the number of layers
for l in range(1, L + 1):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = np.random.randn(layers_dims[l],layers_dims[l-1])*(np.sqrt(2/layers_dims[l-1]))
parameters['b' + str(l)] = np.zeros((layers_dims[l],1))
### END CODE HERE ###
return parameters
# -
parameters = initialize_parameters_he([2, 4, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **W1**
# </td>
# <td>
# [[ 1.78862847 0.43650985]
# [ 0.09649747 -1.8634927 ]
# [-0.2773882 -0.35475898]
# [-0.08274148 -0.62700068]]
# </td>
# </tr>
# <tr>
# <td>
# **b1**
# </td>
# <td>
# [[ 0.]
# [ 0.]
# [ 0.]
# [ 0.]]
# </td>
# </tr>
# <tr>
# <td>
# **W2**
# </td>
# <td>
# [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
# </td>
# </tr>
# <tr>
# <td>
# **b2**
# </td>
# <td>
# [[ 0.]]
# </td>
# </tr>
#
# </table>
# Run the following code to train your model on 15,000 iterations using He initialization.
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# **Observations**:
# - The model with He initialization separates the blue and the red dots very well in a small number of iterations.
#
# ## 5 - Conclusions
# You have seen three different types of initializations. For the same number of iterations and same hyperparameters the comparison is:
#
# <table>
# <tr>
# <td>
# **Model**
# </td>
# <td>
# **Train accuracy**
# </td>
# <td>
# **Problem/Comment**
# </td>
#
# </tr>
# <td>
# 3-layer NN with zeros initialization
# </td>
# <td>
# 50%
# </td>
# <td>
# fails to break symmetry
# </td>
# <tr>
# <td>
# 3-layer NN with large random initialization
# </td>
# <td>
# 83%
# </td>
# <td>
# too large weights
# </td>
# </tr>
# <tr>
# <td>
# 3-layer NN with He initialization
# </td>
# <td>
# 99%
# </td>
# <td>
# recommended method
# </td>
# </tr>
# </table>
# <font color='blue'>
# **What you should remember from this notebook**:
# - Different initializations lead to different results
# - Random initialization is used to break symmetry and make sure different hidden units can learn different things
# - Don't initialize to values that are too large
# - He initialization works well for networks with ReLU activations.
| deep-neural-network/Week 5/Initialization/Initialization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import scipy
import itertools
import string
from prediction_utils.pytorch_utils.metrics import CalibrationEvaluator, FairOVAEvaluator
from sklearn.metrics import roc_curve
from zipcode_cvd.experiments.util import flatten_multicolumns
# -
figures_data_path = '../zipcode_cvd/experiments/figures_data/eo_rr/'
attributes = ['race_eth', 'gender_concept_name', 'race_eth_gender']
data_dict = {
attribute: pd.read_parquet(os.path.join(figures_data_path, f'result_df_ci_{attribute}.parquet'))
for attribute in attributes
}
# +
group_name_dict = {
'race_eth': pd.DataFrame(
{
'Asian': 'Asian',
'Black or African American': 'Black',
'Hispanic or Latino': 'Hispanic',
'Other': 'Other',
'White': 'White',
}, index=['_race_eth']).transpose().rename_axis('race_eth').reset_index(),
'gender_concept_name': pd.DataFrame({
'FEMALE': 'Female',
'MALE': 'Male',
}, index=['_gender_concept_name']).transpose().rename_axis('gender_concept_name').reset_index(),
'race_eth_gender': pd.DataFrame(
{
'Asian | FEMALE': 'A-F',
'Asian | MALE': 'A-M',
'Black or African American | MALE': 'B-M',
'Black or African American | FEMALE': 'B-F',
'Hispanic or Latino | MALE': 'H-M',
'Hispanic or Latino | FEMALE': 'H-F',
'Other | FEMALE': 'O-F',
'Other | MALE': 'O-M',
'White | FEMALE': 'W-F',
'White | MALE': 'W-M',
}, index=['_race_eth_gender']).transpose().rename_axis('race_eth_gender').reset_index(),
}
# -
data_dict = {
key: value.merge(group_name_dict[key]).drop(columns=key).rename(columns={f'_{key}': key})
for key, value in data_dict.items()
}
def plot_data(
df,
ax=None,
x_var='score',
y_var='calibration_density',
group_var_name='race_eth_gender',
ci_lower_var=None,
ci_upper_var=None,
drawstyle=None,
ylim=(None, None),
xlim=(None, None),
ylabel=None,
xlabel=None,
legend=True,
bbox_to_anchor=(1.04, 1),
plot_y_equals_x=False,
plot_x_axis=False,
despine=True,
hide_yticks=False,
hide_xticks=False,
linestyle=None,
label_group=True,
title=None,
axvline=None,
y_labelpad=None,
titlepad=None,
xticks=None,
xticklabels=None,
):
if ax is None:
plt.figure()
ax = plt.gca()
groups = []
for i, (group_id, group_df) in enumerate(df.groupby(group_var_name)):
groups.append(group_id)
color = plt.rcParams['axes.prop_cycle'].by_key()['color'][i%len(plt.rcParams['axes.prop_cycle'])]
ax.plot(group_df[x_var], group_df[y_var], drawstyle=drawstyle, color=color, linestyle=linestyle, label=group_id if label_group else None)
if ci_upper_var is not None and ci_lower_var is not None:
ax.fill_between(
group_df[x_var],
group_df[ci_lower_var],
group_df[ci_upper_var],
alpha=0.25,
color=color,
label='_nolegend_'
)
if plot_y_equals_x:
ax.plot(np.linspace(1e-4, 1-1e-4, 1000), np.linspace(1e-4, 1-1e-4, 1000), linestyle='--', color='k', label='_nolegend_')
if axvline is not None:
ax.axvline(axvline, linestyle='--', color='k', label="_nolegend_")
if plot_x_axis:
ax.axhline(0, linestyle='--', color='k', label="_nolegend_")
if legend:
ax.legend(labels=groups, bbox_to_anchor=bbox_to_anchor, frameon=False)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel, labelpad=y_labelpad)
if title is not None:
ax.set_title(title, pad=titlepad)
if hide_xticks:
ax.xaxis.set_ticklabels([])
elif xticks is not None:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
if hide_yticks:
ax.yaxis.set_ticklabels([])
if despine:
sns.despine()
return ax
data_dict_pivot = {}
for key, value in data_dict.items():
data_dict_pivot[key] = value.pivot(
        index=sorted(set(value.columns) - set(['comparator', 'baseline', 'delta', 'CI_quantile_95', 'metric'])),
columns=['metric', 'CI_quantile_95'],
values=['comparator', 'delta']
).pipe(flatten_multicolumns).reset_index()
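# After the pivot, each (metric, CI quantile) pair is flattened into a single column such as
# 'comparator_calibration_density_mid' or 'delta_nb_lower'; these flattened names are the
# y-variables referenced by the plot configurations below.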
# +
plot_config_dict = {
'calibration_curve': {
'xlim':(0, 0.4),
'ylim':(0, 0.4),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'plot_y_equals_x':True,
'legend': False,
'ylabel': 'Incidence',
'x_var':'score',
'y_var':'comparator_calibration_density_mid',
'ci_lower_var': 'comparator_calibration_density_lower',
'ci_upper_var': 'comparator_calibration_density_upper',
},
'tpr': {
'xlim':(0, 0.4),
'ylim':(0, 1),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend':False,
'ylabel': 'TPR',
'x_var':'score',
'y_var': 'comparator_tpr_mid',
'ci_lower_var':'comparator_tpr_lower',
'ci_upper_var':'comparator_tpr_upper',
},
'fpr': {
'xlim':(0, 0.4),
'ylim':(0, 1),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend':False,
'ylabel': 'FPR',
'x_var':'score',
'y_var': 'comparator_fpr_mid',
'ci_lower_var':'comparator_fpr_lower',
'ci_upper_var':'comparator_fpr_upper',
},
'decision_curve': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB',
'x_var': 'score',
'y_var': 'comparator_nb_mid',
'ci_lower_var':'comparator_nb_lower',
'ci_upper_var':'comparator_nb_upper',
# 'y_labelpad': 8
},
'decision_curve_treat_all': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'y_var': 'comparator_nb_all_mid',
'ci_lower_var':'comparator_nb_all_lower',
'ci_upper_var':'comparator_nb_all_upper',
'linestyle': '--',
'label_group': False
},
'decision_curve_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB (rel)',
'x_var': 'score',
'y_var': 'delta_nb_mid',
'ci_lower_var': 'delta_nb_lower',
'ci_upper_var': 'delta_nb_upper',
'plot_x_axis': True,
# 'y_labelpad': -1
},
'decision_curve_implied': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB',
'x_var': 'score',
'y_var': 'comparator_nb_implied_mid',
'ci_lower_var': 'comparator_nb_implied_lower',
'ci_upper_var': 'comparator_nb_implied_upper',
# 'y_labelpad': 8
},
'decision_curve_treat_all_implied': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'y_var': 'comparator_nb_all_mid',
'ci_lower_var':'comparator_nb_all_lower',
'ci_upper_var':'comparator_nb_all_upper',
'linestyle': '--',
'label_group': False
},
'decision_curve_implied_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB (rel)',
'x_var': 'score',
'y_var': 'delta_nb_implied_mid',
'ci_lower_var': 'delta_nb_implied_lower',
'ci_upper_var': 'delta_nb_implied_upper',
'plot_x_axis': True,
# 'y_labelpad': -1
},
'decision_curve_075': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB (7.5%)',
'x_var': 'score',
'y_var': 'comparator_nb_0.075_mid',
'ci_lower_var': 'comparator_nb_0.075_lower',
'ci_upper_var': 'comparator_nb_0.075_upper',
'axvline': 0.075,
# 'y_labelpad': 8
},
'decision_curve_075_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB (7.5%, rel)',
'x_var': 'score',
'y_var': 'delta_nb_0.075_mid',
'ci_lower_var': 'delta_nb_0.075_lower',
'ci_upper_var': 'delta_nb_0.075_upper',
'axvline': 0.075,
# 'y_labelpad': 0,
'plot_x_axis': True
},
'decision_curve_075_implied': {
'xlim': (0, 0.4),
'ylim': (0, 0.05),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB (7.5%)',
'x_var': 'score',
'y_var': 'comparator_nb_0.075_implied_mid',
'ci_lower_var': 'comparator_nb_0.075_implied_lower',
'ci_upper_var': 'comparator_nb_0.075_implied_upper',
'axvline': 0.075,
# 'y_labelpad': 8
},
'decision_curve_075_implied_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB (7.5%, rel)',
'x_var': 'score',
'y_var': 'delta_nb_0.075_implied_mid',
'ci_lower_var': 'delta_nb_0.075_implied_lower',
'ci_upper_var': 'delta_nb_0.075_implied_upper',
'axvline': 0.075,
# 'y_labelpad': 0,
'plot_x_axis': True
},
'decision_curve_20': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB (20%)',
'x_var': 'score',
'y_var': 'comparator_nb_0.2_mid',
'ci_lower_var': 'comparator_nb_0.2_lower',
'ci_upper_var': 'comparator_nb_0.2_upper',
'axvline': 0.2,
# 'y_labelpad': 0,
'plot_x_axis': True
},
'decision_curve_20_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'NB (20%, rel)',
'x_var': 'score',
'y_var': 'delta_nb_0.2_mid',
'ci_lower_var': 'delta_nb_0.2_lower',
'ci_upper_var': 'delta_nb_0.2_upper',
'axvline': 0.2,
# 'y_labelpad': 0,
'plot_x_axis': True
},
'decision_curve_20_implied': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB (20%)',
'x_var': 'score',
'y_var': 'comparator_nb_0.2_implied_mid',
'ci_lower_var': 'comparator_nb_0.2_implied_lower',
'ci_upper_var': 'comparator_nb_0.2_implied_upper',
'axvline': 0.2,
# 'y_labelpad': 0,
'plot_x_axis': True
},
'decision_curve_20_implied_diff': {
'xlim': (0, 0.4),
'ylim': (-0.025, 0.025),
'xticks': [0, 0.2, 0.4],
'xticklabels': ['0', '0.2', '0.4'],
'legend': False,
'ylabel': 'cNB (20%, rel)',
'x_var': 'score',
'y_var': 'delta_nb_0.2_implied_mid',
'ci_lower_var': 'delta_nb_0.2_implied_lower',
'ci_upper_var': 'delta_nb_0.2_implied_upper',
'axvline': 0.2,
# 'y_labelpad': 0,
'plot_x_axis': True
},
}
def make_plot_grid(
result_df,
plot_keys,
group_var_name,
bbox_to_anchor=(1.1, 0.6),
xlabel_height=0.02,
wspace=0.2,
hspace=0.2,
titlepad=None
):
lambda_values = result_df.lambda_group_regularization.unique()
fig, ax_list = plt.subplots(
len(plot_keys), len(lambda_values), squeeze=False, figsize=(10,1.5*len(plot_keys)), dpi=180
)
plt.subplots_adjust(wspace=wspace, hspace=hspace)
for j, plot_key in enumerate(plot_keys):
for i, lambda_value in enumerate(lambda_values):
the_df = result_df.query('lambda_group_regularization == @lambda_value')
config = plot_config_dict[plot_key].copy()
if i > 0:
config['ylabel'] = None
if j == 0:
text_title = r'$\lambda$ = {0:.3}'.format(lambda_value)
ax_list[j, i].set_title(text_title, pad=titlepad)
plot_data(
the_df,
ax=ax_list[j][i],
hide_yticks=i>0,
hide_xticks=j<len(plot_keys)-1,
group_var_name=group_var_name,
**config
)
# Add treat-all line to decision curves
if plot_key == "decision_curve":
plot_data(
the_df,
ax=ax_list[j][i],
hide_yticks = i > 0,
hide_xticks=j<len(plot_keys)-1,
x_var=plot_config_dict[plot_key]['x_var'],
group_var_name=group_var_name,
**plot_config_dict['decision_curve_treat_all']
)
elif plot_key == "decision_curve_implied":
plot_data(
the_df,
ax=ax_list[j][i],
hide_yticks = i > 0,
hide_xticks=j<len(plot_keys)-1,
x_var=plot_config_dict[plot_key]['x_var'],
group_var_name=group_var_name,
**plot_config_dict['decision_curve_treat_all_implied']
)
ax_list[j][i].text(
0.02, 1.02,
string.ascii_uppercase[j*len(lambda_values) + i],
transform=ax_list[j][i].transAxes,
size=12, weight='bold')
handles, labels = ax_list[-1, -1].get_legend_handles_labels()
fig.text(0.5, xlabel_height, 'Threshold', ha='center', size=18)
fig.align_ylabels(ax_list[:, 0])
plt.figlegend(
handles, labels, bbox_to_anchor=bbox_to_anchor, frameon=False
)
# return fig
# -
plot_keys_dict = {
'performance': ['calibration_curve', 'tpr', 'fpr'],
'decision_curves': [
'decision_curve',
'decision_curve_diff',
'decision_curve_implied',
'decision_curve_implied_diff'
],
'decision_curves_threshold_075': [
'decision_curve_075',
'decision_curve_075_diff',
'decision_curve_075_implied',
'decision_curve_075_implied_diff'
],
'decision_curves_threshold_20': [
'decision_curve_20',
'decision_curve_20_diff',
'decision_curve_20_implied',
'decision_curve_20_implied_diff',
]
}
plot_grid_config = {
('race_eth', 'performance'): {
'bbox_to_anchor': (1.05, 0.6),
'xlabel_height': 0.0,
'titlepad': 15
},
('race_eth', 'decision_curves'): {
'bbox_to_anchor': (1.05, 0.6),
'xlabel_height': 0.02,
'titlepad': 15
},
('race_eth', 'decision_curves_threshold_075'): {
'bbox_to_anchor': (1.05, 0.6),
'xlabel_height': 0.02,
'titlepad': 15
},
('race_eth', 'decision_curves_threshold_20'): {
'bbox_to_anchor': (1.05, 0.6),
'xlabel_height': 0.02,
'titlepad': 15
},
('gender_concept_name', 'performance'): {
'bbox_to_anchor': (1.02, 0.55),
'xlabel_height': 0.0,
'titlepad': 15
},
('gender_concept_name', 'decision_curves'): {
'bbox_to_anchor': (1.02, 0.55),
'xlabel_height': 0.02,
'titlepad': 15
},
('gender_concept_name', 'decision_curves_threshold_075'): {
'bbox_to_anchor': (1.02, 0.55),
'xlabel_height': 0.02,
'titlepad': 15
},
('gender_concept_name', 'decision_curves_threshold_20'): {
'bbox_to_anchor': (1.02, 0.55),
'xlabel_height': 0.02,
'titlepad': 15
},
('race_eth_gender', 'performance'): {
'bbox_to_anchor': (1.0, 0.73),
'xlabel_height': 0.0,
'titlepad': 15
},
('race_eth_gender', 'decision_curves'): {
'bbox_to_anchor': (1.0, 0.7),
'xlabel_height': 0.02,
'titlepad': 15
},
('race_eth_gender', 'decision_curves_threshold_075'): {
'bbox_to_anchor': (1.0, 0.7),
'xlabel_height': 0.02,
'titlepad': 15
},
('race_eth_gender', 'decision_curves_threshold_20'): {
'bbox_to_anchor': (1.0, 0.7),
'xlabel_height': 0.02,
'titlepad': 15
},
}
figures_path = '../zipcode_cvd/experiments/figures/optum/eo_rr/bootstrapped'
plt.close()
attribute='race_eth'
plot_key='decision_curves'
group_objective_metric = 'mmd'
make_plot_grid(
data_dict_pivot[attribute].query('group_objective_metric == @group_objective_metric'),
plot_keys=plot_keys_dict[plot_key],
group_var_name=attribute,
**plot_grid_config[(attribute, plot_key)]
)
for attribute, group_objective_metric, plot_key in itertools.product(
attributes,
['mmd', 'threshold_rate'],
plot_keys_dict.keys()
):
make_plot_grid(
data_dict_pivot[attribute].query('group_objective_metric == @group_objective_metric'),
plot_keys=plot_keys_dict[plot_key],
group_var_name=attribute,
**plot_grid_config[(attribute, plot_key)]
)
figure_path = os.path.join(figures_path, attribute, group_objective_metric)
os.makedirs(figure_path, exist_ok=True)
plt.savefig(os.path.join(figure_path, 'eo_grid_{}.png'.format(plot_key)), dpi=180, bbox_inches='tight')
plt.savefig(os.path.join(figure_path, 'eo_grid_{}.pdf'.format(plot_key)), bbox_inches='tight')
plt.close()
| notebooks/generate_plots_eo_rr_bootstrap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
from matplotlib.pyplot import subplots
import matplotlib.patches as patches
import seaborn as sns
from pylab import rcParams
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import confusion_matrix
import eli5
import os
from tqdm import tqdm
import gc
import random
import math
import psutil
import pickle
import datetime
# +
warnings.filterwarnings('ignore')
root = "../../data/Gamma_Log_Facies_Type_Prediction/"
RANDOM_STATE = 42
np.random.seed(RANDOM_STATE)
plt.style.use('seaborn')
pd.set_option('max_columns', 150)
torch.manual_seed(RANDOM_STATE)
# +
# Original code from https://www.kaggle.com/gemartin/load-data-reduce-memory-usage by @gemartin
# Modified to support timestamp type, categorical type
# Modified to add option to use float16 or not. feather format does not support float16.
from pandas.api.types import is_datetime64_any_dtype as is_datetime
from pandas.api.types import is_categorical_dtype
def reduce_mem_usage(df, use_float16=False):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
# skip datetime type or categorical type
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
# -
# %%time
full_train_df = pd.read_csv(root + "Train_File.csv")
test_df = pd.read_csv(root + "Test_File.csv")
submit_df = pd.read_csv(root + "Submission_File.csv")
# %time
reduce_mem_usage(full_train_df, use_float16=True);
reduce_mem_usage(test_df, use_float16=True);
class Kannada_MNIST_data(Dataset):
    # NOTE: this Dataset appears to be adapted from a Kannada-MNIST notebook; it assumes each row
    # holds 28x28 pixel values, which may not match the gamma-log columns loaded above.
    def __init__(self, df):
        n_pixels = 28 * 28
        # `self.transform` is used in __getitem__ but was never defined; a plain tensor
        # conversion is assumed here (requires torchvision)
        from torchvision import transforms
        self.transform = transforms.ToTensor()
        if "label" not in df.columns:
            # test data
            self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
            self.y = None
        else:
            # training data
            self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
            self.y = torch.from_numpy(df.iloc[:,0].values)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.y is not None:
return self.transform(self.X[idx]), self.y[idx]
else:
return self.transform(self.X[idx])
train_df, valid_df = train_test_split(full_train_df, test_size=0.2, random_state=RANDOM_STATE, shuffle=True)
# +
batch_size = 256
train_dataset = Kannada_MNIST_data(train_df)
valid_dataset = Kannada_MNIST_data(valid_df)
test_dataset = Kannada_MNIST_data(test_df)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
# -
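# `show_batch` is called below but never defined in this notebook; a minimal sketch is added here
# (assumption: each batch holds image-like tensors as produced by the Dataset class above).
def show_batch(loader, n=4):
    batch = next(iter(loader))
    # the train/valid loaders yield (inputs, labels); the test loader yields inputs only
    inputs = batch[0] if isinstance(batch, (list, tuple)) else batch
    fig, axes = plt.subplots(1, n, figsize=(10, 3))
    for i, ax in enumerate(axes):
        ax.imshow(np.squeeze(inputs[i].numpy()), cmap='gray')
        ax.axis('off')
    plt.show()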
show_batch(train_loader)
show_batch(valid_loader)
show_batch(test_loader)
def train(model, train_loader):
batch_loss = 0.0
batch_corrects = 0.0
model.train()
for inputs, labels in train_loader:
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
preds = torch.max(outputs, 1)[1]
batch_loss += loss.item()
batch_corrects += torch.sum(preds == labels.data)
return batch_loss/len(train_loader), batch_corrects.float()/len(train_dataset)
def evaluate(model, valid_loader):
loss = 0.0
corrects = 0.0
model.eval()
with torch.no_grad():
for inputs, labels in valid_loader:
            # tensors are moved to the device directly below; the deprecated Variable wrapper is not needed
inputs, labels = inputs.to(device), labels.to(device)
outputs = model(inputs)
loss += F.cross_entropy(outputs, labels, reduction='mean').item()
pred = outputs.data.max(1, keepdim=True)[1]
corrects += pred.eq(labels.data.view_as(pred)).cpu().sum()
return loss/len(valid_loader), corrects.float()/len(valid_dataset)
def prediction(model, data_loader):
    model.eval()
    test_pred = torch.LongTensor()
    with torch.no_grad():  # inference only, no gradients needed
        for i, data in enumerate(data_loader):
            data = data.to(device)
            test_pred = test_pred.to(device)
            output = model(data)
            pred = output.data.max(1, keepdim=True)[1]
            test_pred = torch.cat((test_pred, pred), dim=0)
    return test_pred
# +
cell = nn.LSTM(input_size=10, hidden_size=10, batch_first=True)
class LSTMClassifier(nn.Module):
def __init__(self):
super(LSTMClassifier, self).__init__()
self.lstm = nn.LSTM(64, 64)
self.classifier = nn.Linear(64, 4)
        # nn.LSTM and nn.Linear expose their weights as parameters, not child modules,
        # so initialize the weight matrices directly
        for name, param in self.lstm.named_parameters():
            if 'weight' in name:
                nn.init.xavier_uniform_(param)
        nn.init.xavier_uniform_(self.classifier.weight)
def forward(self, x):
lstm_out, _ = self.lstm(x)
        # nn.Linear applies to the last dimension, so the classifier can be applied to the LSTM output directly
        out = self.classifier(lstm_out)
return out
# -
# `Conv2Class2Net` is not defined in this notebook; the LSTMClassifier defined above is used
# instead (assumption), and `device` is defined here since it is used throughout but never set.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LSTMClassifier()
model.to(device)
epochs = 30
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)
scheduler = ReduceLROnPlateau(
optimizer,
factor=0.7,
mode="max",
verbose=True,
patience=1,
threshold=1e-3,
eps=1e-06
)
# +
# %%time
epoch_loss_history = []
epoch_corrects_history = []
val_loss_history = []
val_corrects_history = []
for epoch in range(epochs):
epoch_loss, epoch_corrects = train(model, train_loader)
val_loss, val_corrects = evaluate(model, valid_loader)
epoch_loss_history.append(epoch_loss)
epoch_corrects_history.append(epoch_corrects)
val_loss_history.append(val_loss)
val_corrects_history.append(val_corrects)
print('epoch:', (epoch+1))
print('training loss: {:.4f}, training acc {:.4f} '.format(epoch_loss, epoch_corrects.item()))
print('validation loss: {:.4f}, validation acc {:.4f} '.format(val_loss, val_corrects.item()))
scheduler.step(val_corrects)
# -
plt.plot(epoch_loss_history, label='training loss')
plt.plot(val_loss_history, label='validation loss')
plt.legend()
plt.plot(epoch_corrects_history, label='training accuracy')
plt.plot(val_corrects_history, label='validation accuracy')
plt.legend()
# %%time
test_pred = prediction(model, test_loader)
test_pred = test_pred.to(torch.device('cpu'))
ids = test_df['id']
submission = pd.DataFrame({'id': ids,'label': test_pred.numpy().T[0]})
submission.to_csv(path_or_buf ="submission.csv", index=False)
submission.head()
| notebooks/Gamma_Log_Facies_Type_Prediction/02_pytorch_lstm_dens.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''data-science-tutorial-xTdhcGj7'': pipenv)'
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# # Series
# The pandas package is built on two data structures: the Series and the DataFrame. A Series can be thought of as a one-dimensional array: a one-dimensional data structure made up of labeled data, where the label values are called the index. The data itself can consist of numbers, strings, or other Python objects.
# #### Creating from a Dictionary
myDict = {'enes' : 21, 'kahraman' : 45, 'beril' : 19}
pd.Series(myDict)
# #### Creating from a List
myAge = [21,45,19]
myName = ['enes', 'kahraman', 'beril']
pd.Series(myAge)
pd.Series(data=myAge, index=myName)
# #### Creating from a NumPy Array
numpyArray = np.array([21,45,19])
pd.Series(numpyArray)
# #### Retrieving Data
series1 = pd.Series([10,20,30],['a','b','c'])
series2 = pd.Series([9,2,5],['a','b','c'])
series1['a']
# #### Operator Arithmetic
# These operators are also what pandas uses when combining two Series.
series1 + series2
series1 - series2
series1 * series2
series1 / series2
# **Note:** Values whose indexes match in both Series are combined; an index that appears in one Series but not the other is not combined and is assigned a **NaN** value.
pd.Series([10,20,30],['a','b','c']) + pd.Series([5,8,90],['r','b','v'])
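# A minimal illustrative addition (not in the original): `Series.add` with `fill_value` treats
# missing indexes as 0 instead of producing NaN.
pd.Series([10,20,30],['a','b','c']).add(pd.Series([5,8,90],['r','b','v']), fill_value=0)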
| pandas/1.0.series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import json
queries = []
labels = []
sqls = []
db_ids = []
with open("sparc/train.json","r") as f:
interactions = json.load(f)
for interaction in interactions:
db_id = interaction["database_id"]
turns = interaction["interaction"]
if len(turns)>1:
for i in range(1,len(turns)):
queries.append(" <s> ".join([ele["utterance"] for ele in turns[0:i]]))
sqls.append(turns[i]["sql"])
labels.append(turns[i]["utterance"])
db_ids.append(db_id)
# + pycharm={"name": "#%%\n"}
import attr
import torch
from seq2struct.utils import registry
@attr.s
class PreprocessConfig:
config = attr.ib()
config_args = attr.ib()
class Preprocessor:
def __init__(self, config):
self.config = config
self.model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
@attr.s
class InferConfig:
config = attr.ib()
config_args = attr.ib()
logdir = attr.ib()
section = attr.ib()
beam_size = attr.ib()
output = attr.ib()
step = attr.ib()
use_heuristic = attr.ib(default=False)
mode = attr.ib(default="infer")
limit = attr.ib(default=None)
output_history = attr.ib(default=False)
class Inferer:
def __init__(self, config):
self.config = config
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
torch.set_num_threads(1)
# 0. Construct preprocessors
self.model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
self.model_preproc.load()
def load_model(self, logdir, step):
'''Load a model (identified by the config used for construction) and return it'''
# 1. Construct model
model = registry.construct('model', self.config['model'], preproc=self.model_preproc, device=self.device)
model.to(self.device)
model.eval()
model.visualize_flag = False
# 2. Restore its parameters
saver = saver_mod.Saver({"model": model})
last_step = saver.restore(logdir, step=step, map_location=self.device, item_keys=["model"])
if not last_step:
raise Exception('Attempting to infer on untrained model')
return model
# + pycharm={"name": "#%%\n"}
import _jsonnet
from seq2struct import datasets
from seq2struct import models
from seq2struct.utils import registry
from seq2struct.utils import vocab
exp_config = json.loads(_jsonnet.evaluate_file("experiments/sparc-configs/gap-run.jsonnet"))
model_config_file = exp_config["model_config"]
model_config_args = json.dumps(exp_config["model_config_args"])
preprocess_config = PreprocessConfig(model_config_file, model_config_args)
config = json.loads(_jsonnet.evaluate_file(preprocess_config.config, tla_codes={'args': preprocess_config.config_args}))
preprocessor = Preprocessor(config)
data = registry.construct('dataset', config['data']["train"])
# test = preprocessor.model_preproc.dec_preproc.grammar.parse(sqls[0],"train")
# -
# ===========================
# + pycharm={"name": "#%%\n"}
import sys
import os
from seq2struct import beam_search
from seq2struct import datasets
from seq2struct import models
from seq2struct import optimizers
from seq2struct.utils import registry
from seq2struct.utils import saver as saver_mod
from seq2struct.models.spider import spider_beam_search
exp_config = json.loads(_jsonnet.evaluate_file("experiments/sparc-configs/gap-run.jsonnet"))
model_config_file = exp_config["model_config"]
model_config_args = json.dumps(exp_config["model_config_args"])
infer_output_path = "{}/{}-step{}.infer".format(
exp_config["eval_output"],
exp_config["eval_name"],
38100)
infer_config = InferConfig(
model_config_file,
model_config_args,
exp_config["logdir"],
exp_config["eval_section"],
exp_config["eval_beam_size"],
infer_output_path,
38100,
use_heuristic=exp_config["eval_use_heuristic"]
)
if infer_config.config_args:
config = json.loads(_jsonnet.evaluate_file(infer_config.config, tla_codes={'args': infer_config.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(infer_config.config))
if 'model_name' in config:
infer_config.logdir = os.path.join(infer_config.logdir, config['model_name'])
output_path = infer_config.output.replace('__LOGDIR__', infer_config.logdir)
inferer = Inferer(config)
model = inferer.load_model(infer_config.logdir, infer_config.step)
# + pycharm={"name": "#%%\n"}
# Rewrite the SQLs by parsing them into ASTs and unparsing them back
class TTT:
def __init__(self,schema):
self.schema = schema
rewrite_sqls = []
for i in range(len(sqls)):
root = preprocessor.model_preproc.dec_preproc.grammar.parse(sqls[i],"train")
ttt = TTT(data.schemas[db_ids[i]])
r_sql = model.decoder.preproc.grammar.unparse(root, ttt)
rewrite_sqls.append(r_sql)
# + pycharm={"name": "#%%\n"}
with open("coco/sparc/train.json","w") as f:
for i in range(len(sqls)):
x = queries[i]+" </s> "+rewrite_sqls[i]
label = labels[i]
f.write(json.dumps({"x":x,"label":label})+"\n")
# + [markdown] pycharm={"name": "#%% md\n"}
# ==============
# Data augmentation
#
#
#
#
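# The cells below perturb each gold SQL query in two ways: swapping a selected column for another
# column of the same type in the same table, and swapping a WHERE-clause column together with a
# replacement value sampled from the corresponding SQLite database. Each perturbation is retried
# several times per query, and the rewritten SQL plus its AST are saved for later use.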
# + pycharm={"name": "#%%\n"}
import copy
from random import choice
import sqlite3
from tqdm import tqdm
def do_change_select_agg(sql,schema):
is_change = False
selects = sql["select"][1]
is_distinct = sql["select"][0]
for select in selects:
val_unit = select[1]
col_unit = val_unit[1]
if not is_distinct:
if col_unit[2]==False:
#no distinct
agg = col_unit[0]
column_id = col_unit[1]
#=0, aka *, skip
if column_id!=0:
tp = schema.columns[column_id].type
if tp=='text':
agg_candidates =[0,3]
if agg in agg_candidates:
agg_candidates.remove(agg)
new_agg = choice(agg_candidates)
col_unit[0] = new_agg
is_change = True
return is_change
def do_change_select_column(sql,schema):
is_change = False
selects = sql["select"][1]
is_distinct = sql["select"][0]
for select in selects:
val_unit = select[1]
col_unit = val_unit[1]
if (not is_distinct) and col_unit[2]==False:
#no distinct
column_id = col_unit[1]
if column_id!=0:
tp = schema.columns[column_id].type
to_replaces = [ele.id for ele in schema.columns[column_id].table.columns if tp==ele.type]
if column_id in to_replaces:
to_replaces.remove(column_id)
if 0 in to_replaces:
to_replaces.remove(0)
if len(to_replaces)>0:
to_replace = choice(to_replaces)
col_unit[1]=to_replace
is_change = True
else:
table_id = None
for table_unit in sql["from"]["table_units"]:
if table_unit[0] =="table_unit":
table_id = table_unit[1]
if table_id is not None:
to_replaces = [ele.id for ele in schema.tables[table_id].columns if "text"==ele.type]
if 0 in to_replaces:
to_replaces.remove(0)
if len(to_replaces)>0:
to_replace = choice(to_replaces)
col_unit[1]=to_replace
is_change = True
return is_change
def do_change_where_column(sql,schema):
    # open a sqlite connection for this database
conn = sqlite3.connect('sparc/database/{0}/{0}.sqlite'.format(schema.db_id))
cursor = conn.cursor()
is_change = False
wheres = sql["where"]
for where in wheres:
if isinstance(where,list):
cond_unit = where
column_id = cond_unit[2][1][1]
tp = schema.columns[column_id].type
if tp == "number":
to_replaces = [ele.id for ele in schema.columns[column_id].table.columns if ele.type==tp]
if 0 in to_replaces:
to_replaces.remove(0)
if len(to_replaces)>0:
cond_unit[2][1][1] = choice(to_replaces)
is_change = True
if tp == "text":
to_replaces = [ele.id for ele in schema.columns[column_id].table.columns if ele.type==tp]
if column_id in to_replaces:
to_replaces.remove(column_id)
if 0 in to_replaces:
to_replaces.remove(0)
if len(to_replaces)>0:
to_replace = choice(to_replaces)
                    # randomly sample candidate replacement values
try:
cursor.execute("select {} from {} ORDER BY RANDOM() limit 2".format(schema.columns[to_replace].orig_name,schema.columns[to_replace].table.orig_name))
except:
return False
c_result = cursor.fetchall()
vals = [ele[0] for ele in c_result]
if vals is not None and len(vals) > 0:
if not isinstance(cond_unit[3],dict):
orig_val = cond_unit[3]
if orig_val is None:
return False
if isinstance(orig_val,str):
if len(orig_val)>0 and orig_val[0] == "\"":
orig_val = orig_val[1:]
if len(orig_val)>0 and orig_val[-1] == "\"":
orig_val = orig_val[:-1]
if orig_val in vals:
                                vals.remove(orig_val)  # list.remove mutates in place; reassigning would set vals to None
if vals is not None and len(vals)>0:
v_to_replace = choice(vals)
cond_unit[2][1][1] = to_replace
cond_unit[3] = v_to_replace
is_change=True
return is_change
rewrite_sqls = []
for i in tqdm(range(len(sqls))):
_cur = {}
root = preprocessor.model_preproc.dec_preproc.grammar.parse(sqls[i],"dev")
ttt = TTT(data.schemas[db_ids[i]])
gold_sql = model.decoder.preproc.grammar.unparse(root, ttt)
_cur["gold"] = gold_sql
# _cur["select_agg"] = []
# _cur["select_agg_ast"] = []
_cur["select_column"] = []
_cur["select_column_ast"] = []
_cur["where_column"] = []
_cur["where_column_ast"] = []
    # 5 attempts at changing the select agg (currently commented out)
# for iii in range(5):
# changed_sql = copy.deepcopy(sqls[i])
# is_changed = do_change_select_agg(changed_sql,data.schemas[db_ids[i]])
# if is_changed:
# root = preprocessor.model_preproc.dec_preproc.grammar.parse(changed_sql,"dev")
# ttt = TTT(data.schemas[db_ids[i]])
# r_sql = model.decoder.preproc.grammar.unparse(root, ttt)
# _cur["select_agg"].append(r_sql)
# _cur["select_agg_ast"].append(changed_sql)
for iii in range(5):
changed_sql = copy.deepcopy(sqls[i])
is_changed = do_change_select_column(changed_sql,data.schemas[db_ids[i]])
if is_changed:
root = preprocessor.model_preproc.dec_preproc.grammar.parse(changed_sql,"dev")
ttt = TTT(data.schemas[db_ids[i]])
r_sql = model.decoder.preproc.grammar.unparse(root, ttt)
_cur["select_column"].append(r_sql)
_cur["select_column_ast"].append(changed_sql)
for iii in range(10):
changed_sql = copy.deepcopy(sqls[i])
is_changed = do_change_where_column(changed_sql,data.schemas[db_ids[i]])
if is_changed:
try:
root = preprocessor.model_preproc.dec_preproc.grammar.parse(changed_sql,"dev")
ttt = TTT(data.schemas[db_ids[i]])
r_sql = model.decoder.preproc.grammar.unparse(root, ttt)
_cur["where_column"].append(r_sql)
_cur["where_column_ast"].append(changed_sql)
except:
ignored = 0
rewrite_sqls.append(_cur)
# + pycharm={"name": "#%%\n"}
with open("coco/sparc/train_aug.json","w") as f:
for i in range(len(sqls)):
record = {
"db_id":db_ids[i],
"query":queries[i],
"gold_sql":rewrite_sqls[i]["gold"],
"rewrite_sqls":{
# "select_agg" : rewrite_sqls[i]["select_agg"],
# "select_agg_ast" : rewrite_sqls[i]["select_agg_ast"],
"select_column" : rewrite_sqls[i]["select_column"],
"select_column_ast" : rewrite_sqls[i]["select_column_ast"],
"where_column" : rewrite_sqls[i]["where_column"],
"where_column_ast" : rewrite_sqls[i]["where_column_ast"]
}
}
f.write(json.dumps(record)+"\n")
| rat-sql-gap/coco_sql_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ZgwpUVYMtxzK" colab_type="code" colab={}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display, HTML, IFrame
from ipywidgets.embed import embed_minimal_html
# !jupyter nbextension enable --py --sys-prefix widgetsnbextension
# + id="rKaIay9HcgEy" colab_type="code" colab={}
#Upload kaggle.json here!
# !pip install -q kaggle
from google.colab import files
files.upload()
# + id="a1o7dEKanDjW" colab_type="code" colab={}
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# + id="CetbuJSWok-B" colab_type="code" colab={}
# !kaggle datasets download -d sudalairajkumar/novel-corona-virus-2019-dataset
# !unzip ./novel-corona-virus-2019-dataset.zip -d ./
# + id="YEhuwpuwuuKB" colab_type="code" colab={}
confirmed_world = pd.read_csv("./time_series_covid_19_confirmed.csv")
confirmed_world.name = "confirmed_world"
death_world = pd.read_csv("./time_series_covid_19_deaths.csv")
death_world.name = "Death_world"
recovered_world = pd.read_csv("./time_series_covid_19_recovered.csv")
recovered_world.name ="recovered_world"
confirmed_US = pd.read_csv("./time_series_covid_19_confirmed_US.csv")
confirmed_US.rename(columns={'Long_': 'Long'}, inplace=True)
confirmed_US.name = "confirmed_US"
death_US = pd.read_csv("./time_series_covid_19_deaths_US.csv")
death_US.rename(columns={'Long_': 'Long'}, inplace=True)
death_US.name = "Death_US"
# + id="_bis1H3Oe996" colab_type="code" colab={}
# !pip install geopandas
#import shape file from https://www.naturalearthdata.com/
#I used https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/physical/ne_110m_land.zip
#must import .shp and .shx
from google.colab import files
files.upload()
# + id="T3i9U1GPRMQt" colab_type="code" colab={}
import geopandas
# + id="4vwTcBmIHhUE" colab_type="code" colab={}
def generate_map(data_frame, column, shape_file):
    # combine the Lat/Long columns into point geometries for plotting
    gdf = geopandas.GeoDataFrame(
        data_frame[column], geometry=geopandas.points_from_xy(data_frame['Long'], data_frame['Lat']))
    world = geopandas.read_file(shape_file)
    ax = world.plot()
    # plots the data scaled by the numbers found in the specified column, e.g. the number of deaths;
    # would probably be nicer to change colors based on relative percentage or something
    gdf.plot(markersize=data_frame[column], ax=ax, color='red')
    plt.show()
    return ax.get_figure()  # return the figure so callers such as day_by_day_map can save it
# + id="5mwP23OxJEJp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 228} outputId="f77f86d8-a6e8-4cc7-e357-90a06e142e41"
generate_map(death_world, '4/20/20', 'ne_110m_admin_0_countries.shp')
# + id="dpAqHiXbUXDM" colab_type="code" colab={}
# turns the images into a gif using Pillow (imported as PIL)
def generate_gif(directory_name):
    from PIL import Image
    from os import listdir
    from os.path import isfile, join
    filenames = sorted(f for f in listdir(directory_name) if isfile(join(directory_name, f)))
    frames = [Image.open(join(directory_name, f)) for f in filenames]
    frames[0].save(directory_name + '.gif', save_all=True, append_images=frames[1:])
# + id="XI9pYEQsZiq6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 534} outputId="78a5f3fa-b230-4752-d9ce-05e563eee5e7"
#This generates images for every day we've been keeping track and makes the gif
#it's broken for now
def day_by_day_map(data_set, shape_file):
import os
try:
os.makedirs("/content/" + data_set.name)
path = "/content/" + data_set.name
except:
path = "/content/" + data_set.name
dropped = data_set.drop(columns=['Province/State', 'Country/Region', 'Lat', 'Long'])
for column in dropped.columns:
generate_map(data_set, column, shape_file).savefig(path + column + ".png")
generate_gif(path)
day_by_day_map(confirmed_world, 'ne_110m_admin_0_countries.shp')
# + id="NYeCMr3lCLsd" colab_type="code" colab={}
# this is where I was going to calc IFR and other relevant stats
# it would be nice to add a simulation as well for hypothetical data points, maybe assign a similarity score to the hypo data so it doesn't throw off the model
def country_ifr(data_set):
    # unfinished stub: kept syntactically valid so the cell runs
    date_columns = data_set.drop(columns=['Province/State', 'Country/Region', 'Lat', 'Long']).columns
    for country in data_set['Country/Region']:
        for date in date_columns:
            pass  # TODO: compute per-country IFR from confirmed/death counts
# + id="WHXAReCz4-bn" colab_type="code" colab={}
#code I found to lay out maps side by side e.g. to contrast deaths vs recovered
import matplotlib.image as mpimg
from matplotlib import rcParams
# %matplotlib inline
# figure size in inches optional
rcParams['figure.figsize'] = 11 ,8
# read images
img_A = mpimg.imread('\path\to\img_A.png')
img_B = mpimg.imread('\path\to\img_B.png')
# display images
fig, ax = plt.subplots(1,2)
ax[0].imshow(img_A);
ax[1].imshow(img_B);
| corona_map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import math
import pandas as pd
import seaborn as sns
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.optimizers import SGD
import tensorflow.keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout, GRU, SimpleRNN
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# plt.style.use('fivethirtyeight')
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from numpy import concatenate
# <h1>Make Multivariable Dataset</h1>
# <h2>Load Chinook Data</h2>
def load_data(pathname):
salmon_data = pd.read_csv(pathname)
salmon_data.head()
salmon_copy = salmon_data
salmon_copy.rename(columns = {"mo": "month", "da" : "day", "fc" : "king"}, inplace = True)
salmon_copy['date']=pd.to_datetime(salmon_copy[['year','month','day']])
king_data = salmon_copy.filter(["date","king"], axis=1)
king_greater = king_data['date'].apply(pd.Timestamp) >= pd.Timestamp('01/01/1939')
greater_than = king_data[king_greater]
king_all = greater_than[greater_than['date'].apply(pd.Timestamp) <= pd.Timestamp('12/31/2020')]
king_all_copy = king_all
king_all_copy = king_all_copy.reset_index()
king_all_copy = king_all_copy.drop('index', axis=1)
return king_all_copy, king_data
chris_path = '/Users/chrisshell/Desktop/Stanford/SalmonData/Use Data/passBonCS.csv'
ismael_path = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/data.csv'
abdul_path = '/Users/abdul/Downloads/SalmonNet/passBonCS.csv'
king_all_copy, king_data= load_data(ismael_path)
print(king_all_copy)
data_copy = king_all_copy
print(data_copy['date'])
data_copy.set_index('date', inplace=True)
data_copy.index = pd.to_datetime(data_copy.index)
data_copy = data_copy.resample('1M').sum()
data_copy
print(data_copy)
data_copy.shape
data_copy.reset_index(inplace=True)
data_copy = data_copy.rename(columns = {'index':'date'})
data_copy
master_data = data_copy
master_data
master_data = master_data[132:]
master_data
#
master_data.reset_index(inplace=True)
master_data = master_data.drop(labels='index', axis=1)
print(master_data)
# <h2>Load Covariate Data and Concat to Master_Data</h2>
def load_cov_set(pathname):
data = pd.read_csv(pathname)
return data
ismael_path_cov = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/covariates.csv'
chris_path_cov = '/Users/chrisshell/Desktop/Stanford/SalmonData/Environmental Variables/salmon_env_use.csv'
abdul_path_cov= '/Users/abdul/Downloads/SalmonNet/salmon_env_use.csv'
cov_data = load_cov_set(ismael_path_cov)
cov_data
upwelling = cov_data["upwelling"]
master_data = master_data.join(upwelling)
master_data
noi = cov_data["noi"]
master_data = master_data.join(noi)
master_data
npgo = cov_data["npgo"]
master_data = master_data.join(npgo)
master_data
pdo = cov_data["pdo"]
master_data = master_data.join(pdo)
master_data
oni = cov_data["oni "]
master_data = master_data.join(oni)
master_data
master_data = master_data.rename(columns={"oni ": "oni"})
master_data
# <h2> Load and Concat NOI data </h2>
master_data.set_index('date', inplace=True)
master_data.index = pd.to_datetime(master_data.index)
master_data
master_data.to_csv('master_data.csv')
checkpoint_filepath = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/checkpoint'
chris_checkpoint_path = '/Users/chrisshell/Desktop/Stanford/SalmonData/Checkpoint'
abdul_checkpoint_path = '/Users/abdul/Downloads/SalmonNet/Checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
# <h2>Let's plot each series</h2>
dataset = read_csv('master_data.csv', header=0, index_col=0)
values = dataset.values
# specify columns to plot
groups = [0, 1, 2, 3, 4, 5]
i = 1
# plot each column
plt.figure()
for group in groups:
plt.subplot(len(groups), 1, i)
plt.plot(values[:, group])
plt.title(dataset.columns[group], y=.5, loc='right')
i += 1
plt.show()
# <h2>Make Series into Train and Test Set with inputs and outputs</h2>
# +
# convert series to supervised learning
# series_to_supervised from <NAME>'s "Multivariate Time Series Forecasting in Keras"
def series_to_supervised(data, n_in=6, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
agg = concat(cols, axis=1)
agg.columns = names
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg
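# For the call below (n_in=6 months, n_out=1), each of the 6 variables contributes lagged columns
# var{j}(t-6) .. var{j}(t-1) plus the current var{j}(t), and rows whose lags are NaN are dropped.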
# load dataset
dataset = read_csv('master_data.csv', header=0, index_col=0)
values = dataset.values
# integer encode direction
encoder = LabelEncoder()
values[:,1] = encoder.fit_transform(values[:,1])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
n_months = 6
n_features = 6
reframed = series_to_supervised(scaled, n_months, 1)
# drop columns we don't want to predict
# reframed.drop(reframed.columns[[13]], axis=1, inplace=True)
print(reframed.head())
# -
# split into train and test sets
values = reframed.values
n_train_months = 66 * 12 # MENTAL NOTE: IF ERROR IN MONTH TO YEAR CHECK THIS
train = values[:n_train_months, :]
test = values[n_train_months:, :]
# split into input and outputs
n_obs = n_months * n_features
train_X, train_y = train[:, :n_obs], train[:, -n_features]
test_X, test_y = test[:, :n_obs], test[:, -n_features]
train_X = train_X.reshape((train_X.shape[0], n_months, n_features))
test_X = test_X.reshape((test_X.shape[0], n_months, n_features))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
#create train, test, dev split
X_train, X_dev, y_train, y_dev = train_test_split(train_X, train_y, test_size=0.10, shuffle=False)
print(X_dev.shape)
print(y_dev.shape)
print(X_train.shape)
print(y_train.shape)
print(test_X.shape)
print(test_y.shape)
# design network
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]), return_sequences=True))
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]), return_sequences=True))
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]), return_sequences=True))
model.add(LSTM(1))
# model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=[tf.keras.metrics.RootMeanSquaredError()])
# fit network
#
history = model.fit(train_X, train_y, epochs=1000, batch_size=1000, validation_data=(X_dev, y_dev), verbose=2, shuffle=False, callbacks=[model_checkpoint_callback])
# plot history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='dev')
plt.legend()
plt.show()
# make a prediction
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], n_months*n_features))
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X[:, -5:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, -5:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# +
def plot_predictions(test,predicted):
plt.plot(test, color='red',label='Real Chinook Count')
plt.plot(predicted, color='blue',label='Predicted Chinook Count')
plt.title('Chinook Population Prediction')
plt.xlabel('Time')
plt.ylabel('Chinook Count')
plt.legend()
plt.show()
def plot_loss(history):
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
def month_to_year(month_preds):
month_preds = month_preds[6:]
year_preds = []
for i in range(12, len(month_preds) + 1, 12):
salmon_count = np.sum(month_preds[i - 12:i])
year_preds.append(salmon_count)
year_preds = pd.DataFrame(year_preds, columns = ["Count"])
return year_preds
def return_rmse(test, predicted):
rmse = math.sqrt(mean_squared_error(test, predicted))
print("The test root mean squared error is {}.".format(rmse))
# -
plot_predictions(inv_y, inv_yhat)
return_rmse(inv_y, inv_yhat)
plot_loss(history)
preds = month_to_year(inv_yhat).astype(np.int64)
actual = month_to_year(inv_y).astype(np.int64)
print(preds)
print(actual)
bs_chris_path = '/Users/chrisshell/Desktop/Stanford/SalmonData/Use Data/Forecast Data Update.csv'
bs_ismael_path = '/Users/ismaelcastro/Documents/Computer Science/CS Classes/CS230/project/forecast_data_17_20.csv'
bs_abdul_path = '/Users/abdul/Downloads/SalmonNet/Forecast Data Update.csv'
baseline_data = pd.read_csv(bs_ismael_path)
traditional = pd.DataFrame(baseline_data["Count"])
print(traditional)
return_rmse(actual, traditional)
return_rmse(actual, preds)
| .ipynb_checkpoints/multivar_simple_lstm-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] editable=true
# Execute `create_tables.py` and `etl.py` scripts.
# + [markdown] editable=true
# ### Create tables
# + editable=true language="bash"
#
# python create_tables.py
# + [markdown] editable=true
# Run test.ipynb to verify tables are created.
# + [markdown] editable=true
# ### ETL into the tables
# + editable=true language="bash"
#
# python etl.py
# + [markdown] editable=true
# Run test.ipynb to verify tables contain data.
# + editable=true
| Udacity/Data Modeling/Project1/notebooks/.ipynb_checkpoints/run_scripts-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# [](https://www.pythonista.io)
# # Developing a simple web application.
#
# ## Objectives.
#
# 1. Display an HTML document with the full data of the *dict* object contained in the representation of the *list* object stored in [data/alumnos.txt](data/alumnos.txt), whenever the value of the *'Cuenta'* identifier matches the number entered as part of the URL ```http://localhost:5000/alumno/<número>```.
#
# * Display an HTML document containing every match of the search for the string entered as part of the URL ```http://localhost:5000/busca/<cadena>``` within the *dict* objects contained in the representation of the *list* object stored in [data/alumnos.txt](data/alumnos.txt).
# * The search is performed on the values of the *'Nombre'*, *'Primer Apellido'* and *'Segundo Apellido'* identifiers.
# * The HTML document shows the list of matching _dict_ objects, including the values of *'Nombre'*, *'Primer Apellido'* and *'Segundo Apellido'*, as well as a URL containing the *Cuenta* number in the format ```http://localhost:5000/alumno/<número>```.
# ## Templates.
#
# The HTML documents are built from Jinja 2 templates.
# ### Template for ```http://localhost:5000/alumno/<número>```.
#
# The template [templates/despliega.html](templates/despliega.html) contains the following code:
#
# ```html
# <h1> Alumno {{ alumno['Cuenta'] }} </h1>
# <ul>
# <li>Nombre: {%for campo in ['Nombre', 'Primer Apellido', 'Segundo Apellido'] %}
# {{alumno[campo]}}{% endfor %}</li>
# <li>Carrera: {{ alumno['Carrera'] }} </li>
# <li>Semestre: {{ alumno['Semestre'] }} </li>
# <li>Promedio: {{ alumno['Promedio'] }} </li>
# {% if alumno["Al Corriente"] %} <li>El alumno está al corriente de pagos.</li> {% endif %}
# </ul>
# ```
# ### Template for ```http://localhost:5000/busca/<cadena>```.
#
# The template [templates/busqueda_avanzada.html](templates/busqueda_avanzada.html) contains the following code:
#
# ``` html
# <h1> Alumnos Encontrados</h1>
# <ul>
# {%for alumno in alumnos %}
# <li> <a href={{ url_for('despliega', cuenta=alumno['Cuenta']) }}> {{ alumno['Cuenta']}}</a>:
# {%for campo in ['Nombre', 'Primer Apellido', 'Segundo Apellido'] %}
# {{alumno[campo]}}
# {% endfor %} </li>
# {% endfor %}
# </ul>
# ```
# ## Application code.
# ### Data section.
campos = ('Nombre', 'Primer Apellido', 'Segundo Apellido')
ruta = 'data/alumnos.txt'
# ### The ```encuentra()``` function.
#
# * Searches for a string within the indicated fields of a ```dict``` object.
# * If a match is found, the result is ```True```.
encuentra = lambda cadena, registro, campos: bool(sum([cadena.casefold() \
in registro[campo].casefold() for campo in campos]))
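# A quick check with a hypothetical record (not taken from data/alumnos.txt):
encuentra('ramos', {'Nombre': 'Laura', 'Primer Apellido': 'Ramos', 'Segundo Apellido': 'Vega'}, campos)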
# ### The ```buscar_archivo()``` function.
#
# * Reads the contents of the text file given by the ```ruta``` parameter and converts it with the ```eval()``` function. It is assumed that the ```base``` object is a ```tuple``` or ```list``` that in turn contains ```dict``` objects.
# * The ```encuentra()``` function is applied to every element of ```base```, building a list of the elements where the string matches in the indicated fields.
def buscar_archivo(cadena, ruta, campos):
with open(ruta, 'tr') as archivo:
base = eval(archivo.read())
return [registro for registro in base if encuentra(cadena, registro, campos)]
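# For example, buscar_archivo('Ramos', ruta, campos) returns every record whose name fields
# contain the substring 'Ramos' (case-insensitive), assuming data/alumnos.txt holds such records.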
# The required components are imported.
import jinja2
from flask import Flask, render_template, url_for, abort
# The *app* object is instantiated from the *Flask* class.
app = Flask(__name__)
# The view function for ```http://localhost:5000/busca/<cadena>``` is created.
@app.route('/busca/<cadena>')
def busca(cadena):
return render_template('busqueda_avanzada.html', alumnos=buscar_archivo(str(cadena), ruta, campos))
# The view function for ```http://localhost:5000/alumno/<cuenta>``` is created.
@app.route('/alumno/<cuenta>')
def despliega(cuenta):
falla = True
with open(ruta, 'tr') as archivo:
base = eval(archivo.read())
for registro in base:
try:
if registro['Cuenta'] == int(cuenta):
alumno = registro
falla = False
break
except:
pass
if falla :
abort(404)
return render_template('despliega.html', alumno=alumno)
# The view function for a 404 error is created.
@app.errorhandler(404)
def no_encontrado(error):
return '<h1> Error</h1><p>Recurso no encontrado.</p>', 404
# **Warning:** Once the following cell is executed, the Jupyter kernel must be interrupted in order to run the rest of the notebook's cells.
#If the host parameter is not set, Flask will only be reachable from localhost
# app.run(host='localhost')
app.run(host="0.0.0.0", port=5000)
# **Examples:**
#
# * Returns the list of matches for the string *Ramos*.
# * http://localhost:5000/busca/Ramos
#
# * Returns the record whose 'Cuenta' field equals *1231223*.
# * http://localhost:5000/alumno/1231223
#
# * Returns the 404 error page.
# * http://localhost:5000/alumno/1231217
# <p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.</p>
# <p style="text-align: center">© <NAME>. 2018.</p>
| 13_aplicacion_web_simple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 403 RNN Regressor
#
# View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
# My Youtube Channel: https://www.youtube.com/user/MorvanZhou
#
# Dependencies:
# * torch: 0.1.11
# * matplotlib
# * numpy
# +
import platform
print(platform.python_version())
# %load_ext watermark
# %watermark -a 'Gopala KR' -u -d -v -p watermark,numpy,pandas,scipy,scikit-learn,matplotlib,torch,tensorflow
# -
import torch
from torch import nn
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
torch.manual_seed(1) # reproducible
# Hyper Parameters
TIME_STEP = 10 # rnn time step
INPUT_SIZE = 1 # rnn input size
LR = 0.02 # learning rate
# show data
steps = np.linspace(0, np.pi*2, 100, dtype=np.float32)
x_np = np.sin(steps) # float32 for converting torch FloatTensor
y_np = np.cos(steps)
plt.plot(steps, y_np, 'r-', label='target (cos)')
plt.plot(steps, x_np, 'b-', label='input (sin)')
plt.legend(loc='best')
plt.show()
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.rnn = nn.RNN(
input_size=INPUT_SIZE,
hidden_size=32, # rnn hidden unit
num_layers=1, # number of rnn layer
            batch_first=True,   # input & output will have batch size as the 1st dimension, e.g. (batch, time_step, input_size)
)
self.out = nn.Linear(32, 1)
def forward(self, x, h_state):
# x (batch, time_step, input_size)
# h_state (n_layers, batch, hidden_size)
# r_out (batch, time_step, hidden_size)
r_out, h_state = self.rnn(x, h_state)
outs = [] # save all predictions
for time_step in range(r_out.size(1)): # calculate output for each time step
outs.append(self.out(r_out[:, time_step, :]))
return torch.stack(outs, dim=1), h_state
rnn = RNN()
print(rnn)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all rnn parameters
loss_func = nn.MSELoss()
h_state = None # for initial hidden state
plt.figure(1, figsize=(12, 5))
plt.ion() # continuously plot
for step in range(60):
start, end = step * np.pi, (step+1)*np.pi # time range
# use sin predicts cos
steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
x_np = np.sin(steps) # float32 for converting torch FloatTensor
y_np = np.cos(steps)
x = Variable(torch.from_numpy(x_np[np.newaxis, :, np.newaxis])) # shape (batch, time_step, input_size)
y = Variable(torch.from_numpy(y_np[np.newaxis, :, np.newaxis]))
prediction, h_state = rnn(x, h_state) # rnn output
# !! next step is important !!
h_state = Variable(h_state.data) # repack the hidden state, break the connection from last iteration
    loss = loss_func(prediction, y)         # MSE loss between prediction and target
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# plotting
plt.plot(steps, y_np.flatten(), 'r-')
plt.plot(steps, prediction.data.numpy().flatten(), 'b-')
plt.draw(); plt.pause(0.05)
| ML-week/pytorch/403_RNN_regressor .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LpdrpggfGWSi" colab_type="text"
# #Description
#
# This notebook tests several interpolation methods from the `cv2` library.
#
# Model trained on the 18800-sample database; bidirectional LSTM network (CuDNN).
#
# Parameters:
#
# 1. 2 layers with 141 and 94 units;
# 2. 64x64 images;
# 3. 1024 batch size;
#
# (Patience = 5).
#
# The same model is run with different interpolation functions.
#
# Interpolations and val_accuracy reached:
#
# * INTER_NEAREST (0) – 92.8%, 48 epochs
# * INTER_LINEAR (1) – 93.5%, 71 epochs
# * INTER_AREA (2) – 91.4%, 66 epochs
# * INTER_CUBIC (3) – 92.2%, 61 epochs
# * INTER_LANCZOS4 (4) – 92.2%, 65 epochs
#
# + id="HCpTreGNRCsD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1595430748869, "user_tz": -120, "elapsed": 699, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="d8061355-ea76-4394-8aae-fc12c2d6c74a"
# %tensorflow_version 1.x
# + [markdown] id="B-Cp_j51RbCd" colab_type="text"
# #Import
# + id="yxIEJSErRZQn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} executionInfo={"status": "ok", "timestamp": 1595430756053, "user_tz": -120, "elapsed": 7861, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="d0bde021-1b47-450e-b39d-8cff7e7c2bca"
import tensorflow as tf
from tensorflow import keras as ks
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from skimage.transform import resize
import cv2
#Data visualization
import seaborn as sns
from matplotlib import pyplot as plt
import glob
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import os, os.path
#Per modello NN
from tensorflow.keras import layers
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import CuDNNLSTM
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import ConvLSTM2D
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Softmax
from tensorflow.keras.layers import Dropout
import time
import math
print(tf.__version__)
# + [markdown] id="gZVB9coyRgC2" colab_type="text"
# ##Defining methods
# + id="P6bkLLzvBm3K" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595430756062, "user_tz": -120, "elapsed": 7864, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}}
class Stats:
def __init__( self, img_dim, perc_used, batch_size_used,
val_acc, val_loss, total_epochs,
early_stopping_epochs, total_time, modello, history, _for_conf_matr, model_name):
self.model = modello
self.history = history
self._for_conf_matr = _for_conf_matr
self.img_dim = img_dim
self.perc = str(perc_used)
self.batch_size = str(batch_size_used)
self.val_acc = str(round(val_acc, 5))
self.val_loss = str(round(val_loss, 5))
self.total_epochs = str(total_epochs)
self.early_stopping_epochs = str(early_stopping_epochs)
self.training_time = str(round(total_time, 5))
self.model_name = model_name
def myStats(self):
print("Dimensione immagini: ", self.img_dim)
print("Percentuale test set: " + self.perc + "%")
print("Dimensione batch size: " + self.batch_size)
print("Val accuracy: " + self.val_acc)
print("Val loss: " + self.val_loss)
print("Epoche di addestramento utilizzate: " + self.early_stopping_epochs + "/" + self.total_epochs)
print("Tempo di addestramento: " + self.training_time + " sec")
print("Model: " + self.model_name)
# + id="kwInADWiRjPT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1595430756067, "user_tz": -120, "elapsed": 7851, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="cac6d173-d3d5-418b-8778-8824ebcee226"
# Extract the class from the file name; the class is the string before the '-'
def extract_label(from_string):
position = from_string.index('-') # gets position of the - in the filename
substring = from_string[0:position]
return substring
def extract_obf(from_string):
start_pos = from_string.index('-')
end_pos = from_string.index('.')
substring = from_string[(start_pos + 1):end_pos]
return substring
def mapping_labels_encoded(label_encoder):
for index in range(len(list(label_encoder.classes_))):
print(index, end = "-> ")
print(list(label_encoder.inverse_transform([index])))
class TimeHistory(ks.callbacks.Callback):
def on_train_begin(self, logs={}):
self.times = []
def on_epoch_begin(self, epoch, logs={}):
self.epoch_time_start = time.time()
def on_epoch_end(self, epoch, logs={}):
self.times.append(time.time() - self.epoch_time_start)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(predicted_label,
100*np.max(predictions_array),
true_label),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
print("Done")
# + [markdown] id="DZPoDau9RlZ0" colab_type="text"
# #Import database
# + id="2kVGQGupRmh4" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595430770931, "user_tz": -120, "elapsed": 22710, "user": {"displayName": "P<PASSWORD>ant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}}
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + id="4A5WU0KB0jIg" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595430775781, "user_tz": -120, "elapsed": 27555, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}}
downloaded = drive.CreateFile({'id':"18ESID3MpwG-SzZPE1EENzsGPh8vl8ti9"}) # replace the id with id of file you want to access
downloaded.GetContentFile('data_18800.zip') # replace the file name with your file
# + id="MvEXLaNJ0kNV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1595430853918, "user_tz": -120, "elapsed": 105676, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="0379dd38-f37c-414f-b198-fec8e29e3686"
# !unzip -q data_18800.zip -d DB_Repo/
print("Done")
# + id="_N7XV2D1Rn_-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"status": "ok", "timestamp": 1595430853950, "user_tz": -120, "elapsed": 105690, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="a07fe894-1d27-4a1e-ef51-a40b155fadd4"
path, dirs, files = next(os.walk("/content/DB_Repo/data"))
file_count = len(files)
#Should be 18800
print(file_count)
# + id="tbBJ3YyJRpen" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} executionInfo={"status": "ok", "timestamp": 1595430857669, "user_tz": -120, "elapsed": 109391, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="4f46629a-89aa-4701-f596-e8f13a5ad095"
# %cd /content/DB_Repo/data
# !pwd
# + [markdown] id="PlGvM2uuI_vA" colab_type="text"
# #Reading the database and resizing the images with different interpolation methods (a short `cv2.resize` sketch follows the list below)
#
# * INTER_NEAREST (0) – a nearest-neighbor interpolation
# * INTER_LINEAR (1) – a bilinear interpolation (used by default)
# * INTER_AREA (2) – resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
# * INTER_CUBIC (3) – a bicubic interpolation over 4×4 pixel neighborhood
# * INTER_LANCZOS4 (4) – a Lanczos interpolation over 8×8 pixel neighborhood
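# The flags listed above map directly to `cv2.resize`. A minimal standalone sketch (not part of the original notebook) showing how a single dummy image would be resized with each method:
# +
import cv2        # already imported above; repeated so this sketch is self-contained
import numpy as np

demo_img = np.random.rand(100, 64).astype('float32')
interp_flags = {
    'INTER_NEAREST': cv2.INTER_NEAREST,
    'INTER_LINEAR': cv2.INTER_LINEAR,
    'INTER_AREA': cv2.INTER_AREA,
    'INTER_CUBIC': cv2.INTER_CUBIC,
    'INTER_LANCZOS4': cv2.INTER_LANCZOS4,
}
for name, flag in interp_flags.items():
    resized = cv2.resize(demo_img, (64, 64), interpolation=flag)  # dsize is (width, height)
    print(name, resized.shape)
# -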
# + id="8ou96bpsI_ZG" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1595430857679, "user_tz": -120, "elapsed": 109397, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}}
def interp_e_fit(interpolation_chosen):
batch_size = 1024
#Unit in first layer
num_units1 = 141
num_units2 = 94
new_dim = 64
    MAX_LEN = 64  # fixed
channels = 1
time_steps = new_dim
n_features = MAX_LEN
#size_ts_blocks = 8
n_epochs = 100
    # Use the first 20% of the data list as the test set
    percentage_required = 20  # %
    # CONSTANTS AND DECLARATIONS
    database_list = list()
    labels_list = list()
    obf_list = list()
    # READ AND RESIZE IMAGES
    print("START IMAGE INPUT")
    # Add the values to the lists while reading the files
for filename in glob.glob('*.npy'):
temp_img = np.load(filename)
temp_img = temp_img.reshape((-1, MAX_LEN)).astype('float32')
if interpolation_chosen == 0:
temp_img = cv2.resize(temp_img, (MAX_LEN, new_dim), interpolation=cv2.INTER_NEAREST)
elif interpolation_chosen == 1:
temp_img = cv2.resize(temp_img, (MAX_LEN, new_dim), interpolation=cv2.INTER_LINEAR)
elif interpolation_chosen == 2:
temp_img = cv2.resize(temp_img, (MAX_LEN, new_dim), interpolation=cv2.INTER_AREA)
elif interpolation_chosen == 3:
temp_img = cv2.resize(temp_img, (MAX_LEN, new_dim), interpolation=cv2.INTER_CUBIC)
elif interpolation_chosen == 4:
temp_img = cv2.resize(temp_img, (MAX_LEN, new_dim), interpolation=cv2.INTER_LANCZOS4)
else:
print("ERROR")
quit()
database_list.append(temp_img)
        # Save the label, i.e. the class
        labels_list.append(extract_label(filename))
        # Save the list of obfuscators for each file
        obf_list.append(extract_obf(filename))
print("END IMAGE INPUT")
    # SHUFFLE
    # Values and labels are in two lists (+ obf);
    # shuffle them while keeping the value-label pairing
temp = list(zip(database_list, labels_list, obf_list))
np.random.shuffle(temp)
database_list, labels_list, obf_list = zip(*temp)
    # DATA SPLIT
    # Split into training set and test set
assert len(database_list) == len(labels_list) == len(obf_list)
index_to_split = math.ceil((len(database_list) * percentage_required) / 100)
indices = [(0, index_to_split - 1), (index_to_split, len(database_list) - 1)]
test_list, training_list = [database_list[s:e+1] for s,e in indices]
labels_test_list, labels_training_list = [labels_list[s:e+1] for s,e in indices]
obf_test_list, obf_training_list = [obf_list[s:e+1] for s,e in indices]
    # Convert the values to numpy.ndarray
train_images = np.array(training_list)
test_images = np.array(test_list)
train_labels = np.array(labels_training_list)
test_labels = np.array(labels_test_list)
train_obf = np.array(obf_training_list)
test_obf = np.array(obf_test_list)
label_encoder = LabelEncoder()
label_encoder.fit(train_labels)
train_labels_encoded = label_encoder.transform(train_labels)
test_labels_encoded = label_encoder.transform(test_labels)
    # Normalize values to the 0-1 range
train_images = train_images / 65535.0
test_images = test_images / 65535.0
    # Parameter declarations
n_classes = len(list(label_encoder.classes_))
modelLSTM = ks.Sequential()
#no activation selection
modelLSTM.add(Bidirectional(CuDNNLSTM(num_units1, unit_forget_bias='true', return_sequences='true'),
input_shape=(time_steps, n_features)))
modelLSTM.add(Bidirectional(CuDNNLSTM(num_units2, unit_forget_bias='true')))
modelLSTM.add(Dense(n_classes, activation='softmax'))
modelLSTM.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # validation_data is evaluated at the end of each epoch
    # Batch size should be (at most) the same as the number of hidden cells
es = ks.callbacks.EarlyStopping(monitor='val_loss', patience=5,
mode='auto', restore_best_weights=True, verbose=1)
time_callback = TimeHistory()
hist = modelLSTM.fit(train_images, train_labels_encoded,
batch_size = batch_size,
validation_data=(test_images, test_labels_encoded),
epochs=n_epochs, shuffle='true',
callbacks=[time_callback, es], verbose=0)
number_of_epochs_it_ran = len(hist.history['loss'])
time_per_epoch = time_callback.times
total_time = sum(time_per_epoch)
test_accuracy = modelLSTM.evaluate(test_images, test_labels_encoded)
_for_conf_matr = (label_encoder.classes_, test_images, test_labels_encoded)
statistiche_modello = Stats(train_images[0].shape, percentage_required, batch_size, test_accuracy[1], test_accuracy[0], n_epochs, number_of_epochs_it_ran, total_time, modelLSTM, hist, _for_conf_matr, "model1_bi")
return statistiche_modello
# + id="L4q9P3SPt7QV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 717} executionInfo={"status": "ok", "timestamp": 1595432194304, "user_tz": -120, "elapsed": 1446006, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="532d841b-bad3-4263-e5f2-b100782559f0"
results = list()
results.append(interp_e_fit(0))
results.append(interp_e_fit(1))
results.append(interp_e_fit(2))
results.append(interp_e_fit(3))
results.append(interp_e_fit(4))
# + [markdown] id="kjRFxm044AGp" colab_type="text"
# #Risultati
# + id="HwgsZ-_YByc-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 91} executionInfo={"status": "ok", "timestamp": 1595432194312, "user_tz": -120, "elapsed": 1445996, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="e318d321-0f82-4b68-aeab-3107f3c1970f"
'''
count = 1
for elem in results:
print(count)
count += 1
elem.myStats()
#Plot training & validation accuracy values
hist = elem.history
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# Plot training & validation loss values
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
'''
# + id="vlCVL2J9EdDM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 621} executionInfo={"status": "ok", "timestamp": 1595432262446, "user_tz": -120, "elapsed": 968, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="c6f3c666-7f69-44b7-d8ce-b3a4c1c2921e"
storie = list()
for elem in results:
hist = elem.history
storie.append(hist)
#Plot training & validation accuracy values
plt.figure(figsize=(10, 10))
plt.plot(storie[0].history['val_acc'])
plt.plot(storie[1].history['val_acc'])
plt.plot(storie[2].history['val_acc'])
plt.plot(storie[3].history['val_acc'])
plt.plot(storie[4].history['val_acc'])
plt.axhline(0.9, color="grey") # Horizontal line adding the threshold
plt.axhline(0.95, color="grey") # Horizontal line adding the threshold
plt.title('Model accuracy')
plt.ylabel('Val Accuracy')
plt.xlabel('Epoch')
plt.legend(['NEAREST', 'LINEAR', 'AREA', 'CUBIC', 'INTER_LANCZOS4'], loc='lower right')
plt.show()
# + id="sOUQghBpE_76" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 621} executionInfo={"status": "ok", "timestamp": 1595432294752, "user_tz": -120, "elapsed": 920, "user": {"displayName": "Pleasant94", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggc18QmWcekRk39ps1vtP2fsFwmCWuEr7kJj8SPVg=s64", "userId": "00661494034163855202"}} outputId="e5afb50f-64d3-4165-9580-e0735ea6c7fd"
#Plot training & validation accuracy values
plt.figure(figsize=(10, 10))
plt.plot(storie[0].history['val_loss'])
plt.plot(storie[1].history['val_loss'])
plt.plot(storie[2].history['val_loss'])
plt.plot(storie[3].history['val_loss'])
plt.plot(storie[4].history['val_loss'])
plt.axhline(0.5, color="grey") # Horizontal line adding the threshold
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['NEAREST', 'LINEAR', 'AREA', 'CUBIC', 'INTER_LANCZOS4'], loc='upper right')
plt.show()
| Analysis/Interpolation_methods_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Method
#
# A method is a function that “belongs to” an object. (In Python, the term method is not unique to class instances: other object types can have methods as well.)
#
# Built-in objects in Python have a variety of methods. For example, list objects have methods called append, insert, remove, sort, and so on.
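# As a brief illustration (not from the original notes), a method defined on a custom class is called with the same dot syntax as the built-in list methods shown below:
# +
class Greeter:
    def __init__(self, name):
        self.name = name

    def greet(self):  # a method: a function bound to Greeter instances
        return "Hello, " + self.name + "!"

g = Greeter("Ada")
print(g.greet())  # Hello, Ada!
# -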
myList = [1,2,3]
# Associated methods
# Append
myList
myList.append(4)
myList
# Pop
myList.pop()
myList
# Insert
myList.insert(2, 5)
myList
# Sort
myList.sort()
myList
myList.sort(reverse=True)
myList
myList.sort(reverse=False)
myList
myList.pop()
myList
# Count
myList
count = myList.count(2)
print (count)
myList
myList.append(2)
myList
myList.append(2)
myList
myList
count = myList.count(2)
print (count)
# Random example taken from Programiz for counting tuple and list elements in a list
# +
# random list
random = ['a', ('a', 'b'), ('a', 'b'), [3, 4]]
# count element ('a', 'b')
count = random.count(('a', 'b'))
# print count
print("The count of ('a', 'b') is:", count)
# count element [3, 4]
count = random.count([3, 4])
# print count
print("The count of [3, 4] is:", count)
# -
| [Self Help] Python/Notes - Learning Resources/Functions & Methods/[Notes] Methods and The Python Documentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="gQEhrWODGxYt"
import matplotlib.pyplot as plt
# %matplotlib inline
import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# + colab={} colab_type="code" id="s3gVLCsfHGms"
import sys
# %config InlineBackend.figure_format = 'retina'
# + colab={} colab_type="code" id="_QOKO6R5HfR8"
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="5BTbBSIdHSXk" outputId="f7bae1b2-45fa-43e9-f7e6-1f6fc4f52978"
train_data = datasets.CIFAR10("Cifar/", train=True, transform=transform, download=True)
test_data = datasets.CIFAR10("Cifar/", train=False, transform=transform, download=True)
# + colab={} colab_type="code" id="nV5VjG0oH1Rk"
train_loader = torch.utils.data.DataLoader(train_data,batch_size=64,num_workers=0,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data,batch_size=20,num_workers=0,shuffle=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="byZt7yo-IYXE" outputId="5eb81938-6490-423f-f629-0e7df5a272a9"
torch.cuda.is_available()
# + colab={} colab_type="code" id="RlycwcmGI7U8"
images,labels = next(iter(train_loader))
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="XVMISqBOJzIM" outputId="0538f536-c3c9-46d1-99b8-b193bb7b3274"
images.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="5DxNqhCAIvuM" outputId="ee22515d-5fd3-4234-dd1c-302f686836db"
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# convolutional layer (sees 32x32x3 image tensor)
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
# convolutional layer (sees 16x16x16 tensor)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
# convolutional layer (sees 8x8x32 tensor)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# linear layer (64 * 4 * 4 -> 500)
self.fc1 = nn.Linear(64 * 4 * 4, 500)
# linear layer (500 -> 10)
self.fc2 = nn.Linear(500, 10)
# dropout layer (p=0.25)
self.dropout = nn.Dropout(0.25)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
# flatten image input
x = x.view(-1, 64 * 4 * 4)
# add dropout layer
x = self.dropout(x)
# add 1st hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add 2nd hidden layer, with relu activation function
x = self.fc2(x)
return x
# create a complete CNN
model = Net()
print(model)
# move tensors to GPU if CUDA is available
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
    model.cuda()
# + colab={} colab_type="code" id="ZDjotf0uTUuh"
criterion = nn.CrossEntropyLoss()
# specify optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
# + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" id="30Q8BZMbN4DM" outputId="555d8a8c-5fea-4e96-cb30-81170b548a48"
n_epochs = 30
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
model.cuda()
for idx in range(n_epochs):
train_loss = 0.0
test_loss = 0.0
for data, labels in train_loader:
if train_on_gpu:
data, labels = data.cuda(), labels.cuda()
optimizer.zero_grad()
output = model(data)
loss= criterion(output,labels)
loss.backward()
optimizer.step()
train_loss+=loss.item()*data.size(0)
train_loss = train_loss/len(train_loader.dataset)
print("\rEpoch {}/{} \t Training Loss: {:.6f}".format(idx,n_epochs,train_loss))
# + colab={} colab_type="code" id="mPb_WtZwZUy_"
import numpy as np
# + colab={} colab_type="code" id="rl_PBx3hZbs2"
batch_size=20
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
# + colab={"base_uri": "https://localhost:8080/", "height": 255} colab_type="code" id="5U-T924YVIMX" outputId="4761922d-dad6-4024-a490-fc9d571b7143"
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval()
for data, target in test_loader:
if train_on_gpu:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
test_loss += loss.item()*data.size(0)
_, pred = torch.max(output, 1)
correct_tensor = pred.eq(target.data.view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
| python/pytorch/Convolutional Neural Networks/CNNs_in_PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import nibabel as nib
import nbimporter
from functions import *
from numpy import *
import voxelmorph as vxm  # assumed explicit import; vxm may also be re-exported by `functions`
import matplotlib.pyplot as plt
# %matplotlib inline
# +
ndim = 2
unet_input_features = 2
loadable_model='200epoch_with_val.h5'
# data shape 64*64
s=(64,64)
inshape = (*s, unet_input_features)
# configure unet features
nb_features =[
[64, 64, 64, 64], # encoder features
[64, 64, 64, 64, 64, 32,16] # decoder features
]
# build model using VxmDense
inshape =s
vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
# voxelmorph has a variety of custom loss classes
losses = [vxm.losses.MSE().loss, vxm.losses.Grad('l2').loss]
# usually, we have to balance the two losses by a hyper-parameter
lambda_param = 0.05
loss_weights = [1, lambda_param]
vxm_model.compile(optimizer='Adam', loss=losses, loss_weights=loss_weights, metrics=['accuracy'])
vxm_model.load_weights(loadable_model)
# +
D7_dir='/home/mahdi/Desktop/data_selection_D7'
n,name=count(D7_dir)
out_dir='/home/mahdi/Desktop/valid'
n1,name1=count(out_dir)
i=70
#x=(len(name[i][0][:])-7)
#input_direction='/home/mahdi/Desktop/centerline/'+name1[i][0]+'/output/zero_ref/'+name[i][0][:x]+'_c.'+name[i][0][-6:]
maximum_intensity=1705
sli=5
reference='100'
# -
input_direction=out_dir+'/'+name1[i][0]+'/main_seg/'+name[i][0]
prepare_data=ref(input_direction,maximum_intensity,sli,reference)
val_input, _ = prepare_data
val_pred = vxm_model.predict(val_input)
import neurite as ne
# visualize
volume=50
images = [img[volume, :, :, 0] for img in val_input + val_pred]
titles = ['moving', 'fixed', 'moved', 'flow']
ne.plot.slices(images, titles=titles, cmaps=['gray'], do_colorbars=True);
ne.plot.flow([val_pred[1][volume].squeeze()], width=5,titles= 'flow',grid=True,scale=.1)
| Result/showing flow field.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="JzrMHKM06YJk" colab_type="code" outputId="acb9bb7d-7b1f-4b0a-e8d1-190d379f97ff" colab={"base_uri": "https://localhost:8080/", "height": 244}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('austin_weather.csv')
df.head()
# + id="GZUfXVIP6YLM" colab_type="code" outputId="a73fe4dc-acc3-4e88-c54d-1f1768e5298a" colab={"base_uri": "https://localhost:8080/", "height": 503}
df.info()
# + [markdown] id="H7oLUpPq6YNb" colab_type="text"
# <h2>Quantitative Comparison Scatter Plot Visualisation</h2>
#
# In this assignment we will examine DewPointAvg (F) against HumidityAvg (%), TempAvg (F), and WindAvg (MPH).
#
# Note that the data is not ready for analysis: among other issues, the DewPointAvgF, HumidityAvgPercent, and WindAvgMPH columns have dtype object even though they contain numeric values. Therefore:
# - Convert those columns to the float data type
#
# Steps:
#
# - The conversion will not work directly because these columns contain the value '-', which cannot be cast to float, so first replace the '-' values with NaN using the .replace() method. See the documentation https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.replace.html
# - Fill the NaN values with the previous value in that column using the .fillna() method with the argument method='ffill'. See the documentation https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.fillna.html
# - Now convert the data type to float using the .astype() method. See the documentation https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.astype.html
#
# After this, part of the data is ready to be used for analysis. Then:
#
# Build a quantitative comparison scatter plot visualisation that reproduces the figure below.
#
# Notes:
#
# - the colormap is 'coolwarm'
# - colour each data point by the value of the TempAvgF column
# - size each data point by the value of the WindAvgMPH column, multiplied by 20 so the sizes are more visible
#
#
# Give your opinion on the insight that can be drawn from this quantitative comparison visualisation!
# + [markdown] id="N9tmTWDv6YNo" colab_type="text"
# 
# + id="YMVlt5yx6YN2" colab_type="code" colab={}
# DewPointAvgF
# HumidityAvgPercent
# TempAvgF
# WindAvgMPH
df['DewPointAvgF'] = df['DewPointAvgF'].replace({'-':None})
df['HumidityAvgPercent'] = df['HumidityAvgPercent'].replace({'-':None})
df['WindAvgMPH'] = df['WindAvgMPH'].replace({'-':None})
# + id="CwGo6hWZ1YEY" colab_type="code" colab={}
df[df['DewPointAvgF'].isnull()]
# + id="-W4w9SgP3de0" colab_type="code" colab={}
df[df['HumidityAvgPercent'].isnull()]
# + id="txdpjTVshfMY" colab_type="code" colab={}
df[df['WindAvgMPH'].isnull()]
# + id="MQGI80ZPhibr" colab_type="code" colab={}
df['DewPointAvgF'] = df['DewPointAvgF'].fillna(method='ffill')
df['HumidityAvgPercent'] = df['HumidityAvgPercent'].fillna(method='ffill')
df['WindAvgMPH'] = df['WindAvgMPH'].fillna(method='ffill')
# + id="cMlRdDk-i9qc" colab_type="code" colab={}
df[df['DewPointAvgF'].isnull()]
# + id="FMsEDDGxi7ua" colab_type="code" colab={}
df[df['HumidityAvgPercent'].isnull()]
# + id="L8ikoFyllFLk" colab_type="code" colab={}
df[df['WindAvgMPH'].isnull()]
# + id="ZbYhVgw1lITY" colab_type="code" colab={}
df['DewPointAvgF'] = df['DewPointAvgF'].astype(float)
df['HumidityAvgPercent'] = df['HumidityAvgPercent'].astype(float)
df['WindAvgMPH'] = df['WindAvgMPH'].astype(float)
df['TempAvgF'] = df['TempAvgF'].astype(float)
# + id="w8a4zSmRljXr" colab_type="code" outputId="7eb09957-22c4-4e70-f13b-fc736677ce49" colab={"base_uri": "https://localhost:8080/", "height": 461}
df
# + id="xA5nATL1lljZ" colab_type="code" outputId="a96b3edc-5865-458c-868b-eecf9c477a38" colab={"base_uri": "https://localhost:8080/", "height": 458}
fig, ax = plt.subplots(figsize=(14,7))
D = df['DewPointAvgF']
H = df['HumidityAvgPercent']
W = df['WindAvgMPH']
T = df['TempAvgF']
axmap = ax.scatter(H, D, c=T, cmap='coolwarm', sizes=W*20)
ax.set_xlabel('Humidity Avg %')
ax.set_ylabel('Dew Point Avg (F)')
ax.set_title('Austin Weather')
fig.colorbar(axmap)
plt.show()
# + [markdown] id="Nl0yTjarmyl3" colab_type="text"
# The insight I draw is that we can read the Austin weather data through humidity and dew point: the lower the temperature value of a point, the more blue it appears, and the higher the value, the more red it appears.
| Learn/Week 3 Visualization/Week_3_Day_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="yd9BYBz-UGO_" colab_type="text"
# Lambda School Data Science
#
# *Unit 2, Sprint 2, Module 4*
#
# ---
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# # Classification Metrics
#
# ## Assignment
# - [ ] If you haven't yet, [review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.
# - [ ] Plot a confusion matrix for your Tanzania Waterpumps model.
# - [ ] Continue to participate in our Kaggle challenge. Every student should have made at least one submission that scores at least 70% accuracy (well above the majority class baseline).
# - [ ] Submit your final predictions to our Kaggle competition. Optionally, go to **My Submissions**, and _"you may select up to 1 submission to be used to count towards your final leaderboard score."_
# - [ ] Commit your notebook to your fork of the GitHub repo.
# - [ ] Read [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student <NAME>. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
#
#
# ## Stretch Goals
#
# ### Reading
#
# - [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
# - [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
# - [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)
#
#
# ### Doing
# - [ ] Share visualizations in our Slack channel!
# - [ ] RandomizedSearchCV / GridSearchCV, for model selection. (See module 3 assignment notebook)
# - [ ] Stacking Ensemble. (See module 3 assignment notebook)
# - [ ] More Categorical Encoding. (See module 2 assignment notebook)
# + colab_type="code" id="lsbRiKBoB5RE" colab={}
# %%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
# !pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
# + colab_type="code" id="BVA1lph8CcNX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8b91936b-008d-4efa-b98f-2d8b9a3adc18"
import pandas as pd
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Splitting training data
from sklearn.model_selection import train_test_split
train, val = train_test_split(train, train_size=.80, test_size=.20, stratify=train['status_group'], random_state=256)
print(train.shape, val.shape, test.shape)
# + id="1Ifpv8O-UGPW" colab_type="code" colab={}
# drop = ['id', 'amount_tsh', 'wpt_name', 'num_private', 'region_code', 'district_code', 'recorded_by', 'scheme_name', 'extraction_type', 'extraction_type_class', 'management_group', 'quality_group', 'quantity_group', 'source_type', 'source_class', 'waterpoint_type_group', 'status_group']
# onehot = ['basin', 'region', 'payment', 'water_quality', 'quantity']
# rest = ['date_recorded', 'funder', 'gps_height', 'installer', 'longitude', 'latitude', 'subvillage', 'lga', 'ward', 'population', 'public_meeting', 'scheme_management', 'permit', 'construction_year', 'extraction_type_group', 'management', 'payment_type', 'waterpoint_type', 'source',]
""" RANGLE FUNCTION """
def wrangle(dataframe):
dataframe = dataframe.copy()
# Dropping columns
columns_to_drop = ['id', 'amount_tsh', 'wpt_name', 'num_private', 'region_code', 'recorded_by', 'scheme_name', 'extraction_type', 'extraction_type_class', 'management_group', 'quality_group', 'quantity_group', 'source_type', 'source_class', 'waterpoint_type_group']
dataframe = dataframe.drop(columns=columns_to_drop, axis=1)
# Converting to datetime
dataframe['date_recorded'] = pd.to_datetime(dataframe['date_recorded'], infer_datetime_format=True)
# Replacing columns with alot of 0's
dataframe['gps_height'] = dataframe['gps_height'].replace(0, np.nan)
dataframe['longitude'] = dataframe['longitude'].replace(0, np.nan)
dataframe['latitude'] = dataframe['latitude'].replace(0, np.nan)
dataframe['population'] = dataframe['population'].replace(0, np.nan)
dataframe['construction_year'] = dataframe['construction_year'].replace(0, np.nan)
# Encoding Booleans
dataframe['public_meeting'] = dataframe['public_meeting'].replace({True: 1, False: -1, np.nan: 0})
dataframe['permit'] = dataframe['permit'].replace({True: 1, False: -1, np.nan: 0})
# Feature engineering
dataframe['year_recorded'] = dataframe['date_recorded'].dt.year
dataframe['years'] = dataframe['year_recorded'] - dataframe['construction_year']
dataframe['years_MISSING'] = dataframe['years'].isnull()
dataframe['date_recorded'] = pd.to_datetime(dataframe['date_recorded'], infer_datetime_format=True).astype(int)/ 10**9
return dataframe
# + id="5nyhsCseSteK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0f43cdb1-a2bb-434e-8f78-49dbd5c2e936"
from sklearn.compose import ColumnTransformer
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
import numpy as np
train_wrangled = wrangle(train)
val_wrangled = wrangle(val)
test_wrangled = wrangle(test)
""" ONE HOT ENCODING """
cols = ['basin', 'region', 'payment', 'water_quality', 'quantity']
subset = train_wrangled[cols]
subset.head(3)
""" TRAINING """
# Encoding columns
onehot_encoder = ce.OneHotEncoder(use_cat_names=True)
encoded = onehot_encoder.fit_transform(subset)
# Concatinating dataframes
final_train = pd.concat([train_wrangled, encoded], axis=1)
# Dropping old columns
final_train = final_train.drop(columns=cols, axis=1)
final_train = final_train.drop(columns='status_group', axis=1)
""" VALIDATION """
subset = val_wrangled[cols]
encoded = onehot_encoder.transform(subset)
final_val = pd.concat([val_wrangled, encoded], axis=1)
final_val = final_val.drop(columns=cols, axis=1)
final_val = final_val.drop(columns='status_group', axis=1)
""" TESTING """
subset = test_wrangled[cols]
encoded = onehot_encoder.transform(subset)
final_test = pd.concat([test_wrangled, encoded], axis=1)
final_test = final_test.drop(columns=cols, axis=1)
final_test.shape
# + id="qwx-kc9JYZSS" colab_type="code" colab={}
""" CONCAT VAL AND TRAIN """
ultimate_X = pd.concat([final_train, final_val], axis=0)
ultimate_y = pd.concat([train['status_group'], val['status_group']], axis=0)
# + [markdown] id="53NuBjaLYtmQ" colab_type="text"
# ##Final Model
# + id="CbohUaUjYh-o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 176} outputId="606ffc82-7bee-426b-964e-5bc8609548ce"
from scipy.stats import randint, uniform
import category_encoders as ce
import numpy as np
from sklearn.feature_selection import f_regression, SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(random_state=121)
)
param_distributions = {
'simpleimputer__strategy': ['mean'],
'randomforestclassifier__max_depth': [20],
'randomforestclassifier__min_samples_leaf':[2],
'randomforestclassifier__n_estimators': [40],
'randomforestclassifier__max_features': uniform(0, 1),
}
# If you're on Colab, decrease n_iter & cv parameters
search = RandomizedSearchCV(
pipeline,
param_distributions=param_distributions,
n_iter=15,
cv=3,
scoring='accuracy',
verbose=10,
return_train_score=True,
n_jobs=-1
)
search.fit(ultimate_X, ultimate_y);
# + id="DSSKNGn0YxqY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="7b75b5ea-ea07-41d2-e866-06fb93bc3696"
print('Best hyperparameters', search.best_params_)
print('Cross-validation accuracy', search.best_score_)
# + id="FfknR7MeZF7d" colab_type="code" colab={}
y_pred = search.predict(final_test)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('Cooper_Vos_Submission_ripx3', index=False)
# + [markdown] id="JzPSjXzhZoN3" colab_type="text"
# ## Confusion Matrix
# + id="xzakSdFYaLr5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d93866d5-2a9d-42e4-add3-5db47d36d09a"
""" PIPELINE """
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(random_state=300,
n_estimators=100,
min_samples_leaf=2
)
)
pipeline.fit(final_train, train['status_group'])
print('Validation Accuracy', pipeline.score(final_val, val['status_group']))
# + id="V3IiG6uNZnXQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 384} outputId="7ef6efe7-cf9d-470d-ab3a-9717a7692f6e"
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(pipeline, final_val, val['status_group'], values_format='.0f', xticks_rotation='vertical');
# + id="aBsFD2_tZ9B_" colab_type="code" colab={}
| module4-classification-metrics/LS_DS_224_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Gamma Ray Normalisation
# **Created by:** <NAME>
#
# This notebook illustrates how to carry out a simple normalisation on Gamma Ray data from the Volve Dataset.
# Medium Article Link:
# ## What is Normalization?
# Normalization is the process of re-scaling or re-calibrating the well logs so that they are consistent with other logs in other wells within the field. This can be achieved by applying a single point normalization (linear shift) or a two point normalization ('stretch and squeeze') to the required curve.
#
# Normalization is commonly applied to gamma ray logs, but can be applied to neutron porosity, bulk density, sonic and spontaneous potential logs. Resistivity logs are generally not normalized unless there is a sufficient reason to do so (Shier, 2004). It should be noted that applying normalization can remove geological variations and features across the study area and should be considered carefully. Shier (2004) provides an excellent discussion and guidelines on how to carry out normalization on well log data.
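# As a quick illustration of the single point option mentioned above (a sketch, not from the original article), a single point normalisation is just a linear shift that moves a reference value on this well's curve onto the key well's value at the same reference point:
def single_point_normalise(curve, key_well_ref, this_well_ref):
    # Linear shift: add the difference between the key well's and this well's reference values
    return curve + (key_well_ref - this_well_ref)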
# ## Loading and Checking Data
# The first step is to import the required libraries: pandas and matplotlib.
import os
import pandas as pd
import matplotlib.pyplot as plt
root = '/users/kai/desktop/data_science/data/dongara'
well_name = 'DONGARA_26_file003'
file_format = '.csv'
data = pd.read_csv(os.path.join(root,well_name+file_format), header=0)
data.head(1000)
data['WELL'].unique()
# Using the unique method on the dataframe, we can see that we have 3 wells within this Volve Data subset:
# - 15/9-F-1 C
# - 15/9-F-4
# - 15/9-F-7
# ## Plotting the Raw Data
#
wells = data.groupby('WELL')
wells.head()
wells.min()
fig, ax = plt.subplots(figsize=(8,6))
for label, df in wells:
df.GR.plot(kind ='kde', ax=ax, label=label)
plt.xlim(0, 200)
plt.grid(True)
plt.legend()
plt.savefig('before_normalisation.png', dpi=300)
# From the plot above, we will assume that the key well is 15/9-F-7 and we will normalise the other two datasets to this one.
#
# ## Calculating the Percentiles
# It is possible that datasets can contain erroneous values which may affect the minimum and the maximum values within a curve. Therefore, some interpreters prefer to base their normalisation parameters on percentiles.
#
# In this example, I have used the 5th and 95th percentiles.
#
# The first step is to calculate the percentile (or quantile as pandas refers to it) by grouping the data by wells and then applying the .quantile method to a specific column. In this case, GR. The quantile function takes in a decimal value, so a value of 0.05 is equivalent to the 5th percentile and 0.95 is equivalent to the 95th percentile.
gr_percentile_05 = data.groupby('WELL')['GR'].quantile(0.05)
print(gr_percentile_05)
# This calculation generates a pandas Series object. We can see what is in the series by calling upon it like so.
# So now we need to bring that back into our main dataframe. We can do this using the map function, which will combine two data series that share a common column. Once it is mapped we can call upon the `.describe()` method and confirm that it has been added to the dataframe.
data['05_PERC'] = data['WELL'].map(gr_percentile_05)
data.describe()
# We can then repeat the process for the 95th percentile:
gr_percentile_95 = data.groupby('WELL')['GR'].quantile(0.95)
gr_percentile_95
data['95_PERC'] = data['WELL'].map(gr_percentile_95)
data.describe()
# ## Create the Normalisation Function
# In order to normalize the data, we need create a custom function.
# The following equation comes from <NAME>: 'Well Log Normalization: Methods and Guidelines'.
#
# $$Curve_{norm} = Ref_{low} +(Ref_{high}-Ref_{low}) * \Bigg[ \frac {CurveValue - Well_{low}}{ Well_{high} - Well_{low}}\Bigg]$$
def normalise(curve, ref_low, ref_high, well_low, well_high):
return ref_low + ((ref_high - ref_low) * ((curve - well_low) / (well_high - well_low)))
# We can now set of key well high and low parameters.
key_well_low = 25.6464
key_well_high = 110.5413
# To apply the function to each value and use the correct percentiles for each well, we can use the `apply()` method on the pandas dataframe together with a `lambda` that calls our custom function.
data['GR_NORM'] = data.apply(lambda x: normalise(x['GR'], key_well_low, key_well_high,
x['05_PERC'], x['95_PERC']), axis=1)
# ## Plotting the Normalized Data
# To view our final normalized data, we can re-use the code from above to generate the histogram. When we do, we can see that all curves have been normalized to our reference well.
fig, ax = plt.subplots(figsize=(8,6))
for label, df in wells:
df.GR_NORM.plot(kind ='kde', ax=ax, label=label)
plt.xlim(0, 200)
plt.grid(True)
plt.legend()
plt.savefig('after_normalisation.png', dpi=300)
| 08 - Curve Normalisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Cleansing - CSV Files
# Still, since our analysis involves gender-based data, let's continue with our data cleansing for the other 7 sources we've considered, also from [Open Source Data From Mexican Government](https://datos.gob.mx/)<br />
# <ul>
# <li>Graduated students</li>
# <li>Taxable income</li>
# <li>Schooling levels</li>
# <li>Active population in formal economic activities</li>
# <li>Active population in informal economic activities</li>
# <li>Working people who earn up to three minimum wages</li>
# <li>Percentage of women in union leadership</li>
# </ul>
# Since all of these are CSV files and considerably small, I'll invoke the read_csv method to speed up my analysis.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Graduates per Program
grad = pd.read_csv('Data\graduados.csv')
grad.sort_values(by='Programa',ascending=True)
grad.info()
# Again, we have no gender-based data; next source:
# ## Income per Taxes by Million MXN
tax_inc=pd.read_csv('Data\IngresosTributarios.csv')
tax_inc.shape
tax_inc.info()
# We have no gender-based data, but let's visualize it in case it's needed in the future.
tax_inc['Year'].unique()
# Let's compare two different types of taxes over the last five years.
# +
fig, ((ax1,ax2)) = plt.subplots(nrows=2,ncols=1,figsize=(10,15))
#ISR - Fosils 5 last years
x11 = tax_inc.loc[tax_inc['Year'] == 2010, 'Month']
y11 = tax_inc.loc[tax_inc['Year'] == 2014, 'Impuesto Sobre la Renta']
y12 = tax_inc.loc[tax_inc['Year'] == 2015, 'Impuesto Sobre la Renta']
y13 = tax_inc.loc[tax_inc['Year'] == 2016, 'Impuesto Sobre la Renta']
y14 = tax_inc.loc[tax_inc['Year'] == 2017, 'Impuesto Sobre la Renta']
y15 = tax_inc.loc[tax_inc['Year'] == 2018, 'Impuesto Sobre la Renta']
ax1.plot(x11,y11, color='firebrick', linewidth=1,marker='o', markersize=8, label='ISR on 2015')
ax1.plot(x11,y12, color='gold', linewidth=1,marker='v', markersize=8, label='ISR on 2016')
ax1.plot(x11,y13, color='green', linewidth=1,marker='1', markersize=8, label='ISR on 2017')
ax1.plot(x11,y14, color='blue', linewidth=1,marker='s', markersize=8, label='ISR on 2018')
ax1.plot(x11,y15, color='mediumorchid', linewidth=1,marker='p', markersize=8, label='ISR on 2019')
ax1.set_yticks([tax_inc['Impuesto Sobre la Renta'].min(),tax_inc['Impuesto Sobre la Renta'].max()])
ax1.legend()
#IEPS - Gasolinas y diesel - Fosils 5 last years
x21 = tax_inc.loc[tax_inc['Year'] == 2010, 'Month']
y21 = tax_inc.loc[tax_inc['Year'] == 2014, 'IEPS - Gasolinas y diesel']
y22 = tax_inc.loc[tax_inc['Year'] == 2015, 'IEPS - Gasolinas y diesel']
y23 = tax_inc.loc[tax_inc['Year'] == 2016, 'IEPS - Gasolinas y diesel']
y24 = tax_inc.loc[tax_inc['Year'] == 2017, 'IEPS - Gasolinas y diesel']
y25 = tax_inc.loc[tax_inc['Year'] == 2018, 'IEPS - Gasolinas y diesel']
ax2.plot(x21,y21, color='firebrick', linewidth=1,marker='o', markersize=8, label='IEPS on Fosils 2015')
ax2.plot(x21,y22, color='gold', linewidth=1,marker='v', markersize=8, label='IEPS on Fosils 2016')
ax2.plot(x21,y23, color='green', linewidth=1,marker='1', markersize=8, label='IEPS on Fosils 2017')
ax2.plot(x21,y24, color='blue', linewidth=1,marker='s', markersize=8, label='IEPS on Fosils 2018')
ax2.plot(x21,y25, color='mediumorchid', linewidth=1,marker='p', markersize=8, label='IEPS on Fosils 2019')
ax2.set_yticks([tax_inc['IEPS - Gasolinas y diesel'].min(),tax_inc['IEPS - Gasolinas y diesel'].max()])
ax2.legend()
# -
# #### A brief data cleansing:
# Looking at a sample:
tax_inc.head()
# We can see that at least four columns use 'n.d.' as a not-defined / null marker, so let's convert those to NaN and consider assigning the mean to the null values. <br />
# First, let's count how many non-null values our sample has, to see whether replacing the nulls with the mean would be meaningful.
tax_inc['IEPS - Alimentos alta densidad calorica']=tax_inc['IEPS - Alimentos alta densidad calorica'].replace('n.d.',np.NaN)
tax_inc['IEPS - Plaguicidas']=tax_inc['IEPS - Plaguicidas'].replace('n.d.',np.NaN)
tax_inc['IEPS - Carbono']=tax_inc['IEPS - Carbono'].replace('n.d.',np.NaN)
tax_inc['Impuesto por la Actividad de Exploracion y Extraccion de Hidrocarburos']=tax_inc['Impuesto por la Actividad de Exploracion y Extraccion de Hidrocarburos'].replace('n.a',np.NaN)
# If we call `.info()` on these columns again, we can see the non-null counts out of our 113-row sample and judge whether replacing the nulls would be meaningful.
tax_inc[[
'IEPS - Alimentos alta densidad calorica',
'IEPS - Plaguicidas',
'IEPS - Carbono',
'Impuesto por la Actividad de Exploracion y Extraccion de Hidrocarburos']].info()
# Since the number of non-null records is barely more than 50% of the sample, we don't have enough information to decide what to replace the null values with, and we'll continue exploring our sources for gender-based data.
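# For completeness, had the coverage been better, a mean imputation could look like the sketch below (illustrative only; it is not applied in this analysis):
# +
# Illustrative sketch: impute the mean of each sparsely populated IEPS column on a copy of the data.
ieps_cols = ['IEPS - Alimentos alta densidad calorica', 'IEPS - Plaguicidas', 'IEPS - Carbono']
tax_inc_imputed = tax_inc.copy()
for col in ieps_cols:
    numeric = pd.to_numeric(tax_inc_imputed[col], errors='coerce')  # any remaining text becomes NaN
    tax_inc_imputed[col] = numeric.fillna(numeric.mean())           # replace NaN with the column mean
tax_inc_imputed[ieps_cols].info()
# -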
# ## Escolaridad
escolarity=pd.read_csv('Data\escolaridad_Over15yo.csv')
escolarity
# ## Occupied Population by Economic Activity
# Amount of population, by gender, taking part in remunerated activities:
pop_ec=pd.read_csv('Data\Poblacion_Ocupada_Actividad_Economica.csv')
pop_ec
# As we can see (e.g. via the `.info()` method), this source contains gender-based data and can be used in our brief analysis of the glass ceiling for Mexican women.
# ## Occupied Population by Informal Activity
# Amount of population, by gender, in formal or informal labour conditions in México.
pop_inf=pd.read_csv('Data\Poblacion_Ocupada_Condicion_Informalidad.csv')
pop_inf
# As we can see (e.g. via the `.info()` method), this source contains gender-based data and can be used in our brief analysis of the glass ceiling for Mexican women.
# ## Percentage of Salaried Workers Earning up to Three Minimum Wages
asalariados=pd.read_csv('Data\Porcentaje_de_Asalariados_que_Ganan_hasta_tres_Salarios_Minimos.csv')
asalariados
# Glancing at the dataframe, let's remove the last rows, since they are part of a note that was saved along with the CSV.
asalariados.dropna(thresh=6)
# ## Women in Union Leadership
sindical=pd.read_csv('Data\Porcentaje_de_mujeres_en_la_dirigencia_sindical_UdR02.csv')
sindical
| .ipynb_checkpoints/DataCleansing_CSVFiles-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="title"></a>
# <a id="toc"></a>
# 
# <div style="margin-top: 9px; background-color: #efefef; padding-top:10px; padding-bottom:10px;margin-bottom: 9px;box-shadow: 5px 5px 5px 0px rgba(87, 87, 87, 0.2);">
# <center>
# <h2>Table of Contents</h2>
# </center>
#
#
# <ol>
# <li><a href="#01" style="color: #37509b;">Initialization</a></li>
# <li><a href="#02" style="color: #37509b;">Dataset: Cleaning and Exploration</a></li>
# <li><a href="#03" style="color: #37509b;">Modelling</a></li>
# <li><a href="#04" style="color: #37509b;">Quarta Seção</a></li>
# <li><a href="#05" style="color: #37509b;">Quinta Seção </a></li>
#
# </ol>
#
#
# </div>
# <a id="01" style="
# background-color: #37509b;
# border: none;
# color: white;
# padding: 2px 10px;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# font-size: 10px;" href="#toc">TOC ↻</a>
#
#
# <div style="margin-top: 9px; background-color: #efefef; padding-top:10px; padding-bottom:10px;margin-bottom: 9px;box-shadow: 5px 5px 5px 0px rgba(87, 87, 87, 0.2);">
# <center>
# <h1>1. Initialization</h1>
# </center>
#
#
#
# <ol type="i">
# </ol>
#
#
#
# </div>
# <a id="0101"></a>
# <h2>1.1 Description <a href="#01"
# style="
# border-radius: 10px;
# background-color: #f1f1f1;
# border: none;
# color: #37509b;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# padding: 4px 4px;
# font-size: 14px;
# ">↻</a></h2>
# Dataset available in:
#
# <a href="https://www.kaggle.com/c/titanic/" target="_blank">https://www.kaggle.com/c/titanic/</a>
#
# ### Features
#
# <table>
# <tbody>
# <tr><th><b>Variable</b></th><th><b>Definition</b></th><th><b>Key</b></th></tr>
# <tr>
# <td>survival</td>
# <td>Survival</td>
# <td>0 = No, 1 = Yes</td>
# </tr>
# <tr>
# <td>pclass</td>
# <td>Ticket class</td>
# <td>1 = 1st, 2 = 2nd, 3 = 3rd</td>
# </tr>
# <tr>
# <td>sex</td>
# <td>Sex</td>
# <td></td>
# </tr>
# <tr>
# <td>Age</td>
# <td>Age in years</td>
# <td></td>
# </tr>
# <tr>
# <td>sibsp</td>
# <td># of siblings / spouses aboard the Titanic</td>
# <td></td>
# </tr>
# <tr>
# <td>parch</td>
# <td># of parents / children aboard the Titanic</td>
# <td></td>
# </tr>
# <tr>
# <td>ticket</td>
# <td>Ticket number</td>
# <td></td>
# </tr>
# <tr>
# <td>fare</td>
# <td>Passenger fare</td>
# <td></td>
# </tr>
# <tr>
# <td>cabin</td>
# <td>Cabin number</td>
# <td></td>
# </tr>
# <tr>
# <td>embarked</td>
# <td>Port of Embarkation</td>
# <td>C = Cherbourg, Q = Queenstown, S = Southampton</td>
# </tr>
# </tbody>
# </table>
# <a id="0102"></a>
# <h2>1.2 Packages <a href="#01"
# style="
# border-radius: 10px;
# background-color: #f1f1f1;
# border: none;
# color: #37509b;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# padding: 4px 4px;
# font-size: 14px;
# ">↻</a></h2>
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from time import time,sleep
import nltk
from nltk import tokenize
from string import punctuation
from nltk.stem import PorterStemmer, SnowballStemmer, LancasterStemmer
from unidecode import unidecode
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,f1_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate,KFold,GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OrdinalEncoder,OneHotEncoder, LabelEncoder
from sklearn.preprocessing import StandardScaler,Normalizer
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
# -
# <a id="0103"></a>
# <h2>1.3 Settings <a href="#01"
# style="
# border-radius: 10px;
# background-color: #f1f1f1;
# border: none;
# color: #37509b;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# padding: 4px 4px;
# font-size: 14px;
# ">↻</a></h2>
# +
# pandas options
pd.options.display.max_columns = 30
pd.options.display.float_format = '{:.2f}'.format
# seaborn options
sns.set(style="darkgrid")
import warnings
warnings.filterwarnings("ignore")
# -
# <a id="0104"></a>
# <h2>1.4 Useful Functions <a href="#01"
# style="
# border-radius: 10px;
# background-color: #f1f1f1;
# border: none;
# color: #37509b;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# padding: 4px 4px;
# font-size: 14px;
# ">↻</a></h2>
def treat_words(df,
col,
language='english',
inplace=False,
tokenizer = tokenize.WordPunctTokenizer(),
decode = True,
stemmer = None,
lower = True,
remove_words = [],
):
"""
Description:
----------------
Receives a dataframe and the column name. Eliminates
stopwords for each row of that column and apply stemmer.
After that, it regroups and returns a list.
tokenizer = tokenize.WordPunctTokenizer()
tokenize.WhitespaceTokenizer()
stemmer = PorterStemmer()
SnowballStemmer()
LancasterStemmer()
nltk.RSLPStemmer() # in portuguese
"""
pnct = [string for string in punctuation] # from string import punctuation
wrds = nltk.corpus.stopwords.words(language)
unwanted_words = pnct + wrds + remove_words
processed_text = list()
for element in tqdm(df[col]):
# starts a new list
new_text = list()
        # starts a list with the words of the unprocessed text
text_old = tokenizer.tokenize(element)
# check each word
for wrd in text_old:
# if the word are not in the unwanted words list
# add to the new list
if wrd.lower() not in unwanted_words:
new_wrd = wrd
if decode: new_wrd = unidecode(new_wrd)
if stemmer: new_wrd = stemmer.stem(new_wrd)
if lower: new_wrd = new_wrd.lower()
if new_wrd not in remove_words:
new_text.append(new_wrd)
processed_text.append(' '.join(new_text))
if inplace:
df[col] = processed_text
else:
return processed_text
def list_words_of_class(df,
col,
language='english',
inplace=False,
tokenizer = tokenize.WordPunctTokenizer(),
decode = True,
stemmer = None,
lower = True,
remove_words = []
):
"""
Description:
----------------
Receives a dataframe and the column name. Eliminates
stopwords for each row of that column, apply stemmer
and returns a list of all the words.
"""
lista = treat_words(
df,col = col,language = language,
tokenizer=tokenizer,decode=decode,
stemmer=stemmer,lower=lower,
remove_words = remove_words
)
words_list = []
for string in lista:
words_list += tokenizer.tokenize(string)
return words_list
def get_frequency(df,
col,
language='english',
inplace=False,
tokenizer = tokenize.WordPunctTokenizer(),
decode = True,
stemmer = None,
lower = True,
remove_words = []
):
list_of_words = list_words_of_class(
df,
col = col,
decode = decode,
stemmer = stemmer,
lower = lower,
remove_words = remove_words
)
freq = nltk.FreqDist(list_of_words)
df_freq = pd.DataFrame({
'word': list(freq.keys()),
'frequency': list(freq.values())
}).sort_values(by='frequency',ascending=False)
n_words = df_freq['frequency'].sum()
df_freq['prop'] = 100*df_freq['frequency']/n_words
return df_freq
def common_best_words(df,col,n_common = 10,tol_frac = 0.8,n_jobs = 1):
list_to_remove = []
for i in range(0,n_jobs):
print('[info] Most common words in not survived')
sleep(0.5)
df_dead = get_frequency(
df.query('Survived == 0'),
col = col,
decode = False,
stemmer = False,
lower = False,
remove_words = list_to_remove )
print('[info] Most common words in survived')
sleep(0.5)
df_surv = get_frequency(
df.query('Survived == 1'),
col = col,
decode = False,
stemmer = False,
lower = False,
remove_words = list_to_remove )
words_dead = df_dead.nlargest(n_common, 'frequency')
list_dead = list(words_dead['word'].values)
words_surv = df_surv.nlargest(n_common, 'frequency')
list_surv = list(words_surv['word'].values)
for word in list(set(list_dead).intersection(list_surv)):
prop_dead = words_dead[words_dead['word'] == word]['prop'].values[0]
prop_surv = words_surv[words_surv['word'] == word]['prop'].values[0]
ratio = min([prop_dead,prop_surv])/max([prop_dead,prop_surv])
if ratio > tol_frac:
list_to_remove.append(word)
return list_to_remove
def just_keep_the_words(df,
col,
keep_words = [],
tokenizer = tokenize.WordPunctTokenizer()
):
"""
Description:
----------------
Removes all words that is not in `keep_words`
"""
processed_text = list()
    # for each row of the column
for element in tqdm(df[col]):
# starts a new list
new_text = list()
        # starts a list with the words of the unprocessed text
text_old = tokenizer.tokenize(element)
for wrd in text_old:
if wrd in keep_words: new_text.append(wrd)
processed_text.append(' '.join(new_text))
return processed_text
class Classifier:
'''
Description
-----------------
Class to approach classification algorithm
Example
-----------------
classifier = Classifier(
algorithm = ChooseTheAlgorith,
hyperparameters_range = {
'hyperparameter_1': [1,2,3],
'hyperparameter_2': [4,5,6],
'hyperparameter_3': [7,8,9]
}
)
# Looking for best model
classifier.grid_search_fit(X,y,n_splits=10)
    #classifier.grid_search_results.head(3)
    # Prediction Form 1
    par = classifier.best_model_params
    classifier.fit(X_trn,y_trn,params = par)
y_pred = classifier.predict(X_tst)
print(accuracy_score(y_tst, y_pred))
# Prediction Form 2
classifier.fit(X_trn,y_trn,params = 'best_model')
y_pred = classifier.predict(X_tst)
print(accuracy_score(y_tst, y_pred))
# Prediction Form 3
classifier.fit(X_trn,y_trn,min_samples_split = 5,max_depth=4)
y_pred = classifier.predict(X_tst)
print(accuracy_score(y_tst, y_pred))
'''
def __init__(self,algorithm, hyperparameters_range={},random_state=42):
self.algorithm = algorithm
self.hyperparameters_range = hyperparameters_range
self.random_state = random_state
self.grid_search_cv = None
self.grid_search_results = None
self.hyperparameters = self.__get_hyperparameters()
self.best_model = None
self.best_model_params = None
self.fitted_model = None
def grid_search_fit(self,X,y,verbose=0,n_splits=10,shuffle=True,scoring='accuracy'):
self.grid_search_cv = GridSearchCV(
self.algorithm(),
self.hyperparameters_range,
cv = KFold(n_splits = n_splits, shuffle=shuffle, random_state=self.random_state),
scoring=scoring,
verbose=verbose
)
self.grid_search_cv.fit(X, y)
col = list(map(lambda par: 'param_'+str(par),self.hyperparameters))+[
'mean_fit_time',
'mean_test_score',
'std_test_score',
'params'
]
results = pd.DataFrame(self.grid_search_cv.cv_results_)
self.grid_search_results = results[col].sort_values(
['mean_test_score','mean_fit_time'],
ascending=[False,True]
).reset_index(drop=True)
self.best_model = self.grid_search_cv.best_estimator_
self.best_model_params = self.best_model.get_params()
def best_model_cv_score(self,X,y,parameter='test_score',verbose=0,n_splits=10,shuffle=True,scoring='accuracy'):
if self.best_model != None:
cv_results = cross_validate(
self.best_model,
X = X,
y = y,
                cv=KFold(n_splits=n_splits, shuffle=shuffle, random_state=self.random_state)
)
return {
parameter+'_mean': cv_results[parameter].mean(),
parameter+'_std': cv_results[parameter].std()
}
def fit(self,X,y,params=None,**kwargs):
model = None
if len(kwargs) == 0 and params == 'best_model' and self.best_model != None:
model = self.best_model
elif type(params) == dict and len(params) > 0:
model = self.algorithm(**params)
elif len(kwargs) >= 0 and params==None:
model = self.algorithm(**kwargs)
else:
print('[Error]')
if model != None:
model.fit(X,y)
self.fitted_model = model
def predict(self,X):
if self.fitted_model != None:
return self.fitted_model.predict(X)
else:
print('[Error]')
return np.array([])
def predict_score(self,X_tst,y_tst,score=accuracy_score):
if self.fitted_model != None:
y_pred = self.predict(X_tst)
return score(y_tst, y_pred)
else:
print('[Error]')
return np.array([])
def hyperparameter_info(self,hyperpar):
str_ = 'param_'+hyperpar
return self.grid_search_results[
[str_,'mean_fit_time','mean_test_score']
].groupby(str_).agg(['mean','std'])
def __get_hyperparameters(self):
return [hp for hp in self.hyperparameters_range]
# +
def cont_class_limits(lis_df,n_class):
    # class limits should start at the minimum of the series, not at zero
    lo = lis_df.quantile(0.0)
    ampl = lis_df.quantile(1.0) - lo
    ampl_class = ampl/n_class
    limits = [[lo + i*ampl_class, lo + (i+1)*ampl_class] for i in range(n_class)]
    return limits
def cont_classification(lis_df,limits):
list_res = []
n_class = len(limits)
for elem in lis_df:
for ind in range(n_class-1):
if elem >= limits[ind][0] and elem < limits[ind][1]:
list_res.append(ind+1)
if elem >= limits[-1][0]: list_res.append(n_class)
return list_res
# -
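# A minimal usage sketch for the two binning helpers above, on a synthetic series (the Titanic columns are only loaded in Section 2, so a small example series is used here):
# +
_example_series = pd.Series([1, 3, 7, 10, 22, 35, 48])
_example_limits = cont_class_limits(_example_series, 3)
cont_classification(_example_series, _example_limits)
# -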
# <a id="02" style="
# background-color: #37509b;
# border: none;
# color: white;
# padding: 2px 10px;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# font-size: 10px;" href="#toc">TOC ↻</a>
#
#
# <div style="margin-top: 9px; background-color: #efefef; padding-top:10px; padding-bottom:10px;margin-bottom: 9px;box-shadow: 5px 5px 5px 0px rgba(87, 87, 87, 0.2);">
# <center>
# <h1>2. Dataset: Cleaning and Exploration</h1>
# </center>
#
#
#
# <ol type="i">
# <!-- <li><a href="#0101" style="color: #37509b;">Inicialização</a></li>
# <li><a href="#0102" style="color: #37509b;">Pacotes</a></li>
# <li><a href="#0103" style="color: #37509b;">Funcoes</a></li>
# <li><a href="#0104" style="color: #37509b;">Dados de Indicadores Sociais</a></li>
# <li><a href="#0105" style="color: #37509b;">Dados de COVID-19</a></li>
# -->
# </ol>
#
#
#
# </div>
# <a id="0101"></a>
# <h2>2.1 Import Dataset <a href="#02"
# style="
# border-radius: 10px;
# background-color: #f1f1f1;
# border: none;
# color: #37509b;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# padding: 4px 4px;
# font-size: 14px;
# ">↻</a></h2>
# +
df_trn = pd.read_csv('data/train.csv')
df_tst = pd.read_csv('data/test.csv')
df = pd.concat([df_trn,df_tst])
df_trn = df_trn.drop(columns=['PassengerId'])
df_tst = df_tst.drop(columns=['PassengerId'])
# -
df_tst.info()
# ## Pclass
# Investigating if the class is related to the probability of survival
sns.barplot(x='Pclass', y="Survived", data=df_trn)
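# A numeric complement to the plot above: the survival rate per ticket class.
df_trn.groupby('Pclass')['Survived'].mean()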
# ## Name
treat_words(df_trn,col = 'Name',inplace=True)
treat_words(df_tst,col = 'Name',inplace=True)
# +
# %matplotlib inline
from wordcloud import WordCloud
import matplotlib.pyplot as plt
all_words = ' '.join(list(df_trn['Name']))
word_cloud = WordCloud().generate(all_words)
plt.figure(figsize=(10,7))
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis("off")
plt.show()
# -
common_best_words(df_trn,col='Name',n_common = 10,tol_frac = 0.5,n_jobs = 1)
# We can see that 'master' and 'william' appear in roughly the same proportion among survivors and non-survivors, so they are not good discriminative words for this task.
df_comm = get_frequency(df_trn,col = 'Name',remove_words=['("','")','master', 'william']).reset_index(drop=True)
surv_prob = [ df_trn['Survived'][df_trn['Name'].str.contains(row['word'])].mean() for index, row in df_comm.iterrows()]
df_comm['survival_prob (%)'] = 100*np.array(surv_prob)
print('Survival Frequency related to words in Name')
df_comm.head(10)
df_comm_surv = get_frequency(df_trn[df_trn['Survived']==1],col = 'Name',remove_words=['("','")']).reset_index(drop=True)
sleep(0.5)
print('Most frequent words within those who survived')
df_comm_surv.head(10)
df_comm_dead = get_frequency(df_trn[df_trn['Survived']==0],col = 'Name',remove_words=['("','")']).reset_index(drop=True)
sleep(0.5)
print("Most frequent words within those that did not survive")
df_comm_dead.head(10)
# ### Feature Engineering
# +
min_occurrences = 2
df_comm = get_frequency(df,col = 'Name',
remove_words=['("','")','john', 'henry', 'william','h','j','jr']
).reset_index(drop=True)
words_to_keep = list(df_comm[df_comm['frequency'] > min_occurrences]['word'])
df_trn['Name'] = just_keep_the_words(df_trn,
col = 'Name',
keep_words = words_to_keep
)
df_tst['Name'] = just_keep_the_words(df_tst,
col = 'Name',
keep_words = words_to_keep
)
# +
vectorize = CountVectorizer(lowercase=True,max_features = 4)
vectorize.fit(df_trn['Name'])
bag_of_words = vectorize.transform(df_trn['Name'])
X = pd.DataFrame(vectorize.fit_transform(df_trn['Name']).toarray(),
columns=list(map(lambda word: 'Name_'+word,vectorize.get_feature_names()))
)
y = df_trn['Survived']
from sklearn.model_selection import train_test_split
X_trn,X_tst,y_trn,y_tst = train_test_split(
X,
y,
test_size = 0.25,
random_state=42
)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(C=100)
classifier.fit(X_trn,y_trn)
accuracy = classifier.score(X_tst,y_tst)
print('Accuracy = %.3f%%' % (100*accuracy))
# +
df_trn = pd.concat([
df_trn
,
pd.DataFrame(vectorize.fit_transform(df_trn['Name']).toarray(),
columns=list(map(lambda word: 'Name_'+word,vectorize.get_feature_names()))
)
],axis=1).drop(columns=['Name'])
df_tst = pd.concat([
df_tst
,
    pd.DataFrame(vectorize.transform(df_tst['Name']).toarray(),
columns=list(map(lambda word: 'Name_'+word,vectorize.get_feature_names()))
)
],axis=1).drop(columns=['Name'])
# -
# ## Sex
# +
from sklearn.preprocessing import LabelEncoder
Sex_Encoder = LabelEncoder()
df_trn['Sex'] = Sex_Encoder.fit_transform(df_trn['Sex']).astype(int)
df_tst['Sex'] = Sex_Encoder.transform(df_tst['Sex']).astype(int)
# -
# ## Age
mean_age = df['Age'][df['Age'].notna()].mean()
df_trn['Age'].fillna(mean_age,inplace=True)
df_tst['Age'].fillna(mean_age,inplace=True)
# age_limits = cont_class_limits(df['Age'],5)
# df_trn['Age'] = cont_classification(df_trn['Age'],age_limits)
# df_tst['Age'] = cont_classification(df_tst['Age'],age_limits)
# ## Family Size
# +
df_trn['FamilySize'] = df_trn['SibSp'] + df_trn['Parch'] + 1
df_tst['FamilySize'] = df_tst['SibSp'] + df_tst['Parch'] + 1
df_trn = df_trn.drop(columns = ['SibSp','Parch'])
df_tst = df_tst.drop(columns = ['SibSp','Parch'])
# -
# ## Cabin Feature
# There is very little data about the cabin
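# A quick check of how sparse the column actually is:
print('Missing Cabin values: {:.1f}%'.format(100 * df['Cabin'].isna().mean()))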
# +
df_trn['Cabin'] = df_trn['Cabin'].fillna('N000')
df_cab = df_trn[df_trn['Cabin'].notna()]
df_cab = pd.concat(
[
df_cab,
df_cab['Cabin'].str.extract(
'([A-Za-z]+)(\d+\.?\d*)([A-Za-z]*)',
expand = True).drop(columns=[2]).rename(
columns={0: 'Cabin_Class', 1: 'Cabin_Number'}
)
], axis=1)
df_trn = df_cab.drop(columns=['Cabin','Cabin_Number'])
df_trn = pd.concat([
df_trn.drop(columns=['Cabin_Class']),
# pd.get_dummies(df_trn['Cabin_Class'],prefix='Cabin').drop(columns=['Cabin_N'])
pd.get_dummies(df_trn['Cabin_Class'],prefix='Cabin')
],axis=1)
# +
df_tst['Cabin'] = df_tst['Cabin'].fillna('N000')
df_cab = df_tst[df_tst['Cabin'].notna()]
df_cab = pd.concat(
[
df_cab,
df_cab['Cabin'].str.extract(
'([A-Za-z]+)(\d+\.?\d*)([A-Za-z]*)',
expand = True).drop(columns=[2]).rename(
columns={0: 'Cabin_Class', 1: 'Cabin_Number'}
)
], axis=1)
df_tst = df_cab.drop(columns=['Cabin','Cabin_Number'])
df_tst = pd.concat([
df_tst.drop(columns=['Cabin_Class']),
# pd.get_dummies(df_tst['Cabin_Class'],prefix='Cabin').drop(columns=['Cabin_N'])
pd.get_dummies(df_tst['Cabin_Class'],prefix='Cabin')
],axis=1)
# -
# ## Ticket
df_trn = df_trn.drop(columns=['Ticket'])
df_tst = df_tst.drop(columns=['Ticket'])
# ## Fare
# +
mean_fare = df['Fare'][df['Fare'].notna()].mean()
df_trn['Fare'].fillna(mean_fare,inplace=True)
df_tst['Fare'].fillna(mean_fare,inplace=True)
# fare_limits = cont_class_limits(df['Fare'],5)
# df_trn['Fare'] = cont_classification(df_trn['Fare'],fare_limits)
# df_tst['Fare'] = cont_classification(df_tst['Fare'],fare_limits)
# -
# ## Embarked
most_frequent_emb = df['Embarked'].value_counts()[:1].index.tolist()[0]
df_trn['Embarked'] = df_trn['Embarked'].fillna(most_frequent_emb)
df_tst['Embarked'] = df_tst['Embarked'].fillna(most_frequent_emb)
# +
df_trn = pd.concat([
df_trn.drop(columns=['Embarked']),
# pd.get_dummies(df_trn['Embarked'],prefix='Emb').drop(columns=['Emb_C'])
pd.get_dummies(df_trn['Embarked'],prefix='Emb')
],axis=1)
df_tst = pd.concat([
df_tst.drop(columns=['Embarked']),
# pd.get_dummies(df_tst['Embarked'],prefix='Emb').drop(columns=['Emb_C'])
pd.get_dummies(df_tst['Embarked'],prefix='Emb')
],axis=1)
# -
df_trn
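# One caveat worth handling (a hedged sketch, not part of the original pipeline): one-hot encoding train and test separately can leave the two frames with different dummy columns, for example a cabin letter present in only one split. Reindexing the test frame onto the training feature columns, zero-filling anything missing, keeps them aligned:
# +
feature_cols = [c for c in df_trn.columns if c != 'Survived']
df_tst = df_tst.reindex(columns=feature_cols, fill_value=0)
# -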
# <a id="03" style="
# background-color: #37509b;
# border: none;
# color: white;
# padding: 2px 10px;
# text-align: center;
# text-decoration: none;
# display: inline-block;
# font-size: 10px;" href="#toc">TOC ↻</a>
#
#
# <div style="margin-top: 9px; background-color: #efefef; padding-top:10px; padding-bottom:10px;margin-bottom: 9px;box-shadow: 5px 5px 5px 0px rgba(87, 87, 87, 0.2);">
# <center>
# <h1>3. Modelling</h1>
# </center>
#
#
#
# <ol type="i">
# <!-- <li><a href="#0101" style="color: #37509b;">Inicialização</a></li>
# <li><a href="#0102" style="color: #37509b;">Pacotes</a></li>
# <li><a href="#0103" style="color: #37509b;">Funcoes</a></li>
# <li><a href="#0104" style="color: #37509b;">Dados de Indicadores Sociais</a></li>
# <li><a href="#0105" style="color: #37509b;">Dados de COVID-19</a></li>
# -->
# </ol>
#
#
#
# </div>
sns.barplot(x='Age', y="Survived", data=df_trn)
# +
scaler = StandardScaler()
X = scaler.fit_transform(df_trn.drop(columns=['Survived']))
y = df_trn['Survived']
X_trn,X_tst,y_trn,y_tst = train_test_split(
X,
y,
test_size = 0.25,
random_state=42
)
# -
Model_Scores = {}
# ## Logistic Regression
# +
SEED = 42
hyperparametric_space = {
'solver' : ['newton-cg', 'lbfgs', 'liblinear'],
'C' : [0.01,0.1,1,10,100]
}
grid_search_cv = GridSearchCV(
LogisticRegression(random_state=SEED),
hyperparametric_space,
cv = KFold(n_splits = 10, shuffle=True,random_state=SEED),
scoring='accuracy',
verbose=0
)
grid_search_cv.fit(X, y)
results = pd.DataFrame(grid_search_cv.cv_results_)
pd.options.display.float_format = '{:,.5f}'.format
col = ['param_C', 'param_solver','mean_fit_time', 'mean_test_score', 'std_test_score']
results[col].sort_values(
['mean_test_score','mean_fit_time'],
ascending=[False,True]
).head(10)
# +
log = Classifier(
algorithm = LogisticRegression,
hyperparameters_range = {
'intercept_scaling' : [0.8,1,1.2],
# 'class_weight' : [{ 0:0.45, 1:0.55 },{ 0:0.5, 1:0.5 },{ 0:0.55, 1:0.45 }],
'solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'C' : [0.05,0.07,0.09]
}
)
log.grid_search_fit(X,y,n_splits=10)
print('\nBest Model:')
print('\n',log.best_model)
sc_dict = log.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
log.fit(X_trn,y_trn,params = 'best_model')
psc = log.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % psc)
Model_Scores['logistic_regression'] = {
'model' : log.best_model,
'best_params' : log.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
log.grid_search_results.head(5)
# -
# ## Support Vector Classifier
# +
sv = Classifier(
algorithm = SVC,
hyperparameters_range = {
'kernel' : ['linear', 'poly','rbf','sigmoid'],
'C' : [0.01,0.5,1,3,7,100]
}
)
sv.grid_search_fit(X,y)
print('\nBest Model:')
print('\n',sv.best_model)
sc_dict = sv.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
sv.fit(X_trn,y_trn,params = 'best_model')
psc = sv.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc))
Model_Scores['svc'] = {
'model' : sv.best_model,
'best_params' : sv.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
sv.grid_search_results.head(5)
# -
# ## Decision Tree Classifier
# +
dt = Classifier(
algorithm = DecisionTreeClassifier,
hyperparameters_range = {
'min_samples_split': [15,20,25],
'max_depth': [10,15,20,25],
'min_samples_leaf': [1,3,5,7,9]
}
)
dt.grid_search_fit(X,y)
print('\nBest Model:')
print('\n',dt.best_model)
sc_dict = dt.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
dt.fit(X_trn,y_trn,params = 'best_model')
psc = dt.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc))
Model_Scores['decision_tree'] = {
'model' : dt.best_model,
'best_params' : dt.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
dt.grid_search_results.head(5)
# -
# ## Gaussian Naive Bayes
# +
gnb = Classifier(
algorithm = GaussianNB,
hyperparameters_range = {
'var_smoothing': [1e-09,1e-07,1e-04,1e-02,1,10,100],
}
)
gnb.grid_search_fit(X,y)
print('\nBest Model:')
print('\n',gnb.best_model)
sc_dict = gnb.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
gnb.fit(X_trn,y_trn,params = 'best_model')
psc = gnb.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc ))
pd.options.display.float_format = '{:,.8f}'.format
Model_Scores['gaussian_nb'] = {
'model' : gnb.best_model,
'best_params' : gnb.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
gnb.grid_search_results.head(9)
# -
# ## K-Nearest Neighbors Classifier
# +
knn = Classifier(
algorithm = KNeighborsClassifier,
hyperparameters_range = {
'n_neighbors': [2,5,10,20],
'weights' : ['uniform', 'distance'],
'algorithm' : ['auto', 'ball_tree', 'kd_tree', 'brute'],
'p' : [2,3,4,5]
}
)
knn.grid_search_fit(X,y)
print('\nBest Model:')
print('\n',knn.best_model)
sc_dict = knn.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
knn.fit(X_trn,y_trn,params = 'best_model')
psc = knn.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc))
pd.options.display.float_format = '{:,.3f}'.format
Model_Scores['knn_classifier'] = {
'model' : knn.best_model,
'best_params' : knn.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
knn.grid_search_results.head(9)
# -
# ## Random Forest Classifier
# +
rf = Classifier(
algorithm = RandomForestClassifier,
hyperparameters_range = {
'n_estimators': [100,120,150,175,200],
'min_samples_split': [6,7,8,9,10],
'random_state': [42]
}
)
rf.grid_search_fit(X,y)
print('\nBest Model:')
print('\n',rf.best_model)
sc_dict = rf.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
rf.fit(X_trn,y_trn,params = 'best_model')
psc = rf.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc))
pd.options.display.float_format = '{:,.3f}'.format
Model_Scores['random_forest'] = {
'model' : rf.best_model,
'best_params' : rf.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
rf.grid_search_results.head(9)
# -
# ## Gradient Boosting Classifier
# +
SEED = 42
N_SPLITS = 10
MODEL = 'GradientBoostingClassifier'
start = time()
# Parametric Space
hyperparametric_space = {
'loss': ['deviance', 'exponential'],
# 'min_samples_split': [70,80,90,100,120,140,160],
'min_samples_split': [90,100,120],
# 'max_depth': [4,5,6,7,8],
'max_depth': [4,5,6,7,8]
}
# Searching the best setting
print('[info] Grid Searching')
grid_search_cv = GridSearchCV(
GradientBoostingClassifier(random_state=SEED),
hyperparametric_space,
cv = KFold(n_splits = N_SPLITS , shuffle=True,random_state=SEED),
scoring='accuracy',
verbose=0)
grid_search_cv.fit(X, y)
results = pd.DataFrame(grid_search_cv.cv_results_)
print('[info] Grid Search Timing: %.2f seconds'%(time() - start))
start = time()
# Evaluating Test Score For Best Estimator
print('[info] Test Accuracy Score')
gb = grid_search_cv.best_estimator_
gb.fit(X_trn, y_trn)
y_pred = gb.predict(X_tst)
# Evaluating K Folded Cross Validation
print('[info] KFolded Cross Validation')
cv_results = cross_validate(grid_search_cv.best_estimator_,X,y,
cv=KFold(n_splits = N_SPLITS ,shuffle=True,random_state=SEED) )
print('[info] Cross Validation Timing: %.2f seconds'%(time() - start))
Model_Scores[MODEL] = {
'test_accuracy_score' : gb.score(X_tst,y_tst),
'cv_score' : cv_results['test_score'].mean(),
'cv_score_std' : cv_results['test_score'].std(),
'best_params' : grid_search_cv.best_estimator_.get_params()
}
pd.options.display.float_format = '{:,.5f}'.format
print('\t\t test_accuracy_score: {:.3f}'.format(Model_Scores[MODEL]['test_accuracy_score']))
print('\t\t cv_score: {:.3f}±{:.3f}'.format(
Model_Scores[MODEL]['cv_score'],Model_Scores[MODEL]['cv_score_std']))
params_list = ['mean_test_score']+list(map(lambda var: 'param_'+var,grid_search_cv.best_params_.keys()))+['mean_fit_time']
results[params_list].sort_values(
['mean_test_score','mean_fit_time'],
ascending=[False,True]
).head(5)
# -
# ## Multi Layer Perceptron Classifier
from scipy.stats import randint
from numpy.random import uniform
uniform(-1,0)
# +
SEED = 42
N_SPLITS = 3
MODEL = 'MLPClassifier'
start = time()
# Parametric Space
hyperparametric_space = {
'hidden_layer_sizes': [(160,),(180,),(200,)],
# 'hidden_layer_sizes': [(180,)],
'alpha':[0.000001,0.00001,0.0001,0.001,0.01,0.1],
# 'alpha':[0.0001],
# 'beta_1':[0.81,0.9,0.99],
# 'beta_1':[0.9],
# 'beta_2':[0.999,0.99,0.9],
# 'beta_2':[0.99],
'activation': ['relu'],
'random_state': [SEED],
'learning_rate': ['adaptive']
}
# Searching the best setting
print('[info] Grid Searching')
grid_search_cv = GridSearchCV(
MLPClassifier(random_state=SEED),
hyperparametric_space,
cv = KFold(n_splits = N_SPLITS , shuffle=True,random_state=SEED),
scoring='accuracy',
verbose=0)
grid_search_cv.fit(X, y)
results = pd.DataFrame(grid_search_cv.cv_results_)
print('[info] Grid Search Timing: %.2f seconds'%(time() - start))
start = time()
# Evaluating Test Score For Best Estimator
print('[info] Test Accuracy Score')
gb = grid_search_cv.best_estimator_
gb.fit(X_trn, y_trn)
y_pred = gb.predict(X_tst)
# Evaluating K Folded Cross Validation
print('[info] KFolded Cross Validation')
cv_results = cross_validate(grid_search_cv.best_estimator_,X,y,
cv=KFold(n_splits = N_SPLITS ,shuffle=True,random_state=SEED) )
print('[info] Cross Validation Timing: %.2f seconds'%(time() - start))
Model_Scores[MODEL] = {
'test_accuracy_score' : gb.score(X_tst,y_tst),
'cv_score' : cv_results['test_score'].mean(),
'cv_score_std' : cv_results['test_score'].std(),
'best_params' : grid_search_cv.best_estimator_.get_params()
}
pd.options.display.float_format = '{:,.5f}'.format
print('\t\t test_accuracy_score: {:.3f}'.format(Model_Scores[MODEL]['test_accuracy_score']))
print('\t\t cv_score: {:.3f}±{:.3f}'.format(
Model_Scores[MODEL]['cv_score'],Model_Scores[MODEL]['cv_score_std']))
params_list = ['mean_test_score']+list(map(lambda var: 'param_'+var,grid_search_cv.best_params_.keys()))+['mean_fit_time']
results[params_list].sort_values(
['mean_test_score','mean_fit_time'],
ascending=[False,True]
).head(5)
# -
params_list = ['mean_test_score']+list(map(lambda var: 'param_'+var,grid_search_cv.best_params_.keys()))+['mean_fit_time']
results[params_list].sort_values(
['mean_test_score','mean_fit_time'],
ascending=[False,True]
).head(5)
# +
mlc = Classifier(
algorithm = MLPClassifier,
hyperparameters_range = {
'hidden_layer_sizes': [(160,),(180,),(200,)],
'alpha':[0.00001,0.0001,0.001],
'beta_1':[0.81,0.9,0.99],
'beta_2':[0.999,0.99,0.9],
'activation': ['identity'],
# 'activation': ['identity', 'logistic', 'tanh', 'relu'],
'random_state': [42],
'learning_rate': ['adaptive'],
'max_iter': [1000]
}
)
mlc.grid_search_fit(X,y,n_splits=3)
print('\nBest Model:')
print('\n',mlc.best_model)
sc_dict = mlc.best_model_cv_score(X,y)
sc_list = list((100*np.array(list(sc_dict.values()))))
print('\nCV Score: %.2f%% ± %.2f%%' % (sc_list[0],sc_list[1]))
mlc.fit(X_trn,y_trn,params = 'best_model')
psc = mlc.predict_score(X_tst,y_tst)
print('\nAccuracy Score: %.2f ' % (psc))
pd.options.display.float_format = '{:,.6f}'.format
Model_Scores['mlc_classifier'] = {
'model' : mlc.best_model,
'best_params' : mlc.best_model_params,
'test_accuracy_score' : psc,
'cv_score' : 0.01*sc_list[0],
'cv_score_std' : 0.01*sc_list[1]
}
mlc.grid_search_results.head(9)
# +
# Randomized hyperparameter search for a Random Forest on the same X, y used above
from sklearn.model_selection import RandomizedSearchCV

np.random.seed(SEED)
parameter_space = {
    "n_estimators": randint(10, 101),
    "max_depth": randint(3, 6),
    "min_samples_split": randint(32, 129),
    "min_samples_leaf": randint(32, 129),
    "bootstrap": [True, False],
    "criterion": ["gini", "entropy"]
}
tic = time()
search = RandomizedSearchCV(RandomForestClassifier(),
                            parameter_space,
                            n_iter=80,
                            cv=KFold(n_splits=5, shuffle=True, random_state=SEED))
search.fit(X, y)
print("Elapsed time: %.2f seconds" % (time() - tic))
results_rs = pd.DataFrame(search.cv_results_)
results_rs.head()
# -
# **Model Score Summary**
pd.DataFrame([[
model,
Model_Scores[model]['test_accuracy_score'],
Model_Scores[model]['cv_score'],
Model_Scores[model]['cv_score_std']
] for model in Model_Scores.keys()],columns=['model','test_accuracy_score','cv_score','cv_score_std'])
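# The same summary sorted by cross-validation score, so the strongest candidate is easy to spot (a presentation aid only; no additional modelling):
scores_df = pd.DataFrame(Model_Scores).T[['test_accuracy_score', 'cv_score', 'cv_score_std']]
scores_df.sort_values('cv_score', ascending=False)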
| Titanico/titanico.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (tensorflow)
# language: python
# name: tensorflow
# ---
# # T81-558: Applications of Deep Neural Networks
# **Module 14: Other Neural Network Techniques**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 14 Video Material
#
# * Part 14.1: What is AutoML [[Video]](https://www.youtube.com/watch?v=TFUysIR5AB0&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_01_automl.ipynb)
# * Part 14.2: Using Denoising AutoEncoders in Keras [[Video]](https://www.youtube.com/watch?v=4bTSu6_fucc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_02_auto_encode.ipynb)
# * Part 14.3: Anomaly Detection in Keras [[Video]](https://www.youtube.com/watch?v=1ySn6h2A68I&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_03_anomaly.ipynb)
# * **Part 14.4: Training an Intrusion Detection System with KDD99** [[Video]](https://www.youtube.com/watch?v=VgyKQ5MTDFc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_14_04_ids_kdd99.ipynb)
# * Part 14.5: The Deep Learning Technologies I am Excited About [[Video]]() [[Notebook]](t81_558_class_14_05_new_tech.ipynb)
#
#
# # Part 14.4: Training an Intrusion Detection System with KDD99
#
# The [KDD-99 dataset](http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html) is very famous in the security field and almost a "hello world" of intrusion detection systems in machine learning.
#
# # Read in Raw KDD-99 Dataset
# +
import pandas as pd
from tensorflow.keras.utils import get_file
try:
path = get_file('kddcup.data_10_percent.gz', origin='http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data_10_percent.gz')
except:
print('Error downloading')
raise
print(path)
# This file is a CSV, just no CSV extension or headers
# Download from: http://kdd.ics.uci.edu/databases/kddcup99/kddcup99.html
df = pd.read_csv(path, header=None)
print("Read {} rows.".format(len(df)))
# df = df.sample(frac=0.1, replace=False) # Uncomment this line to sample only 10% of the dataset
df.dropna(inplace=True,axis=1) # For now, just drop NA's (rows with missing values)
# The CSV file has no column heads, so add them
df.columns = [
'duration',
'protocol_type',
'service',
'flag',
'src_bytes',
'dst_bytes',
'land',
'wrong_fragment',
'urgent',
'hot',
'num_failed_logins',
'logged_in',
'num_compromised',
'root_shell',
'su_attempted',
'num_root',
'num_file_creations',
'num_shells',
'num_access_files',
'num_outbound_cmds',
'is_host_login',
'is_guest_login',
'count',
'srv_count',
'serror_rate',
'srv_serror_rate',
'rerror_rate',
'srv_rerror_rate',
'same_srv_rate',
'diff_srv_rate',
'srv_diff_host_rate',
'dst_host_count',
'dst_host_srv_count',
'dst_host_same_srv_rate',
'dst_host_diff_srv_rate',
'dst_host_same_src_port_rate',
'dst_host_srv_diff_host_rate',
'dst_host_serror_rate',
'dst_host_srv_serror_rate',
'dst_host_rerror_rate',
'dst_host_srv_rerror_rate',
'outcome'
]
# display 5 rows
df[0:5]
# -
# # Analyzing a Dataset
#
# The following script can be used to give a high-level overview of how a dataset appears.
# +
ENCODING = 'utf-8'
def expand_categories(values):
result = []
s = values.value_counts()
t = float(len(values))
for v in s.index:
result.append("{}:{}%".format(v,round(100*(s[v]/t),2)))
return "[{}]".format(",".join(result))
def analyze(df):
print()
cols = df.columns.values
total = float(len(df))
print("{} rows".format(int(total)))
for col in cols:
uniques = df[col].unique()
unique_count = len(uniques)
if unique_count>100:
print("** {}:{} ({}%)".format(col,unique_count,int(((unique_count)/total)*100)))
else:
print("** {}:{}".format(col,expand_categories(df[col])))
expand_categories(df[col])
# +
# Analyze KDD-99
import pandas as pd
import os
import numpy as np
from sklearn import metrics
from scipy.stats import zscore
analyze(df)
# -
# # Encode the feature vector
# Encode every row in the database. This is not instant!
# +
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
dummies = pd.get_dummies(df[name])
for x in dummies.columns:
dummy_name = f"{name}-{x}"
df[dummy_name] = dummies[x]
df.drop(name, axis=1, inplace=True)
# +
# Now encode the feature vector
encode_numeric_zscore(df, 'duration')
encode_text_dummy(df, 'protocol_type')
encode_text_dummy(df, 'service')
encode_text_dummy(df, 'flag')
encode_numeric_zscore(df, 'src_bytes')
encode_numeric_zscore(df, 'dst_bytes')
encode_text_dummy(df, 'land')
encode_numeric_zscore(df, 'wrong_fragment')
encode_numeric_zscore(df, 'urgent')
encode_numeric_zscore(df, 'hot')
encode_numeric_zscore(df, 'num_failed_logins')
encode_text_dummy(df, 'logged_in')
encode_numeric_zscore(df, 'num_compromised')
encode_numeric_zscore(df, 'root_shell')
encode_numeric_zscore(df, 'su_attempted')
encode_numeric_zscore(df, 'num_root')
encode_numeric_zscore(df, 'num_file_creations')
encode_numeric_zscore(df, 'num_shells')
encode_numeric_zscore(df, 'num_access_files')
encode_numeric_zscore(df, 'num_outbound_cmds')
encode_text_dummy(df, 'is_host_login')
encode_text_dummy(df, 'is_guest_login')
encode_numeric_zscore(df, 'count')
encode_numeric_zscore(df, 'srv_count')
encode_numeric_zscore(df, 'serror_rate')
encode_numeric_zscore(df, 'srv_serror_rate')
encode_numeric_zscore(df, 'rerror_rate')
encode_numeric_zscore(df, 'srv_rerror_rate')
encode_numeric_zscore(df, 'same_srv_rate')
encode_numeric_zscore(df, 'diff_srv_rate')
encode_numeric_zscore(df, 'srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_count')
encode_numeric_zscore(df, 'dst_host_srv_count')
encode_numeric_zscore(df, 'dst_host_same_srv_rate')
encode_numeric_zscore(df, 'dst_host_diff_srv_rate')
encode_numeric_zscore(df, 'dst_host_same_src_port_rate')
encode_numeric_zscore(df, 'dst_host_srv_diff_host_rate')
encode_numeric_zscore(df, 'dst_host_serror_rate')
encode_numeric_zscore(df, 'dst_host_srv_serror_rate')
encode_numeric_zscore(df, 'dst_host_rerror_rate')
encode_numeric_zscore(df, 'dst_host_srv_rerror_rate')
# display 5 rows
df.dropna(inplace=True,axis=1)
df[0:5]
# This is the numeric feature vector, as it goes to the neural net
# Convert to numpy - Classification
x_columns = df.columns.drop('outcome')
x = df[x_columns].values
dummies = pd.get_dummies(df['outcome']) # Classification
outcomes = dummies.columns
num_classes = len(outcomes)
y = dummies.values
# -
df.groupby('outcome')['outcome'].count()
# # Train the Neural Network
# +
import pandas as pd
import io
import requests
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
# Create a test/train split. 25% test
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Create neural net
model = Sequential()
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
model.add(Dense(10, input_dim=x.shape[1], activation='relu'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
patience=5, verbose=1, mode='auto')
model.fit(x_train,y_train,validation_data=(x_test,y_test),
callbacks=[monitor],verbose=2,epochs=1000)
# -
# Measure accuracy
pred = model.predict(x_test)
pred = np.argmax(pred,axis=1)
y_eval = np.argmax(y_test,axis=1)
score = metrics.accuracy_score(y_eval, pred)
print("Validation score: {}".format(score))
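# Per-class precision and recall (added as a sketch), since overall accuracy can hide weak performance on the rare attack classes:
print(metrics.classification_report(y_eval, pred,
                                    labels=list(range(num_classes)),
                                    target_names=[str(c) for c in outcomes]))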
| t81_558_class_14_04_ids_kdd99.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Load libraries
# +
# base tools
import os, sys
sys.path.append(os.path.join(".."))
# data analysis
import numpy as np
from numpy.linalg import norm
from tqdm import tqdm
# tensorflow
import tensorflow_hub as hub
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
# from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
# style utils
from utils.styletransfer import *
# matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
# -
# ## Helper functions
def extract_features(img_path, model):
"""
Extract features from image data using pretrained model (e.g. VGG16)
"""
# Define input image shape - remember we need to reshape
input_shape = (224, 224, 3)
# load image from file path
img = load_img(img_path, target_size=(input_shape[0],
input_shape[1]))
# convert to array
img_array = img_to_array(img)
# expand to fit dimensions
expanded_img_array = np.expand_dims(img_array, axis=0)
# preprocess image - see last week's notebook
preprocessed_img = preprocess_input(expanded_img_array)
# use the predict function to create feature representation
features = model.predict(preprocessed_img)
# flatten
flattened_features = features.flatten()
# normalise features
normalized_features = flattened_features / norm(features)
    return normalized_features
# # Image search
# ## Load VGG16
model = VGG16(weights='imagenet',
              include_top=False,
              pooling='avg',
              input_shape=(224, 224, 3))
# ## Extract features from single image
features = extract_features('../data/img/florence.jpg', model)
# ## Iterate over folder
# path to the datasets
root_dir = '../data/jpg'
filenames = sorted(get_file_list(root_dir))
# __Extract features for each image__
feature_list = []
for i in tqdm(range(len(filenames))):
feature_list.append(extract_features(filenames[i], model))
# ## Nearest neighbours
from sklearn.neighbors import NearestNeighbors
neighbors = NearestNeighbors(n_neighbors=10,
algorithm='brute',
metric='cosine').fit(feature_list)
# __Calculate nearest neighbours for target__
distances, indices = neighbors.kneighbors([feature_list[250]])
# __Save indices, print data__
idxs = []
for i in range(1,6):
print(distances[0][i], indices[0][i])
idxs.append(indices[0][i])
# __Plot target image__
plt.imshow(mpimg.imread(filenames[250]))
# __Plot close images__
plt.imshow(mpimg.imread(filenames[251]))
# __Plot target and top 3 closest together__
# +
# plot the target image again
plt.imshow(mpimg.imread(filenames[250]))
# plot 3 most similar
f, axarr = plt.subplots(1,3)
axarr[0].imshow(mpimg.imread(filenames[idxs[0]]))
axarr[1].imshow(mpimg.imread(filenames[idxs[1]]))
axarr[2].imshow(mpimg.imread(filenames[idxs[2]]))
# -
# # Style transfer
# __Load a quick style transfer model from TF Hub__
#
# You can find more details [here](https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization)
# Load TF-Hub module.
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
# __Load the content image and the style image__
content_image = st_load("../data/img/florence.jpg")
style_image = st_load("../data/img/starry_night.jpg")
# __Process using the model__
outputs = hub_module(content_image, style_image)
stylized_image = outputs[0]
# __Show content, style, and stylized image__
show_n([content_image, style_image, stylized_image],
titles=['Original content image', 'Style image', 'Stylized image'])
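# Optionally, persist the stylized output to disk (a hedged sketch; it assumes the hub module returns a batch of float images in [0, 1], which is this model's documented output format):
plt.imsave("stylized_output.png", stylized_image[0].numpy())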
| notebooks/session11_inclass_rdkm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
s = 'We promptly judged antique ivory buckles for the next prize'
z1 = s.replace(" ", '')
z = 'abcdefghijklmnopqrstuvwxyz'
list2 = list(z1.lower())
list2.sort()
set1 = set(list2)
set2 = set(z)
if sorted(set1) == sorted(set2):
    print("pangram")
else:
    print("not a pangram")
# -
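# A more compact equivalent (a sketch): a sentence is a pangram exactly when the lowercase alphabet is a subset of its letters.
import string
print(set(string.ascii_lowercase) <= set(s.lower()))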
| trial scripts/trial script.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Automated Machine Learning
# _**Forecasting using the Energy Demand Dataset**_
#
# ## Contents
# 1. [Introduction](#introduction)
# 1. [Setup](#setup)
# 1. [Data and Forecasting Configurations](#data)
# 1. [Train](#train)
# 1. [Generate and Evaluate the Forecast](#forecast)
#
# Advanced Forecasting
# 1. [Advanced Training](#advanced_training)
# 1. [Advanced Results](#advanced_results)
# # Introduction<a id="introduction"></a>
#
# In this example we use the associated New York City energy demand dataset to showcase how you can use AutoML for a simple forecasting problem and explore the results. The goal is predict the energy demand for the next 48 hours based on historic time-series data.
#
# If you are using an Azure Machine Learning Compute Instance, you are all set. Otherwise, go through the [configuration notebook](../../../configuration.ipynb) first, if you haven't already, to establish your connection to the AzureML Workspace.
#
# In this notebook you will learn how to:
# 1. Creating an Experiment using an existing Workspace
# 1. Configure AutoML using 'AutoMLConfig'
# 1. Train the model using AmlCompute
# 1. Explore the engineered features and results
# 1. Generate the forecast and compute the out-of-sample accuracy metrics
# 1. Configuration and remote run of AutoML for a time-series model with lag and rolling window features
# 1. Run and explore the forecast with lagging features
# # Setup<a id="setup"></a>
# +
import logging
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import warnings
import os
# Squash warning messages for cleaner output in the notebook
warnings.showwarning = lambda *args, **kwargs: None
import azureml.core
from azureml.core import Experiment, Workspace, Dataset
from azureml.train.automl import AutoMLConfig
from datetime import datetime
# -
# This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
print("This notebook was created using version 1.35.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
# +
ws = Workspace.from_config()
# choose a name for the run history container in the workspace
experiment_name = 'automl-forecasting-energydemand'
# # project folder
# project_folder = './sample_projects/automl-forecasting-energy-demand'
experiment = Experiment(ws, experiment_name)
output = {}
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Run History Name'] = experiment_name
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
# -
# ## Create or Attach existing AmlCompute
# A compute target is required to execute a remote Automated ML run.
#
# [Azure Machine Learning Compute](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute) is a managed-compute infrastructure that allows the user to easily create a single or multi-node compute. In this tutorial, you create AmlCompute as your training compute resource.
#
# #### Creation of AmlCompute takes approximately 5 minutes.
# If the AmlCompute with that name is already in your workspace this code will skip the creation process.
# As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your cluster.
amlcompute_cluster_name = "energy-cluster"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS12_V2',
max_nodes=6)
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# -
# # Data<a id="data"></a>
#
# We will use energy consumption [data from New York City](http://mis.nyiso.com/public/P-58Blist.htm) for model training. The data is stored in a tabular format and includes energy demand and basic weather data at an hourly frequency.
#
# With Azure Machine Learning datasets you can keep a single copy of data in your storage, easily access data during model training, share data and collaborate with other users. Below, we will upload the dataset and create a [tabular dataset](https://docs.microsoft.com/bs-latn-ba/azure/machine-learning/service/how-to-create-register-datasets#dataset-types) to be used for training and prediction.
# Let's set up what we know about the dataset.
#
# <b>Target column</b> is what we want to forecast.<br></br>
# <b>Time column</b> is the time axis along which to predict.
#
# The other columns, "temp" and "precip", are implicitly designated as features.
target_column_name = 'demand'
time_column_name = 'timeStamp'
dataset = Dataset.Tabular.from_delimited_files(path = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/nyc_energy.csv").with_timestamp_columns(fine_grain_timestamp=time_column_name)
dataset.take(5).to_pandas_dataframe().reset_index(drop=True)
# The NYC Energy dataset is missing energy demand values for all datetimes later than August 10th, 2017 5AM. Below, we trim the rows containing these missing values from the end of the dataset.
# Cut off the end of the dataset due to large number of nan values
dataset = dataset.time_before(datetime(2017, 10, 10, 5))
# ## Split the data into train and test sets
# The first split we make is into train and test sets. Note that we are splitting on time. Data before and including August 8th, 2017 5AM will be used for training, and data after will be used for testing.
# split into train based on time
train = dataset.time_before(datetime(2017, 8, 8, 5), include_boundary=True)
train.to_pandas_dataframe().reset_index(drop=True).sort_values(time_column_name).tail(5)
# split into test based on time
test = dataset.time_between(datetime(2017, 8, 8, 6), datetime(2017, 8, 10, 5))
test.to_pandas_dataframe().reset_index(drop=True).head(5)
# ### Setting the maximum forecast horizon
#
# The forecast horizon is the number of periods into the future that the model should predict. It is generally recommended that users set forecast horizons to less than 100 time periods (i.e. less than 100 hours in the NYC energy example). Furthermore, **AutoML's memory use and computation time increase in proportion to the length of the horizon**, so consider carefully how this value is set. If a long horizon forecast really is necessary, consider aggregating the series to a coarser time scale.
#
# Learn more about forecast horizons in our [Auto-train a time-series forecast model](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-forecast#configure-and-run-experiment) guide.
#
# In this example, we set the horizon to 48 hours.
forecast_horizon = 48
# ## Forecasting Parameters
# To define forecasting parameters for your experiment training, you can leverage the ForecastingParameters class. The table below details the forecasting parameter we will be passing into our experiment.
#
# |Property|Description|
# |-|-|
# |**time_column_name**|The name of your time column.|
# |**forecast_horizon**|The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly).|
# |**freq**|Forecast frequency. This optional parameter represents the period with which the forecast is desired, for example, daily, weekly, yearly, etc. Use this parameter for the correction of time series containing irregular data points or for padding of short time series. The frequency needs to be a pandas offset alias. Please refer to [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects) for more information.
# # Train<a id="train"></a>
#
# Instantiate an AutoMLConfig object. This config defines the settings and data used to run the experiment. We can provide extra configurations within 'automl_settings', for this forecasting task we add the forecasting parameters to hold all the additional forecasting parameters.
#
# |Property|Description|
# |-|-|
# |**task**|forecasting|
# |**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>|
# |**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).|
# |**experiment_timeout_hours**|Maximum amount of time in hours that the experiment take before it terminates.|
# |**training_data**|The training data to be used within the experiment.|
# |**label_column_name**|The name of the label column.|
# |**compute_target**|The remote compute for training.|
# |**n_cross_validations**|Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way.|
# |**enable_early_stopping**|Flag to enable early termination if the score is not improving in the short term.|
# |**forecasting_parameters**|A class holds all the forecasting related parameters.|
#
# This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results.
# +
from azureml.automl.core.forecasting_parameters import ForecastingParameters
forecasting_parameters = ForecastingParameters(
time_column_name=time_column_name,
forecast_horizon=forecast_horizon,
freq='H' # Set the forecast frequency to be hourly
)
automl_config = AutoMLConfig(task='forecasting',
primary_metric='normalized_root_mean_squared_error',
blocked_models = ['ExtremeRandomTrees', 'AutoArima', 'Prophet'],
experiment_timeout_hours=0.3,
training_data=train,
label_column_name=target_column_name,
compute_target=compute_target,
enable_early_stopping=True,
n_cross_validations=3,
verbosity=logging.INFO,
forecasting_parameters=forecasting_parameters)
# -
# Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while.
# One may specify `show_output = True` to print currently running iterations to the console.
remote_run = experiment.submit(automl_config, show_output=False)
remote_run.wait_for_completion()
# ## Retrieve the Best Model
# Below we select the best model from all the training iterations using get_output method.
best_run, fitted_model = remote_run.get_output()
fitted_model.steps
# ## Featurization
# You can access the engineered feature names generated in time-series featurization.
fitted_model.named_steps['timeseriestransformer'].get_engineered_feature_names()
# ### View featurization summary
# You can also see what featurization steps were performed on different raw features in the user data. For each raw feature in the user data, the following information is displayed:
#
# + Raw feature name
# + Number of engineered features formed out of this raw feature
# + Type detected
# + If feature was dropped
# + List of feature transformations for the raw feature
# Get the featurization summary as a list of JSON
featurization_summary = fitted_model.named_steps['timeseriestransformer'].get_featurization_summary()
# View the featurization summary as a pandas dataframe
pd.DataFrame.from_records(featurization_summary)
# # Forecasting<a id="forecast"></a>
#
# Now that we have retrieved the best pipeline/model, it can be used to make predictions on test data. We will do batch scoring on the test dataset which should have the same schema as training dataset.
#
# The inference will run on a remote compute. In this example, it will re-use the training compute.
test_experiment = Experiment(ws, experiment_name + "_inference")
# ### Retrieving forecasts from the model
# We have created a function called `run_forecast` that submits the test data to the best model determined during the training run and retrieves forecasts. This function uses a helper script `forecasting_script` which is uploaded and executed on the remote compute.
# +
from run_forecast import run_remote_inference
remote_run_infer = run_remote_inference(test_experiment=test_experiment,
compute_target=compute_target,
train_run=best_run,
test_dataset=test,
target_column_name=target_column_name)
remote_run_infer.wait_for_completion(show_output=False)
# download the inference output file to the local machine
remote_run_infer.download_file('outputs/predictions.csv', 'predictions.csv')
# -
# ### Evaluate
# To evaluate the accuracy of the forecast, we'll compare the predictions against the actual demand values for a few selected metrics, including the mean absolute percentage error (MAPE). For more metrics that can be used for evaluation after training, please see [supported metrics](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#regressionforecasting-metrics), and [how to calculate residuals](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-understand-automated-ml#residuals).
# load forecast data frame
fcst_df = pd.read_csv('predictions.csv', parse_dates=[time_column_name])
fcst_df.head()
# +
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from matplotlib import pyplot as plt
# use automl metrics module
scores = scoring.score_regression(
y_test=fcst_df[target_column_name],
y_pred=fcst_df['predicted'],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
print("[Test data scores]\n")
for key, value in scores.items():
print('{}: {:.3f}'.format(key, value))
# Plot outputs
# %matplotlib inline
test_pred = plt.scatter(fcst_df[target_column_name], fcst_df['predicted'], color='b')
test_test = plt.scatter(fcst_df[target_column_name], fcst_df[target_column_name], color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
# -
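# As a quick cross-check of one of those metrics, MAPE can also be computed directly from the downloaded predictions (a small sketch; it assumes the actual demand values are non-zero):
# +
import numpy as np
# mean absolute percentage error computed by hand from the forecast dataframe
abs_pct_error = np.abs((fcst_df[target_column_name] - fcst_df['predicted']) / fcst_df[target_column_name])
print('MAPE: {:.2f}%'.format(100 * abs_pct_error.mean()))
# -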
# # Advanced Training <a id="advanced_training"></a>
# We did not use lags in the previous model specification. In effect, the prediction was the result of a simple regression on date, time series identifier columns and any additional features. This is often a very good prediction as common time series patterns like seasonality and trends can be captured in this manner. Such simple regression is horizon-less: it doesn't matter how far into the future we are predicting, because we are not using past data. In the previous example, the horizon was only used to split the data for cross-validation.
# ### Using lags and rolling window features
# Now we will configure the target lags, that is the previous values of the target variables, meaning the prediction is no longer horizon-less. We therefore must still specify the `forecast_horizon` that the model will learn to forecast. The `target_lags` keyword specifies how far back we will construct the lags of the target variable, and the `target_rolling_window_size` specifies the size of the rolling window over which we will generate the `max`, `min` and `sum` features.
#
# This notebook uses the blocked_models parameter to exclude some models that take a longer time to train on this dataset. You can choose to remove models from the blocked_models list but you may need to increase the experiment_timeout_hours parameter value to get results.
# +
advanced_forecasting_parameters = ForecastingParameters(
time_column_name=time_column_name, forecast_horizon=forecast_horizon,
target_lags=12, target_rolling_window_size=4
)
automl_config = AutoMLConfig(task='forecasting',
primary_metric='normalized_root_mean_squared_error',
                             blocked_models = ['ElasticNet','ExtremeRandomTrees','GradientBoosting','XGBoostRegressor', 'AutoArima', 'Prophet'], #These models are blocked for tutorial purposes, remove this for real use cases.
experiment_timeout_hours=0.3,
training_data=train,
label_column_name=target_column_name,
compute_target=compute_target,
enable_early_stopping = True,
n_cross_validations=3,
verbosity=logging.INFO,
forecasting_parameters=advanced_forecasting_parameters)
# -
# We now start a new remote run, this time with lag and rolling window featurization. AutoML applies featurizations in the setup stage, prior to iterating over ML models. The full training set is featurized first, followed by featurization of each of the CV splits. Lag and rolling window features introduce additional complexity, so the run will take longer than in the previous example that lacked these featurizations.
advanced_remote_run = experiment.submit(automl_config, show_output=False)
advanced_remote_run.wait_for_completion()
# ### Retrieve the Best Model
best_run_lags, fitted_model_lags = advanced_remote_run.get_output()
# # Advanced Results<a id="advanced_results"></a>
# We now retrieve forecasts on the test set from the best lag/rolling-window model, re-using the remote inference workflow and evaluation steps from the simpler model above.
# +
test_experiment_advanced = Experiment(ws, experiment_name + "_inference_advanced")
advanced_remote_run_infer = run_remote_inference(test_experiment=test_experiment_advanced,
compute_target=compute_target,
train_run=best_run_lags,
test_dataset=test,
target_column_name=target_column_name,
inference_folder='./forecast_advanced')
advanced_remote_run_infer.wait_for_completion(show_output=False)
# download the inference output file to the local machine
advanced_remote_run_infer.download_file('outputs/predictions.csv', 'predictions_advanced.csv')
# -
fcst_adv_df = pd.read_csv('predictions_advanced.csv', parse_dates=[time_column_name])
fcst_adv_df.head()
# +
from azureml.automl.core.shared import constants
from azureml.automl.runtime.shared.score import scoring
from matplotlib import pyplot as plt
# use automl metrics module
scores = scoring.score_regression(
y_test=fcst_adv_df[target_column_name],
y_pred=fcst_adv_df['predicted'],
metrics=list(constants.Metric.SCALAR_REGRESSION_SET))
print("[Test data scores]\n")
for key, value in scores.items():
print('{}: {:.3f}'.format(key, value))
# Plot outputs
# %matplotlib inline
test_pred = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df['predicted'], color='b')
test_test = plt.scatter(fcst_adv_df[target_column_name], fcst_adv_df[target_column_name], color='g')
plt.legend((test_pred, test_test), ('prediction', 'truth'), loc='upper left', fontsize=8)
plt.show()
| how-to-use-azureml/automated-machine-learning/forecasting-energy-demand/auto-ml-forecasting-energy-demand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Network Analysis
#
# ---
#
#
# ## Introduction
#
# Networks are mathematical or graphical representations of patterns of relationships between entities. These relationships are defined by some measure of "closeness" between individuals, and can exist in an abstract or actual space (for example, whether you are related to someone versus how far away you live from each other). Networks have been used to model everything from airplane traffic to supply chains, and even amorphous materials like window glass, cells, and proteins. They can also be used to model relationships among people. Social networks are patterns of relationships among people or organizations that affect and are affected by actions of individuals within the network. Network analysis captures the effect of the complete pattern of connections among individuals in a group to help us perform structural analysis of outcomes of interest for individuals and the group as a whole.
#
# Networks can be represented as **graphs**, where a graph is made up of **nodes** connected by **ties**. The flexibility of network analysis means that the first step toward analysis is to clearly define what constitutes a node and what constitutes a tie in your network. There are several types of graphs: connected, unconnected, directed, and many more (see [glossary](#glossary-of-terms) for a list of terms).
#
# This tutorial is based on Chapter 8 of [Big Data and Social Science](https://github.com/BigDataSocialScience).
#
#
#
# ## Glossary of Terms
# - A **node** is an individual entity within a graph.
#
# - A **tie** is a link between nodes. Ties can be **undirected**, meaning they represent a symmetrical
# relationship, or **directed**, meaning they represent an asymmetrical relationship (one that doesn't necessarily
# go both ways).
#
# - A directed tie is known as an **arc**. An undirected tie is known as an **edge**. For example, Facebook friendships are undirected: if I am Facebook friends
# with <NAME>, then he is also Facebook friends with me.
#
# - A **cutpoint** is a *node* that cannot be removed without disconnecting the network.
#
# - A **bridge** is a *tie* that cannot be removed without disconnecting the network.
#
# - Two nodes are said to be **reachable** when they are connected by an unbroken chain of relationships through
# other nodes.
#
# - **Network density** is the number of *actual* connections in a network divided by the number of *potential*
# connections in that network.
#
# - **Average distance** is the average path length between nodes in a graph. It is a measure of how many nodes
# it takes to transmit information across the network. This metric is only valid for fully connected graphs.
#
# - **Centrality** is the degree to which a given node influences the entire network.
#
# ## Table of Contents
#
# 1. [Loading the Data](#Loading-the-data)
# 2. [Representations of Networks](#Representations-of-Networks)
# 1. [Adjacency Matrix](#Adjacency-matrix)
# 2. [List of Edges](#List-of-edges)
# 3. [Graphs](#Graphs)
# 3. [Network Measures](#network-measures)
# 1. [Summary Statistics](#summary-statistics)
# 2. [Degree Distribution](#Degree-Distribution)
# 3. [Components and Reachability](#Components-and-reachability)
# 4. [Path Length](#Path-Length)
# 4. [Centrality Metrics](#Centrality-metrics)
# 1. [Degree Centrality](#Degree-Centrality)
# 2. [Closeness Centrality](#Closeness-Centrality)
# 3. [Betweenness Centrality](#Betweenness-Centrality)
# 5. [Cliques](#Cliques)
# 6. [Community Detection](#Community-Detection)
# 7. [Exercises](#Exercises)
# 8. [Resources](#resources)
# %pylab inline
from __future__ import print_function
import sys
import community
import networkx as nx
import seaborn as sns
import pandas as pd
from sqlalchemy import create_engine
# # Creating a Network
#
# In this tutorial we are going to explore employment patterns of individuals that have recently stopped receiving TANF benefits. The first step in creating a network is defining the question or questions we want to explore using the network. This then allows us to define what a *node* and *tie* will be. In our case we want to explore employment patterns. A node is a single individual, and a tie will exist between two individuals if they worked for the same employer, as determined by the employer's EIN (employer identification number).
#
# The following is a SQL script to create a network of all people that exited in 2014 and were employed in the first quarter of 2015. First we create a new table from the `idhs.ind_spells` table as `ada_class3.ind_spells_dates`, with the start_date and end_date converted to date columns. This table is quite large, so we take a subset of the data and make two tables of people on TANF benefits: one for people receiving TANF benefits in the last half of 2014, and a second for people receiving TANF benefits in 2015. Then we do a `LEFT JOIN` to find individuals who received benefits in 2014 but no longer received them in 2015, and a second `LEFT JOIN` to grab the `ssn_hash`. We can then grab the wage records for the first quarter of 2015 using the `ssn_hash`. From there we do a `self-join`, joining the table onto itself using the `ein`, which forms the network. The EIN doesn't really tell us anything about the type of job someone has, but the legal name of the business will, so we create a table of EIN and legal name and join that to our network table.
#
# Note that in the self-join every person is matched with themselves, so we remove those "self-ties", along with entries where the EIN is 000000000 and where the legal name is nan.
# ```
# -- make a new table where the dates are date type rather
# -- than text to do date manipulation
# \echo "Munging The Data"
# CREATE TABLE if NOT EXISTS ada_class3.ind_spells_dates AS
# SELECT recptno,
# benefit_type,
# to_date(start_date::text,'YYYY-MM-DD') start_date,
# to_date(end_date::text, 'YYYY-MM-DD') end_date
# FROM idhs.ind_spells;
#
# -- subset for 2014 of everyone on tanf46
# CREATE TABLE if NOT EXISTS ada_class3.individual_spells_2014 AS
# SELECT *
# FROM ada_class3.ind_spells_dates
# WHERE start_date > '2014-06-01' and
# end_date > '2014-12-31' and
# benefit_type = 'tanf46';
# -- make an index for faster queries
# CREATE INDEX if NOT EXISTS recptno_ind
# ON ada_class3.individual_spells_2014 (recptno);
#
# -- subset for 2015 of everyone on tanf46
# CREATE TABLE if NOT EXISTS ada_class3.individual_spells_2015 AS
# SELECT *
# FROM ada_class3.ind_spells_dates
# WHERE start_date > '2015-01-01' AND
# end_date > '2015-12-31' and
# benefit_type = 'tanf46';
# -- make an index for faster queries
# CREATE INDEX if NOT EXISTS receptno_ind
# ON ada_class3.individual_spells_2015 (recptno);
#
# --grab the records of everyone in 2014 that did not have
# --benefits in 2015
# CREATE TABLE if NOT EXISTS ada_class3.benefits_2014_not2015 as
# SELECT a.recptno recptno_2014,
# a.benefit_type benefit_type_2014,
# a.start_date start_date_2014,
# a.end_date end_date_2014,
# b.recptno recptno_2015,
# b.benefit_type benefit_type_2015,
# b.start_date start_date_2015,
# b.end_date end_date_2015,
# c.ssn_hash ssn_hash
# FROM ada_class3.individual_spells_2014 a
# LEFT JOIN ada_class3.individual_spells_2015 b ON a.recptno = b.recptno
# LEFT JOIN idhs.member c ON a.recptno = c.recptno
# WHERE b.recptno IS NULL;
#
# --grab the first quarter date from the ides data
# CREATE TABLE IF NOT EXISTS ada_class3.ssn_ein_2015_1 as
# SELECT ssn, ein
# FROM ides.il_wage
# where ssn in (select distinct(ssn_hash) from ada_class3.benefits_2014_not2015)
# and year = 2015
# and quarter = 1;
#
# CREATE TABLE IF NOT EXISTS ada_class3.ssn_ein AS
# SELECT ssn,ein, count(*)
# FROM ada_class3.ssn_ein_2015_1
# GROUP BY ssn, ein
# ORDER BY 3 desc;
#
# \echo "making the network"
# DROP TABLE IF EXISTS ada_class3.ein_network;
# CREATE TABLE IF NOT EXISTS ada_class3.ein_network AS
# SELECT a.ssn ssn_l,
# a.ein,
# b.ssn ssn_r
# FROM ada_class3.ssn_ein a
# JOIN ada_class3.ssn_ein b on a.ein = b.ein;
#
# DELETE FROM ada_class3.ein_network
# WHERE ssn_l = ssn_r
# OR ein = '000000000'
# OR ein='0';
#
# --map the ein number to legal name
# -- of the entity.
#
# DROP TABLE IF EXISTS ada_class3.ein_name;
# CREATE TABLE ada_class3.ein_name AS
# SELECT ein, name_legal, count(*)
# from ides.il_qcew_employers
# group by ein, name_legal
# order by 3 desc;
#
# DROP TABLE IF EXISTS ada_class3.ein_network_2015;
# CREATE TABLE ada_class3.ein_network_2015 AS
# SELECT n.ssn_l, n.ein, e.name_legal, n.ssn_r
# FROM ada_class3.ein_network n
# JOIN ada_class3.ein_name e ON n.ein = e.ein;
#
# DELETE FROM ada_class3.ein_network_2015
# WHERE name_legal = 'nan';
# ```
#
#
#
# # Loading the Data
#
# In this tutorial we will explore graphical representations of this network, degree metrics, centrality metrics, how to calculate the shortest path between nodes, and community detection. We will be using the [NetworkX Python Library](https://networkx.github.io) developed at Los Alamos National Laboratory (LANL).
#
# First we have to load the data from the database. *Note that we did the hard work of creating the network in SQL, and are now doing our more complex analysis in Python.*
engine = create_engine("postgresql://10.10.2.10:5432/appliedda")
df_network = pd.read_sql('SELECT * from ada_class3.ein_network_2015;',
engine)
df_network.head()
network = list(zip(df_network.ssn_l, df_network.ssn_r))
G = nx.Graph()
G.add_edges_from(network)
# # Representations of Networks
#
# ## Adjacency Matrix
# One way to represent networks is an **adjacency matrix**, a binary (all entries either 0 or 1) square matrix. Each row represents the connections between one node and the other nodes in the network. For instance, the first row represents the first node. Each entry in a row corresponding to a node represents possible connections to the other nodes as indicated by 1 (connected) or 0 (not connected).
plt.figure(figsize=(30,30))
plt.spy(nx.adjacency_matrix(G))
# ## List of Edges
# Graphs can also be represented as **edge lists**, where you list the connections between nodes exhaustively. If we know the graph is undirected, we only need to list each relationship one time. For example, we say that 1 is connected to 32, but it would be redundant to also say that 32 is connected to 1. Representing a network as an edge list is typically preferable to an adjacency matrix when the matrix is sparse -- that is, when most of its entries are 0 -- because the edge list takes much less space to store. An edge list is typically how a network is stored in a database.
network[:10]
# ## Graphs
# Networks can also be displayed as graphs, which is probably the most intuitive way to visualize them. A graph drawing emphasizes the nodes, or individuals, how close they are to one another, and the groups that emerge, as well as the edges, or the connections themselves. *Note: this network is too large to visualize usefully, so the drawing call below is left inactive.*
# + active=""
# nx.draw(G)
# -
# Due to the large number of nodes this visualization is not helpful. Given that we can't derive much information from this particular visualization we need to turn to other network measures.
# # Network Measures
# It is useful to know the size (in terms of nodes and ties) of the network, both to have an idea of the size and connectivity of the network, and because most of the measures you will use to describe the network will need
# to be standardized by the number of nodes or the number of potential connections.
#
# One of the most important things to understand about larger networks is the pattern of indirect connections among nodes, because it is these chains of indirect connections that make the network function as a whole, and make networks a
# useful level of analysis. Much of the power of networks is due to indirect ties that create **reachability.** Two nodes can reach each other if they are connected by an unbroken chain of relationships, often called **indirect ties**.
#
# Structural differences between node positions, the presence and characteristics of smaller "communities" within larger networks, and properties of the structure of the whole group can be quantified using different **network measures.**
# ## Summary Statistics
# Print out some summary statistics on the network
print( nx.info(G) )
# We see that there are 568892 ties (relationships) and 13716 nodes (individuals).
#
# The **average degree** of the network is the average number of edges connected to each node.
#
# We see that the average degree of this network is 83, meaning that the average individual in the network is connected to 83 others. Recall that we made a tie based on a shared EIN, which means that in the first quarter of 2015 the average person in our network worked for the same employer as 83 other people who had also received benefits in 2014, suggesting these people often work the same types of jobs.
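# As a quick sketch, the average degree can also be computed directly from the graph (G.degree() returns a dictionary of node degrees in older NetworkX versions and an iterable of (node, degree) pairs in newer ones, so we wrap it in dict()):
# +
degrees = dict(G.degree())
print(sum(degrees.values()) / float(len(degrees)))
# -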
# Print out the average density of the network
print(nx.density(G))
# The average density is calculated as the $$\text{average density} = \frac{\text{actual ties}}{\text{possible number of ties}} $$
#
# where the possible number of ties for an undirected graph (if every node had a tie to every other node) is $\frac{n(n-1)}{2}$.
#
# If every node were connected to every other node, the average density would be 1. If there were no ties between any of the nodes, the average density would be 0. The average density of this network is 0.0006, which indicates it is not a very dense network. In this example, we can interpret this to mean that individuals are mostly in small groups, and the groups don't overlap very much.
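# As a sketch, we can verify the density by applying the formula above by hand:
# +
n = G.number_of_nodes()
manual_density = 2.0 * G.number_of_edges() / (n * (n - 1))
print(manual_density)
# -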
# Now that we have looked at some summary statistics as a whole we are going to drill down to the individual actors in our network.
# ## Degree Distribution (Who has the most relationships?)
#
#
# We can cast this question as a network analysis problem by asking *which node has the most ties*.
dict_degree = G.degree()
df_degree = pd.DataFrame.from_dict(dict_degree, orient='index')
df_degree.columns=['degree']
df_degree.index.name = 'node_id'
sns.set_style("whitegrid")
plt.figure(figsize=(22, 12))
sns.set_context("poster", font_scale=1.00, rc={"lines.linewidth": 1.00,"lines.markersize":8})
df_degree.sort_values(by='degree', ascending=False)[:10].plot(kind='barh')
# The last five entries have over 1000 connections. This likely means they work for a large company.
df_degree.sort_values(by='degree', ascending=False)[:10]
G.neighbors('a7cb780013ee0fa3a2c48874e9d1c9a06eafa8a6d46fe3898f9529efc6d7c982')
# ## Components and Reachability
#
# Two nodes are said to be **reachable** when they are connected by an unbroken chain of relationships through other nodes. Networks in which more of the possible connections (direct and indirect) among nodes are realized are denser and more cohesive than networks in which fewer of these connections are realized.
#
# The reachability of individuals in a network is determined by membership in **components**, which are subsets of the
# larger network in which every member of the group is indirectly connected to every other. Imagining the standard node and line drawing of a graph, a component is a portion of the network where you can trace a path between every pair of nodes without ever lifting your pen.
#
# Many larger networks consist of a single dominant component including anywhere from 50% to 90% of the individuals, and a few smaller components that are not connected. In this case, it is common to perform analysis on only the main connected component of the graph, because there is not a convenient way to mathematically represent how "far away" unconnected nodes are. As we will see below, our employment network is not fully connected, so restricting some calculations to the largest component (see the sketch below) is one way around that problem.
#
#
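# Since our network is not fully connected, it helps to look at its connected components explicitly (a quick sketch using NetworkX):
# +
components = list(nx.connected_components(G))
print('number of connected components:', len(components))
largest = max(components, key=len)
print('size of the largest component:', len(largest))
# keep the largest component around for analyses that require a connected graph
G_main = G.subgraph(largest)
# -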
# ## Path Length
#
# A **shortest path** between two nodes is a path from one node to the other, not repeating any nodes. One way to think of a shortest path between two individuals is how many people it would take to broker an introduction between them (think [six degrees of Kevin Bacon](https://en.wikipedia.org/wiki/Six_Degrees_of_Kevin_Bacon)).
#
# A pair of nodes may have several paths between them; the shortest one is called the **geodesic**.
# Calculate the shortest path between two individuals in the network
ls_path = nx.shortest_path(G,
'a7cb780013ee0fa3a2c48874e9d1c9a06eafa8a6d46fe3898f9529efc6d7c982',
'<KEY>')
print('The path length from {} to {} is {}.'.format(
'a7cb780013ee0fa3a2c48874e9d1c9a06eafa8a6d46fe3898f9529efc6d7c982',
'<KEY>',
    len(ls_path) - 1))
print('path: ', ls_path)
# In this case there is no path between the two nodes, so the call above fails with an error.
# Calculate the shortest path between two individuals that are connected
ls_path = nx.shortest_path(G, 'a7cb780013ee0fa3a2c48874e9d1c9a06eafa8a6d46fe3898f9529efc6d7c982',
'92b3eaa82b2f68f96dd9c18dace00a642b6af88c1612b9ded6960c69389ce7eb')
print('The path length from {} to {} is {}.'.format(
'a7cb780013ee0fa3a2c48874e9d1c9a06eafa8a6d46fe3898f9529efc6d7c982',
'92b3eaa82b2f68f96dd9c18dace00a642b6af88c1612b9ded6960c69389ce7eb',
    len(ls_path) - 1))
print('path: ', ls_path)
# The **average shortest path length** describes how quickly information or goods can disburse through the network.
#
# The average shortest length $l$ is defined as $$ l = \frac{1}{n(n-1)} \sum_{i \ne j}d(v_{i},v_{j}) $$ where $n$ is the number of nodes in the graph and $d(v_{i},v_{j})$ is the shortest path length between nodes $i$ and $j$.
print(nx.average_shortest_path_length(G))
# In this case, we cannot calculate the average shortest path, since our network is not fully connected (the network has islands within it that are cut off from the rest of the network). Since there is no way to calculate the distance between two nodes that can't be reached from one another, there is no way to calculate the average shortest distance across all pairs.
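# One workaround (shown as an inactive cell because it can take a long time on a component of this size) is to compute the average shortest path length on the largest connected component only, re-using `G_main` from the sketch above:
# + active=""
# print(nx.average_shortest_path_length(G_main))
# -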
# # Centrality Metrics
#
# Centrality metrics measure how important, or "central," a node is to the network. These can indicate which individual has the most social contacts, who is closest to other people, or through whom information most often flows. There are many **centrality metrics** -- degree centrality, betweenness centrality, closeness centrality, eigenvalue centrality, percolation centrality, PageRank -- all capturing different aspects of a node's contribution to a network.
#
# Centrality measures are the most commonly used means to explore network effects at the level of certain individual participants. Typically, these metrics identify and describe a few important nodes, but don't tell us much about the rest of the nodes in the network. This is akin to Google's search results: the first few matches are the most relevant, but if you go a few pages in to the search results, you might as well have been searching for something else entirely.
# ## Degree Centrality (Who has the most relationships?)
#
# The most basic and intuitive measure of centrality, **degree centrality**, simply counts the number of ties that each node has. Degree centrality represents a clear measure of the prominence or visibility of a node. The degree centrality $C_{D}(x)$ of a node $x$ is
#
# $$C_{D}(x) = \frac{deg(x)}{n-1}$$
#
# where $deg(x)$ is the number of connections that node $x$ has, and $n-1$ is a normalization factor for the total amount of possible connections.
#
# If a node has no connections to any other nodes, its degree centrality will be 0. If it is directly connected to every other node, its degree centrality will be 1.
#
dict_degree_centrality = nx.degree_centrality(G)
df_degree_centrality = pd.DataFrame.from_dict(dict_degree_centrality, orient='index')
df_degree_centrality.columns=['degree_centrality']
df_degree_centrality.index.name = 'node_id'
df_degree_centrality.sort_values(by='degree_centrality',
ascending=False)[:10].plot(kind='barh')
# As we can see, this is simply a recasting of the [degree distribution](#degree-distribution).
# ## Closeness Centrality (Who has the shortest of shortest paths going between them?)
#
# **Closeness centrality** is based on the idea that networks position some individuals closer to or farther away
# from other individuals, and that shorter paths between actors increase the likelihood of communication, and
# consequently the ability to coordinate complicated activities. The closeness centrality $C_C(x)$ of a node $x$ is calculated as:
#
# $$C_C(x) = \frac{n-1}{\sum_{y}d(x,y)} $$
#
# where $d(x,y)$ is the length of the geodesic between nodes $x$ and $y$.
dict_closeness_centrality = {}
for ssn_hash in zip(*network[:25])[0]:
dict_closeness_centrality[ssn_hash] = nx.closeness_centrality(G,u=ssn_hash)
df_closeness_centrality = pd.DataFrame.from_dict(dict_closeness_centrality,
orient='index')
df_closeness_centrality.columns=['closeness_centrality']
df_closeness_centrality.index.name = 'node_id'
df_closeness_centrality.sort_values(by='closeness_centrality',
ascending=False)[:10].plot(kind='barh')
# The last three individuals have the highest closeness centrality. This implies that these individuals have the most close connections to the most members in the network. However, all of these individuals have a closeness centrality of around 0.025, so it is clear there is not really anyone in the dataset that is very closely related to a lot of the other members. This makes sense given the other statistics we've calculated about this graph - there are lots of small, disconnected groups.
# ## Betweenness Centrality (Who has the most shortest paths between them?)
#
# Where closeness assumes that communication and information flow increase with proximity, **betweenness centrality**
# captures "brokerage," or the idea that a node that is positioned "in between" many other pairs of nodes gains some individual advantage. To calculate betweenness, we must assume that when people search for new
# information through networks, they are capable of identifying the shortest path (so that we know that the path between two nodes actually includes the "in between" node); additionally, we must assume
# that when multiple shortest paths exist, each path is equally likely to be chosen.
#
# The betweenness centrality $C_B(x)$ of a node $x$ is given by
#
# $$ C_B(x) = \sum_{s \ne x \ne t} \frac{\sigma_{st}(x)}{\sigma_{st}}$$
#
# where $\sigma_{st}$ is the number of shortest paths from node $s$ to node $t$ and $\sigma_{st}(x)$ is the number of those shortest paths that pass through node $x$. Intuitively, for each node, we look at how many of the shortest paths between every other pair of nodes include that node.
#
dict_betweenness_centrality = nx.betweenness_centrality(G, k=50)
df_betweenness_centrality = pd.DataFrame.from_dict(dict_betweenness_centrality,
orient='index')
df_betweenness_centrality.columns=['betweeness_centrality']
df_betweenness_centrality.index.name = 'node_id'
df_betweenness_centrality.sort_values(by='betweeness_centrality',
ascending=False)[:10].plot(kind='barh')
# Given the small values for betweenness centrality, it appears that there is no large single broker in this network.
# # Cliques
#
# A clique is a maximally connected sub-network, or a group of individuals who are all connected to one another.
#
# In our case, this would be a group of individuals who all worked for the same employer. We might expect to see a lot of cliques in this network, because we defined the ties within our network based on shared employers, so every set of co-workers forms a clique.
cliques = list(nx.find_cliques(G))
import functools
#summary stats of cliques
num_cliques = len(cliques)
ls_len_cliqs = [len(cliq) for cliq in cliques ]
max_clique_size = max(ls_len_cliqs)
avg_clique_size = np.mean(ls_len_cliqs)
max_cliques = [c for c in cliques if len(c) == max_clique_size]
max_clique_sets = [set(c) for c in max_cliques]
people_in_max_cliques = list(functools.reduce(lambda x,y: x.intersection(y), max_clique_sets))
print(num_cliques)
print(max_clique_size)
print(avg_clique_size)
# There are *2231* cliques in the network. The maximum clique size is *689* people and the average clique size is *7.60*, ~8 people.
#
# Let's see what the maximum cliques look like.
max_cliques
Graph_max_clique1 = G.subgraph(max_cliques[0])
# + active=""
# nx.draw(Graph_max_clique1, with_labels=False)
# -
df_network[ df_network['ssn_l'].isin(max_cliques[0]) & df_network['ssn_r'].isin(max_cliques[0])]
# It appears WalMart is a popular employer, and there are some smaller businesses that employ sub-populations of our largest clique.
# # Community Detection (This may take some time)
#
# In **community detection**, we try to find sub-networks, or communities, of densely connected nodes. Community detection is similar to clustering, in that strong communities will display an abundance of intra-community (within community) connections and few inter-community (between community) connections.
#
#
#
# Here we use the Louvain method, as implemented by `community.best_partition`; the technical details of the algorithm can be found [here](https://arxiv.org/pdf/0803.0476v2.pdf).
#
#
dict_clusters = community.best_partition(G)
clusters = [dict_clusters.get(node) for node in G.nodes()]
plt.axis("off")
#nx.draw_networkx(G,
# cmap = plt.get_cmap("terrain"),
# node_color = clusters,
# node_size = 600,
# with_labels = True,
# fontsize=200)
dict_clusters
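# A quick summary of the detected communities (a small sketch using the `clusters` list built above):
# +
community_sizes = pd.Series(clusters).value_counts()
print('number of communities found:', len(community_sizes))
print(community_sizes.head(10))
# -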
# [Back to Table of Contents](#Table-of-Contents)
# # Resources
# - [International Network for Social Network Analysis](http://www.insna.org/) is a large, interdisciplinary association
# dedicated to network analysis.
# - [Pajek](http://mrvar.fdv.uni-lj.si/pajek/) is a freeware package for network analysis and visualization.
# - [Gephi](https://gephi.org/) is another freeware package that supports large-scale network visualization.
# - [Network Workbench](http://nwb.cns.iu.edu/) is a freeware package that supports extensive analysis and
# visualization of networks.
# - [NetworkX](https://networkx.github.io/) is the Python package used in this tutorial to analyze and visualize networks.
# - [iGraph](http://igraph.org/) is a network analysis package with implementations in R, Python, and C libraries.
# - [A Fast and Dirty Intro to NetworkX (and D3)](http://www.slideshare.net/arnicas/a-quick-and-dirty-intro-to-networkx-and-d3)
| notebooks/session_06/Introduction_to_Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
'''
Example code to retrieve and plot a user specified track from
the IBTrACs database
User inputs: Name, Year, Basin
Output: A map with the track
Python For Atmospheric Science By Example
<NAME>
North Carolina State University
'''
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from netCDF4 import Dataset
from matplotlib.dates import num2date,date2num
import xarray as xr
import numpy as np
import datetime as dt
import cftime as cf
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import pandas as pd
# where is ibtracs file located?
dataDir = "/home/anant/work/data/"
filename = "IBTrACS.since1980.v04r00.nc"
file = dataDir + filename
#User Input
#Enter the year, name and basin of the hurricane
Hurr_Year = 2005
Hurr_Name = 'katrina'
Hurr_Basin = "NA"
Hurr_Year = 2017
Hurr_Name = 'harvey'
Hurr_Basin = "NA"
#---------------------------------------------------------------------------
Hurr_Name = Hurr_Name.upper().encode('utf-8')
try:
ds = xr.open_dataset(file,decode_cf=True)
except:
print ("file not found. quitting code")
quit()
print ("Ibtracs file found and opened")
name = ds.name.values
# narrow down the storm indices that match the supplied TC name
# inds may be a list of one or more
inds = np.where(name == Hurr_Name)[0]
# read only the first time in the track for each of the storms identified above
# and extract the year using pandas
timeP = pd.to_datetime(ds.time[inds,0].values).year
basin = ds.basin[inds,0].values
#print (basin)
# match the year index
indYear = np.where(timeP == Hurr_Year)[0]
#print (indYear)
ns = len(indYear)
if ( ns > 1):
print ("More than one match! Now checking basin")
    indBasin = np.where(basin == Hurr_Basin.encode('utf-8'))[0]  # basin values are byte strings, so encode the user input as we did for the name
storm_index = inds[indBasin]
else:
print ("One match found")
storm_index = inds[indYear]
storm_lat = ds.lat[storm_index,:][0].values
storm_lon = ds.lon[storm_index,:][0].values
storm_nobs = int(ds.numobs[storm_index].values)
# -
# ## Now plot the track
# +
# Lambert Conformal Conic map.
m = Basemap(llcrnrlon=-100.,llcrnrlat=10.,urcrnrlon=-60.,urcrnrlat=40.,
projection='lcc',lat_1=20.,lat_2=40.,lon_0=-80.,
resolution ='l',area_thresh=1000.)
#Convert latitude and longitude to coordinates X and Y
x, y = m(storm_lon, storm_lat)
m.plot(x,y,color='k')
# draw coastlines, meridians and parallels.
m.drawcoastlines()
m.drawcountries()
m.drawmapboundary()
m.fillcontinents(color='w',lake_color='w')
m.drawparallels(np.arange(10,70,10),labels=[1,1,0,0])
m.drawmeridians(np.arange(-120,0,10),labels=[0,0,0,1])
titleString = 'Hurricane ' + Hurr_Name.decode('utf-8') + " " + str(Hurr_Year)
plt.title(titleString)
plt.show()
# -
# ## Now we show how to use cartopy to draw a track
# +
'''
Illustrate the use of cartopy
'''
import cartopy.crs as ccrs
import cartopy
#--------------------------------------------------------------------------------
# set map boundaries
south, north = 10, 40
west, east = -100, -70
central_lon, central_lat = (east+west)/2, (north+south)/2
extent = [west, east, south, north]
plt.figure(figsize=(12, 6))
ax = plt.axes(projection=ccrs.AlbersEqualArea(central_lon, central_lat))
ax.set_extent(extent)
#ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.LAND, facecolor='lightgray', edgecolor='lightgray')
#ax.add_feature(cartopy.feature.LAKES, edgecolor='black')
#ax.add_feature(cartopy.feature.RIVERS)
ax.gridlines()
# plot the track on the map
plt.plot(storm_lon,storm_lat,color='k',transform=ccrs.Geodetic())
titleString = 'Hurricane ' + Hurr_Name.decode('utf-8') + " " + str(Hurr_Year)
plt.title(titleString)
plt.show()
# -
| examples/ibtracs/.ipynb_checkpoints/track_tc-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: FASTAI
# language: python
# name: fastai
# ---
# # Brain age regression with fastai
#
# Join here: http://tiny.cc/k8sihz
#
# ( Model adapted from https://analyticsindiamag.com/a-hands-on-guide-to-regression-with-fast-ai )
# +
# Import all libraries needed for the exploration
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd # this is how we usually import pandas
import numpy as np # this is how we usually import numpy
import sys # only needed to determine Python version number
import matplotlib #only needed to determine Matplotlib version number
from fastai.tabular import *
# Enable inline plotting
# %matplotlib inline
# -
# Supress some warnings:
import warnings
warnings.filterwarnings('ignore')
print('Python version ' + sys.version)
print('Pandas version ' + pd.__version__)
print('Numpy version ' + np.__version__)
print('Matplotlib version ' + matplotlib.__version__)
print('Seaborn version ' + sns.__version__)
# ## Code
# !ls .
# ## Data
# !ls ../data
# ## Training data
train_data = pd.read_csv('../data/train.csv')
# ## Test data
test_data = pd.read_csv('../data/test.csv')
# ### Explore the data
# Check the number of data points in the data set
print('No observations:', len(train_data))
# Check the number of features in the data set
print('No variables:', len(train_data.columns))
# Check the data types
print(train_data.dtypes.unique())
train_data.shape
train_data.columns
for i, col in enumerate(train_data.columns, start=0):
print(i, col)
# We may have some categorical features, let's check them
train_data.select_dtypes(include=['O']).columns.tolist()
# Check any number of columns with NaN
print(train_data.isnull().any().sum(), ' / ', len(train_data.columns))
# Check number of data points with any NaN
print(train_data.isnull().any(axis=1).sum(), ' / ', len(train_data))
# ### Select features and targets
features = train_data.iloc[:,9:-1].columns.tolist()
target = train_data.iloc[:,-1].name
all_lh_features = [
'CSF', 'CC_Posterior', 'CC_Mid_Posterior', 'CC_Central', 'CC_Mid_Anterior', 'CC_Anterior', 'EstimatedTotalIntraCranialVol',
'Left-Lateral-Ventricle',
'Left-Inf-Lat-Vent',
'Left-Cerebellum-White-Matter',
'Left-Cerebellum-Cortex',
'Left-Thalamus-Proper',
'Left-Caudate',
'Left-Putamen',
'Left-Pallidum',
'Left-Hippocampus',
'Left-Amygdala',
'Left-Accumbens-area',
'Left-VentralDC',
'Left-vessel',
'Left-choroid-plexus',
'Left-WM-hypointensities',
'Left-non-WM-hypointensities',
'lhCortexVol',
'lhCerebralWhiteMatterVol',
'lhSurfaceHoles',
'lh.aparc.thickness',
'lh_bankssts_thickness',
'lh_caudalanteriorcingulate_thickness',
'lh_caudalmiddlefrontal_thickness',
'lh_cuneus_thickness',
'lh_entorhinal_thickness',
'lh_fusiform_thickness',
'lh_inferiorparietal_thickness',
'lh_inferiortemporal_thickness',
'lh_isthmuscingulate_thickness',
'lh_lateraloccipital_thickness',
'lh_lateralorbitofrontal_thickness',
'lh_lingual_thickness',
'lh_medialorbitofrontal_thickness',
'lh_middletemporal_thickness',
'lh_parahippocampal_thickness',
'lh_paracentral_thickness',
'lh_parsopercularis_thickness',
'lh_parsorbitalis_thickness',
'lh_parstriangularis_thickness',
'lh_pericalcarine_thickness',
'lh_postcentral_thickness',
'lh_posteriorcingulate_thickness',
'lh_precentral_thickness',
'lh_precuneus_thickness',
'lh_rostralanteriorcingulate_thickness',
'lh_rostralmiddlefrontal_thickness',
'lh_superiorfrontal_thickness',
'lh_superiorparietal_thickness',
'lh_superiortemporal_thickness',
'lh_supramarginal_thickness',
'lh_frontalpole_thickness',
'lh_temporalpole_thickness',
'lh_transversetemporal_thickness',
'lh_insula_thickness',
'lh_MeanThickness_thickness'
]
# +
# Make rh_ and Right- names (base on LHs)
# List comprehensions
rh_names = [w.replace('lh_', 'rh_') for w in all_lh_features]
Right_names = [w.replace('Left-', 'Right-') for w in rh_names]
most_rh_names = [w.replace('lhC', 'rhC') for w in Right_names]
all_rh_names = [w.replace('lhS', 'rhS') for w in most_rh_names]
all_rh_features = [w.replace('lh.', 'rh.') for w in all_rh_names]
all_rh_features
# -
train_data_lh = train_data[all_lh_features]
train_data_rh = train_data[all_rh_features]
train_data_lh.describe().T.round(2)
train_data_rh.describe().T.round(2)
# +
dropcolumns = [
'EstimatedTotalIntraCranialVol',
'CSF',
'CC_Posterior',
'CC_Mid_Posterior',
'CC_Central',
'CC_Mid_Anterior',
'CC_Anterior'
]
df_lh = train_data_lh.drop(dropcolumns, axis=1)
df_rh = train_data_rh.drop(dropcolumns, axis=1)
df_lh
# -
# ### Path / default location for saving/loading models
path = ''
# ### The dependent variable/target
dep_var = 'Age'
y = train_data[dep_var]
y.hist()
plt.show()
fig = plt.figure(figsize=(10, 6))
sns.kdeplot(y, shade=True, cut=0)
sns.rugplot(y)
plt.show()
# ### eTIV by Sex
fig = sns.boxplot(x='Sex', y='EstimatedTotalIntraCranialVol', data=train_data)
# # Starting the fastai approach
# %reload_ext autoreload
# %autoreload 2
import sys
print(sys.path)
import sys, fastai
print(sys.modules['fastai'])
from fastai.utils import *
show_install()
# ### GPU [monitoring](https://docs.fast.ai/dev/gpu.html#gpu-memory-notes)
import torch
print(torch.cuda.is_available())
if torch.cuda.is_available():
# # !nvidia-smi -h
# # !nvidia-smi --help-query-gpu
# !nvidia-smi
# ## Data organization
# ### The list of categorical features in the dataset
cat_names = ['Sex']
# ### The list of continuous features in the dataset
cont_names_1 = [
'Left-Lateral-Ventricle', 'Right-Lateral-Ventricle',
'lhCortexVol', 'rhCortexVol',
'lhCerebralWhiteMatterVol', 'rhCerebralWhiteMatterVol'
]
cont_names_2 = [
'Left-Lateral-Ventricle', 'Right-Lateral-Ventricle',
'lhCortexVol', 'rhCortexVol',
'lhCerebralWhiteMatterVol', 'rhCerebralWhiteMatterVol'
]
cont_names_3 = [
'Left-Lateral-Ventricle', 'Right-Lateral-Ventricle',
'lhCortexVol', 'rhCortexVol',
'lhCerebralWhiteMatterVol', 'rhCerebralWhiteMatterVol',
'Left-Hippocampus', 'Right-Hippocampus',
'EstimatedTotalIntraCranialVol',
]
cont_names = cont_names_3
# ### List of processes/transforms to be applied to the dataset
procs = [FillMissing, Categorify, Normalize]
# ### Start index for creating a validation set from train_data
# Setting the indices for the validation set. The start and end indices are chosen so that the last 20% of the training data is used for validation.
start_indx = len(train_data) - int(len(train_data) * 0.2)
# ### End index for creating a validation set from train_data
end_indx = len(train_data)
# ### TabularList for Validation
val = (TabularList.from_df(train_data.iloc[start_indx:end_indx].copy(), path=path, cat_names=cat_names, cont_names=cont_names))
test = (TabularList.from_df(test_data, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs))
# ### TabularList for training
data = (TabularList.from_df(train_data, path=path, cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(list(range(start_indx,end_indx)))
.label_from_df(cols=dep_var)
.add_test(test)
.databunch())
# ### Display the data batch
data.show_batch(rows = 10)
# ## Initialising Neural Network
# We will initialize a neural network with 4 hidden layers, with 300, 200, 100 and 50 nodes respectively.
#
# The network will use two primary metrics for evaluation:
#
# - Root Mean Squared Error(RMSE)
# - R-Squared
learn = tabular_learner(data, layers=[300,200, 100, 50], metrics= [rmse,r2_score])
# ### Show the complete summary of the model
learn.summary()
# ## Training the model
# Exploring the learning rates
learn.lr_find(start_lr = 1e-05,end_lr = 1e+05, num_it = 100)
learn.recorder.plot()
# Learning rate is a hyper-parameter that controls how much the weights of the network is being adjusted with respect to the loss gradient. The lr_find method helps explore the learning rate in a specified range. The graph shows the deviation in loss with respect to the learning rate.
# ### Fitting data and training the network
# Train the network for 50 epochs using the one-cycle policy
learn.fit_one_cycle(50)
# ### Evaluating the model
# The `show_results` method will display the data bunches along with predicted values.
# Display predictions on the training data
learn.show_results(ds_type=DatasetType.Train,rows = 5)
# Display predictions on the validation data
learn.show_results(ds_type=DatasetType.Valid)
# ### Fetching the Metrics
# Getting the training and validation errors
tr = learn.validate(learn.data.train_dl)
va = learn.validate(learn.data.valid_dl)
print("The metrics used in evaluating the network:", str(learn.metrics))
print("\nThe calculated RMSE & R-Squared for the training set :", tr[1:])
print("\nThe calculated RMSE & R-Squared for the validation set :", va[1:])
# #### SUMMARY
# The Root Mean Squared Error is the standard deviation of the errors/residuals. It tells us the ‘Goodness Of Fit’ of a model. The lower the value of RMSE the better the model.
#
# The R-Squared metric also called the coefficient of determination is used to understand the variation in the dependent variable(y) and the independent variable(X).The closer the value of R-Squared is to one, the better the model.
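# As formulas, using $y_i$ for the actual ages, $\hat{y}_i$ for the predictions and $\bar{y}$ for the mean actual age: $$\text{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}(y_i - \hat{y}_i)^2}, \qquad R^2 = 1 - \frac{\sum_{i}(y_i - \hat{y}_i)^2}{\sum_{i}(y_i - \bar{y})^2}$$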
#
# The above output suggests that:
#
# The model/network attained an RMSE of 1.4678 and an R-squared of 0.9726 on the training set, and an RMSE of 3.1737 and an R-squared of 0.9107 on the validation set.
# ### Plotting the losses for training and validation
learn.recorder.plot_losses()
# The above graph shows the change in loss during the course of training the network. At the beginning of the training, we can see a high loss value. As the networks learned from the data, the loss started to drop until it could no longer improve during the course of training. The validation shows a relatively consistent and low loss values.
#
# Note: The validation losses are only calculated once per epoch, whereas training losses are calculated after each batch
# ### Plotting the learning rate, momentum and metrics
# Plotting momentum & learning rate
learn.recorder.plot_lr(show_moms=True)
# Plotting the metrics of evaluation
learn.recorder.plot_metrics()
# ### A simple analysis on the predictions of validation set
#
# Plotting the average Age for a given Sex, -- actual vs predicted
plt.figure(figsize=(30, 3))
# plt.plot(val.groupby(['Sex']).mean()['Age'], linewidth = 3, )
plt.plot(train_data.groupby(['Sex']).mean()['Age'], linewidth = 3, )
#plt.plot(val.groupby(['Sex']).mean()['Predicted'],linewidth = 5, ls = '--')
plt.title('Average Age by Sex')
plt.xlabel('Sex')
plt.ylabel('Age in yrs')
plt.legend()
plt.show()
# ## Predicting Using The Network
# Predicting for a single observation
# Test set data for row 0
test_data.iloc[0]
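# A single-row prediction can also be made directly from this row (a minimal sketch assuming the fastai v1 `Learner.predict` API for tabular data):
# +
# learn.predict accepts a pandas Series for tabular learners and returns
# (prediction, underlying tensor, raw model output)
row = test_data.iloc[0]
pred = learn.predict(row)
print('predicted age for the first test subject:', pred[0])
# -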
# +
# Predicting for the complete test set
test_predictions = learn.get_preds(ds_type=DatasetType.Test)[0]
# Converting the tensor output to a list of predicted values
test_predictions = [i[0] for i in test_predictions.tolist()]
# Converting the prediction to a dataframe
test_predictions = pd.DataFrame(test_predictions, columns = ['Age'])
test_predictions.head(2)
# -
test_predictions.tail(2)
sampleSubmission = pd.read_csv('../data/sampleSubmission.csv')
sampleSubmission.label = test_predictions.Age.round(0).astype(int)
my_test_predictions = sampleSubmission.copy()
# Writing the predictions to an csv file.
my_test_predictions.to_csv("fastai_solution_3.csv", index = False)
# !ls .
# +
fig = plt.figure(figsize=(10, 6))
plt.subplot(2, 1, 1)
sns.kdeplot(y, shade=True)
sns.rugplot(y)
plt.xlim(0, 100)
plt.title("Predicted age in training data set (n=%d)" % (len(train_data)) )
plt.subplot(2, 1, 2)
y_pred = test_predictions.Age
sns.kdeplot(y_pred, shade=True)
sns.rugplot(y_pred)
plt.xlim(0, 100)
fig.suptitle('Results:')
plt.title("\nPredicted age in test data set (n=%d)" % (len(my_test_predictions)) )
plt.show()
# -
my_test_predictions
# ### Discretization of Age variable
# Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point.
pd.qcut(train_data['Age'], 8).head(1)
| nbs/.ipynb_checkpoints/brain_age_regression_fastai-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import prtools as pr
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from jupyterthemes import jtplot
jtplot.style(theme="oceans16")
# -
# # Exercise 2.2
# +
# X is assumed to be the same data matrix as in Exercise 2.3 below; it is defined here as well so this cell runs on its own
X = np.array([[1, 1], [-2, 1]])
XX = np.dot(X.T, X)
bias = np.ones((2,1))
Z = np.hstack((XX, bias))
ZZ = np.dot(Z.T, Z)
print("Z:", "\n", str(Z))
print("ZZ:", "\n", str(ZZ))
print("ZZ_inv", "\n", "singular matrix (Z has only 2 rows, so the 3x3 matrix ZZ is rank deficient)")
# -
# # Exercise 2.3
# +
X = np.array([[1,1], [-2,1]])
Y = np.array([[-1], [1]])
XX = np.dot(X.T, X)
XX_inv = np.linalg.inv(XX)
XY = np.dot(X.T, Y)
w = np.dot(XX_inv, XY)
print("w:", '\n', str(w))
# -
sqres = np.sum(np.square(np.dot(X, w) - Y))
print(sqres)
# # Exercise 2.4
# +
X = np.array([-2, -1, 0, 3]).T
Y = np.array([1, 1, 2, 3]).T
XX = np.dot(X.T, X)
XX_inv = 1./XX
XY = np.dot(X.T, Y)
w = XX_inv * XY
print("w:", '\n', str(w))
sqres = np.sum(np.square(np.dot(X, w) - Y))
print(sqres)
# +
X = np.array([-2, -1, 0, 3]).reshape(4,1)
bias = np.ones((4,1))
X = np.hstack((X,bias))
XX = np.dot(X.T, X)
XX_inv = np.linalg.inv(XX)
XY = np.dot(X.T, Y)
w = np.dot(XX_inv, XY)
print("w:", '\n', str(w))
sqres = np.sum(np.square(np.dot(X, w) - Y))
print(sqres)
# -
# # Exercise 2.5
x = np.random.uniform(low=0, high=1, size=(400)).reshape(400,1)
y = x ** 2 + np.random.normal(size=x.shape)  # per-sample Gaussian noise (size=(1) would add the same offset to every sample)
# +
x1 = x
x2 = np.hstack((x1, x1 ** 2))
x3 = np.hstack((x2, x1 ** 3))
print(x1.shape)
print(x2.shape)
print(x3.shape)
# +
bias = np.ones((400,1))
Y = y
for xi in [x1, x2, x3]:
X = np.hstack((xi,bias))
XX = np.dot(X.T, X)
XX_inv = np.linalg.inv(XX)
XY = np.dot(X.T, Y)
w = np.dot(XX_inv, XY)
print("w:", '\n', str(w))
sqres = np.sum(np.square(np.dot(X, w) - Y))
print(sqres)
print()
# -
for xi in [x1, x2, x3]:
reg = LinearRegression().fit(xi, y)
y_pred = reg.predict(xi)
print("MSE:", mean_squared_error(y, y_pred))
print(reg.coef_, '\n', reg.intercept_)
print()
for xi in [x1, x2, x3]:
data = pr.gendatr(xi, y)
w = pr.linearr(data)
print("MSE:", pr.testr(data, w.eval(data)))
print("w:", +w)
print()
# # Exercise 2.7
x = np.random.randn(10000,2)
y = 50 * np.sin(x[:,0]) * np.sin(x[:,1])
data = pr.gendatr(x, y)
pr.scatterr(data)
# +
x1 = x
x2 = np.hstack((x1, x1 ** 2))
x3 = np.hstack((x2, x1 ** 3))
print(x1.shape)
print(x2.shape)
print(x3.shape)
# -
for xi in [x1, x2, x3]:
reg = LinearRegression().fit(xi, y)
y_pred = reg.predict(xi)
print("MSE:", mean_squared_error(y, y_pred))
print(reg.coef_, '\n', reg.intercept_)
print()
y = x[:, 0] * x[:, 1]
for xi in [x1, x2]:
reg = LinearRegression().fit(xi, y)
y_pred = reg.predict(xi)
print("MSE:", mean_squared_error(y, y_pred))
print(reg.coef_, '\n', reg.intercept_)
print()
data = pr.gendatr(x1, y)
w = pr.linearr(data)
pr.scatterr(data)
pr.plotr(w)
pr.testr(data, w.eval(data))
# # Exercise 2.9 (c)
feature = np.array([[0,0], [0,1], [2,1], [2,2]])
label = np.array([-1, -1, 1, 1])
data = pr.prdataset(feature, label)
pr.scatterd(data)
FLD = pr.fisherc(data)
pr.plotc(FLD)
transFeature = feature.copy()  # copy so the original feature array is not modified in place
transFeature[:, 0] = transFeature[:, 0] / 2
transData = pr.prdataset(transFeature, targets=label)
pr.scatterd(transData)
FLD = pr.fisherc(transData)
pr.plotc(FLD)
# # Exercise 2.11
# +
from numpy import pi
x = np.array([1, 2, 6, pi, 5]).reshape(5,1)
y = np.array([1, 1, 1, -1, -1]).reshape(5,1)
data = pr.prdataset(x, targets=y)
pr.scatterd(data)
# -
def h_xa(xSpace = np.linspace(-2, 2, 500), a = 0):
h = np.array([1 if x>a else -1 for x in xSpace])
plt.plot(xSpace, h)
plt.show()
return h
h = h_xa(a=0)
h = h_xa(a=-1)
# # Exercise 2.15
data = pr.gendats([20,20], dim=2, delta=6)
pr.scatterd(data)
feature = +data
label = pr.genlab([20,20], [-1, 1])
# plt.scatter(+data[:,0], +data[:,1], c=label)
# plt.show()
# +
# implement perceptron algorithm
def addBias(x):
bias = np.ones([len(x),1])
x = np.hstack((x,bias))
print("feature's shape:", x.shape)
return x
def forward(x, w):
y = np.dot(x, w) # x(nxd) dot w(dx1) = y(nx1)
y_hat = np.where(y>=0, 1, -1)
return y_hat
def criterion(x, y, w):
    y_hat = forward(x, w)
    # signed indicator: y_i for misclassified samples, 0 for correctly classified ones,
    # so the update moves w towards misclassified +1 samples and away from -1 samples
    t = np.where(y == y_hat[:,0], 0, y)
    a = np.dot(x, w)
    # perceptron criterion: -sum of y_i * (w . x_i) over the misclassified samples
    loss = -np.sum(a[:,0] * t)
    gradient = -np.dot(x.T, t.reshape([t.shape[0],1]))
#     print(y_hat[:5].T)
#     print(y[:5])
#     print(t[:5])
#     print(a[:5,0] * t[:5])
#     print(np.sum(a[:5,0] * t[:5]))
    return loss, gradient
def paramsUpdate(x, y, w, rate):
_, gradient = criterion(x, y, w)
w += -rate * gradient
return w
def backward(x, y, w, rate):
    loss = float("inf")
while(loss != 0.):
w = paramsUpdate(x, y, w, rate)
loss, _ = criterion(x, y, w)
return w
def decisionBoundary(data, w):
x_min = np.min(+data[:,0])
x_max = np.max(+data[:,0])
feature_x = np.linspace(x_min, x_max, num=500)
    feature_y = -w[0]/w[1] * feature_x - w[2]/w[1]
    pr.scatterd(data)
    plt.plot(feature_x, feature_y)
plt.show()
# -
feature = addBias(feature)
w = np.ones([len(feature[0]),1])
y_hat = forward(feature, w)
print(y_hat[:10].T)
w = np.ones([len(feature[0]),1])
Ep, gradient = criterion(feature, label, w)
print(Ep)
print(gradient)
w = np.ones([len(feature[0]),1])
w = paramsUpdate(feature, label, w, rate=0.01)
print(w)
w = np.ones([len(feature[0]),1])
w = backward(feature, label, w, rate=0.01)
print(w)
y_hat = forward(feature, w)
acc = np.sum(y_hat[:,0] == label)
print(acc)
decisionBoundary(data, w)
# # Exercise 2.9 (b)
#[x1, x2, x3] -> [x1-x2, 2*x2, 2*x1+x3]
linearTrans = np.array([[1,-1,0], [0,2,0], [2,0,1]]).T
testX = np.array([3,6,9]).reshape(1,3)
transX = np.dot(testX, linearTrans)
print("transX: ", transX)
transFeature = np.dot(feature, linearTrans)
transY_hat = forward(transFeature, w)
acc = np.sum(transY_hat[:,0] == label)
print(acc)
plt.scatter(transFeature[:,0], transFeature[:,1], c=label)
transData = pr.prdataset(transFeature, targets=label)
decisionBoundary(transData, w)
plt.show()
# # Exercise 2.16
fisher = pr.fisherc()
fisher.train(data)
pr.scatterd(data)
pr.plotc(fisher)
| week2/Week_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming with Python
#
# ## Episode 1a - Introduction - Analysing Patient Data
#
# Teaching: 60 min,
# Exercises: 30 min
#
# Objectives
#
# - Assign values to variables.
#
# - Explain what a library is and what libraries are used for.
#
# - Import a Python library and use the functions it contains.
#
# - Read tabular data from a file into a program.
#
# - Select individual values and subsections from data.
#
# - Perform operations on arrays of data.
# ## Our Dataset
# In this episode we will learn how to work with CSV files in Python. Our dataset contains patient inflammation data - where each row represents a different patient and the columns represent inflammation data over a series of days.
#
# 
#
#
# However, before we discuss how to deal with many data points, let's learn how to work with single data values.
#
#
# ## Variables
# Any Python interpreter can be used as a calculator:
#
# ```
# 3 + 5 * 4
# ```
3 + 5 * 4
# This is great but not very interesting. To do anything useful with data, we need to assign its value to a variable. In Python, we can assign a value to a variable, using the equals sign ``=``. For example, to assign value 60 to a variable ``weight_kg``, we would execute:
#
# ```
# weight_kg = 60
# ```
weight_kg = 60
# From now on, whenever we use ``weight_kg``, Python will substitute the value we assigned to it. In essence, a variable is just a name for a value.
#
# ```
# weight_kg + 5
# ```
weight_kg + 5
# In Python, variable names:
#
# - can include letters, digits, and underscores - `A-Z, a-z, 0-9, _`
# - cannot start with a digit
# - are case sensitive.
#
# This means that, for example:
#
# `weight0` is a valid variable name, whereas `0weight` is not
# `weight` and `Weight` are different variables
#
# #### Types of data
# Python knows various types of data. Three common ones are:
#
# - integer numbers (whole numbers)
# - floating point numbers (numbers with a decimal point)
# - and strings (of characters).
#
# In the example above, variable `weight_kg` has an integer value of `60`. To create a variable with a floating point value, we can execute:
#
# ```
# weight_kg = 60.0
# ```
weight_kg = 60.0
# And to create a string we simply have to add single or double quotes around some text, for example:
#
# ```
# weight_kg_text = 'weight in kilograms:'
# ```
#
# To display the value of a variable to the screen in Python, we can use the print function:
#
# ```
# print(weight_kg)
# ```
weight_kg_text = 'weight in kilograms:'
print(weight_kg)
# We can display multiple things at once using only one print command:
#
# ```
# print(weight_kg_text, weight_kg)
# ```
print(weight_kg_text, weight_kg)
# Moreover, we can do arithmetic with variables right inside the print function:
#
# ```
# print('weight in pounds:', 2.2 * weight_kg)
# ```
print('weight in pounds:', 2.2*weight_kg)
# The above command, however, did not change the value of ``weight_kg``:
#
# ```
# print(weight_kg)
# ```
print(weight_kg)
# To change the value of the ``weight_kg`` variable, we have to assign `weight_kg` a new value using the equals `=` sign:
#
# ```
# weight_kg = 65.0
# print('weight in kilograms is now:', weight_kg)
# ```
weight_kg = 65.0
print('weight in kilograms is now:', weight_kg)
# #### Variables as Sticky Notes
#
# A variable is analogous to a sticky note with a name written on it: assigning a value to a variable is like writing a value on the sticky note with a particular name.
#
# This means that assigning a value to one variable does not change values of other variables (or sticky notes). For example, let's store the subject's weight in pounds in its own variable:
#
# ```
# # There are 2.2 pounds per kilogram
# weight_lb = 2.2 * weight_kg
# print(weight_kg_text, weight_kg, 'and in pounds:', weight_lb)
# ```
# There are 2.2 pounds per kilogram
weight_lb = 2.2 * weight_kg
print(weight_kg_text, weight_kg, 'and in pounds:', weight_lb)
# #### Updating a Variable
#
# Variables calculated from other variables do not change their value just because the original variable changed its value (unlike cells in Excel):
#
# ```
# weight_kg = 100.0
# print('weight in kilograms is now:', weight_kg, 'and weight in pounds is still:', weight_lb)
# ```
weight_kg = 100.0
print('weight in kilograms is now:', weight_kg, 'and weight in pounds is still:', weight_lb)
# Since `weight_lb` doesn't *remember* where its value comes from, it is not updated when we change `weight_kg`.
weight_lb = 2.2*weight_kg
print(weight_lb)
# ## Libraries
#
# Words are useful, but what's more useful are the sentences and stories we build with them (or indeed entire books or whole libraries). Similarly, while a lot of powerful, general tools are built into Python, specialised tools built up from these basic units live in *libraries* that can be called upon when needed.
# ### Loading data into Python
#
# In order to load our inflammation dataset into Python, we need to access (import in Python terminology) a library called `NumPy` (which stands for Numerical Python).
#
# In general you should use this library if you want to do fancy things with numbers, especially if you have matrices or arrays. We can import `NumPy` using:
#
# ```
# import numpy
# ```
import numpy
# Importing a library is like getting a piece of lab equipment out of a storage locker and setting it up on the bench. Libraries provide additional functionality to the basic Python package, much like a new piece of equipment adds functionality to a lab space. Just like in the lab, importing too many libraries can sometimes complicate and slow down your programs - so we only import what we need for each program. Once we've imported the library, we can ask the library to read our data file for us:
#
# ```
# numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')
# ```
# The expression `numpy.loadtxt(...)` is a function call that asks Python to run the function `loadtxt` which belongs to the `numpy` library. This dot `.` notation is used everywhere in Python: the thing that appears before the dot contains the thing that appears after.
#
# As an example, John Smith is the John that belongs to the Smith family. We could use the dot notation to write his name smith.john, just as `loadtxt` is a function that belongs to the `numpy` library.
#
# `numpy.loadtxt` has two parameters: the name of the file we want to read and the delimiter that separates values on a line. These both need to be character strings (or strings for short), so we put them in quotes.
#
# Since we haven't told it to do anything else with the function's output, the notebook displays it. In this case, that output is the data we just loaded. By default, only a few rows and columns are shown (with ... to omit elements when displaying big arrays). To save space, Python displays numbers as 1. instead of 1.0 when there's nothing interesting after the decimal point.
#
# Our call to `numpy.loadtxt` read our file but didn't save the data in memory. To do that, we need to assign the array to a variable. Just as we can assign a single value to a variable, we can also assign an array of values to a variable using the same syntax. Let's re-run `numpy.loadtxt` and save the returned data:
#
# ```
# data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')
# ```
data = numpy.loadtxt(fname='data/inflammation-01.csv', delimiter=',')
# This statement doesn't produce any output because we've assigned the output to the variable `data`. If we want to check that the data has been loaded, we can print the variable's value:
#
# ```
# print(data)
# ```
print(data)
# Now that the data is in memory, we can manipulate it. First, let's ask Python what type of thing `data` refers to:
#
# ```
# print(type(data))
# ```
print(type(data))
# The output tells us that `data` currently refers to an N-dimensional array, the functionality for which is provided by the `NumPy` library. These data correspond to arthritis patients' inflammation. The rows are the individual patients, and the columns are their daily inflammation measurements.
#
# #### Data Type
#
# A NumPy array contains one or more elements of the same type. The type function will only tell you that a variable is a NumPy array but won't tell you the type of thing inside the array. We can find out the type of the data contained in the NumPy array.
#
# ```
# print(data.dtype)
# ```
print(data.dtype)
# This tells us that the NumPy array's elements are floating-point numbers.
#
# With the following command, we can see the array's shape:
#
# ```
# print(data.shape)
# ```
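# Let's execute it:
print(data.shape)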
# The output tells us that the data array variable contains 60 rows and 40 columns. When we created the variable data to store our arthritis data, we didn't just create the array; we also created information about the array, called members or attributes. This extra information describes data in the same way an adjective describes a noun. data.shape is an attribute of data which describes the dimensions of data. We use the same dotted notation for the attributes of variables that we use for the functions in libraries because they have the same part-and-whole relationship.
#
# If we want to get a single number from the array, we must provide an index in square brackets after the variable name, just as we do in math when referring to an element of a matrix. Our inflammation data has two dimensions, so we will need to use two indices to refer to one specific value:
#
# ```
# print('first value in data:', data[0, 0])
# print('middle value in data:', data[30, 20])
# ```
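# Let's try it:
print('first value in data:', data[0, 0])
print('middle value in data:', data[30, 20])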
# The expression `data[30, 20]` accesses the element at row 30, column 20. While this expression may not surprise you, `data[0, 0]` might.
#
# #### Zero Indexing
#
# Programming languages like Fortran, MATLAB and R start counting at 1 because that's what human beings have done for thousands of years. Languages in the C family (including C++, Java, Perl, and Python) count from 0 because it represents an offset from the first value in the array (the second value is offset by one index from the first value). This is closer to the way that computers represent arrays (if you are interested in the historical reasons behind counting indices from zero, you can read Mike Hoye's blog post).
#
# As a result, if we have an M×N array in Python, its indices go from 0 to M-1 on the first axis and 0 to N-1 on the second. It takes a bit of getting used to, but one way to remember the rule is that the index is how many steps we have to take from the start to get the item we want.
# #### In the Corner
#
# What may also surprise you is that when Python displays an array, it shows the element with index `[0, 0]` in the upper left corner rather than the lower left. This is consistent with the way mathematicians draw matrices but different from the Cartesian coordinates. The indices are (row, column) instead of (column, row) for the same reason, which can be confusing when plotting data.
# #### Slicing data
#
# An index like `[30, 20]` selects a single element of an array, but we can select whole sections as well. For example, we can select the first ten days (columns) of values for the first four patients (rows) like this:
#
# ```
# print(data[0:4, 0:10])
# ```
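# Let's print that slice:
print(data[0:4, 0:10])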
# The slice `[0:4]` means, *Start at index 0 and go up to, but not including, index 4*.
#
# Again, the up-to-but-not-including takes a bit of getting used to, but the rule is that the difference between the upper and lower bounds is the number of values in the slice.
#
# Also, we don't have to start slices at `0`:
#
# ```
# print(data[5:10, 0:10])
# ```
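# For example:
print(data[5:10, 0:10])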
# and we don't have to include the upper or lower bound on the slice.
#
# If we don't include the lower bound, Python uses 0 by default; if we don't include the upper, the slice runs to the end of the axis, and if we don't include either (i.e., if we just use `:` on its own), the slice includes everything:
#
# ```
# small = data[:3, 36:]
# print('small is:')
# print(small)
# ```
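# Let's take that slice and have a look:
small = data[:3, 36:]
print('small is:')
print(small)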
# The above example selects rows 0 through 2 and columns 36 through to the end of the array.
#
# thus small is:
# ```
# [[ 2. 3. 0. 0.]
# [ 1. 1. 0. 1.]
# [ 2. 2. 1. 1.]]
# ```
#
# Arrays also know how to perform common mathematical operations on their values. The simplest operations with data are arithmetic: addition, subtraction, multiplication, and division. When you do such operations on arrays, the operation is done element-by-element. Thus:
#
# ```
# doubledata = data * 2.0
# ```
# will create a new array `doubledata`, each element of which is twice the value of the corresponding element in `data`:
#
# ```
# print('original:')
# print(data[:3, 36:])
# print('doubledata:')
# print(doubledata[:3, 36:])
# ```
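# Let's run the doubling example:
doubledata = data * 2.0
print('original:')
print(data[:3, 36:])
print('doubledata:')
print(doubledata[:3, 36:])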
# If, instead of taking an array and doing arithmetic with a single value (as above), you did the arithmetic operation with another array of the same shape, the operation will be done on corresponding elements of the two arrays. Thus:
#
# ```
# tripledata = doubledata + data
# ```
# will give you an array where `tripledata[0,0]` will equal `doubledata[0,0]` plus `data[0,0]`, and so on for all other elements of the arrays.
#
# ```
# print('tripledata:')
# print(tripledata[:3, 36:])
# ```
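# And the element-wise sum of the two arrays:
tripledata = doubledata + data
print('tripledata:')
print(tripledata[:3, 36:])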
# ## Exercises
# ### Variables
#
# What values do the variables mass and age have after each statement in the following program?
# ```
# mass = 47.5
# age = 122
# mass = mass * 2.0
# age = age - 20
# print(mass, age)
# ```
# Test your answers by executing the commands.
# Solution:
# ### Sorting Out References
#
# What does the following program print out?
# ```
# first, second = 'Grace', 'Hopper'
# third, fourth = second, first
# print(third, fourth)
# ```
# Solution:
# ### Slicing Strings
# A section of an array is called a slice. We can take slices of character strings as well:
# ```
# element = 'oxygen'
# print('first three characters:', element[0:3])
# print('last three characters:', element[3:6])
# ```
#
# What is the value of `element[:4]` ? What about `element[4:]`? Or `element[:]` ?
#
# What about `element[-1]` and `element[-2]` ?
# Solution:
# Given those answers, explain what `element[1:-1]` does.
# Solution:
# ### Thin Slices
#
# The expression `element[3:3]` produces an empty string, i.e., a string that contains no characters. If data holds our array of patient data, what does `data[3:3, 4:4]` produce? What about `data[3:3, :]` ?
# Solution:
# ## Key Points
# Import a library into a program using `import library_name`.
#
# Use the numpy library to work with arrays in Python.
#
# Use `variable` `=` `value` to assign a value to a variable in order to record it in memory.
#
# Variables are created on demand whenever a value is assigned to them.
#
# Use `print(something)` to display the value of something.
#
# The expression `array.shape` gives the shape of an array.
#
# Use `array[x, y]` to select a single element from a 2D array.
#
# Array indices start at 0, not 1.
#
# Use `low:high` to specify a slice that includes the indices from low to high-1.
#
# All the indexing and slicing that works on arrays also works on strings.
#
# Use `#` and some kind of explanation to add comments to programs.
# # Save, and version control your changes
#
# - save your work: `File -> Save`
# - add all your changes to your local repository: `Terminal -> git add .`
# - commit your updates a new Git version: `Terminal -> git commit -m "End of Episode 1"`
# - push your latest commits to GitHub: `Terminal -> git push`
| lessons/python/.ipynb_checkpoints/ep1a-introduction-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # 16 PDEs: Waves – Students
#
# (See *Computational Physics* Ch 21 and *Computational Modeling* Ch 6.5.)
# ## Background: waves on a string
#
# Assume a 1D string of length $L$ with mass density per unit length $\rho$ along the $x$ direction. It is held under constant tension $T$ (a force). We ignore frictional forces, and the tension is high enough that we can ignore sagging due to gravity.
#
#
# ### 1D wave equation
# The string is displaced in the $y$ direction from its rest position, i.e., the displacement $y(x, t)$ is a function of space $x$ and time $t$.
#
# For small relative displacements $y(x, t)/L \ll 1$ and therefore small slopes $\partial y/\partial x$ we can describe $y(x, t)$ with a *linear* equation of motion:
# Newton's second law applied to short elements of the string with length $\Delta x$ and mass $\Delta m = \rho \Delta x$: the left hand side contains the *restoring force* that opposes the displacement, the right hand side is the acceleration of the string element:
#
# \begin{align}
# \sum F_{y}(x) &= \Delta m\, a(x, t)\\
# T \sin\theta(x+\Delta x) - T \sin\theta(x) &= \rho \Delta x \frac{\partial^2 y(x, t)}{\partial t^2}
# \end{align}
#
# The angle $\theta$ measures by how much the string is bent away from the resting configuration.
# Because we assume small relative displacements, the angles are small ($\theta \ll 1$) and we can make the small angle approximation
#
# $$
# \sin\theta \approx \tan\theta = \frac{\partial y}{\partial x}
# $$
#
# and hence
# \begin{align}
# T \left.\frac{\partial y}{\partial x}\right|_{x+\Delta x} - T \left.\frac{\partial y}{\partial x}\right|_{x} &= \rho \Delta x \frac{\partial^2 y(x, t)}{\partial t^2}\\
# \frac{T \left.\frac{\partial y}{\partial x}\right|_{x+\Delta x} - T \left.\frac{\partial y}{\partial x}\right|_{x}}{\Delta x} &= \rho \frac{\partial^2 y}{\partial t^2}
# \end{align}
# or in the limit $\Delta x \rightarrow 0$ a linear hyperbolic PDE results:
#
# \begin{gather}
# \frac{\partial^2 y(x, t)}{\partial x^2} = \frac{1}{c^2} \frac{\partial^2 y(x, t)}{\partial t^2}, \quad c = \sqrt{\frac{T}{\rho}}
# \end{gather}
#
# where $c$ has the dimension of a velocity. This is the (linear) **wave equation**.
# ### General solution: waves
# General solutions are propagating waves:
#
# If $f(x)$ is a solution at $t=0$ then
#
# $$
# y_{\mp}(x, t) = f(x \mp ct)
# $$
#
# are also solutions at later $t > 0$.
# Because of linearity, any linear combination is also a solution, so the most general solution contains both right and left propagating waves
#
# $$
# y(x, t) = A f(x - ct) + B g(x + ct)
# $$
#
# (If $f$ and/or $g$ are present depends on the initial conditions.)
# In three dimensions the wave equation is
#
# $$
# \boldsymbol{\nabla}^2 y(\mathbf{x}, t) - \frac{1}{c^2} \frac{\partial^2 y(\mathbf{x}, t)}{\partial t^2} = 0\
# $$
# ### Boundary and initial conditions
# * The boundary conditions could be that the ends are fixed
#
# $$y(0, t) = y(L, t) = 0$$
#
# * The *initial condition* is a shape for the string, e.g., a Gaussian at the center
#
# $$
# y(x, t=0) = g(x) = y_0 \frac{1}{\sqrt{2\pi\sigma}} \exp\left[-\frac{(x - x_0)^2}{2\sigma^2}\right]
# $$
#
# at time 0.
# * Because the wave equation is *second order in time* we need a second initial condition, for instance, the string is released from rest:
#
# $$
# \frac{\partial y(x, t=0)}{\partial t} = 0
# $$
#
# (The derivative, i.e., the initial displacement velocity is provided.)
# ### Analytical solution
# Solve (as always) with *separation of variables*.
#
# $$
# y(x, t) = X(x) T(t)
# $$
#
# and this yields the general solution (with boundary conditions of fixed string ends and initial condition of zero velocity) as a superposition of normal modes
#
# $$
# y(x, t) = \sum_{n=0}^{+\infty} B_n \sin k_n x\, \cos\omega_n t,
# \quad \omega_n = ck_n,\ k_n = n \frac{\pi}{L} = n k_0.
# $$
#
# (The angular frequency $\omega$ and the wave vector $k$ are determined from the boundary conditions.)
# The coefficients $B_n$ are obtained from the initial shape:
#
# $$
# y(x, t=0) = \sum_{n=0}^{+\infty} B_n \sin n k_0 x = g(x)
# $$
# In principle one can use the fact that $\int_0^L dx \sin m k_0 x \, \sin n k_0 x = \frac{L}{2} \delta_{mn}$ (orthogonality) to calculate the coefficients:
#
# \begin{align}
# \int_0^L dx \sin m k_0 x \sum_{n=0}^{+\infty} B_n \sin n k_0 x &= \int_0^L dx \sin(m k_0 x) \, g(x)\\
# \frac{L}{2} \sum_{n=0}^{+\infty} B_n \delta_{mn} &= \dots \\
# B_m &= \frac{2}{L} \dots
# \end{align}
#
# (but the analytical solution is ugly and I cannot be bothered to put it down here.)
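# As a small numerical sketch of the coefficient formula above (illustrative values
# only; the actual string parameters are set in the numerical section further below):
# +
import numpy as np
L_string, y0_amp, sig = 0.5, 0.05, 0.05
xs = np.linspace(0, L_string, 2001)
g = y0_amp/np.sqrt(2*np.pi*sig) * np.exp(-(xs - L_string/2)**2/(2*sig**2))
k0 = np.pi / L_string
B = [2/L_string * np.trapz(np.sin(n*k0*xs) * g, xs) for n in range(1, 6)]
print(np.round(B, 4))
# -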
# ## Numerical solution
#
# 1. discretize wave equation
# 2. time stepping: leap frog algorithm (iterate)
# Use the central difference approximation for the second order derivatives:
#
# \begin{align}
# \frac{\partial^2 y}{\partial t^2} &\approx \frac{y(x, t+\Delta t) + y(x, t-\Delta t) - 2y(x, t)}{\Delta t ^2} = \frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\Delta t^2}\\
# \frac{\partial^2 y}{\partial x^2} &\approx \frac{y(x+\Delta x, t) + y(x-\Delta x, t) - 2y(x, t)}{\Delta x ^2} = \frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\Delta x^2}
# \end{align}
#
# and substitute into the wave equation to yield the *discretized* wave equation:
# $$
# \frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\Delta x^2} = \frac{1}{c^2} \frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\Delta t^2}
# $$
# #### Student activity: derive the finite difference version of the 1D wave equation
# Re-arrange so that the future terms $j+1$ can be calculated from the present $j$ and past $j-1$ terms:
#
# $$
# ? = ?
# $$
# This is the time stepping algorithm for the wave equation.
# ## Numerical implementation
#
# +
# if you have plotting problems, try
# # %matplotlib inline
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('ggplot')
# -
# Implement the time stepping algorithm in the code below. Look for sections `# TODO`.
# +
L = 0.5 # m
Nx = 50
Nt = 100
Dx = L/Nx
# TODO: choose Dt
Dt = # s
rho = 1.5e-2 # kg/m
tension = 150 # N
c = np.sqrt(tension/rho)
# TODO: calculate beta
beta =
beta2 =
print("c = {0} m/s".format(c))
print("Dx = {0} m, Dt = {1} s, Dx/Dt = {2} m/s".format(Dx, Dt, Dx/Dt))
print("beta = {}".format(beta))
X = np.linspace(0, L, Nx+1) # need N+1!
def gaussian(x, y0=0.05, x0=L/2, sigma=0.1*L):
return y0/np.sqrt(2*np.pi*sigma) * np.exp(-(x-x0)**2/(2*sigma**2))
# displacements at j-1, j, j+1
y0 = np.zeros_like(X)
y1 = np.zeros_like(y0)
y2 = np.zeros_like(y0)
# save array
y_t = np.zeros((Nt+1, Nx+1))
# boundary conditions
# TODO: set boundary conditions
y2[:] = y0
# initial conditions: velocity 0, i.e. no difference between y0 and y1
y0[1:-1] = y1[1:-1] = gaussian(X)[1:-1]
# save initial
t_index = 0
y_t[t_index, :] = y0
t_index += 1
y_t[t_index, :] = y1
for jt in range(2, Nt):
# TODO: time stepping algorithm
t_index += 1
y_t[t_index, :] = y2
print("Iteration {0:5d}".format(jt), end="\r")
else:
print("Completed {0:5d} iterations: t={1} s".format(jt, jt*Dt))
# -
# ### 1D plot
# Plot the output stored in the saved array `y_t`. Vary the time steps that you look at with `y_t[start:end]`.
#
# We indicate time by color changing.
ax = plt.subplot(111)
ax.set_prop_cycle("color", [plt.cm.viridis_r(i) for i in np.linspace(0, 1, len(y_t))])
ax.plot(X, y_t.T);
# ### 1D Animation
# For 1D animation to work in a Jupyter notebook, use
# %matplotlib notebook
# If no animations are visible, restart kernel and execute the `%matplotlib notebook` cell as the very first one in the notebook.
#
# We use `matplotlib.animation` to look at movies of our solution:
import matplotlib.animation as animation
# The `update_wave()` function simply re-draws our image for every `frame`.
# +
y_limits = 1.05*y_t.min(), 1.05*y_t.max()
fig1 = plt.figure(figsize=(5,5))
ax = fig1.add_subplot(111)
ax.set_aspect(1)
def update_wave(frame, data):
global ax, Dt, y_limits
ax.clear()
ax.set_xlabel("x (m)")
ax.set_ylabel("y (m)")
ax.plot(X, data[frame])
ax.set_ylim(y_limits)
ax.text(0.1, 0.9, "t = {0:3.1f} ms".format(frame*Dt*1e3), transform=ax.transAxes)
wave_anim = animation.FuncAnimation(fig1, update_wave, frames=len(y_t), fargs=(y_t,),
interval=30, blit=True, repeat_delay=100)
# -
# ### 3D plot
# (Uses functions from previous lessons.)
# +
def plot_y(y_t, Dx, Dt, step=1):
X, Y = np.meshgrid(range(y_t.shape[0]), range(y_t.shape[1]))
Z = y_t.T[Y, X]
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot_wireframe(Y*Dx, X*Dt*step, Z)
ax.set_ylabel(r"time $t$ (s)")
ax.set_xlabel(r"position $x$ (m)")
ax.set_zlabel(r"displacement $y$ (m)")
fig.tight_layout()
return ax
def plot_surf(y_t, Dt, Dx, step=1, filename=None, offset=-1, zlabel=r'displacement',
elevation=40, azimuth=20, cmap=plt.cm.coolwarm):
"""Plot y_t as a 3D plot with contour plot underneath.
Arguments
---------
y_t : 2D array
displacement y(t, x)
filename : string or None, optional (default: None)
If `None` then show the figure and return the axes object.
If a string is given (like "contour.png") it will only plot
to the filename and close the figure but return the filename.
offset : float, optional (default: 20)
position the 2D contour plot by offset along the Z direction
under the minimum Z value
zlabel : string, optional
label for the Z axis and color scale bar
elevation : float, optional
choose elevation for initial viewpoint
azimuth : float, optional
        choose azimuth angle for initial viewpoint
"""
t = np.arange(y_t.shape[0])
x = np.arange(y_t.shape[1])
T, X = np.meshgrid(t, x)
Y = y_t.T[X, T]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X*Dx, T*Dt*step, Y, cmap=cmap, rstride=2, cstride=2, alpha=1)
cset = ax.contourf(X*Dx, T*Dt*step, Y, 20, zdir='z', offset=offset+Y.min(), cmap=cmap)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel(zlabel)
ax.set_zlim(offset + Y.min(), Y.max())
ax.view_init(elev=elevation, azim=azimuth)
cb = fig.colorbar(surf, shrink=0.5, aspect=5)
cb.set_label(zlabel)
if filename:
fig.savefig(filename)
plt.close(fig)
return filename
else:
return ax
# -
step = 1  # plot every saved time step (sub-sampling factor expected by the plotting helpers)
plot_y(y_t, Dx, Dt, step)
plot_surf(y_t, Dt, Dx, step, offset=0, cmap=plt.cm.coolwarm)
# ## von Neumann stability analysis: Courant condition
# Assume that the solutions of the discretized equation can be written as normal modes
#
# $$
# y_{m,j} = \xi(k)^j e^{ikm\Delta x}, \quad t=j\Delta t,\ x=m\Delta x
# $$
#
# The time stepping algorithm is stable if
#
# $$
# |\xi(k)| < 1
# $$
# Insert normal modes into the discretized equation
#
#
# $$
# y_{i,j+1} = 2(1 - \beta^2)y_{i,j} - y_{i, j-1} + \beta^2 (y_{i+1,j} + y_{i-1,j}), \quad
# \beta := \frac{c}{\Delta x/\Delta t}
# $$
#
# and simplify (use $1-\cos x = 2\sin^2\frac{x}{2}$):
#
# $$
# \xi^2 - 2(1-2\beta^2 s^2)\xi + 1 = 0, \quad s=\sin(k\Delta x/2)
# $$
#
# The characteristic equation has roots
#
# $$
# \xi_{\pm} = 1 - 2\beta^2 s^2 \pm \sqrt{(1-2\beta^2 s^2)^2 - 1}.
# $$
#
# It has one root for
#
# $$
# \left|1-2\beta^2 s^2\right| = 1,
# $$
#
# i.e., for
#
# $$
# \beta s = 1
# $$
#
# We have two real roots for
#
# $$
# \left|1-2\beta^2 s^2\right| > 1 \\
# \beta s > 1
# $$
#
# but one of the roots is always $|\xi| > 1$ and hence these solutions will diverge and not be stable.
#
# For
#
# $$
# \left|1-2\beta^2 s^2\right| ≤ 1 \\
# \beta s ≤ 1
# $$
#
# the roots will be *complex conjugates of each other*
#
# $$
# \xi_\pm = 1 - 2\beta^2s^2 \pm i\sqrt{1-(1-2\beta^2s^2)^2}
# $$
#
# and the *magnitude*
#
# $$
# |\xi_{\pm}|^2 = (1 - 2\beta^2s^2)^2 + (1-(1-2\beta^2s^2)^2) = 1
# $$
#
# is unity: Thus the solutions will not grow and will be *stable* for
#
# $$
# \beta s ≤ 1\\
# \frac{c}{\frac{\Delta x}{\Delta t}} \sin\frac{k \Delta x}{2} ≤ 1
# $$
#
# Assuming the "worst case" for the $\sin$ factor (namely, 1), the **condition for stability** is
#
# $$
# c ≤ \frac{\Delta x}{\Delta t}
# $$
#
# or
#
# $$
# \beta ≤ 1.
# $$
#
# This is also known as the **Courant condition**. When written as
#
# $$
# \Delta t ≤ \frac{\Delta x}{c}
# $$
#
# it means that the time step $\Delta t$ (for a given $\Delta x$) must be *smaller than the time that the wave takes to travel one grid step*.
#
#
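# A quick numerical check of the Courant condition for the grid used above (this
# assumes the `# TODO` entries in the setup cell have been filled in so that `c`,
# `Dx`, and `Dt` are all defined):
print("c = {0:.1f} m/s, Dx/Dt = {1:.1f} m/s -> {2}".format(
    c, Dx/Dt, "stable" if c <= Dx/Dt else "unstable"))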
| 16_PDEs_waves/16_PDEs_waves-students.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demystifying Neural Networks
#
# ---
# # Autograd DAG
#
# How can `autograd` actually perform so many chain rules?
# It builds a *Directed Acyclic Graph* (DAG).
#
# We will analyse one directed graph on top of a tiny ANN.
# Such a DAG can become big very quickly,
# therefore we will use only two layers.
import autograd.numpy as np
from autograd import grad
# Since we will not actually train the ANN below,
# we will define a very simple activation function: $y = 2x$.
# The hyperbolic tangent is quite complex and would make the DAG
# very long.
x = np.array([[0.3],
[0.1],
[0.5]])
y = np.array([[1.],
[0.]])
w1 = np.array([[0.3, 0.1, 0.2],
[0.2, -0.1, -0.1],
[0.7, 0.5, -0.3],
[0.5, 0.5, -0.5]])
w1b = np.array([[0.3],
[0.2],
[0.2],
[0.3]])
w2 = np.array([[0.2, 0.3, 0.1, 0.1],
[0.7, -0.2, -0.1, 0.3]])
w2b = np.array([[ 0.3],
[-0.2]])
def act(x):
return 2*x
# We define an ANN function as usual and execute it against $\vec{x}$ and $\vec{y}$.
# Our only interest is in the gradients, not in the actual output of the ANN.
# +
def netMSE(arg):
x, w1, w1b, w2, w2b, y = arg
y_hat = act(w2 @ act(w1 @ x + w1b) + w2b)
return np.mean((y - y_hat)**2)
netMSE_grad = grad(netMSE)
grads = netMSE_grad([x, w1, w1b, w2, w2b, y])
for g in grads:
print(g)
# -
# These are the final gradients against every single weight.
#
# Below we have the complete graph that has been constructed
# in order to compute these gradients.
# The graph was constructed while the function executed.
# Then, after the function finished executing, the graph was
# walked backwards to calculate the gradients.
#
# The ID's at the nodes of the graph are increasing when walking the graph
# top to bottom and decreasing when walking bottom to top.
# `autograd` computes gradients in order from the biggest node ID
# to the lowest node ID; this way one can be sure that all gradients needed
# to compute the gradient at the current graph node are already computed.
#
# The computation at each node is performed using the *Jacobian Vector Product*
# (JVP) rule for the operation that was originally performed on the node.
# Each operation that can be differentiated by `autograd` has a JVP rule.
# For example, there are JVP rules for sum, subtraction, or even mean operations.
# 
#
# <div style="text-align:right;font-size:0.7em;">graph-ann.svg</div>
# In summary: `autograd` builds a DAG and then walks it backwards
# performing the chain rule.
# It is this *backwards* that is meant in the backpropagation technique
# of ANN training.
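# As a sanity check, we can compare one of the gradients above against a simple
# finite-difference estimate (a minimal sketch reusing `netMSE` and `grads` from
# above; the choice of perturbing `w2b[0, 0]` is arbitrary).
# +
eps = 1e-6
args = [x, w1, w1b, w2, w2b, y]
args_perturbed = [a.copy() for a in args]
args_perturbed[4][0, 0] += eps  # nudge a single bias weight of the second layer
numeric_grad = (netMSE(args_perturbed) - netMSE(args)) / eps
print(numeric_grad, grads[4][0, 0])  # the two numbers should agree closely
# -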
| 12-autograd-dag.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Add the facets overview python code to the python path
import sys
sys.path.append('./python')
# Load UCI census train and test data into dataframes.
import pandas as pd
features = ["Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"]
train_data = pd.read_csv(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
names=features,
sep=r'\s*,\s*',
engine='python',
na_values="?")
test_data = pd.read_csv(
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
names=features,
sep=r'\s*,\s*',
skiprows=[0],
engine='python',
na_values="?")
# +
# Calculate the feature statistics proto from the datasets and stringify it for use in facets overview
from generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
import base64
gfsg = GenericFeatureStatisticsGenerator()
proto = gfsg.ProtoFromDataFrames([{'name': 'train', 'table': train_data},
{'name': 'test', 'table': test_data}])
protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8")
# +
# Display the facets overview visualization for this data
from IPython.core.display import display, HTML
HTML_TEMPLATE = """<link rel="import" href="/nbextensions/facets-dist/facets-jupyter.html" >
<facets-overview id="elem"></facets-overview>
<script>
document.querySelector("#elem").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(protostr=protostr)
display(HTML(html))
| facets_overview/Overview_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Trust Scores applied to Iris
# + [markdown] pycharm={"name": "#%% md\n"}
# It is important to know when a machine learning classifier's predictions can be trusted. Relying on the classifier's (uncalibrated) prediction probabilities is not optimal and can be improved upon. *Trust scores* measure the agreement between the classifier and a modified nearest neighbor classifier on the test set. The trust score is the ratio between the distance of the test instance to the nearest class different from the predicted class and the distance to the predicted class. Higher scores correspond to more trustworthy predictions. A score of 1 would mean that the distance to the predicted class is the same as to another class.
#
# The original paper on which the algorithm is based is called [To Trust Or Not To Trust A Classifier](https://arxiv.org/abs/1805.11783). Our implementation borrows heavily from https://github.com/google/TrustScore, as does the example notebook.
# + pycharm={"name": "#%%\n"}
import matplotlib
# %matplotlib inline
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedShuffleSplit
from alibi.confidence import TrustScore
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Load and prepare Iris dataset
# + pycharm={"name": "#%%\n"}
dataset = load_iris()
# + [markdown] pycharm={"name": "#%% md\n"}
# Scale data
# + pycharm={"name": "#%%\n"}
dataset.data = (dataset.data - dataset.data.mean(axis=0)) / dataset.data.std(axis=0)
# + [markdown] pycharm={"name": "#%% md\n"}
# Define training and test set
# + pycharm={"name": "#%%\n"}
idx = 140
X_train,y_train = dataset.data[:idx,:], dataset.target[:idx]
X_test, y_test = dataset.data[idx+1:,:], dataset.target[idx+1:]
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Fit model and make predictions
# + pycharm={"name": "#%%\n"}
np.random.seed(0)
clf = LogisticRegression(solver='liblinear', multi_class='auto')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(f'Predicted class: {y_pred}')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Basic Trust Score Usage
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Initialise Trust Scores and fit on training data
# + [markdown] pycharm={"name": "#%% md\n"}
# The trust score algorithm builds [k-d trees](https://en.wikipedia.org/wiki/K-d_tree) for each class. The distance of the test instance to the $k$th nearest neighbor of each tree (or the average distance to the $k$th neighbor) can then be used to calculate the trust score. We can optionally filter out outliers in the training data before building the trees. The example below uses the *distance_knn* (`filter_type`) method to filter out the 5% (`alpha`) instances of each class with the highest distance to its 10th nearest neighbor (`k_filter`) in that class.
# + pycharm={"name": "#%%\n"}
ts = TrustScore(k_filter=10, # nb of neighbors used for kNN distance or probability to filter out outliers
alpha=.05, # target fraction of instances to filter out
filter_type='distance_knn', # filter method: None, 'distance_knn' or 'probability_knn'
leaf_size=40, # affects speed and memory to build KDTrees, memory scales with n_samples / leaf_size
metric='euclidean', # distance metric used for the KDTrees
dist_filter_type='point') # 'point' uses distance to k-nearest point
# 'mean' uses average distance from the 1st to the kth nearest point
# + pycharm={"name": "#%%\n"}
ts.fit(X_train, y_train, classes=3) # classes = nb of prediction classes
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Calculate Trust Scores on test data
# + [markdown] pycharm={"name": "#%% md\n"}
# Since the trust score is the ratio between the distance of the test instance to the nearest class different from the predicted class and the distance to the predicted class, higher scores correspond to more trustworthy predictions. A score of 1 would mean that the distance to the predicted class is the same as to another class. The `score` method returns arrays with both the trust scores and the class labels of the closest not predicted class.
# + pycharm={"name": "#%%\n"}
score, closest_class = ts.score(X_test,
y_pred, k=2, # kth nearest neighbor used
# to compute distances for each class
dist_type='point') # 'point' or 'mean' distance option
print(f'Trust scores: {score}')
print(f'\nClosest not predicted class: {closest_class}')
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Comparison of Trust Scores with model prediction probabilities
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's compare the prediction probabilities from the classifier with the trust scores for each prediction. The first use case checks whether trust scores are better than the model's prediction probabilities at identifying correctly classified examples, while the second use case does the same for incorrectly classified instances.
#
# First we need to set up a couple of helper functions.
# + [markdown] pycharm={"name": "#%% md\n"}
# * Define a function that handles model training and predictions for a simple logistic regression:
# + pycharm={"name": "#%%\n"}
def run_lr(X_train, y_train, X_test):
clf = LogisticRegression(solver='liblinear', multi_class='auto')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
y_pred_proba = clf.predict_proba(X_test)
probas = y_pred_proba[range(len(y_pred)), y_pred] # probabilities of predicted class
return y_pred, probas
# + [markdown] pycharm={"name": "#%% md\n"}
# * Define the function that generates the precision plots:
# + pycharm={"name": "#%%\n"}
def plot_precision_curve(plot_title,
percentiles,
labels,
final_tp,
final_stderr,
final_misclassification,
colors = ['blue', 'darkorange', 'brown', 'red', 'purple']):
plt.title(plot_title, fontsize=18)
colors = colors + list(cm.rainbow(np.linspace(0, 1, len(final_tp))))
plt.xlabel("Percentile", fontsize=14)
plt.ylabel("Precision", fontsize=14)
for i, label in enumerate(labels):
ls = "--" if ("Model" in label) else "-"
plt.plot(percentiles, final_tp[i], ls, c=colors[i], label=label)
plt.fill_between(percentiles,
final_tp[i] - final_stderr[i],
final_tp[i] + final_stderr[i],
color=colors[i],
alpha=.1)
if 0. in percentiles:
plt.legend(loc="lower right", fontsize=14)
else:
plt.legend(loc="upper left", fontsize=14)
model_acc = 100 * (1 - final_misclassification)
plt.axvline(x=model_acc, linestyle="dotted", color="black")
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# * The function below trains the model on a number of folds, makes predictions, calculates the trust scores, and generates the precision curves to compare the trust scores with the model prediction probabilities:
# + pycharm={"name": "#%%\n"}
def run_precision_plt(X, y, nfolds, percentiles, run_model, test_size=.5,
plt_title="", plt_names=[], predict_correct=True, classes=3):
def stderr(L):
return np.std(L) / np.sqrt(len(L))
all_tp = [[[] for p in percentiles] for _ in plt_names]
misclassifications = []
mult = 1 if predict_correct else -1
folds = StratifiedShuffleSplit(n_splits=nfolds, test_size=test_size, random_state=0)
for train_idx, test_idx in folds.split(X, y):
# create train and test folds, train model and make predictions
X_train, y_train = X[train_idx, :], y[train_idx]
X_test, y_test = X[test_idx, :], y[test_idx]
y_pred, probas = run_model(X_train, y_train, X_test)
# target points are the correctly classified points
target_points = np.where(y_pred == y_test)[0] if predict_correct else np.where(y_pred != y_test)[0]
final_curves = [probas]
# calculate trust scores
ts = TrustScore()
ts.fit(X_train, y_train, classes=classes)
scores, _ = ts.score(X_test, y_pred)
final_curves.append(scores) # contains prediction probabilities and trust scores
# check where prediction probabilities and trust scores are above a certain percentage level
for p, perc in enumerate(percentiles):
high_proba = [np.where(mult * curve >= np.percentile(mult * curve, perc))[0] for curve in final_curves]
if 0 in map(len, high_proba):
continue
# calculate fraction of values above percentage level that are correctly (or incorrectly) classified
tp = [len(np.intersect1d(hp, target_points)) / (1. * len(hp)) for hp in high_proba]
for i in range(len(plt_names)):
all_tp[i][p].append(tp[i]) # for each percentile, store fraction of values above cutoff value
misclassifications.append(len(target_points) / (1. * len(X_test)))
# average over folds for each percentile
final_tp = [[] for _ in plt_names]
final_stderr = [[] for _ in plt_names]
for p, perc in enumerate(percentiles):
for i in range(len(plt_names)):
final_tp[i].append(np.mean(all_tp[i][p]))
final_stderr[i].append(stderr(all_tp[i][p]))
for i in range(len(all_tp)):
final_tp[i] = np.array(final_tp[i])
final_stderr[i] = np.array(final_stderr[i])
final_misclassification = np.mean(misclassifications)
# create plot
plot_precision_curve(plt_title, percentiles, plt_names, final_tp, final_stderr, final_misclassification)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Detect correctly classified examples
# + [markdown] pycharm={"name": "#%% md\n"}
# The x-axis on the plot below shows the percentiles for the model prediction probabilities of the predicted class for each instance and for the trust scores. The y-axis represents the precision for each percentile. For each percentile level, we take the test examples whose trust score is above that percentile level and plot the percentage of those points that were correctly classified by the classifier. We do the same with the classifier's own model confidence (i.e. softmax probabilities). For example, at percentile level 80, we take the top 20% scoring test examples based on the trust score and plot the percentage of those points that were correctly classified. We also plot the top 20% scoring test examples based on model probabilities and plot the percentage of those that were correctly classified. The vertical dotted line is the error of the logistic regression classifier. The plots are an average over 10 folds of the dataset with 50% of the data kept for the test set.
#
# The *Trust Score* and *Model Confidence* curves then show that the model precision is typically higher when using the trust scores to rank the predictions compared to the model prediction probabilities.
# + pycharm={"name": "#%%\n"}
X = dataset.data
y = dataset.target
percentiles = [0 + 0.5 * i for i in range(200)]
nfolds = 10
plt_names = ['Model Confidence', 'Trust Score']
plt_title = 'Iris -- Logistic Regression -- Predict Correct'
# + pycharm={"name": "#%%\n"}
run_precision_plt(X, y, nfolds, percentiles, run_lr, plt_title=plt_title,
plt_names=plt_names, predict_correct=True)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Detect incorrectly classified examples
# + [markdown] pycharm={"name": "#%% md\n"}
# By taking the *negative of the prediction probabilities and trust scores*, we can also see on the plot below how the trust scores compare to the model predictions for incorrectly classified instances. The vertical dotted line is the accuracy of the logistic regression classifier. The plot shows the precision of identifying incorrectly classified instances. Higher is obviously better.
# + pycharm={"name": "#%%\n"}
percentiles = [50 + 0.5 * i for i in range(100)]
plt_title = 'Iris -- Logistic Regression -- Predict Incorrect'
run_precision_plt(X, y, nfolds, percentiles, run_lr, plt_title=plt_title,
plt_names=plt_names, predict_correct=False)
| doc/source/examples/trustscore_iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "skip"}
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import cm
import pandas as pd
import numpy as np
from numpy import atleast_2d
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_mutual_info_score
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from mpl_toolkits.mplot3d import Axes3D
# -
cmap = cm.get_cmap('viridis')
pd.options.display.float_format = '{:,.2f}'.format
# + [markdown] slideshow={"slide_type": "slide"}
# ### Load Iris Data
# + slideshow={"slide_type": "fragment"}
iris = load_iris()
iris.keys()
# + [markdown] slideshow={"slide_type": "slide"}
# ### Create DataFrame
# + slideshow={"slide_type": "fragment"}
features = iris.feature_names
data = pd.DataFrame(data=np.column_stack([iris.data, iris.target]),
columns=features + ['label'])
data.label = data.label.astype(int)
data.info()
# + [markdown] slideshow={"slide_type": "skip"}
# ### Standardize Data
# + slideshow={"slide_type": "skip"}
scaler = StandardScaler()
features_standardized = scaler.fit_transform(data[features])
n = len(data)
# + [markdown] slideshow={"slide_type": "skip"}
# ### Reduce Dimensionality to visualize clusters
# + slideshow={"slide_type": "skip"}
pca = PCA(n_components=2)
features_2D = pca.fit_transform(features_standardized)
# + hide_input=false slideshow={"slide_type": "slide"}
ev1, ev2 = pca.explained_variance_ratio_
ax = plt.figure().gca(title='2D Projection',
xlabel='Explained Variance: {:.2%}'.format(ev1),
ylabel='Explained Variance: {:.2%}'.format(ev2))
ax.scatter(*features_2D.T, c=data.label, s=10);
# + [markdown] slideshow={"slide_type": "slide"}
# ### Perform GMM clustering
# + slideshow={"slide_type": "fragment"}
n_components = 3
gmm = GaussianMixture(n_components=n_components)
gmm.fit(features_standardized)
data['clusters'] = gmm.predict(features_standardized)
fig, axes = plt.subplots(ncols=2)
labels, clusters = data.label, data.clusters
mi = adjusted_mutual_info_score(labels, clusters)
axes[0].scatter(*features_2D.T, c=data.label, s=10)
axes[0].set_title('Original Data')
axes[1].scatter(*features_2D.T, c=data.clusters, s=10)
axes[1].set_title('Clusters | MI={:.2f}'.format(mi))
# + [markdown] slideshow={"slide_type": "slide"}
# ### Visualized Gaussian Distributions
# + slideshow={"slide_type": "fragment"}
xmin, ymin = features_2D.min(axis=0)
xmax, ymax = features_2D.max(axis=0)
x = np.linspace(xmin, xmax, 500)
y = np.linspace(ymin, ymax, 500)
X, Y = np.meshgrid(x, y)
simulated_2D = np.column_stack([np.ravel(X), np.ravel(Y)])
simulated_4D = pca.inverse_transform(simulated_2D)
Z = atleast_2d(np.clip(np.exp(gmm.score_samples(simulated_4D)), a_min=0, a_max=1)).reshape(X.shape)
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
ax.set_aspect('equal')
CS = ax.contour(X, Y, Z, cmap='Greens', alpha=.5)
CB = plt.colorbar(CS, shrink=0.8)
ax.scatter(*features_2D.T, c=data.label, s=15)
fig.tight_layout();
# + slideshow={"slide_type": "slide"}
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
CS = ax.contourf3D(X, Y, Z, cmap='RdBu_r', alpha=.5)
CB = plt.colorbar(CS, shrink=0.8)
ax.scatter(*features_2D.T, c=data.label, s=15)
fig.tight_layout()
path = '/Users/Stefan/Dropbox (Personal)/Data Science/AI & ML/Algorithmic Trading/12 Unsupervised Learning/figures/gaussian3D.png'
fig.savefig(path, dpi=300)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Bayesian Information Criterion
# + slideshow={"slide_type": "fragment"}
bic = {}
for n_components in range(2, 8):
gmm = GaussianMixture(n_components=n_components)
gmm.fit(features_standardized)
bic[n_components] = gmm.bic(features_standardized)
pd.Series(bic)
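# Lower BIC indicates a better trade-off between fit and complexity, so the preferred
# number of components is the one that minimises it (a small sketch using the `bic`
# dictionary built above):
print('Best n_components by BIC:', pd.Series(bic).idxmin())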
| Chapter12/03_clustering_algorithms/06_gaussian_mixture_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NLP datasets
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.text import *
from fastai.gen_doc.nbdoc import *
# -
# This module contains the [`TextDataset`](/text.data.html#TextDataset) class, which is the main dataset you should use for your NLP tasks. It automatically does the preprocessing steps described in [`text.transform`](/text.transform.html#text.transform). It also contains all the functions to quickly get a [`TextDataBunch`](/text.data.html#TextDataBunch) ready.
# ## Quickly assemble your data
# You should get your data in one of the following formats to make the most of the fastai library and use one of the factory methods of one of the [`TextDataBunch`](/text.data.html#TextDataBunch) classes:
# - raw text files in folders train, valid, test in an ImageNet style,
# - a csv where some column(s) gives the label(s) and the following one(s) the associated text,
# - a dataframe structured the same way,
# - tokens and labels arrays,
# - ids, vocabulary (correspondance id to word) and labels.
#
# If you are assembling the data for a language model, you should define your labels as always 0 to respect those formats. The first time you create a [`DataBunch`](/basic_data.html#DataBunch) with one of those functions, your data will be preprocessed automatically. You can save it, so that the next time you call it is almost instantaneous.
#
# Below are the classes that help assembling the raw data in a [`DataBunch`](/basic_data.html#DataBunch) suitable for NLP.
# + hide_input=true
show_doc(TextLMDataBunch, title_level=3)
# -
# All the texts in the [`datasets`](/datasets.html#datasets) are concatenated and the labels are ignored. Instead, the target is the next word in the sentence.
# + hide_input=true
show_doc(TextLMDataBunch.create)
# + hide_input=true
show_doc(TextClasDataBunch, title_level=3)
# + hide_input=true
show_doc(TextClasDataBunch.create)
# -
# All the texts are grouped by length (with a bit of randomness for the training set) and then padded so that all the samples in a batch have the same length.
# + hide_input=true
show_doc(TextDataBunch, title_level=3)
# + hide_input=true
jekyll_warn("This class can only work directly if all the texts have the same length.")
# -
# ### Factory methods (TextDataBunch)
# All those classes have the following factory methods.
# + hide_input=true
show_doc(TextDataBunch.from_folder)
# -
# `path` is scanned for <code>train</code>, `valid` and maybe `test` folders. Text files in the <code>train</code> and `valid` folders should be placed in subdirectories according to their classes (not applicable for a language model). `tokenizer` will be used to parse those texts into tokens.
#
# You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned, for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and the class initialization; there you can specify parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
# + hide_input=true
show_doc(TextDataBunch.from_csv)
# -
# This method will look for `csv_name` in `path`, and maybe a `test` csv file opened with `header`. You can specify `text_cols` and `label_cols`. If there are several `text_cols`, the texts will be concatenated together with an optional field token. If there are several `label_cols`, the labels will be assumed to be one-hot encoded and `classes` will default to `label_cols` (you can ignore that argument for a language model). `tokenizer` will be used to parse those texts into tokens.
#
# You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned, for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and the class initialization; there you can specify parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
# + hide_input=true
show_doc(TextDataBunch.from_df)
# -
# This method will use `train_df`, `valid_df` and maybe `test_df` to build the [`TextDataBunch`](/text.data.html#TextDataBunch) in `path`. You can specify `text_cols` and `label_cols`. If there are several `text_cols`, the texts will be concatenated together with an optional field token. If there are several `label_cols`, the labels will be assumed to be one-hot encoded and `classes` will default to `label_cols` (you can ignore that argument for a language model). `tokenizer` will be used to parse those texts into tokens.
#
# You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned, for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and the class initialization; there you can specify parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
# + hide_input=true
show_doc(TextDataBunch.from_tokens)
# -
# This function will create a [`DataBunch`](/basic_data.html#DataBunch) from `trn_tok`, `trn_lbls`, `val_tok`, `val_lbls` and maybe `tst_tok`.
#
# You can pass a specific `vocab` for the numericalization step (if you are building a classifier from a language model you fine-tuned, for instance). kwargs will be split between the [`TextDataset`](/text.data.html#TextDataset) function and the class initialization; there you can specify parameters such as `max_vocab`, `chunksize`, `min_freq`, `n_labels`, `tok_suff` and `lbl_suff` (see the [`TextDataset`](/text.data.html#TextDataset) documentation) or `bs`, `bptt` and `pad_idx` (see the sections LM data and classifier data).
# + hide_input=true
show_doc(TextDataBunch.from_ids)
# -
# Texts are already preprocessed into `train_ids`, `train_lbls`, `valid_ids`, `valid_lbls` and maybe `test_ids`. You can specify the corresponding `classes` if applicable. You must specify a `path` and the `vocab` so that the [`RNNLearner`](/text.learner.html#RNNLearner) class can later infer the corresponding sizes in the model it will create. kwargs will be passed to the class initialization.
# ### Load and save
# To avoid losing time preprocessing the text data more than once, you should save/load your [`TextDataBunch`](/text.data.html#TextDataBunch) using these methods.
# + hide_input=true
show_doc(TextDataBunch.load)
# + hide_input=true
show_doc(TextDataBunch.save)
# -
# ### Example
# Untar the IMDB sample dataset if not already done:
path = untar_data(URLs.IMDB_SAMPLE)
path
# Since it comes in the form of csv files, we will use the corresponding `text_data` method. Here is an overview of what your file should look like:
pd.read_csv(path/'texts.csv').head()
# And here is a simple way of creating your [`DataBunch`](/basic_data.html#DataBunch) for language modelling or classification.
data_lm = TextLMDataBunch.from_csv(Path(path), 'texts.csv')
data_clas = TextClasDataBunch.from_csv(Path(path), 'texts.csv')
# ## The TextList input classes
# Behind the scenes, the previous functions will create a training, validation and maybe test [`TextList`](/text.data.html#TextList) that will be tokenized and numericalized (if needed) using [`PreProcessor`](/data_block.html#PreProcessor).
# + hide_input=true
show_doc(Text, title_level=3)
# + hide_input=true
show_doc(TextList, title_level=3)
# -
# `vocab` contains the correspondance between ids and tokens, `pad_idx` is the id used for padding. You can pass a custom `processor` in the `kwargs` to change the defaults for tokenization or numericalization. It should have the following form:
processor = [TokenizeProcessor(tokenizer=SpacyTokenizer('en')), NumericalizeProcessor(max_vocab=30000)]
# See below for all the arguments those tokenizers can take.
# + hide_input=true
show_doc(TextList.label_for_lm)
# + hide_input=true
show_doc(TextList.from_folder)
# + hide_input=true
show_doc(TextList.show_xys)
# + hide_input=true
show_doc(TextList.show_xyzs)
# + hide_input=true
show_doc(OpenFileProcessor, title_level=3)
# + hide_input=true
show_doc(open_text)
# + hide_input=true
show_doc(TokenizeProcessor, title_level=3)
# -
# `tokenizer` is used on chunks of size `chunksize`. If `mark_fields=True`, field tokens are added between the different parts of the texts (given when the texts are read in several columns of a dataframe). See more about tokenizers in the [transform documentation](/text.transform.html).
# + hide_input=true
show_doc(NumericalizeProcessor, title_level=3)
# -
# Uses `vocab` for this (if not None), otherwise create one with `max_vocab` and `min_freq` from tokens.
# ## Language Model data
# A language model is trained to guess what the next word is inside a flow of words. We don't feed it the different texts separately but concatenate them all together in a big array. To create the batches, we split this array into `bs` chunks of continuous texts. Note that in all NLP tasks, we don't use the usual convention of sequence length being the first dimension: batch size is the first dimension and sequence length is the second. Here you can read the chunks of texts in lines.
path = untar_data(URLs.IMDB_SAMPLE)
data = TextLMDataBunch.from_csv(path, 'texts.csv')
x,y = next(iter(data.train_dl))
example = x[:15,:15].cpu()
texts = pd.DataFrame([data.train_ds.vocab.textify(l).split(' ') for l in example])
texts
# + hide_input=true
jekyll_warn("If you are used to another convention, beware! fastai always uses batch as a first dimension, even in NLP.")
# -
# This is all done internally when we use [`TextLMDataBunch`](/text.data.html#TextLMDataBunch), by wrapping the dataset in the following pre-loader before calling a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader).
# + hide_input=true
show_doc(LanguageModelPreLoader)
# -
# Takes the texts from `dataset` that have certain `lengths` (if this argument isn't passed, `lengths` are computed at initialization). It will prepare the data for batches with a batch size of `bs` and a sequence length `bptt`. If `backwards=True`, reverses the original text. If `shuffle=True`, we shuffle the texts before going through them, at the start of each epoch. If `batch_first=True`, the last batch of texts (with a sequence length < `bptt`) is discarded.
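# To make the reorganisation described above concrete, here is a minimal, framework-free sketch of the idea (not the actual [`LanguageModelPreLoader`](/text.data.html#LanguageModelPreLoader) code): concatenate the ids, split the stream into `bs` rows, then read `bptt` columns at a time.
# +
import numpy as np
stream = np.arange(100)                  # stand-in for the concatenated token ids
bs, bptt = 4, 5
n = (len(stream) // bs) * bs             # drop the remainder so the reshape works
batched = stream[:n].reshape(bs, -1)     # batch is the first dimension, as noted above
x = batched[:, 0:bptt]                   # first mini-batch of inputs
y = batched[:, 1:bptt + 1]               # targets are the inputs shifted by one token
# -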
# ## Classifier data
# When preparing the data for a classifier, we keep the different texts separate, which poses another challenge for the creation of batches: since they don't all have the same length, we can't easily collate them together in batches. To help with this we use two different techniques:
# - padding: each text is padded with the `PAD` token to get all the ones we picked to the same size
# - sorting the texts (ish): to avoid having together a very long text with a very short one (which would then have a lot of `PAD` tokens), we regroup the texts by order of length. For the training set, we still add some randomness to avoid showing the same batches at every step of the training.
#
# Here is an example of batch with padding (the padding index is 1, and the padding is applied before the sentences start).
path = untar_data(URLs.IMDB_SAMPLE)
data = TextClasDataBunch.from_csv(path, 'texts.csv')
iter_dl = iter(data.train_dl)
_ = next(iter_dl)
x,y = next(iter_dl)
x[-10:,:20]
# This is all done internally when we use [`TextClasDataBunch`](/text.data.html#TextClasDataBunch), by using the following classes:
# + hide_input=true
show_doc(SortSampler)
# -
# This pytorch [`Sampler`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Sampler) is used for the validation and (if applicable) the test set.
# + hide_input=true
show_doc(SortishSampler)
# -
# This pytorch [`Sampler`](https://pytorch.org/docs/stable/data.html#torch.utils.data.Sampler) is generally used for the training set.
# + hide_input=true
show_doc(pad_collate)
# -
# This will collate the `samples` in batches while adding padding with `pad_idx`. If `pad_first=True`, padding is applied at the beginning (before the sentence starts) otherwise it's applied at the end.
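# A rough sketch of that padding behaviour (not the fastai implementation), assuming two toy samples and `pad_idx=1`:
# +
import torch
samples = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
pad_idx = 1
max_len = max(len(s) for s in samples)
res = torch.full((len(samples), max_len), pad_idx, dtype=torch.long)
for i, s in enumerate(samples):
    res[i, -len(s):] = s                 # pad_first=True: padding goes before the sentence
res
# -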
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
show_doc(TextList.new)
show_doc(TextList.get)
show_doc(TokenizeProcessor.process_one)
show_doc(TokenizeProcessor.process)
show_doc(OpenFileProcessor.process_one)
show_doc(NumericalizeProcessor.process)
show_doc(NumericalizeProcessor.process_one)
show_doc(TextList.reconstruct)
show_doc(LanguageModelPreLoader.on_epoch_begin)
show_doc(LanguageModelPreLoader.on_epoch_end)
# ## New Methods - Please document or move to the undocumented section
| docs_src/text.data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import twint
c = twint.Config()
c.Search = ['<NAME>'] # topic
c.Limit = 500 # number of Tweets to scrape
c.Store_csv = True # store tweets in a csv file
c.Output = "taylor_swift_tweets.csv" # path to csv file
twint.run.Search(c)
# -
# %pip install nest_asyncio
import nest_asyncio
nest_asyncio.apply()
# +
import twint
c = twint.Config()
c.Search = ['<NAME>'] # topic
c.Limit = 500 # number of Tweets to scrape
c.Store_csv = True # store tweets in a csv file
c.Output = "taylor_swift_tweets.csv" # path to csv file
twint.run.Search(c)
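# -
# Once the scrape finishes, the CSV written above can be inspected with pandas (a quick check; the exact columns depend on the twint version):
# +
import pandas as pd
tweets = pd.read_csv("taylor_swift_tweets.csv")
tweets.head()
# -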
| Twitter_twint/TweetsFromTwitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Remote Atomic Swap - Alice's code
# #### 04.5 Winter School on Smart Contracts
# ##### <NAME> (<EMAIL>)
# 2022-02-04
#
# * Simple Atomic Swap: Alice sends 2 Algos to Bob, and Bob sends 1 Algo to Alice.
# * Usually we would swap ALGOs for an ASA; this is just to simplify the example
# * They are not in the same room.
# * They exchange transactions via Email
# + [markdown] tags=[]
# ### Setup
# See notebook 04.1; the lines below automatically load the functions in `algo_util.py`, the five accounts and the Purestake credentials
# + tags=[]
# Loading shared code and credentials
import sys, os
codepath = '..'+os.path.sep+'..'+os.path.sep+'sharedCode'
sys.path.append(codepath)
from algo_util import *
cred = load_credentials()
# Shortcuts to directly access the main accounts
Alice = cred['Alice']
Bob = cred['Bob']
# + tags=[]
from algosdk import account, mnemonic
from algosdk.v2client import algod
from algosdk.future import transaction
from algosdk.future.transaction import PaymentTxn, SignedTransaction
import algosdk.error
import json
import pandas as pd
import base64
# + tags=[]
# Initialize the algod client (Testnet or Mainnet)
algod_client = algod.AlgodClient(algod_token='', algod_address=cred['algod_test'], headers=cred['purestake_token'])
# -
# ## The remote swap
# #### Step 1: Prepare two transactions
# * Alice prepares both transactions
# * Similarly, a programmer could prepare the two transactions and send one to Alice and one to Bob
# Inspect the suggested_params
sp = algod_client.suggested_params()
print(json.dumps(vars(sp), indent=4))
algod_client.status()["last-round"]
# ##### How much time do we have from creating the TX to signing and sending it?
# * Algorand max window = 1000 blocks
# * Already being proposed
# in rounds
print(sp.last - sp.first)
# in minutes (assuming 3 sec per round)
print( (sp.last - sp.first)*3/60 )
# ##### Possibly we want to extend the time for Bob to sign the transaction
# * Start validity window later
sp.first = sp.first+10 # start later
sp.last = sp.last+10 # end later
# +
amt_1 = int(2*1E6)
txn_1 = transaction.PaymentTxn(Alice["public"], sp, Bob["public"], amt_1)
amt_2 = int(1*1E6)
txn_2 = transaction.PaymentTxn(Bob["public"], sp, Alice["public"], amt_2)
# -
# #### Step 2: create and assign group id
gid = transaction.calculate_group_id([txn_1, txn_2])
txn_1.group = gid
txn_2.group = gid
# #### Step 3: Send transaction file to Bob
# * Transaction `txn_2` is now ready and can be sent to Bob
# * To be able to save it into a file, we need to `dictify` it
import pickle
data = txn_2.dictify()
file = open("Bob_txn.txt", 'wb')
pickle.dump(data, file)
file.close()
# #### Step 4: Now it is Bob's turn
# * We can assume that they exchange files via email or a similar service
# * Open the notebook `04.5b_WSC` and create a file of the signed transaction
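# * For reference, Bob's side roughly looks like this (a sketch only, reusing the file names from this notebook; the full version is in `04.5b_WSC`):
# +
# Bob loads the dictified transaction, signs it with his private key,
# and saves the signed transaction for Alice (not meant to be run in Alice's notebook)
# file = open("Bob_txn.txt", 'rb')
# txn_2_bob = transaction.PaymentTxn.undictify(pickle.load(file))
# file.close()
# stxn_2_bob = txn_2_bob.sign(Bob['private'])
# file = open("Bob_signed.txt", 'wb')
# pickle.dump(stxn_2_bob.dictify(), file)
# file.close()
# -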
# #### Step 5: Retrieve Bob's signed transaction
# Instead of defining it as 'algosdk.future.transaction.PaymentTxn.undictify(...)', we use 'algosdk.future.transaction.SignedTransaction.undictify(...)'
# + tags=[]
file = open("Bob_signed.txt", 'rb')
data = pickle.load(file)
# To undictify, we need the SignedTransaction function
stxn_2 = SignedTransaction.undictify(data)
# -
# #### Step 6: Alice has to sign her transaction
# + tags=[]
stxn_1 = txn_1.sign(Alice['private'])
# -
# #### Step 7: Alice collects everything and sends the transaction
# * This part could also be taken over by a third party
# + tags=[]
# Step 7.1: collect
signed_group = [stxn_1, stxn_2]
# Step 7.2: send
txid = algod_client.send_transactions(signed_group)
# Step 7.3: wait for confirmation
txinfo = wait_for_confirmation(algod_client, txid)
| ClassMaterial/04 - Tokens/04 code/04.5a_WSC_Atomic_Divided_Alice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: dev
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Update sklearn to prevent version mismatches
# #!pip install sklearn --upgrade
# +
# install joblib. This will be used to save your model.
# Restart your kernel after installing
# #!pip install joblib
# -
import pandas as pd
# # Read the CSV and Perform Basic Data Cleaning
df = pd.read_csv("resources/exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# # Select your features (columns)
# +
# Set features. This will also be used as your x values.
#selected_features = df[['names', 'of', 'selected', 'features', 'here']]
feature_list = df.columns.to_list()
feature_list.remove("koi_disposition")
removal_list = []
for x in feature_list:
if "err" in x:
removal_list.append(x)
print(removal_list)
selected_features = df[feature_list].drop(columns=removal_list)
selected_features.head()
# -
# # Create a Train Test Split
#
# Use `koi_disposition` for the y values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(selected_features, df["koi_disposition"], random_state=13)
X_train.head()
# # Pre-processing
#
# Scale the data using the MinMaxScaler and perform some feature selection
# +
# Scale your data
from sklearn.preprocessing import MinMaxScaler
X_scaler = MinMaxScaler().fit(X_train)
#y_scaler = MinMaxScaler().fit(y_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
#y_train_scaled = y_scaler.transform(y_train)
#y_test_scaled = y_scaler.transform(y_train)
# -
# # Train the Model
#
#
# +
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from tensorflow.keras.utils import to_categorical
# Step 1: Label-encode data set
label_encoder = LabelEncoder()
label_encoder.fit(y_train)
encoded_y_train = label_encoder.transform(y_train)
encoded_y_test = label_encoder.transform(y_test)
# Step 2: Convert encoded labels to one-hot-encoding
y_train_categorical = to_categorical(encoded_y_train)
y_test_categorical = to_categorical(encoded_y_test)
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Create model and add layers
model = Sequential()
model.add(Dense(units=200, activation='relu', input_dim=20))
model.add(Dense(units=200, activation='relu'))
model.add(Dense(units=3, activation='softmax'))
# Compile and fit the model
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
# -
model.fit(
X_train_scaled,
y_train_categorical,
batch_size=20,
epochs=90,
shuffle=True,
verbose=3
)
model_loss, model_accuracy = model.evaluate(
X_test_scaled, y_test_categorical, verbose=3)
print(
f"Normal Neural Network - Loss: {model_loss}, Accuracy: {model_accuracy}")
# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters
# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV
# GridSearchCV expects a scikit-learn estimator, so wrap the Keras model in a KerasClassifier
# (available in older TF releases; newer ones provide the same wrapper via the scikeras package)
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
def build_model():
    m = Sequential()
    m.add(Dense(units=200, activation='relu', input_dim=20))
    m.add(Dense(units=200, activation='relu'))
    m.add(Dense(units=3, activation='softmax'))
    m.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return m
param_grid = {'batch_size': [10,30,60,80,100], 'epochs': [30,50,75,100,150,200]}
grid = GridSearchCV(KerasClassifier(build_fn=build_model, verbose=0), param_grid, verbose=3)
# Train the model with GridSearch on the scaled features and label-encoded targets
grid.fit(X_train_scaled, encoded_y_train)
print(grid.best_params_)
print(grid.best_score_)
# # Save the Model
# +
# save your model by updating "your_name" with your name
# and "your_model" with your model variable
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
#import joblib
#filename = 'DeepNN.sav'
#joblib.dump(model, filename)
model.save("DeepNN.h5")
# -
| model_5 - DeepNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ds
# language: python
# name: ds
# ---
# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
# ## Getting Started with ocifs
#
# The Oracle Cloud Infrastructure (OCI) Object Storage filesystem (ocifs) is an fsspec implementation for use with Object Storage.
# ### Quickstart with Pandas
# Begin by importing `ocifs` and `pandas`. When importing `ocifs`, you are registering the `oci` protocol with `pandas`:
import ocifs
import pandas as pd
# Now that the `oci` protocol is registered with `pandas`, you can read and write from and to Object Storage as easily as you can locally. For example, you could read an Excel file, `path/file.xls`, from your bucket in a namespace easily using:
df = pd.read_excel("oci://bucket@namespace/path/file.xls",
storage_options={"config": "~/.oci/config"})
df.to_parquet("oci://bucket@namespace/path/file.parquet",
storage_options={"config": "~/.oci/config"})
# You could also use [Dask](https://docs.dask.org/en/latest/index.html):
# +
from dask import dataframe as dd
ddf = dd.read_csv("oci://bucket@namespace/path/file*.csv",
storage_options={"config": "~/.oci/config"})
# -
# The `storage_options` parameter contains a dictionary of arguments that are passed to the underlying `OCIFileSystem` method. The following `docstring` lists the valid arguments to storage options:
# +
# ocifs.OCIFileSystem?
# -
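# For example, a config file location and profile name can be forwarded like this (a minimal sketch; any other `OCIFileSystem` keyword argument can be passed the same way):
# +
df = pd.read_csv(
    "oci://bucket@namespace/path/file.csv",
    storage_options={"config": "~/.oci/config", "profile": "DEFAULT"},
)
# -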
# ### Quickstart to UNIX Operations
# You can interact with the filesystem directly using most UNIX commands like `ls`, `cp`, `exists`, `mkdir`, `rm`, `walk`, `find`, and so on.
# Instantiate a filesystem from your configuration, see Getting Connected. Every filesystem instance operates within the home region of the configuration. The `cp` command is the only command that has cross-region support. You must create a unique filesystem instance for each region.
fs = ocifs.OCIFileSystem(config="~/.oci/config", profile="DEFAULT", default_block_size=5*2**20)
fs.ls("oci://bucket@namespace/path")
# []
fs.touch("oci://bucket@namespace/path/file.txt")
fs.exists("oci://bucket@namespace/path/file.txt")
# True
fs.cat("oci://bucket@namespace/path/file.txt")
# ""
fs.rm("oci://bucket@namespace", recursive=True)
fs.exists("oci://bucket@namespace/path/file.txt")
# False
# Following are examples of how you can use the `OCIFileSystem` and `OCIFile` objects.
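# For instance, files can be opened through fsspec's standard file interface, which `OCIFileSystem` implements (a short sketch using the placeholder bucket and namespace from above):
# +
with fs.open("oci://bucket@namespace/path/file.txt", "w") as f:
    f.write("hello from ocifs")
# -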
| docs/source/getting-started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tutorial on large-scale Thompson sampling
#
# This demo currently considers four approaches to discrete Thompson sampling on `m` candidate points:
#
# 1. **Exact sampling with Cholesky:** Computing a Cholesky decomposition of the corresponding `m x m` covariance matrix, which requires `O(m^3)` computational cost and `O(m^2)` space. This is the standard approach to sampling from a Gaussian process, but the quadratic memory usage and cubic complexity limit the number of candidate points.
#
# 2. **Contour integral quadrature (CIQ):** CIQ [1] is a Krylov subspace method combined with a rational approximation that can be used for computing matrix square roots of covariance matrices, which is the main bottleneck when sampling from a Gaussian process. CIQ relies on computing matrix vector multiplications with the exact kernel matrix which requires `O(m^2)` computational complexity and space. The space complexity can be lowered to `O(m)` by using [KeOps](https://github.com/getkeops/keops), which is necessary to scale to large values of `m`.
#
# 3. **Lanczos:** Rather than using CIQ, we can solve the linear systems `K^(1/2) v = b` using Lanczos and the conjugate gradient (CG) method. This will be faster than CIQ, but will generally produce samples of worse quality. Similarly to CIQ, we need to use KeOps as we require computing matrix vector multiplications with the exact kernel matrix.
#
# 4. **Random Fourier features (RFFs):** The RFF kernel was originally proposed in [2] and we use it as implemented in GPyTorch. RFFs are computationally cheap to work with as the computational cost and space are both `O(km)` where `k` is the number of Fourier features. Note that while Cholesky and CIQ are able to generate exact samples from the GP model, RFFs are an unbiased approximation and the resulting samples often aren't perfectly calibrated.
#
#
# [1] [<NAME>, et al. "Fast matrix square roots with applications to Gaussian processes and Bayesian optimization.", Advances in neural information processing systems (2020)](https://proceedings.neurips.cc/paper/2020/file/fcf55a303b71b84d326fb1d06e332a26-Paper.pdf)
#
# [2] [<NAME>, and <NAME>. "Random features for large-scale kernel machines.", Advances in neural information processing systems (2007)](https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
# +
import os
import time
from contextlib import ExitStack
import torch
from torch.quasirandom import SobolEngine
import gpytorch
import gpytorch.settings as gpts
import pykeops
from botorch.fit import fit_gpytorch_model
from botorch.generation import MaxPosteriorSampling
from botorch.models import SingleTaskGP
from botorch.test_functions import Hartmann
from botorch.utils.transforms import unnormalize
from gpytorch.constraints import Interval
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import MaternKernel, RFFKernel, ScaleKernel
from gpytorch.kernels.keops import MaternKernel as KMaternKernel
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.mlls import ExactMarginalLogLikelihood
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.double
SMOKE_TEST = os.environ.get("SMOKE_TEST")
# -
pykeops.test_torch_bindings() # Make sure the KeOps bindings are working
# +
hart6 = Hartmann(dim=6, negate=True).to(device=device, dtype=dtype)
dim = hart6.dim
def eval_objective(x):
"""This is a helper function we use to unnormalize and evalaute a point"""
return hart6(unnormalize(x, hart6.bounds))
# -
def get_initial_points(dim, n_pts, seed=None):
sobol = SobolEngine(dimension=dim, scramble=True, seed=seed)
X_init = sobol.draw(n=n_pts).to(dtype=dtype, device=device)
return X_init
def generate_batch(
X,
Y,
batch_size,
n_candidates,
sampler="cholesky", # "cholesky", "ciq", "rff"
use_keops=False,
):
assert sampler in ("cholesky", "ciq", "rff", "lanczos")
assert X.min() >= 0.0 and X.max() <= 1.0 and torch.all(torch.isfinite(Y))
# NOTE: We probably want to pass in the default priors in SingleTaskGP here later
kernel_kwargs = {"nu": 2.5, "ard_num_dims": X.shape[-1]}
if sampler == "rff":
base_kernel = RFFKernel(**kernel_kwargs, num_samples=1024)
else:
base_kernel = (
KMaternKernel(**kernel_kwargs) if use_keops else MaternKernel(**kernel_kwargs)
)
covar_module = ScaleKernel(base_kernel)
# Fit a GP model
train_Y = (Y - Y.mean()) / Y.std()
likelihood = GaussianLikelihood(noise_constraint=Interval(1e-8, 1e-3))
model = SingleTaskGP(X, train_Y, likelihood=likelihood, covar_module=covar_module)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
fit_gpytorch_model(mll)
# Draw samples on a Sobol sequence
sobol = SobolEngine(X.shape[-1], scramble=True)
X_cand = sobol.draw(n_candidates).to(dtype=dtype, device=device)
# Thompson sample
with ExitStack() as es:
if sampler == "cholesky":
es.enter_context(gpts.max_cholesky_size(float("inf")))
elif sampler == "ciq":
es.enter_context(gpts.fast_computations(covar_root_decomposition=True))
es.enter_context(gpts.max_cholesky_size(0))
es.enter_context(gpts.ciq_samples(True))
es.enter_context(gpts.minres_tolerance(2e-3)) # Controls accuracy and runtime
es.enter_context(gpts.num_contour_quadrature(15))
elif sampler == "lanczos":
es.enter_context(gpts.fast_computations(covar_root_decomposition=True))
es.enter_context(gpts.max_cholesky_size(0))
es.enter_context(gpts.ciq_samples(False))
elif sampler == "rff":
es.enter_context(gpts.fast_computations(covar_root_decomposition=True))
thompson_sampling = MaxPosteriorSampling(model=model, replacement=False)
X_next = thompson_sampling(X_cand, num_samples=batch_size)
return X_next
def run_optimization(sampler, n_candidates, n_init, max_evals, batch_size, use_keops=False, seed=None):
X = get_initial_points(dim, n_init, seed)
Y = torch.tensor([eval_objective(x) for x in X], dtype=dtype, device=device).unsqueeze(-1)
print(f"{len(X)}) Best value: {Y.max().item():.2e}")
while len(X) < max_evals:
# Create a batch
start = time.time()
X_next = generate_batch(
X=X,
Y=Y,
batch_size=min(batch_size, max_evals - len(X)),
n_candidates=n_candidates,
sampler=sampler,
use_keops=use_keops,
)
end = time.time()
print(f"Generated batch in {end - start:.1f} seconds")
Y_next = torch.tensor(
[eval_objective(x) for x in X_next], dtype=dtype, device=device
).unsqueeze(-1)
# Append data
X = torch.cat((X, X_next), dim=0)
Y = torch.cat((Y, Y_next), dim=0)
print(f"{len(X)}) Best value: {Y.max().item():.2e}")
return X, Y
# +
batch_size = 5
n_init = 10
max_evals = 60
seed = 0 # To get the same Sobol points
shared_args = {
"n_init": n_init,
"max_evals": max_evals,
"batch_size": batch_size,
"seed": seed,
}
# -
USE_KEOPS = True if not SMOKE_TEST else False
N_CAND = 50000 if not SMOKE_TEST else 10
N_CAND_CHOL = 10000 if not SMOKE_TEST else 10
# ## Track memory footprint
# %load_ext memory_profiler
# ## Cholesky with 10,000 candidates
# %memit X_chol, Y_chol = run_optimization("cholesky", N_CAND_CHOL, **shared_args)
# ## RFF with 50,000 candidates
# %memit X_rff, Y_rff = run_optimization("rff", N_CAND, **shared_args)
# ## Lanczos
# %memit X_lanczos, Y_lanczos = run_optimization("lanczos", N_CAND, use_keops=USE_KEOPS, **shared_args)
# ## CIQ with 50,000 candidates
# %memit X_ciq, Y_ciq = run_optimization("ciq", N_CAND, use_keops=USE_KEOPS, **shared_args)
# ## Plot
# +
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(10, 8))
matplotlib.rcParams.update({"font.size": 20})
results = [
(Y_chol.cpu(), "Cholesky-10,000", "b", "", 14, "--"),
(Y_rff.cpu(), "RFF-50,000", "r", ".", 16, "-"),
(Y_lanczos.cpu(), "Lanczos-50,000", "m", "^", 9, "-"),
(Y_ciq.cpu(), "CIQ-50,000", "g", "*", 12, "-"),
]
optimum = hart6.optimal_value
ax = fig.add_subplot(1, 1, 1)
names = []
for res, name, c, m, ms, ls in results:
names.append(name)
fx = res.cummax(dim=0)[0]
t = 1 + np.arange(len(fx))
plt.plot(t[0::2], fx[0::2], c=c, marker=m, linestyle=ls, markersize=ms)
plt.plot([0, max_evals], [hart6.optimal_value, hart6.optimal_value], "k--", lw=3)
plt.xlabel("Function value", fontsize=18)
plt.xlabel("Number of evaluations", fontsize=18)
plt.title("Hartmann6", fontsize=24)
plt.xlim([0, max_evals])
plt.ylim([0.5, 3.5])
plt.grid(True)
plt.tight_layout()
plt.legend(
names + ["Global optimal value"],
loc="lower right",
ncol=1,
fontsize=18,
)
plt.show()
# -
| tutorials/thompson_sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv("winequality.csv")
df.head(10)
# # Min-Max Normalization
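# For reference, the transform implemented below is $v' = \frac{v - \min}{\max - \min} \cdot (new_{max} - new_{min}) + new_{min}$, which maps the column onto the chosen range.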
# +
def Max_min(v):
b = (((v-Minimum)/(Maximum-Minimum))*(New_Maximum-New_minimum))+New_minimum
return b
a = df['total sulfur dioxide']
print(a)
Minimum = df['total sulfur dioxide'].min()
print("Minimum = ",Minimum)
Maximum = df['total sulfur dioxide'].max()
print("Maximum = ",Maximum)
# +
New_Maximum = 1
New_minimum = 0 #Range(0-1)
b = []
for i in list(a): #Normalization
b.append(Max_min(i))
df["Normalizes"] = b #adding column
df = df.filter(["total sulfur dioxide","Normalizes"]) #Extract only required Column
df
# -
# # Z-Score Normalization
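# For reference, the transform implemented below is $z = \frac{v - \mu}{\sigma}$, where $\mu$ is the column mean and $\sigma$ its standard deviation.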
# +
b = df["total sulfur dioxide"]
def Z_score(v,Mean,Standard_deviation):
Z = (v-Mean)/Standard_deviation
return Z
Mean = df["total sulfur dioxide"].mean()
print("Mean = ",Mean)
Standard_deviation = df["total sulfur dioxide"].std()
print("Standard_deviation = ",Standard_deviation)
# +
c = []
for j in list(b):
c.append(Z_score(j,Mean,Standard_deviation)) #Normalization
df["Normalizes"] = c
df = df.filter(["total sulfur dioxide","Normalizes"]) #Extract only required Column
df
# -
# # Decimal Normalization
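# For reference, the transform implemented below is $v' = \frac{v}{10^{j}}$, where $j$ is the number of digits of the column maximum, so the scaled values fall below 1.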
# +
c = df["total sulfur dioxide"]
Maximum = df['total sulfur dioxide'].max()
print("Maximum = ",Maximum)
length = len(str(int(Maximum)))
print(length)
def Decimal(v):
D = v / 10**length #Decimal Normalizations
return D
x = []
for j in list(c):
x.append(Decimal(j)) #Normalization
df["Normalizes"] = x
df = df.filter(["total sulfur dioxide","Normalizes"]) #Extract only required Column
df
| Normalisation/normalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from sklearn.datasets import make_blobs, make_moons, make_regression, make_classification
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
from scipy.stats import pearsonr, spearmanr
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, explained_variance_score
# from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, roc_curve, roc_auc_score
# clustering
# from sklearn.datasets import make_blobs, make_moons
# import matplotlib.pyplot as plt
# import seaborn as sns
# import pandas as pd
# import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.cluster import KMeans
# from sklearn.metrics import adjusted_rand_score, adjusted_mutual_info_score
# from scipy.stats import pearsonr, spearmanr
# data exploration
# from sklearn.datasets import make_regression, make_moons
# import matplotlib.pyplot as plt
# import seaborn as sns
# import pandas as pd
# import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.linear_model import LinearRegression
# from sklearn.neural_network import MLPRegressor
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.metrics import r2_score, explained_variance_score
# from scipy.stats import pearsonr, spearmanr
# classification
# from sklearn.datasets import make_classification, make_moons
# import matplotlib.pyplot as plt
# import seaborn as sns
# import pandas as pd
# import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.linear_model import LogisticRegression
# from sklearn.neural_network import MLPClassifier
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.metrics import f1_score, roc_curve, roc_auc_score
# from scipy.stats import pearsonr, spearmanr
# regression
# from sklearn.datasets import make_regression, make_moons
# import matplotlib.pyplot as plt
# import seaborn as sns
# import pandas as pd
# import numpy as np
# from sklearn.model_selection import train_test_split
# from sklearn.linear_model import LinearRegression
# from sklearn.neural_network import MLPRegressor
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.metrics import r2_score, explained_variance_score
# from scipy.stats import pearsonr, spearmanr
# +
def interval_transform(x, a, b):
m = x.min()
ma = x.max()
alpha_inv = (1 - m/ma)*ma/(a - b)
alpha = 1/alpha_inv
beta = b - alpha*m
f = lambda x: alpha*x + beta
return f(x)
def make_noise_feature(x):
n_features = x.shape[1]
n_samples = x.shape[0]
weights = np.random.uniform(1e-4, 1e-2, n_features)
noise = np.random.normal(1, 5, n_samples)
signal = np.sum(weights*x, -1)
return signal + noise
def calculate_pvalues(df,
method = spearmanr
):
"""
Assumes df with only numeric entries clean of null entries.
"""
dfcols = pd.DataFrame(columns=df.columns)
pvalues = dfcols.transpose().join(dfcols, how='outer')
for r in df.columns:
for c in df.columns:
pvalues[r][c] = round(method(df[r], df[c])[1], 4)
return pvalues
def correlation_matrix(df,
method = "pearson",
annot_bool = False,
annot_size = 20
):
# Compute the correlation matrix
corr = df.corr(method = method)
if annot_bool:
annot = corr.copy()
if method == "pearson":
sig_meth = pearsonr
else:
sig_meth = spearmanr
pval = calculate_pvalues(df, sig_meth)
# create three masks
r0 = corr.applymap(lambda x: '{:.2f}'.format(x))
r1 = corr.applymap(lambda x: '{:.2f}*'.format(x))
r2 = corr.applymap(lambda x: '{:.2f}**'.format(x))
r3 = corr.applymap(lambda x: '{:.2f}***'.format(x))
# apply them where appropriate --this could be a single liner
annot = annot.where(pval>0.1,r0)
annot = annot.where(pval<=0.1,r1)
annot = annot.where(pval<=0.05,r2)
annot = annot.mask(pval<=0.01,r3)
# Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in recent NumPy versions
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 11))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5},
annot = annot,
fmt = "",
annot_kws={"size": annot_size},
vmin = -1,
vmax = 1,
)
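# -
# The helpers above can be used once a dataframe exists, for example (hypothetical call, shown only as a reminder of the signature): `correlation_matrix(df_full, method="spearman", annot_bool=True, annot_size=10)`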
# +
n_info = 3
n_redu = 0
n_samples=2000
#making nonlinear decision boundaries requires multiple blob like features
X1, y1 = make_blobs(
n_samples=n_samples,
n_features=2,
centers=np.array([[42, 39], [39.5, 38.3]]),
shuffle=False,
random_state=42,
#difficulty,
cluster_std=1.4,
)
X2, y2 = make_blobs(
n_samples=n_samples,
n_features=2,
centers=np.array([[44, 39.8], [38, 37.9]]),
cluster_std=1.2,
shuffle=False,
random_state=6,
#difficulty,
)
X3, y3 = make_moons(n_samples=2*n_samples, noise=1, random_state=42)
X = np.concatenate([X1, X2], axis=0)
y = np.concatenate([y1, y2], axis=0)
data = np.concatenate([X, np.expand_dims(y, -1)], -1)
data = pd.DataFrame(data)
# -
X.shape
gpa_column = interval_transform(data[0], 1, 4)
passed_column = interval_transform(data[1], 0, 100)
# +
full_data = np.concatenate(
[
np.expand_dims(gpa_column, axis=-1),
np.expand_dims(passed_column, axis=-1),
np.expand_dims(y, axis=-1)
],
axis=1
)
columns = [
"cGPA",
"passed_percent",
"degree",
]
df_full = pd.DataFrame(full_data,
columns=columns)
# -
df_full.to_csv('/home/john/research/tutorials/clustering/data/clustering_data.csv', index=False)
pwd
# +
n_info = 3
n_redu = 0
n_samples=2000
#making nonlinear decision boundaries requires multiple blob like features
X1, y1 = make_regression(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
#n_redundant=n_redu,
shuffle=False,
random_state=42,
#difficulty
effective_rank=2,
noise=0.6,
tail_strength=0.2,
bias=12,
)
X2, y2 = make_regression(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
#n_redundant=n_redu,
shuffle=False,
random_state=6,
#difficulty
effective_rank=1,
noise=1.1,
tail_strength=0.3,
bias=10,
)
#X3, y3 = make_moons(n_samples=2*n_samples, noise=1, random_state=42)
X = np.concatenate([X1, X2], axis=0)
y = np.concatenate([y1, y2], axis=0)
data = np.concatenate([X, np.expand_dims(y, -1)], -1)
data = pd.DataFrame(data)
# -
attendance_column = interval_transform(data[2], 0, 100)
gpa_column = interval_transform(data[1], 1, 4)
passed_column = interval_transform(data[0], 0, 100)
sex_column = make_noise_feature(X)
sex_column = (sex_column > sex_column.mean()).astype(int)
hsgpa_column = interval_transform(make_noise_feature(X), 0, 4)
ethn_column = make_noise_feature(X)
ethn_column = pd.qcut(ethn_column, q=[0, .25, .5, 1], labels=[0, 1, 2])
fci_post = interval_transform(y, 0, 30)
# +
full_data = np.concatenate(
[
np.expand_dims(gpa_column, axis=-1),
np.expand_dims(attendance_column, axis=-1),
np.expand_dims(passed_column, axis=-1),
np.expand_dims(sex_column, axis=-1),
np.expand_dims(hsgpa_column, axis=-1),
np.expand_dims(ethn_column, axis=-1),
np.expand_dims(fci_post, axis=-1)
],
axis=1
)
columns = [
"cGPA",
"attendance",
"passed_percent",
"sex",
"hsGPA",
"ethnicity",
"fci_post"]
df_full = pd.DataFrame(full_data,
columns=columns)
# -
df_full.to_csv('/home/john/research/tutorials/exploring-data/data/regression_data.csv', index=False)
# +
n_info = 3
n_redu = 0
n_samples=2000
#making nonlinear decision boundaries requires multiple blob like features
X1, y1 = make_regression(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
#n_redundant=n_redu,
shuffle=False,
random_state=42,
#difficulty
effective_rank=2,
noise=0.6,
tail_strength=0.2,
bias=12,
)
X2, y2 = make_regression(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
#n_redundant=n_redu,
shuffle=False,
random_state=6,
#difficulty
effective_rank=1,
noise=1.1,
tail_strength=0.3,
bias=10,
)
#X3, y3 = make_moons(n_samples=2*n_samples, noise=1, random_state=42)
X = np.concatenate([X1, X2], axis=0)
y = np.concatenate([y1, y2], axis=0)
data = np.concatenate([X, np.expand_dims(y, -1)], -1)
data = pd.DataFrame(data)
# -
attendance_column = interval_transform(data[2], 0, 100)
gpa_column = interval_transform(data[1], 1, 4)
passed_column = interval_transform(data[0], 0, 100)
sex_column = make_noise_feature(X)
sex_column = (sex_column > sex_column.mean()).astype(int)
hsgpa_column = interval_transform(make_noise_feature(X), 0, 4)
ethn_column = make_noise_feature(X)
ethn_column = pd.qcut(ethn_column, q=[0, .25, .5, 1], labels=[0, 1, 2])
fci_post = interval_transform(y, 0, 30)
# +
full_data = np.concatenate(
[
np.expand_dims(gpa_column, axis=-1),
np.expand_dims(attendance_column, axis=-1),
np.expand_dims(passed_column, axis=-1),
np.expand_dims(sex_column, axis=-1),
np.expand_dims(hsgpa_column, axis=-1),
np.expand_dims(ethn_column, axis=-1),
np.expand_dims(fci_post, axis=-1)
],
axis=1
)
columns = [
"cGPA",
"attendance",
"passed_percent",
"sex",
"hsGPA",
"ethnicity",
"fci_post"]
df_full = pd.DataFrame(full_data,
columns=columns)
# -
df_full.to_csv('/home/john/research/tutorials/regression/data/regression_data.csv', index=False)
# +
n_info = 3
n_redu = 0
n_samples=2000
#making nonlinear decision boundaries requires multiple blob like features
X1, y1 = make_classification(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
n_redundant=n_redu,
n_clusters_per_class=2,
shuffle=False,
random_state=42,
#difficulty
flip_y=0.05,
class_sep=.7,
weights=[0.6, 0.4]
)
X2, y2 = make_classification(
n_samples=n_samples,
n_features=3,
n_informative=n_info,
n_redundant=n_redu,
n_clusters_per_class=4,
shuffle=False,
random_state=6,
#difficulty
flip_y=0.05,
class_sep=0.6,
weights=[0.7, 0.3],
)
X3, y3 = make_moons(n_samples=2*n_samples, noise=1, random_state=42)
X = np.concatenate([X1, X2], axis=0)
y = np.concatenate([y1, y2], axis=0)
data = np.concatenate([X, np.expand_dims(y, -1)], -1)
data = pd.DataFrame(data)
# -
attendance_column = interval_transform(data[2], 0, 100)
gpa_column = interval_transform(data[1], 1, 4)
passed_column = interval_transform(data[0], 0, 100)
sex_column = make_noise_feature(X)
sex_column = (sex_column > sex_column.mean()).astype(int)
hsgpa_column = interval_transform(make_noise_feature(X), 0, 4)
ethn_column = make_noise_feature(X)
ethn_column = pd.qcut(ethn_column, q=[0, .25, .5, 1], labels=[0, 1, 2])
# +
full_data = np.concatenate(
[
np.expand_dims(gpa_column, axis=-1),
np.expand_dims(attendance_column, axis=-1),
np.expand_dims(passed_column, axis=-1),
np.expand_dims(sex_column, axis=-1),
np.expand_dims(hsgpa_column, axis=-1),
np.expand_dims(ethn_column, axis=-1),
np.expand_dims(y, axis=-1)
],
axis=1
)
columns = [
"cGPA",
"attendance",
"passed_percent",
"sex",
"hsGPA",
"ethnicity",
"failed_course"]
df_full = pd.DataFrame(full_data,
columns=columns)
# -
df_full.to_csv('/home/john/research/tutorials/classification/data/classification_data.csv', index=False)
| data-creation/make_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Language Basics, IPython, and Jupyter Notebooks
#
#
# *****
# ___________
# # %quickref
#
# ____________________
# start Jupyter app:
# $jupyter notebook --port=9876
import numpy as np
np.random.seed(12345)
np.set_printoptions(precision=4, suppress=True)
import numpy as np
data = {i : np.random.randn() for i in range(7)}
data
# +
from numpy.random import randn
data = {i : randn() for i in range(7)} #makes an index starting at zero
#data = {randn() for i in range(7)} #no index number
print(data)
# +
#tab completion and inspect objects by adding '?'
# print?
# -
#push current directory to the stack, move to the new directory
# %pushd pass
# %popd #move to the stack you popped off earlier
# %dirs
# %dhist
_dh
# %env
# %matplotlib
#returns an object, is a list of the console output
# ip_info = !ifconfig virbr0 | grep "inet"
# !ifconfig
ip_info[0].strip()
def add_concat(coconutA, coconutB):
"""
This is an example of a doc string
Add two coconuts together
Returns
-------
the_sum : type of arguments
"""
return coconutA + coconutB
add_concat('bowling with ', 'coconuts -->')
# +
# #add_concat?
# add_concat??
# +
# np.*load*?
# -
# ### The %run Command
#
# +
def sir_Robin(x, y, z):
return (x + y) / z
a = 5
b = 6
c = 7.5
result = sir_Robin(a, b, c)
# -
sir_Robin(55,77,22)
# ```python
# # %run ipython_script_test.py
# # %load ipython_script_test.py
# ```
# #### Interrupting running code
#
#
# ____________________________________
# Executing Code from the Clipboard
#
# # %paste
#
# # %cpaste
#
# # %timeit
#
# # %time
#
# # %debug
#
# # %pwd
#
#
# ### Matplotlib Integration
# %matplotlib
# %matplotlib inline
# ## Python Language Basics
# ### Language Semantics
#
#
# Everything is an object.
#
# Indentation seperates code blocks, not braces.
#
# for x in array: #colon means more stuff tabbed in on next line
#
# if x < pivot:
#
# less.append(x)
#
# else:
#
# greater.append(x)
#
# Single line, multiple arguments seperated by semicolon
# a_thing = 5; b_obj = 6; c_semprini = 7
#
# Comments # single line
# '''
# multi
# line
# comments
# '''
# #### Variables and argument passing
a = [1, 2, 3]
b = a
a.append(4)
b
def append_element(some_list, element):
some_list.append(element)
import string as st
sir_Robin = st.ascii_letters + st.digits
#st.printable
#st.hexdigits
#st.octdigits
#letters concats lower & upper
print(sir_Robin)
append_element(a, 'semprini')
print(a)
# #### Dynamic references, strong types
#a = 5
#type(a)
a = 'foo'
type(a)
'5' + 5
a = 4.5
b = 2
# String formatting, to be visited later
print('a is {0}, b is {1}'.format(type(a), type(b)))
a / b
a = 5
isinstance(a, int)
a = 5; b = 4.5
isinstance(a, (int, float))
isinstance(b, (int, float))
# #### Attributes and methods
# ```python
# In [1]: a = 'foo'
#
# In [2]: a.<Press Tab>
# a.capitalize a.format a.isupper a.rindex a.strip
# a.center a.index a.join a.rjust a.swapcase
# a.count a.isalnum a.ljust a.rpartition a.title
# a.decode a.isalpha a.lower a.rsplit a.translate
# a.encode a.isdigit a.lstrip a.rstrip a.upper
# a.endswith a.islower a.partition a.split a.zfill
# a.expandtabs a.isspace a.replace a.splitlines
# a.find a.istitle a.rfind a.startswith
# ```
a = 'coconut_foo'
type(a)
getattr(a, 'split')
# #### Duck typing
def isiterable(obj):
try:
iter(obj)
return True
except TypeError: # not iterable
return False
print(isiterable('a string'))
print(isiterable([1, 2, 3]))
print(isiterable(5))
x = [5, 6, 7, 9]
if not isinstance(x, list) and isiterable(x):
x = list(x)
print(x)
# #### Imports
# ```python
# # some_module.py
# PI = 3.14159
#
# def f(x):
# return x + 2
#
# def g(a, b):
# return a + b
# ```
# import some_module
# result = some_module.f(5)
# pi = some_module.PI
# from some_module import f, g, PI
# result = g(5, PI)
# import some_module as sm
# from some_module import PI as pi, g as gf
#
# r1 = sm.f(pi)
# r2 = gf(6, pi)
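# The same patterns with a real standard-library module, just to have something runnable:
# +
import math as m
from math import pi as PI, sqrt as root
print(m.floor(PI), root(16))
# -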
# #### Binary operators and comparisons
5 - 7
12 + 21.5
5 <= 2
a = [1, 2, 3]
b = a
c = list(a)
a is b
a is not c
a == c
a = None
a is None
# #### Mutable and immutable objects
a_list = ['foo', 2, [4, 5]]
a_list[2] = (3, 4)
a_list
a_tuple = (3, 5, (4, 5))
a_tuple[1] = 'four'
# ### Scalar Types
# #### Numeric types
ival = 17239871
ival ** 6
fval = 7.243
fval2 = 6.78e-5
3 / 2
3 // 2
# %time
import math
g = [1,3,5,7,17,21]
p = [12,122,112,1212,1222,1112]
t =[-12,-122,-112,-1212,-1222,-1112]
for x in g:
print(hex(x))
for y in p:
print(oct(y))
for f in g:
print(math.factorial(f))
for h in t:
print(abs(h))
import math
for u in p:
print(math.sin(u))
pII = math.pi
print(math.sin(pII/2))
print(math.cos(pII))
print(math.tan(pII))
#print(math.asin(pII))
#math.acos
#math.atan
PII = math.pi
round(pII, 50)
complex(99,88)
for x in g:
print(bin(x))
for y in p:
print(math.sqrt(y))
for w in g:
print(math.log(w))
print('^-----semprini-----^')
def kung_pow(x, y):
print(math.pow(x,y))
kung_pow(8, 8)
for x in g:
print(math.pow(x,2))
# #### Strings
# Strings can be written with single or double quotes; use double quotes if the string contains a single quote.
# Escape characters are \t for tab and \n for newline
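# A quick illustration of those escape characters (made-up strings):
# +
print("col1\tcol2\ncoconut\tswallow")
# -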
a = 'one way of writing a string'
b = "another way"
a.replace('writing', 'inflating')
print(a)
b.split
a.splitlines()
c = """
This is a longer string that \n
spans multiple lines \n
I am the bottom line
"""
c.split('\n')
c.count('\n')
c.splitlines()
a = 'this is a string'
a[10] = 'f'
b = a.replace('string', 'longer string')
b
a = 5.6
s = str(a)
print(type(a))
print(type(s))
print(s)
s = 'python'
list(s)
s[:3]
s = '12\\34'
print(s)
s = r'this\has\no\special\characters'
s
a = 'this is the first half '
b = 'and this is the second half'
a + b
template = '{0:.2f} {1:s} are worth US${2:d}'
template.format(4.5560, 'Argentine Pesos', 1)
"Whizzo butter time is {0} GMT".format('15:00')
'semPriNi'.capitalize()
'.sEmPriNi'.lower()
'semprini'.upper()
#Center the string between the given spaces/padding, optional fill character
'Semprini'.center(13, '~')
'Semprini'.ljust(13, '~')
'Semprini'.rjust(13,'^')
'44444'.rjust(7,'0')
#true if matches end of string
'Semprini'.endswith('ini')
#return the index position of a substring, limit search with optional start/end positions
'Semprini'.find('mp')
#True if all characters are letters or digits
'43234'.isalnum(), 'semprini'.isalnum(), '#$%#@'.isalnum()
'{_43.2$34'.isalpha(), 'semprini'.isalpha(), '#$%#@'.isalpha()
# #### Bytes and Unicode
val = "español"
val
val_utf8 = val.encode('utf-8')
val_utf8
type(val_utf8)
val_utf8.decode('utf-8')
val.encode('latin1')
val.encode('utf-16')
val.encode('utf-16le')
bytes_val = b'this is bytes'
bytes_val
decoded = bytes_val.decode('utf8')
decoded # this is str (Unicode) now
# #### Booleans
True and True
False or True
# ### Type Casting and Type Conversions
# Use built-in functions to convert from one type to another, e.g.
# float(x)
# list(x)
# int(x)
s = '3.14159'
fval = float(s)
print(type(fval))
#int(fval)
#bool(fval)
#bool(0)
print(fval)
# #### None
# None is the Python null value type. If a function does not explicitly
# return a value, it implicitly returns None.
a = None
a is None
b = 5
b is not None
#none is also a common default value for function arguments
def add_and_maybe_multiply(a, b, c=None):
result = a + b
if c is not None:
result = result * c
return result
#None is also a unique
type(None)
# #### Dates and times
from datetime import datetime, date, time
dt = datetime(2019, 10, 28, 20, 15, 35)
#dt.day
#dt.minute
print(dt.date())
print(dt.time())
dt.strftime('%m/%d/%Y %H:%M')
#convert the dt into international scientific date format
dt.strftime('%A %d-%b-%Y %H:%M:%S.%f')
datetime.strptime('20091031', '%Y%m%d')
dt.replace(minute=0, second=0)
dt2 = datetime(2011, 11, 15, 22, 30)
delta = dt2 - dt
delta
type(delta)
dt
dt + delta
# ### Control Flow
# """
# Python has several built-in keywords for conditional logic, loops and
# other standard control flow concepts found in other languages.
# #### if, elif, and else
# The if statement is one of the most well-known control flow statement
# types. It checks a condition that, if True, evaluates the code
# if ..elif in 4 lines may not be the adaptable or consise
# y if x else z is the same as *=(((x and y) or z))
def bad_words(badw):
if badw < 0:
print('Sorry, you are out of money ;)')
elif badw == 0:
print('Do you need House credits?')
elif 0 < badw < 5:
print('Cleaning out your pocket change?')
else:
print('Oooh, a big spender !!!')
print('You have this many credits:-->', badw)
bad_words(40)
if x < 0:
    print("It's negative")
elif x == 0:
print('Equal to zero')
elif 0 < x < 5:
print('Positive but smaller than 5')
else:
print('Positive and larger than or equal to 5')
a = 5; b = 7
c = 8; d = 4
if a < b or c > d:
print('Made it')
test_pidgen = 4 > 8 > 2 > 1
tp = test_pidgen
if tp == True:
print('Mr <NAME> approves!')
if tp == False:
print('No, no, no, no, no spam for you ;(')
gui_drop_menu = 'coconuts'
print({'gumbys': 1.55,
'brain specialist': 5.23,
'parrot eggs': 0.99,
'moustach': 0.49,
'lupines': 1.23,
'semprini': 9.99,
'coconuts': 2.56,
'': "Type something in, I'm no mind reader"}[gui_drop_menu])
menu_branch = {'gumbys': 1.55,
'brain specialist': 5.23,
'parrot eggs': 0.99,
'moustach': 0.49,
'lupines': 1.23,
'semprini': 9.99,
'coconuts': 2.56,
'lemon curry?': 4.35,
'': "Type something in, I'm no mind reader"} #catch empty strings
print(menu_branch.get('lemon curry?', 'Choose well you will --Yoda'))
def poc_order_up(c):
choice = c
if choice in menu_branch:
print(menu_branch[choice])
else:
print('No, no, no, no, no spam for you ;(')
poc_order_up('parrot eggs')
# ##Boolean Tests and the value of being truthful.
# True and False are custom versions of ints 1 and 0, more readable.
# A T/F value is inherent in all objects.
# Nonzero numbers & nonempty = True.
# Zero numbers, empty objects, None(special object) = False.
# and & or can return true or false, always return a coconut.
# Once the results are known, the statement stops evaluating.
# and
# or
# not
# 'Short-circuit evaluation' = Python stops evaluating as soon as the result is known.
#return left side if false
45 and 54, 54 and 99, 66 and 989 #both are true, returns the right
#here '[]' is false, stops and returns
[] and {}
#here 321 is true, so the right operand ([]) is returned
321 and []
# ###for loops
# for value in collection:
# # do something with value
sequence = [1, 2, None, 4, None, 5]
total = 0
for value in sequence:
if value is None:
continue
total += value
print(value)
sequence = [1, 2, 0, 4, 6, 5, 2, 1]
total_until_5 = 0
for value in sequence:
if value == 5:
break
total_until_5 += value
print(value)
for g in range(85, 88, 1):
for j in range(4, 8):
if j > g:
break
print((g, j))
a = ((x and y) or z)
a = y if x else z
#boolean cheat codes
#select from a set of objects
#assigns x to the first nonempty
x = a or b or c or None #or default
#good for
L = [1, 0, 2, 0, '<NAME>', '', 'rumham', []]
#fetch the true values
list(filter(bool, L))
[x for x in L if x]
#truth aggregate
any(L), all(L)
# ## while loops
# The general iteration construct in Python,
# it keeps executing a block or blocks until test at top == True.
# When it becomes false the control moves to the block that follows.
# If starts at false, the block never runs and the while statement is skipped
#slice off the string until the last letter
x = 'semprini'
while x:
print(x, end=' ')
x = x[1:]
a=5; b=115
while a < b:
print(a, end=' ')
a *= 2
x = 256
total = 0
while x > 0:
if total > 500:
break
total += x
x = x // 2
# #### pass
# used during function development, place holder for return statements.
if x < 0:
print('negative!')
elif x == 0:
# TODO: put something smart here
pass
else:
print('positive!')
# #### Range settings for generators of lists, etc
range(10)
list(range(10))
print(list(range(0, 20, 2)))
print(list(range(5, 0, -1)))
seq = [1, 2, 3, 4]
for i in range(len(seq)):
val = seq[i]
print(seq)
seq = (1, 2, 3, 4)
total = 0
for i in range(len(seq)):
    val = seq[i]
    if i % 3 == 0 or i % 5 == 0:
        total += val  # tuples are immutable, so accumulate into a separate variable
sum = ()
for i in range(355, 666):
# % is the modulo operator
if i % 9 == 0 or i % 5 == 0:
print(i,i/2)
# #### Ternary expressions
# if/else multiline statements can be constructed on one line. eg
# if x:
# a = y
# else:
# a = z
#
# this can be rewriten as: a = y if x else z,
if x:
a = y
else:
a = z
a = 'trew' if 'spam' else 'flause'
print(a)
a = 'trew' if '' else 'flause'
print(a)
# %%writefile DnD_dice.py
#code source <NAME> 03_02_double_dice
import random
for x in range(1, 2):
die_4 = random.randint(1,4)
die_6 = random.randint(1, 6)
die_8 = random.randint(1,8)
die_10 = random.randint(1,10)
die_12 = random.randint(1,12)
die_20 = random.randint(1,20)
total = die_4 + die_6 + die_8 + die_10 + die_12 + die_20
print("Total=", total)
if total == 6:
print('Lucky Sixes!')
if total == 11:
print('Eleven Thrown!')
if die_12 == die_20:
print('Double Thrown!')
print("D20 =", die_20)
if die_20 == 20:
print('Max D20!!')
print("D12 =", die_12)
if die_12 == 12:
print('Max D12!!')
print("D08 =", die_8)
if die_8 == 8:
print('Max D8!!')
print("D06 =", die_6)
if die_6 == 6:
print('Max D6!!')
print("D04 =", die_4)
if die_4 == 4:
print('Max D4!!')
# !python DnD_dice.py
x = -5
'Non-negative' if x >= 0 else 'Negative'
# +
# %%writefile bridge_keeper.py
#source code from <NAME> 04_09_hangman_full_solution
import random
words = ['albatross', 'spam', 'camelot', 'spamalot', 'crunchyfrog', 'deadparrot', 'semprini',]
lives_remaining = 42
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win 3 thingies! Well Done!')
break
if lives_remaining == 0:
print('A witch! Buuurn her!!')
print('The word was: ' + word)
break
def pick_a_word():
word_position = random.randint(0, len(words) - 1)
return words[word_position]
def get_guess(word):
print_word_with_blanks(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def print_word_with_blanks(word):
#create an empty new global variable,
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
# letter found
display_word = display_word + letter
else:
# letter not found
display_word = display_word + '-'
print(display_word)
def process_guess(guess, word):
if len(guess) > 1 and len(guess) == len(word):
return whole_word_guess(guess, word)
else:
return single_letter_guess(guess, word)
def whole_word_guess(guess, word):
global lives_remaining
if guess.lower() == word.lower():
return True
else:
lives_remaining = lives_remaining - 1
return False
def single_letter_guess(guess, word):
global guessed_letters
global lives_remaining
if word.find(guess) == -1:
# letter guess was incorrect
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess.lower()
if all_letters_guessed(word):
return True
return False
def all_letters_guessed(word):
for letter in word:
if guessed_letters.find(letter.lower()) == -1:
return False
return True
play()
# -
# #try and except example from <NAME>
#
# Read each line in a file, find a match for 'semprini'. Exeption is raised if file is not found.
#
#
# 06_03_file_readline.py
#
#
# +
# %%writefile semprini_detector.py
words_file = 'data/monty_python_keywords.txt'
try:
f = open(words_file)
line = f.readline()
while line != '':
if line == 'semprini\n':
print('A semprini has been detected in the file')
break
line = f.readline()
f.close()
except IOError:
print("Cannot find file: " + words_file)
# -
# !python semprini_detector.py
#
| 00_mt_core_types_loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
import spikeextractors as se
import spiketoolkit as st
import spikewidgets as sw
import tqdm.notebook as tqdm
from scipy.signal import periodogram, spectrogram
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import holoviews as hv
import holoviews.operation.datashader
import holoviews.operation.timeseries
hv.extension("bokeh")
import panel as pn
import panel.widgets as pnw
pn.extension()
from LoisLFPutils.utils import *
# +
# Path to the data folder in the repo
data_path = r""
# !!! start assign jupyter notebook parameter(s) !!!
data_path = '2021-02-21_12-04-54_Or179_Or177_afternoon'
# !!! end assign jupyter notebook parameter(s) !!!
# +
data_path = os.path.join('../../../../data/',data_path)
# Path to the raw data in the hard drive
with open(os.path.normpath(os.path.join(data_path, 'LFP_location.txt'))) as f:
OE_data_path = f.read()
# -
# ### Get each bird's recording, and their microphone channels
# This needs to be less repetitive
if 'Or177' in data_path:
# Whole recording from the hard drive
recording = se.BinDatRecordingExtractor(OE_data_path,30000,40, dtype='int16')
# Note I am adding relevant ADC channels
# First bird
Or179_recording = se.SubRecordingExtractor(
recording,
channel_ids=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15, 32])
# Second bird
Or177_recording = se.SubRecordingExtractor(
recording,
channel_ids=[16, 17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 33])
    # Bandpass filter microphone recordings
mic_recording = st.preprocessing.bandpass_filter(
se.SubRecordingExtractor(recording,channel_ids=[32,33]),
freq_min=500,
freq_max=14000
)
else:
# Whole recording from the hard drive
recording = se.BinDatRecordingExtractor(OE_data_path, 30000, 24, dtype='int16')
# Note I am adding relevant ADC channels
# First bird
Or179_recording = se.SubRecordingExtractor(
recording,
channel_ids=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,15,16])
    # Bandpass filter microphone recordings
mic_recording = st.preprocessing.bandpass_filter(
se.SubRecordingExtractor(recording,channel_ids=[16]),
freq_min=500,
        freq_max=14000  # assumed typo: matches the 14 kHz upper bound used for the two-bird case above
)
# +
# Get wav files
wav_names = [file_name for file_name in os.listdir(data_path) if file_name.endswith('.wav')]
wav_paths = [os.path.join(data_path,wav_name) for wav_name in wav_names]
# Get tranges for wav files in the actual recording
# OE_data_path actually contains the path all the way to the .bin. We just need the parent directory
# with the timestamp.
# Split up the path
OE_data_path_split= OE_data_path.split(os.sep)
# Take only the first three. os.path is weird so we manually add the separator after the
# drive name.
OE_parent_path = os.path.join(OE_data_path_split[0] + os.sep, *OE_data_path_split[1:3])
# Get all time ranges given the custom offset.
tranges=np.array([
get_trange(OE_parent_path, path, offset=datetime.timedelta(seconds=0), duration=3)
for path in wav_paths])
# -
wav_df = pd.DataFrame({'wav_paths':wav_paths, 'wav_names':wav_names, 'trange0':tranges[:, 0], 'trange1':tranges[:, 1]})
wav_df.head()
# Connect the wav files to the recording. Manually input to gut check yourself. If it is before 2021 02 21 at 11:00 am PST, you need to add a time delay.
wav_f,_,_,_=wav_df.loc[0,:]
wav_f, data_path
datetime.datetime(2021,2,23,8,11,1) - datetime.datetime(2021, 2, 22,22,0,20)
paths, name, tr0, tr1 = wav_df.loc[0,:]
sw.plot_spectrogram(mic_recording, trange= [tr0,tr1+10], freqrange=[300,4000], nfft=2**10, channel=32)
np.linspace(0,130,14)
# +
# Set up widgets
wav_selector = pnw.Select(options=[(i, name) for i, name in enumerate(wav_df.wav_names.values)], name="Select song file")
window_radius_selector = pnw.Select(options=[0,1,2,3,4,5,6,7,8, 10,20,30,40,60], value=8, name="Select window radius")
spect_chan_selector = pnw.Select(options=list(range(16)), name="Spectrogram channel")
spect_freq_lo = pnw.Select(options=np.linspace(0,130,14).tolist(), value=20, name="Low frequency for spectrogram (Hz)")
spect_freq_hi = pnw.Select(options=np.linspace(130,0,14).tolist(), value=40, name="Hi frequency for spectrogram (Hz)")
log_nfft_selector = pnw.Select(options=np.linspace(10,16,7).tolist(), value=14, name="magnitude of nfft (starts at 256)")
@pn.depends(
wav_selector=wav_selector.param.value,
window_radius=window_radius_selector.param.value,
spect_chan=spect_chan_selector.param.value,
spect_freq_lo=spect_freq_lo.param.value,
spect_freq_hi=spect_freq_hi.param.value,
log_nfft=log_nfft_selector.param.value
)
def create_figure(wav_selector,
window_radius, spect_chan,
spect_freq_lo, spect_freq_hi, log_nfft):
# Each column in each row to a tuple that we unpack
wav_file_path, wav_file_name, tr0, tr1 = wav_df.loc[wav_selector[0],:]
# Set up figure
fig,axes = plt.subplots(4,1, figsize=(16,12))
# Get wav file numpy recording object
wav_recording = get_wav_recording(wav_file_path)
# Apply offset and apply window radius
tr0 = tr0 - window_radius
# Add duration of wav file
tr1 = tr1 + window_radius +wav_recording.get_num_frames()/wav_recording.get_sampling_frequency()
'''Plot sound spectrogram (Hi fi mic)'''
sw.plot_spectrogram(wav_recording, channel=0, freqrange=[300,14000],ax=axes[0],cmap='magma')
axes[0].set_title('Hi fi mic spectrogram')
'''Plot sound spectrogram (Lo fi mic)'''
if 'Or179' in wav_file_name:
LFP_recording = Or179_recording
elif 'Or177' in wav_file_name:
LFP_recording = Or177_recording
mic_channel = LFP_recording.get_channel_ids()[-1]
sw.plot_spectrogram(
mic_recording,
mic_channel,
trange=[tr0, tr1],
freqrange=[600,14000],
ax=axes[1],cmap='magma'
)
axes[1].set_title('Lo fi mic spectrogram')
'''Plot LFP timeseries (smoothed)'''
chan_ids = np.array([LFP_recording.get_channel_ids()]).flatten()
sw.plot_timeseries(
st.preprocessing.bandpass_filter(
se.SubRecordingExtractor(LFP_recording),
freq_min=25,
freq_max=45
),
channel_ids=[chan_ids[spect_chan]],
trange=[tr0, tr1],
ax=axes[2]
)
    axes[2].set_title('Band-pass filtered LFP (25-45 Hz)')
# Clean lines
for line in plt.gca().lines:
line.set_linewidth(0.1)
'''Plot LFP spectrogram'''
sw.plot_spectrogram(
LFP_recording,
channel=chan_ids[spect_chan],
freqrange=[spect_freq_lo,spect_freq_hi],
trange=[tr0, tr1],
ax=axes[3],
nfft=int(2**log_nfft)
)
axes[3].set_title('LFP')
for i, ax in enumerate(axes):
ax.set_yticks([ax.get_ylim()[1]])
ax.set_yticklabels([ax.get_ylim()[1]])
ax.set_xlabel('')
ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# Show 30 Hz
axes[3].set_yticks([30, axes[3].get_ylim()[1]])
axes[3].set_yticklabels([30, axes[3].get_ylim()[1]])
return fig
dash = pn.Column(
pn.Row(wav_selector, window_radius_selector,spect_chan_selector),
pn.Row(spect_freq_lo,spect_freq_hi,log_nfft_selector),
create_figure
);
# -
# ## Deep dive into a single channel
dash
# ## Looking at all channels at once
# +
# Make chanmap
chanmap=np.array([[3, 7, 11, 15],[2, 4, 10, 14],[4, 8, 12, 16],[1, 5, 9, 13]])
# Set up widgets
wav_selector = pnw.Select(options=[(i, name) for i, name in enumerate(wav_df.wav_names.values)], name="Select song file")
window_radius_selector = pnw.Select(options=[10,20,30,40,60], name="Select window radius")
spect_freq_lo = pnw.Select(options=np.linspace(0,130,14).tolist(), name="Low frequency for spectrogram (Hz)")
spect_freq_hi = pnw.Select(options=np.linspace(130,0,14).tolist(), name="Hi frequency for spectrogram (Hz)")
log_nfft_selector = pnw.Select(options=np.linspace(10,16,7).tolist(),value=14, name="magnitude of nfft (starts at 256)")
def housekeeping(wav_selector, window_radius):
# Each column in each row to a tuple that we unpack
wav_file_path, wav_file_name, tr0, tr1 = wav_df.loc[wav_selector[0],:]
# Get wav file numpy recording object
wav_recording = get_wav_recording(wav_file_path)
# Apply offset and apply window radius
offset = 0
tr0 = tr0+ offset-window_radius
# Add duration of wav file
tr1 = tr1+ offset+window_radius+wav_recording.get_num_frames()/wav_recording.get_sampling_frequency()
return wav_recording, wav_file_name, tr0, tr1
@pn.depends(
wav_selector=wav_selector.param.value,
window_radius=window_radius_selector.param.value)
def create_sound_figure(wav_selector, window_radius):
# Housekeeping
wav_recording, wav_file_name, tr0, tr1 = housekeeping(wav_selector, window_radius)
# Set up figure for sound
fig,axes = plt.subplots(1,2, figsize=(16,2))
'''Plot sound spectrogram (Hi fi mic)'''
sw.plot_spectrogram(wav_recording, channel=0, freqrange=[300,14000], ax=axes[0],cmap='magma')
axes[0].set_title('Hi fi mic spectrogram')
'''Plot sound spectrogram (Lo fi mic)'''
if 'Or179' in wav_file_name:
LFP_recording = Or179_recording
elif 'Or177' in wav_file_name:
LFP_recording = Or177_recording
mic_channel = LFP_recording.get_channel_ids()[-1]
sw.plot_spectrogram(
mic_recording,
mic_channel,
trange=[tr0, tr1],
freqrange=[600,4000],
ax=axes[1],cmap='magma'
)
axes[1].set_title('Lo fi mic spectrogram')
for ax in axes:
ax.axis('off')
return fig
@pn.depends(
wav_selector=wav_selector.param.value,
window_radius=window_radius_selector.param.value,
spect_freq_lo=spect_freq_lo.param.value,
spect_freq_hi=spect_freq_hi.param.value,
log_nfft=log_nfft_selector.param.value
)
def create_LFP_figure(wav_selector, window_radius,
spect_freq_lo, spect_freq_hi, log_nfft):
# Housekeeping
wav_recording, wav_file_name, tr0, tr1 = housekeeping(wav_selector, window_radius)
fig,axes=plt.subplots(4,4,figsize=(16,8))
'''Plot LFP'''
for i in range(axes.shape[0]):
for j in range(axes.shape[1]):
ax = axes[i][j]
sw.plot_spectrogram(recording, chanmap[i][j], trange=[tr0, tr1],
freqrange=[spect_freq_lo,spect_freq_hi],
nfft=int(2**log_nfft), ax=ax,cmap='magma')
ax.axis('off')
# Set channel as title
ax.set_title(chanmap[i][j])
# Clean up
for i in range(axes.shape[0]):
for j in range(axes.shape[1]):
ax=axes[i][j]
ax.set_yticks([ax.get_ylim()[1]])
ax.set_yticklabels([ax.get_ylim()[1]])
ax.set_xlabel('')
# Show 30 Hz
ax.set_yticks([30, ax.get_ylim()[1]])
ax.set_yticklabels([30, ax.get_ylim()[1]])
return fig
dash = pn.Column(
pn.Row(wav_selector,window_radius_selector),
pn.Row(spect_freq_lo,spect_freq_hi,log_nfft_selector),
create_sound_figure, create_LFP_figure
);
# -
dash
# # Sleep data analysis
csvs = [os.path.normpath(os.path.join(data_path,file)) for file in os.listdir(data_path) if file.endswith('.csv')]
csvs
csv = csvs[0]
df = pd.read_csv(csv)
del df['Unnamed: 0']
df.head()
# +
csv_name = csv.split(os.sep)[-1]
rec=None
if 'Or179' in csv_name:
rec = st.preprocessing.resample(Or179_recording, 500)
elif 'Or177' in csv_name:
rec = st.preprocessing.resample(Or177_recording, 500)
# Get second to last element in split
channel = int(csv_name.split('_')[-2])
# +
window_slider = pn.widgets.DiscreteSlider(
name='window size',
options=[*range(1,1000)],
value=1
)
window_slider_raw = pn.widgets.DiscreteSlider(
name='window size (raw timeseries)',
options=[*range(1,1000)],
value=1
)
freq_slider_1 = pn.widgets.DiscreteSlider(
name='f (Hz)',
options=[*range(1,200)],
value=30
)
freq_slider_2 = pn.widgets.DiscreteSlider(
name='f (Hz)',
options=[*range(1,200)],
value=10
)
freq_slider_3 = pn.widgets.DiscreteSlider(
name='f (Hz)',
options=[*range(1,200)],
value=4
)
range_slider = pn.widgets.RangeSlider(
start=0,
end=df.t.max(),
step=10,
value=(0, 500),
name="Time range",
value_throttled=(0,500)
)
@pn.depends(window=window_slider.param.value,
freq_1=freq_slider_1.param.value,
freq_2=freq_slider_2.param.value,
freq_3=freq_slider_3.param.value,
rang=range_slider.param.value_throttled)
def plot_ts(window, freq_1, freq_2, freq_3, rang):
subdf = df.loc[
((df['f']==freq_1)|(df['f']==freq_2)|(df['f']==freq_3))
& ((df['t'] > rang[0]) & (df['t'] < rang[1])),:]
return hv.operation.timeseries.rolling(
hv.Curve(
data = subdf,
kdims=["t", "f"],
vdims="logpower"
).groupby("f").overlay().opts(width=1200, height=300),
rolling_window=window
)
@pn.depends(window=window_slider_raw.param.value, rang=range_slider.param.value_throttled)
def plot_raw_ts(window, rang):
sr = rec.get_sampling_frequency()
return hv.operation.datashader.datashade(
hv.operation.timeseries.rolling(
hv.Curve(
rec.get_traces(channel_ids=[channel], start_frame=sr*rang[0], end_frame=sr*rang[1]).flatten()
),
rolling_window=window
),
aggregator="any"
).opts(width=1200, height=300)
pn.Column(
window_slider,window_slider_raw,freq_slider_1, freq_slider_2, freq_slider_3,range_slider,
plot_ts,
plot_raw_ts
)
# -
# # TODOs:
# - Does phase vary systematically with frequency???
# - Does the log power increase with time over the night??
# - Observation: these birds start singing around 6, before the lights turn on.
# - Possibly add spikes for when song occurs
# - Possibly add timerange slider
| code/processing/2021-02-21_12-04-54/_run_jnb/2021-02-21_12-04-54_Or179_Or177_afternoon-output (2).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# **Bagging with CPD**
suppressMessages(library(tree))
load("../transformed data/golub3571.rda")
load("../transformed data/paper9.rda")
# Settings as specified in the paper
p = 40 # number of genes for FLDA
B = 50 # Aggregation predictors
N = 200 # repeat classification N times
d = c(0.05, 0.1,0.25, 0.5, 0.75, 1) # CPD parameter
set.seed(2017)
cbine_data = data.frame(response = factor(total3571_response), scale_golub_merge)
# use a single CPD parameter value for this run (overrides the vector of d values above)
d = 0.75
# implement CPD (convex pseudo-data): each perturbed observation is a convex
# combination a*x1 + (1-a)*x2 with mixing weight a ~ Uniform(0, d)
CPD = function(d, x1, x2){
  a = runif(nrow(x1), 0, d)
  a*x1+(1-a)*x2
}
# helper function for each bagging with CPD
my_cpdhelper = function(train, test){
id1 = sample(nrow(train), replace = T)
id2 = sample(nrow(train), replace = T)
temp = CPD(d, train[id1, -1], train[id2,-1])
temp_md = tree(response~., data = data.frame(temp, response = train$response[id1]))
predict(temp_md, test, type = "class")
}
#initialize the error vector
cpd_error = numeric(N)
# repeat N times
for(i in 1:N){
cpd_index = mysplit(nrow(cbine_data))
cpd_train = cbine_data[-cpd_index,]
cpd_test = cbine_data[cpd_index,]
# gene selection
temp_bw = order(BW(cpd_train[, -1], cpd_train$response), decreasing = T)[1:p]
cpd_train_t = data.frame(response = cpd_train$response, cpd_train[,temp_bw+1])
cpd_test_t= data.frame(response = cpd_test$response, cpd_test[,temp_bw+1])
t1 = replicate(B, my_cpdhelper(cpd_train_t, cpd_test_t))
pred = apply(t1, 1, function(x) ifelse(sum(x == "AML")>sum(x =="ALL"), "AML", "ALL"))
cpd_error[i] = sum(pred != cpd_test_t$response)
}
resultCPD = c(Median = median(cpd_error), Upper_quartile = quantile(cpd_error, 0.75))
resultCPD
| ReproducingMLpipelines/Paper9/ModelBaggingCPD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Layer
#
# In this notebook, we visualize four filtered outputs (a.k.a. activation maps) of a convolutional layer.
#
# In this example, *we* are defining four filters that are applied to an input image by initializing the **weights** of a convolutional layer, but a trained CNN will learn the values of these weights.
#
# <img src='notebook_ims/conv_layer.gif' height=60% width=60% />
# ### Import the image
# +
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# TODO: Feel free to try out your own images here by changing img_path
# to a file path to another image on your computer!
img_path = 'data/udacity_sdc.png'
# load color image
bgr_img = cv2.imread(img_path)
# convert to grayscale
gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
# -
print(gray_img)
print(gray_img.astype("float32")/255)
# +
# normalize, rescale entries to lie in [0,1]
gray_img = gray_img.astype("float32")/255
print(gray_img.shape)
# plot image
plt.imshow(gray_img, cmap='gray')
plt.show()
# -
# ### Define and visualize the filters
# +
import numpy as np
## TODO: Feel free to modify the numbers here, to try out another filter!
filter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
print('Filter shape: ', filter_vals.shape)
# +
# Defining four different filters,
# all of which are linear combinations of the `filter_vals` defined above
# define four filters
filter_1 = filter_vals
filter_2 = -filter_1
filter_3 = filter_1.T
filter_4 = -filter_3
filters = np.array([filter_1, filter_2, filter_3, filter_4])
# -
# visualize all four filters
fig = plt.figure(figsize=(10, 5))
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
width, height = filters[i].shape
for x in range(width):
for y in range(height):
ax.annotate(str(filters[i][x][y]), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if filters[i][x][y]<0 else 'black')
# ## Define a convolutional layer
#
# The various layers that make up any neural network are documented, [here](http://pytorch.org/docs/stable/nn.html). For a convolutional neural network, we'll start by defining a:
# * Convolutional layer
#
# Initialize a single convolutional layer so that it contains all your created filters. Note that you are not training this network; you are initializing the weights in a convolutional layer so that you can visualize what happens after a forward pass through this network!
#
#
# #### `__init__` and `forward`
# To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the forward behavior of a network that applies those initialized layers to an input (`x`) in the function `forward`. In PyTorch we convert all inputs into the Tensor datatype, which is similar to a NumPy array.
#
# Below, I define the structure of a class called `Net` that has a convolutional layer that can contain four 4x4 grayscale filters.
# +
import torch
import torch.nn as nn
import torch.nn.functional as F
# define a neural network with a single convolutional layer with four filters
class Net(nn.Module):
def __init__(self, weight):
super(Net, self).__init__()
# initializes the weights of the convolutional layer to be the weights of the 4 defined filters
k_height, k_width = weight.shape[2:]
# assumes there are 4 grayscale filters
self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
# calculates the output of a convolutional layer
# pre- and post-activation
conv_x = self.conv(x)
activated_x = F.relu(conv_x)
# returns both layers
return conv_x, activated_x
# instantiate the model and set the weights
weight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)
model = Net(weight)
# print out the layer in the network
print(model)
# -
print(filters)
print(torch.from_numpy(filters).unsqueeze(1))
print(torch.from_numpy(filters).unsqueeze(1).shape)
print(torch.from_numpy(filters).unsqueeze(1).shape[2:])
# **torch.unsqueeze(input, dim, out=None) → Tensor**
#
# Returns a new tensor with a dimension of size one inserted at the specified position.
#
# * input (Tensor) – the input tensor
# * dim (int) – the index at which to insert the singleton dimension
# * out (Tensor, optional) – the output tensor
x = torch.tensor([1, 2, 3, 4])
print(torch.unsqueeze(x, 0))
print(torch.unsqueeze(x, 1))
# ### Visualize the output of each filter
#
# First, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.
# helper function for visualizing the output of a given layer
# default number of filters is 4
def viz_layer(layer, n_filters= 4):
fig = plt.figure(figsize=(20, 20))
for i in range(n_filters):
ax = fig.add_subplot(1, n_filters, i+1, xticks=[], yticks=[])
# grab layer outputs
ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')
ax.set_title('Output %s' % str(i+1))
# Let's look at the output of a convolutional layer, before and after a ReLu activation function is applied.
# +
# plot original image
plt.imshow(gray_img, cmap='gray')
# visualize all filters
fig = plt.figure(figsize=(12, 6))
fig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i], cmap='gray')
ax.set_title('Filter %s' % str(i+1))
# convert the image into an input Tensor
gray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)
print(gray_img_tensor.shape) # [1, 1, 213, 320]
# get the convolutional layer (pre and post activation)
conv_layer, activated_layer = model(gray_img_tensor)
# visualize the output of a conv layer
viz_layer(conv_layer)
# -
# #### ReLu activation
#
# In this model, we've used an activation function that scales the output of the convolutional layer. We've chosen a ReLU function to do this, which simply turns all negative pixel values into 0 (black). See the equation pictured below for input pixel values, `x`.
#
# <img src='notebook_ims/relu_ex.png' height=50% width=50% />
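# As a quick numeric sanity check of that behavior, `F.relu` clamps every negative value to zero and leaves non-negative values unchanged (a small illustrative snippet; the tensor values here are arbitrary):
example = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(F.relu(example))  # -> tensor([0.0000, 0.0000, 0.0000, 1.5000])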
# after a ReLu is applied
# visualize the output of an activated conv layer
viz_layer(activated_layer)
| convolutional-neural-networks/conv-visualization/conv_visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tributary as t
import pyEX as p
import pandas as pd
# +
def sma_close(data):
dat = [x['close'] for x in data]
return sum(dat)/len(dat)
def crosses(state, data):
sma_short = data[0]
sma_long = data[1]
if state.golden:
        # currently in a golden cross (short SMA above long SMA); flip when short falls back below long
if sma_short < sma_long:
state.golden = False
else:
        # currently in a death cross (short SMA below long SMA); flip when short moves back above long
if sma_short > sma_long:
state.golden = True
# clean output data
data = data[2]
data['sma_short'] = sma_short
data['sma_long'] = sma_long
return data
# -
def wrap(ticker):
# data source
data = t.Delay(t.UnrollDataFrame(p.Client(version="sandbox").chartDF(ticker, '3m')), .05)
# windows
window1 = t.Window(data, size=3, full_only=False)
window2 = t.Window(data, size=20, full_only=False)
# avgs
sma1 = t.Apply(sma_close, window1)
sma2 = t.Apply(sma_close, window2)
# merge streams
merged = t.Reduce(sma1, sma2, data)
calc = t.Apply(t.State(crosses, golden=False), merged)
# psp = t.Print(calc)
psp = t.Perspective(calc, columns=['close', 'sma_short', 'sma_long'], plugin='y_line', row_pivots=['index'])
return psp
run = []
# # AAPL
run.append(wrap('aapl'))
# # IBM
run.append(wrap('ibm'))
# # TSLA
run.append(wrap('tsla'))
# +
def correlation(state, data):
if state.dat is None:
state.dat = []
new = {s: data[i]['changePercent'] for i, s in enumerate(state.symbols)}
state.dat.append(new)
df = pd.DataFrame(state.dat)
return df.corr()
reduced = t.Reduce(*run)
final = t.Perspective(t.Apply(t.State(correlation, dat=None, symbols=['aapl', 'ibm', 'tsla']), reduced), plugin='heatmap', columns=['aapl', 'ibm', 'tsla'], row_pivots=['index'], aggregates={'aapl':'last', 'ibm': 'last', 'tsla': 'last'})
# -
t.run(final)
t.GraphViz(final)
| examples/broken/finjs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="mF7QOb-0g9Xz"
# # Install SlayerPytorch on Colab
# After the installations the runtime needs to be restarted.
# ```
# exit()
# ```
# Will restart the runtime without deleting files. The runtime restarts automatically, so if you press "Run all" execution is not interrupted and runs through to the end.
# + id="aSnbF5w-jTGQ"
# !git clone https://github.com/bamsumit/slayerPytorch
# !pip install dv
# !pip install ninja
exit()
# + id="HhdpSnPWkG8r"
# %cd slayerPytorch/
# !python setup.py install
exit()
# + [markdown] id="7F0vk1qah9Dh"
# Test to verify if everything went well with the installation
# + id="35lkjdNJl20b"
# %cd slayerPytorch/test/
# !python -m unittest
# + [markdown] id="3VznTeL0iuNs"
# # Get the dataset
# Download the dataset from [here](https://ibm.ent.box.com/s/3hiq58ww1pbbjrinh367ykfdf60xsfm8).
# Be careful to select the right path when you unzip the files.
# + id="jiAYSYo6oz5l"
# !unzip './DVSGesturedataset.zip' -d /content/slayerPytorch
# + [markdown] id="KkXiMZTmj6W6"
# # Pre-process the dataset
# Every sample contains all the actions performed in sequence by a subject. This program splits each sample into smaller ones, each containing a single action.
#
# + id="1HBUYAmLqagO"
import os
import numpy as np
import matplotlib.pyplot as plt
import slayerSNN as snn
from dv import LegacyAedatFile
path = '/content/slayerPytorch/DVSGesturedataset/DvsGesture/'
actionName = [
'hand_clapping',
'right_hand_wave',
'left_hand_wave',
'right_arm_clockwise',
'right_arm_counter_clockwise',
'left_arm_clockwise',
'left_arm_counter_clockwise',
'arm_roll',
'air_drums',
'air_guitar',
'other_gestures',
]
def readAedatEvent(filename):
xEvent = []
yEvent = []
pEvent = []
tEvent = []
with LegacyAedatFile(filename) as f:
for event in f:
xEvent.append(event.x)
yEvent.append(event.y)
pEvent.append(event.polarity)
tEvent.append(event.timestamp/1000)
    # return numpy arrays so that the boolean masks over timestamps (used in splitData) work
    return np.array(xEvent), np.array(yEvent), np.array(pEvent), np.array(tEvent)
def splitData(filename, path):
x,y,p,t = readAedatEvent(path+filename+'.aedat')
labels = np.loadtxt(path + filename + '_labels.csv', delimiter=',', skiprows=1)
labels[:,0] -= 1
labels[:,1:]
if not os.path.isdir(path+ 'data/' + filename):
os.makedirs(os.path.join(path,'data/' + filename))
lastAction = 100
for action, tst, ten in labels:
if action == lastAction: continue # This is to ignore second arm_roll samples
print(actionName[int(action)])
ind = (t >= tst/1000) & (t < ten/1000)
ind_in = np.argmax(ind)
ind_end = ind_in+np.argmin(ind[(ind.argmax()):-1])
if ind_end == ind_in:
ind_end = 0
TD = snn.io.event(x[ind_in:ind_end-1], y[ind_in:ind_end-1], p[ind_in:ind_end-1], (t[ind_in:ind_end-1] - tst/1000))
# snn.io.showTD(TD)
lastAction = action
snn.io.encodeNpSpikes(path+'data/'+ filename + '/{:g}.npy'.format(action), TD)
if __name__ == '__main__':
user = np.arange(29) + 1
lighting = [
'fluorescent',
'fluorescent_led',
'lab',
'led',
'natural',
]
count = 0
for id in user:
for light in lighting:
filename = 'user{:02d}_{}'.format(id, light)
if os.path.isfile(path + filename + '.aedat'):
print(count, filename)
splitData(filename, path)
count += 1
| DVS128Gesture/DVS128GesturePreprocessor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jakefed1/jakefed1.github.io/blob/master/survey_lab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="uglH-di7VkiS"
import pandas as pd
import numpy as np
import seaborn as sns
import altair as alt
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="kJzLDI2mVhqJ" outputId="9ef1c4a7-f4ce-453b-c2b0-474ace9aae0a"
survey_path = "surveyedited.csv"
survey_df = pd.read_csv(survey_path)
print(survey_df.head(10))
# + colab={"base_uri": "https://localhost:8080/"} id="YpXrattQV4Dc" outputId="69bce78d-8e89-4822-edef-125fb20c0a1c"
print(survey_df.shape)
print(survey_df.columns)
print(survey_df.describe())
# + [markdown] id="D63J6btcV6py"
# ## Intro: About the Dataset
#
# For my lab, I looked at a dataset titled _Young People Survey_ that was created from a survey conducted by Slovakian college students in 2013. The survey, which was aimed at friends of the students and other young individuals, consisted of 150 questions that ranged in subject from personality to background to habits. The goal of the study was to explore common trends and traits of young people. A total of 1,010 people filled out the questionnaire, making for a dataset of 1,010 rows and 151 columns. Some of the columns contain categorical variables, but the vast majority contain quantitative variables, including both discrete and continuous variables. There are a few missing values in each column, and there could be response bias in a few of the questions, like smoking and drinking habits, as students may not want to admit they take part in such activities knowing that their responses are being recorded and saved. However, these issues seem to be small and do not seem to pose threats to the validity of the dataset as a whole.
# + [markdown] id="w4vUv67JV95y"
# ## Hypotheses
#
# I want to specifically analyze the music tastes of the young individuals in the survey. I want to know if hip hop music is preferred by young people over other genres like rock and punk. To do this, I will build a 2 sample means confidence interval to estimate the difference in the true population means between hip hop and rock, and then I will build another confidence interval of the same type, substituting punk for rock. The test between hip hop and rock will be indicated by the hebrew letter א, while the test between hip hop and punk will be indicated by the hebrew letter ב. Means must be used instead of proportions because the question in the survey asked participants to assign a rating to each genre from 1-5 with 5 being the best and 1 being the worst. The mean rating can serve as a decent estimator of the overall opinion of the genre.
#
# H0א: μ(hip hop) = μ(rock); The true population mean rating for hip hop is equal to the true population mean rating for rock. There is no difference between the two, young people do not prefer one of the genres over the other.
#
# HAא: μ(hip hop) != μ(rock); The true population mean rating for hip hop is not equal to the true population mean rating for rock. There is a difference between the two, young people prefer one of the genres over the other.
#
# H0ב: μ(hip hop) = μ(punk); The true population mean rating for hip hop is equal to the true population mean rating for punk. There is no difference between the two, young people do not prefer one of the genres over the other.
#
# HAב: μ(hip hop) != μ(punk); The true population mean rating for hip hop is not equal to the true population mean rating for punk. There is a difference between the two, young people prefer one of the genres over the other.
# + [markdown] id="5ogf4KM6WI33"
# ## Exploring the Dataset
#
# Out of the three genres, rock has the highest average rating, coming in at 3.76. It is followed by hip hop at 2.91 and then punk at 2.46. The histograms show that the rating most assigned to rock was a perfect 5, making it unsurprising that rock has an extraordinarily high average rating. The distribution for hip hop is nearly uniform, as each rating has nearly the same frequency, with only slightly fewer people giving the genre a perfect 5, dragging down its mean ever so slightly from 3 to 2.91. Finally, the rating most assigned to punk was a poor 1, causing the mean to be significantly lower than the other two genres.
# + colab={"base_uri": "https://localhost:8080/"} id="x6ZkqieRWJ-y" outputId="d2841250-7674-447d-b7de-74da5163074d"
survey_df2 = survey_df[['Hiphop, Rap', 'Rock', 'Punk']]
print(survey_df2.describe())
# + colab={"base_uri": "https://localhost:8080/", "height": 851} id="qeHLVWmjWNiL" outputId="e043b7e6-3e8f-45f0-efaa-938db863646c"
raphist = sns.histplot(data = survey_df2, x = 'Hiphop, Rap')
plt.xlabel('Hip hop rating')
plt.ylabel('Frequency')
plt.title('Distribution of hip hop ratings')
plt.show()
rockhist = sns.histplot(data = survey_df2, x = 'Rock')
plt.xlabel('Rock rating')
plt.ylabel('Frequency')
plt.title('Distribution of rock ratings')
plt.show()
punkhist = sns.histplot(data = survey_df2, x = 'Punk')
plt.xlabel('Punk rating')
plt.ylabel('Frequency')
plt.title('Distribution of punk ratings')
plt.show()
# + [markdown] id="4r2_QpfLWYwC"
# ## Confidence Intervals Using Bootstrap Method
#
# To build confidence intervals, I created new columns for the differences between hip hop rating and rock rating as well as hip hop and punk rating. I then used the bootstrap method, taking a sample of size 50 from my dataset, and resampling from that sample with replacement 1,000 times, taking the mean of each of the 1,000 samples. The 5th and 95th percentiles of the differences serve as the boundaries for the confidence intervals.
#
# The 90% confidence interval for the true population mean difference between hip hop rating and rock rating is (-1.1, -0.34). We are 90% confident the true population mean difference between hip hop rating and rock rating is between -1.1 and -0.34. Since 0 is not in our interval, it is not a plausible value, meaning it is not plausible that there is no difference between the hip hop rating and rock rating, and we can reject our null hypothesis H0א which states there is no difference between the hip hop rating and rock rating.
#
# The 90% confidence interval for the true population mean difference between hip hop rating and punk rating is (0.26, 1.0). We are 90% confident the true population mean difference between hip hop rating and punk rating is between 0.26 and 1.0. Since 0 is not in our interval, it is not a plausible value, meaning it is not plausible that there is no difference between the hip hop rating and punk rating, and we can reject our null hypothesis H0ב which states there is no difference between the hip hop rating and punk rating.
# + colab={"base_uri": "https://localhost:8080/", "height": 501} id="ZTio0FinWaPX" outputId="3b1166b3-7b81-4c38-e6a8-6b671b23f14c"
survey_df2["hiphopminusrock"] = survey_df2["Hiphop, Rap"] - survey_df2["Rock"]
survey_df2["hiphopminuspunk"] = survey_df2["Hiphop, Rap"] - survey_df2["Punk"]
survey_df2.head(10)
survey_df2.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="Q5wCOeDNWjtF" outputId="4d8895b6-c471-4f33-fa4d-d52cd5cbb469"
#Take a sample and bootstrap it
survey_df3 = survey_df2.sample(50)
def bootstrap_sample(df):
bootstrapdf = df.sample(len(df), replace = True)
return bootstrapdf.mean()
def bootstrap_samples(N, df):
    # collect N bootstrap means (avoid shadowing the built-in `list`)
    samples = []
    for _ in range(N):
        samples.append(bootstrap_sample(df))
    return samples
hhmrbs = bootstrap_samples(1000, survey_df3['hiphopminusrock'])
survey_df4 = pd.DataFrame(hhmrbs, columns=['Y'])
a = survey_df4['Y'].quantile(0.05)
b = survey_df4['Y'].quantile(0.95)
print(a,b)
hhmpbs = bootstrap_samples(1000, survey_df3['hiphopminuspunk'])
survey_df5 = pd.DataFrame(hhmpbs, columns=['Y'])
c = survey_df5['Y'].quantile(0.05)
d = survey_df5['Y'].quantile(0.95)
print(c,d)
# + [markdown] id="u1zr8N4nWo0l"
# ## Conclusion
#
# In conclusion, it does appear there is a statistically significant difference between the mean rating for hip hop music and rock music, as well as hip hop music and punk music. The data seems to indicate that while hip hop is preferred by young people over punk, rock is preferred over hip hop.
| survey_lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_pytorch_p36
# language: python
# name: conda_pytorch_p36
# ---
# # Training MNIST on Amazon SageMaker with a Bring-Your-Own PyTorch Container (for classic notebook instances)
# ### 1. Background
#
# MNIST is a dataset widely used for handwritten digit classification. It consists of 70,000 labeled 28x28-pixel grayscale images of handwritten digits, split into 60,000 training images and 10,000 test images. There are 10 classes in total, the handwritten digits 0 through 9.
#
# This tutorial shows how to train an MNIST model on SageMaker using a custom, bring-your-own container, so that you can learn how to bring your own container image in addition to SageMaker's managed container images.
# ### 2. Setup
#
# We create a SageMaker session and start configuring it.
# We then build the bring-your-own container from the Dockerfile.
# The Dockerfile is placed under the container directory and is written so that the SageMaker Training Toolkit is pip-installed into the PyTorch container via requirements.txt (the list of packages to pip install).
#
# The SageMaker Training Toolkit is a package that can easily be added to any Docker container to train models compatible with SageMaker. See the following for details:
# https://github.com/aws/sagemaker-training-toolkit
#
# Finally, we build the image with the docker build command.
import boto3
# +
# %%time
IMAGE_NAME = 'toolkit-container-pytorch'
TAG=':1.11'
REGION = boto3.session.Session().region_name
# %cd ./container
# !docker build -t {IMAGE_NAME}{TAG} .
# %cd ../
# -
# ### 3. Creating an Amazon ECR Repository and Registering the Docker Image
#
# We create a repository named sagemaker-toolkit-container-pytorch in Amazon ECR and push the Docker image built on the notebook instance with the docker push command.
#
# Note that if the SageMakerFullAccess policy is attached to the IAM role configured on the notebook instance (it is attached by default when the notebook instance is created), the repository name must contain the keyword "sagemaker".
# +
# %%time
MY_ACCOUNT_ID = boto3.client('sts').get_caller_identity().get('Account')
MY_ECR_ENDPOINT = f'{MY_ACCOUNT_ID}.dkr.ecr.{REGION}.amazonaws.com/'
MY_REPOSITORY_URI = f'{MY_ECR_ENDPOINT}sagemaker-{IMAGE_NAME}'
MY_IMAGE_URI = f'{MY_REPOSITORY_URI}{TAG}'
# !$(aws ecr get-login --region {REGION} --registry-ids {MY_ACCOUNT_ID} --no-include-email)
# create the repository
# !aws ecr delete-repository --repository-name sagemaker-{IMAGE_NAME} --force # delete an existing repository with the same name, if any
# !aws ecr create-repository --repository-name sagemaker-{IMAGE_NAME}
# !docker tag {IMAGE_NAME}{TAG} {MY_IMAGE_URI}
# !docker push {MY_IMAGE_URI}
print(f'The container image has been registered to {MY_IMAGE_URI}.')
# -
# ### 4. Setup
#
# The S3 bucket and prefix used for training and model data must be in the same region as the notebook instance, training, and hosting.
#
# We use the IAM role ARN that grants training and hosting access to the data. If the notebook instance, training instances, and/or hosting instances require different roles, replace sagemaker.get_execution_role() with the appropriate IAM role ARN string.
import sagemaker
from sagemaker.pytorch import PyTorch
bucket = sagemaker.session.Session().default_bucket()
prefix = 'sagemaker/DEMO-pytorch-mnist-byoc'
role = sagemaker.get_execution_role()
print(bucket)
# The code in this notebook behaves differently depending on whether it runs on a classic notebook instance or on a SageMaker Studio notebook. Running the cell below determines which of the two environments we are currently in and records the result in on_studio. The rest of the notebook is adjusted based on this result as follows:
#
# The dataset extraction destination is changed. On SageMaker Studio, the home directory is backed by an EFS mount and extracting the dataset there takes somewhat longer, so the dataset is extracted somewhere other than home.
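# The cell below is a minimal sketch of one common way to make this check; it assumes the standard SageMaker metadata file /opt/ml/metadata/resource-metadata.json, which contains a DomainId entry only on Studio.
# +
import json
import os

on_studio = False
notebook_metadata_file = "/opt/ml/metadata/resource-metadata.json"  # standard SageMaker metadata location (assumption)
if os.path.exists(notebook_metadata_file):
    with open(notebook_metadata_file, "r") as f:
        metadata = json.load(f)
    # Studio apps expose a DomainId in the metadata; classic notebook instances do not
    on_studio = metadata.get("DomainId") is not None
print(f"Running on SageMaker Studio: {on_studio}")
# -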
# ### 5. Getting the Data
# !aws s3 cp s3://fast-ai-imageclas/mnist_png.tgz . --no-sign-request
# !tar -xvzf mnist_png.tgz
# +
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch
import os
data_dir = 'data'
training_dir = 'mnist_png/training'
test_dir = 'mnist_png/testing'
os.makedirs(data_dir, exist_ok=True)
training_data = datasets.ImageFolder(root=training_dir,
transform=transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
test_data = datasets.ImageFolder(root=test_dir,
transform=transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))]))
training_data_loader = DataLoader(training_data, batch_size=len(training_data))
training_data_loaded = next(iter(training_data_loader))
torch.save(training_data_loaded, os.path.join(data_dir, 'training.pt'))
test_data_loader = DataLoader(test_data, batch_size=len(test_data))
test_data_loaded = next(iter(test_data_loader))
torch.save(test_data_loaded, os.path.join(data_dir, 'test.pt'))
# -
# ### 6. Uploading the Data to S3
#
# To upload the dataset to S3, we use the sagemaker.Session.upload_data function. The S3 location it returns is used later when running the training job.
inputs = sagemaker.session.Session().upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
print('input spec (in this case, just an S3 path): {}'.format(inputs))
# ### 7. Starting Training
# To configure the training job, we create a PyTorch object, a subclass of the Estimator class. Here we define a PyTorch Estimator that is passed the training script, the IAM role, and the (per-job) hardware configuration. By specifying entry_point, the dependent scripts are copied into the container and can be used at training time.
#
# When starting a training job, the sagemaker-training-toolkit library needs to know where in Amazon S3 the source code is stored and which module to launch. Because we are using the Python SDK framework class here, this is configured automatically.
# Note that you need to set these yourself when using the generic Estimator class.
#
# Finally, we run the training job by calling the fit() method of the Estimator object defined in the Amazon SageMaker Python SDK.
# +
estimator = PyTorch(
entry_point='train.py',
image_uri=MY_IMAGE_URI,
role=sagemaker.get_execution_role(),
hyperparameters={
'lr':0.01,
'batch-size':16
},
instance_count=1,
instance_type='ml.g4dn.xlarge',
)
estimator.fit({'training': inputs})
# When training with the generic Estimator class instead:
#estimator = sagemaker.estimator.Estimator(
# image_uri=MY_IMAGE_URI,
# role=sagemaker.get_execution_role(),
# hyperparameters={
# 'lr':0.01,
# 'batch-size':16,
# 'sagemaker_program' : 'train.py',
# 'sagemaker_submit_directory' : 's3://'+bucket+'/'+estimator._current_job_name+'/source/sourcedir.tar.gz'
# },
# instance_count=1,
# instance_type='ml.g4dn.xlarge',
#)
#estimator.fit({'training': inputs})
| sagemaker/sagemaker-traning/byoc/PyTorch/classic/pytorch-toolkit-container.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# name: python37364bitbaseconda863a9d2b2ce24774beb380f071c4d0fa
# ---
# # Machine Learning Course
#
# Data from 2019-nCoV
# +
import matplotlib.pyplot as plt
import numpy as np
y_data = [24324., 20438., 17205., 14380., 11791., 9692., 7711., 5974., 4515., 2744., 1975., 1287., 830., 571., 440. ]
x_data = [15., 14., 13., 12., 11., 10., 9., 8., 7., 6., 5., 4., 3., 2., 1. ]
# +
# y_data = b + w1 * x_data + w2 * x_data*x_data
b = -2200
w1 = 1000
w2 = 0
lr_b = 0
lr_w1 = 0
lr_w2 = 0
lr = 100 #learning rate
iteration = 10000
diff = 0
for n in range(len(x_data)):
diff = diff + (y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])**2
# store initial values for plotting
b_history = [b]
w1_history = [w1]
w2_history = [w2]
diff_history = [diff]
# Iterations
for i in range(iteration):
b_grad = 0.0
w1_grad = 0.0
w2_grad = 0.0
for n in range(len(x_data)):
b_grad = b_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*1.0
w1_grad = w1_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*x_data[n]
w2_grad = w2_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*x_data[n]*x_data[n]
    # Adagrad: accumulate squared gradients to scale each parameter's learning rate
    lr_b += b_grad**2
    lr_w1 += w1_grad**2
    lr_w2 += w2_grad**2
# update parameters.
b = b - lr/np.sqrt(lr_b) * b_grad
w1 = w1 - lr/np.sqrt(lr_w1) * w1_grad
w2 = w2 - lr/np.sqrt(lr_w2) * w2_grad
# store parameters for plotting
diff = 0
for n in range(len(x_data)):
diff = diff + (y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])**2
b_history.append(b)
w1_history.append(w1)
w2_history.append(w2)
diff_history.append(diff)
# -
'''
# y_data = b + w1 * x_data + w2 * x_data*x_data
b = -2200
w1 = 1000
w2 = 0
lr = 0.000005 #learning rate
iteration = 500000
diff = 0
for n in range(len(x_data)):
diff = diff + (y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])**2
# store initial values for plotting
b_history = [b]
w1_history = [w1]
w2_history = [w2]
diff_history = [diff]
# Iterations
for i in range(iteration):
b_grad = 0.0
w1_grad = 0.0
w2_grad = 0.0
for n in range(len(x_data)):
b_grad = b_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*1.0
w1_grad = w1_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*x_data[n]
w2_grad = w2_grad - 2.0*(y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])*x_data[n]*x_data[n]
# update parameters.
b = b - lr * b_grad
w1 = w1 - lr * w1_grad
w2 = w2 - lr * w2_grad
# store parameters for plotting
diff = 0
for n in range(len(x_data)):
diff = diff + (y_data[n] - b - w1*x_data[n] - w2*x_data[n]*x_data[n])**2
b_history.append(b)
w1_history.append(w1)
w2_history.append(w2)
diff_history.append(diff)
'''
# +
# plot the figure
plt.figure(dpi=100)
plt.plot(x_data, y_data, 'o-', ms=3, lw=1.5, color='black',label="Disease")
x_grad = np.arange(1,17)
y_grad = [i*i*w2 + i*w1 + b for i in x_grad]
plt.plot(x_grad,y_grad, 'o-', ms=3, lw=1.5, color='red',label="Prediction")
plt.rcParams['font.sans-serif']=['SimHei']
plt.title('2019 nCoV Regression Model\n2019新型冠状病毒回归模型')
plt.ylabel(u'Confirmed case/确诊病例')
plt.xlabel(u'Date(from Jan 22)/日期(从1月22日始)')
plt.legend()
plt.show()
print('y = x**2*',w2,'+x*',w1,'+',b)
# -
plt.plot(range(iteration+1),diff_history)
plt.show()
print(16*16*w2+16*w1+b)
print(17*17*w2+17*w1+b)
| MachineLearning_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reproducible experimental protocol
# This notebook builds the database with all the information we need to perform domain-adversarial speech activity detection.
# ## Requirements
# ### Python packages
#
# - pyannote.audio
# - pyannote.core
# - pyannote.database
# - pandas
# ### Datasets
# - `ldc2019e31`: [Second DIHARD Challenge Development Data](https://coml.lscp.ens.fr/dihard/)
# - `ldc2019e32`: [Second DIHARD Challenge Evaluation Data](https://coml.lscp.ens.fr/dihard/)
# - `musan`: [A corpus of MUsic, Speech, And Noise](https://www.openslr.org/17/)
# + pycharm={"is_executing": false}
# where ldc2019e31 dataset has been downloaded
ldc2019e31 = '/vol/corpora1/data/ldc/ldc2019e31/LDC2019E31_Second_DIHARD_Challenge_Development_Data'
# where ldc2019e32 dataset has been downloaded
ldc2019e32 = '/vol/corpora1/data/ldc/ldc2019e32/LDC2019E32_Second_DIHARD_Challenge_Evaluation_Data_V1.1'
# where MUSAN has been downloaded from https://www.openslr.org/17/
musan = '/vol/corpora4/musan'
# where github.com/hbredin/DomainAdversarialVoiceActivityDetection has been cloned
ROOT = '/vol/work1/bredin/jsalt/DomainAdversarialVoiceActivityDetection'
# + pycharm={"is_executing": false}
# create 'database' sub-directory that is meant to store audio and reference files
# !mkdir -p {ROOT}/database/DIHARD
# + pycharm={"is_executing": false}
# define utility functions
from pyannote.core import Timeline
from pyannote.core import Annotation
from typing import TextIO
def write_rttm(file: TextIO, reference: Annotation):
"""Write reference annotation to "rttm" file
Parameters
----------
file : file object
reference : `pyannote.core.Annotation`
Reference annotation
"""
for s, t, l in reference.itertracks(yield_label=True):
line = (
f'SPEAKER {reference.uri} 1 {s.start:.3f} {s.duration:.3f} '
f'<NA> <NA> {l} <NA> <NA>\n'
)
file.write(line)
def write_uem(file: TextIO, uem: Timeline):
"""Write evaluation map to "uem" file
Parameters
----------
file : file object
uem : `pyannote.core.Timeline`
Evaluation timeline
"""
for s in uem:
line = f'{uem.uri} 1 {s.start:.3f} {s.end:.3f}\n'
file.write(line)
# -
# ## Preparing the DIHARD dataset
# For some reason, the development and evaluation subsets have files that share the same names: `DH_0001` to `DH_0192` exist in both subsets.
# To avoid any confusion in `pyannote.database`, we create symbolic links so we can distinguish `dev/DH_0001` from `tst/DH_0001`.
# + pycharm={"is_executing": false}
# !ln --symbolic {ldc2019e31}/data/single_channel/flac {ROOT}/database/DIHARD/dev
# !ln --symbolic {ldc2019e32}/data/single_channel/flac {ROOT}/database/DIHARD/tst
# + pycharm={"is_executing": false}
from pandas import read_csv
# load list of test files (and their domain)
tst = read_csv(f'{ldc2019e32}/docs/sources.tbl',
delim_whitespace=True,
names=['uri', 'language', 'domain', 'source'],
index_col='uri').filter(like='DH', axis=0)
# load list of development files (and their domain)
dev = read_csv(f'{ldc2019e31}/docs/sources.tbl',
delim_whitespace=True,
names=['uri', 'language', 'domain', 'source'],
index_col='uri').filter(like='DH', axis=0)
# obtain list of domains
dihard_domains = sorted(dev.domain.unique())
# -
# The next cell will create four files per (domain, subset) pair:
# - `{domain}.{subset}.txt` contains list of files
# - `{domain}.{subset.rttm` contains manual annotation
# - `{domain}.{subset}.uem` contains unpartitioned evaluation map (uem)
# - `{domain}.domain.{subset}.txt` contains file-to-domain mapping
# + pycharm={"is_executing": false}
from pyannote.database.util import load_rttm
from pyannote.database.util import load_uem
from pyannote.audio.features.utils import get_audio_duration
from pyannote.core import Segment
# split ldc2019e31 into training set (two third) and developement set (one third)
# for each domain in ldc2019e31
for domain, files in dev.groupby('domain'):
# load unpartitioned evaluation map (uem)
uems = load_uem(f'{ldc2019e31}/data/single_channel/uem/{domain}.uem')
# create four files per (domain, subset) pair
# {domain}.{subset}.txt contains list of files
# {domain}.{subset}.rttm contains manual annotation
# {domain}.{subset}.uem contains unpartitioned evaluation map (uem)
# {domain}.domain.{subset}.txt contains file-to-domain mapping
with open(f'{ROOT}/database/DIHARD/{domain}.dev.txt', 'w') as uris_dev, \
open(f'{ROOT}/database/DIHARD/{domain}.trn.txt', 'w') as uris_trn, \
open(f'{ROOT}/database/DIHARD/{domain}.dev.rttm', 'w') as rttm_dev, \
open(f'{ROOT}/database/DIHARD/{domain}.trn.rttm', 'w') as rttm_trn, \
open(f'{ROOT}/database/DIHARD/{domain}.dev.uem', 'w') as uem_dev, \
open(f'{ROOT}/database/DIHARD/{domain}.trn.uem', 'w') as uem_trn, \
open(f'{ROOT}/database/DIHARD/{domain}.domain.dev.txt', 'w') as domain_dev, \
open(f'{ROOT}/database/DIHARD/{domain}.domain.trn.txt', 'w') as domain_trn:
# for each file in current domain
for i, (uri, file) in enumerate(files.iterrows()):
duration = get_audio_duration({'audio': f'{ROOT}/database/DIHARD/dev/{uri}.flac'})
# ugly hack to avoid rounding errors: this has the effect of not considering
# the last millisecond of each file
duration -= 0.001
support = Segment(0, duration)
# i = 0 ==> dev
# i = 1 ==> trn
# i = 2 ==> trn
# i = 3 ==> dev
# i = 4 ==> trn
# i = 5 ==> trn
# i = 6 ==> dev
# ...
f_uris = uris_trn if i % 3 else uris_dev
f_uris.write(f'dev/{uri}\n')
# dump domain to disk
f_domain = domain_trn if i % 3 else domain_dev
f_domain.write(f'dev/{uri} {domain}\n')
# load and crop reference (cf above hack)
reference = load_rttm(f'{ldc2019e31}/data/single_channel/rttm/{uri}.rttm')[uri]
reference.uri = f'dev/{uri}'
reference = reference.crop(support, mode='intersection')
# dump reference to disk
f_rttm = rttm_trn if i % 3 else rttm_dev
write_rttm(f_rttm, reference)
# load and crop unpartitioned evaluation map
uem = uems[uri]
uem.uri = f'dev/{uri}'
uem = uem.crop(support, mode='intersection')
# dump uem to disk
f_uem = uem_trn if i % 3 else uem_dev
write_uem(f_uem, uem)
# same as above but applied to ldc2019e32 that is used entirely for test
for domain, files in tst.groupby('domain'):
uems = load_uem(f'{ldc2019e32}/data/single_channel/uem/{domain}.uem')
with open(f'{ROOT}/database/DIHARD/{domain}.tst.txt', 'w') as f_uris, \
open(f'{ROOT}//database/DIHARD/{domain}.tst.rttm', 'w') as f_rttm, \
open(f'{ROOT}/database/DIHARD/{domain}.tst.uem', 'w') as f_uem, \
open(f'{ROOT}/database/DIHARD/{domain}.domain.tst.txt', 'w') as f_domain:
for i, (uri, file) in enumerate(files.iterrows()):
duration = get_audio_duration({'audio': f'{ROOT}/database/DIHARD/tst/{uri}.flac'})
duration -= 0.001
support = Segment(0, duration)
f_uris.write(f'tst/{uri}\n')
f_domain.write(f'tst/{uri} {domain}\n')
reference = load_rttm(f'{ldc2019e32}/data/single_channel/rttm/{uri}.rttm')[uri]
reference.uri = f'tst/{uri}'
reference = reference.crop(support, mode='intersection')
write_rttm(f_rttm, reference)
uem = uems[uri]
uem.uri = f'tst/{uri}'
uem = uem.crop(support, mode='intersection')
write_uem(f_uem, uem)
# -
# Create `database.yml`:
# + pycharm={"is_executing": false}
import yaml
database_yml = {
'Databases': {
'DIHARD': f'{ROOT}/database/DIHARD/{{uri}}.flac',
'MUSAN': f'{musan}/{{uri}}.wav',
},
'Protocols': {
'DIHARD': {'SpeakerDiarization': {}},
'X': {'SpeakerDiarization': {}}
}
}
for domain in dihard_domains:
database_yml['Protocols']['DIHARD']['SpeakerDiarization'][f'{domain}'] = {}
for subset, short in {'train': 'trn', 'development': 'dev', 'test': 'tst'}.items():
database_yml['Protocols']['DIHARD']['SpeakerDiarization'][f'{domain}'][subset] = {
'uris': f'{ROOT}/database/DIHARD/{domain}.{short}.txt',
'annotation': f'{ROOT}/database/DIHARD/{domain}.{short}.rttm',
'annotated': f'{ROOT}/database/DIHARD/{domain}.{short}.uem',
'domain': f'{ROOT}/database/DIHARD/{domain}.domain.{short}.txt',
}
all_but_domain = sorted(set(dihard_domains) - {domain})
database_yml['Protocols']['X']['SpeakerDiarization'][f'DIHARD_LeaveOneDomainOut_{domain}'] = {}
for subset in ['train', 'development']:
database_yml['Protocols']['X']['SpeakerDiarization'][f'DIHARD_LeaveOneDomainOut_{domain}'][subset] = {
f'DIHARD.SpeakerDiarization.{other_domain}': [subset] for other_domain in all_but_domain
}
database_yml['Protocols']['X']['SpeakerDiarization'][f'DIHARD_LeaveOneDomainOut_{domain}']['test'] = {
f'DIHARD.SpeakerDiarization.{domain}': ['test']
}
database_yml['Protocols']['X']['SpeakerDiarization']['DIHARD_Official'] = {
subset: {
f'DIHARD.SpeakerDiarization.{domain}': [subset] for domain in dihard_domains
} for subset in ['train', 'development', 'test']
}
with open(f'{ROOT}/database.yml', 'w') as f:
f.write(yaml.dump(database_yml,
default_flow_style=False))
# -
# Setting `PYANNOTE_DATABASE_CONFIG` environment variable to `{ROOT}/database.yml` will give you a bunch of `pyannote.database` protocols:
#
# - `X.SpeakerDiarization.DIHARD_Official` is the official protocol for `DIHARD2`
# - `X.SpeakerDiarization.DIHARD_LeaveOneDomainOut_{domain}` uses all domains but {domain} in the training and development sets, and only {domain} in the test set.
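# For example, once `PYANNOTE_DATABASE_CONFIG` is exported (set it before `pyannote.database` is first imported), any of the protocols listed above can be iterated with a few lines like the minimal sketch below.
# +
# (e.g. export PYANNOTE_DATABASE_CONFIG=<ROOT>/database.yml before starting Python)
from pyannote.database import get_protocol
protocol = get_protocol('X.SpeakerDiarization.DIHARD_Official')
# each item provides the file uri, the reference annotation, and the annotated (uem) timeline
for current_file in protocol.development():
    print(current_file['uri'], current_file['annotated'].duration())
    break
# -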
# Once, you're done with the data preparation step, you can go back to [the main README](../README.md) to run the experiments.
| database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/maity2001/E-Commerce-Customer-Projec/blob/main/ecomerce_project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="D30YGuyryXTY"
# ___
# You just got some contract work with an Ecommerce company based in New York City that sells clothing online, but they also have in-store style and clothing advice sessions. Customers come into the store, have sessions/meetings with a personal stylist, then go home and order the clothes they want either on the mobile app or on the website.
#
# The company is trying to decide whether to focus their efforts on their mobile app experience or their website.
# + id="sbwMAVcFyXTd"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + id="JtKlSOgmyXTe"
customers = pd.read_csv("Ecommerce Customers")
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="8v7TQIhwyXTf" outputId="affb7464-8fc2-4d85-a2e4-fcd64400b47a"
customers.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="d9rTQ5j9yXTg" outputId="e99254e9-377d-4ef2-d481-1439a0e9ba42"
customers.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="ZZOT5mzvyXTg" outputId="6171ef3d-a73f-4402-968d-a0596f5e1e8c"
customers.info()
# + [markdown] id="4qkFumncyXTh"
# ## EDA
#
#
# + id="Jt41xaiEyXTh"
sns.set_palette("GnBu_d")
sns.set_style('whitegrid')
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="QQALPTXgyXTh" outputId="dafa9d4e-0ea3-4d2e-e694-e865b179d2da"
# More time on site, more money spent.
sns.jointplot(x='Time on Website',y='Yearly Amount Spent',data=customers)
# + [markdown] id="r854pVjEyXTi"
# ** Do the same but with the Time on App column instead. **
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="RNbK8M_ryXTi" outputId="463753c0-5f7d-4df1-f2fb-7aac8cd68847"
sns.jointplot(x='Time on App',y='Yearly Amount Spent',data=customers)
# + [markdown] id="czy02ccwyXTi"
# ** Use jointplot to create a 2D hex bin plot comparing Time on App and Length of Membership.**
# + colab={"base_uri": "https://localhost:8080/", "height": 458} id="ZrZvtFqcyXTi" outputId="7f656e57-277a-454f-a5a1-fc6c8cc1ac5c"
sns.jointplot(x='Time on App',y='Length of Membership',kind='hex',data=customers)
# + colab={"base_uri": "https://localhost:8080/", "height": 920} id="y9qB9-nwyXTj" outputId="33ffc499-2a65-4505-9c1e-fb6409c0b62c"
sns.pairplot(customers)
# + [markdown] id="v83ZKJ9lyXTj"
# **Based off this plot what looks to be the most correlated feature with Yearly Amount Spent?**
# + id="WM9Ct92OyXTj"
# Length of Membership
# + colab={"base_uri": "https://localhost:8080/", "height": 400} id="VVNDeXCLyXTj" outputId="476c5a99-e73f-47ec-e815-8b25594d92d6"
sns.lmplot(x='Length of Membership',y='Yearly Amount Spent',data=customers)
# + [markdown] id="avZ6-VFnyXTk"
# ## Training and Testing Data
#
#
# + id="ZlT_Oc0QyXTk"
y = customers['Yearly Amount Spent']
# + id="TR1WHt_YyXTk"
X = customers[['Avg. Session Length', 'Time on App','Time on Website', 'Length of Membership']]
# + id="UwzWWhOkyXTk"
from sklearn.model_selection import train_test_split
# + id="ONNI2qa_yXTk"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# + [markdown] id="pVhZMINcyXTl"
# ## Training the Model
#
# + id="WkmvSJ3-yXTl"
from sklearn.linear_model import LinearRegression
# + id="80HD8QUPyXTl"
lm = LinearRegression()
# + colab={"base_uri": "https://localhost:8080/"} id="zGgY1hHayXTl" outputId="3f6cfba3-1209-4a72-dca4-19efa465f9b8"
lm.fit(X_train,y_train)
# + [markdown] id="wP0G4JdUyXTm"
# **Print out the coefficients of the model**
# + colab={"base_uri": "https://localhost:8080/"} id="mKTkYXJbyXTm" outputId="4fbfaf3a-c0d6-477f-eb8f-7d54c983e073"
# The coefficients
print('Coefficients: \n', lm.coef_)
# + [markdown] id="BFcXMcx2yXTm"
# ## Predicting Test Data
#
#
# + id="LexeIqcQyXTm"
predictions = lm.predict( X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="ori23JVkyXTm" outputId="5e33df6a-4398-4d09-c34e-1cb25bfa1754"
plt.scatter(y_test,predictions)
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')
# + colab={"base_uri": "https://localhost:8080/"} id="nJq7aA9OyXTn" outputId="0ebb72f0-2a68-4692-8aaf-34871ed9e882"
# calculate these metrics by hand!
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
# + [markdown] id="Bwa98aeAyXTn"
# ## Residuals
#
#
# **Plot a histogram of the residuals and make sure it looks normally distributed. Use either seaborn distplot, or just plt.hist().**
# + colab={"base_uri": "https://localhost:8080/", "height": 333} id="FFYXnwK8yXTn" outputId="d5f632a7-a3e2-452d-a595-6a681d757ea7"
sns.distplot((y_test-predictions),bins=50);
# + [markdown] id="8AWjx5NCyXTn"
# ## Conclusion
# We still want to figure out the answer to the original question: do we focus our efforts on mobile app or website development? Or maybe that doesn't even really matter, and Membership Time is what is really important. Let's see if we can interpret the coefficients at all to get an idea.
#
# ** Recreate the dataframe below. **
# + colab={"base_uri": "https://localhost:8080/", "height": 172} id="BDLGNSS3yXTn" outputId="08f2b59e-64d3-40ee-dd06-4f9d85b576e4"
coeffecients = pd.DataFrame(lm.coef_,X.columns)
coeffecients.columns = ['Coeffecient']
coeffecients
# + [markdown] id="0Y-r2yXKyXTo"
# Interpreting the coefficients:
#
# - Holding all other features fixed, a 1 unit increase in **Avg. Session Length** is associated with an **increase of 25.98 total dollars spent**.
# - Holding all other features fixed, a 1 unit increase in **Time on App** is associated with an **increase of 38.59 total dollars spent**.
# - Holding all other features fixed, a 1 unit increase in **Time on Website** is associated with an **increase of 0.19 total dollars spent**.
# - Holding all other features fixed, a 1 unit increase in **Length of Membership** is associated with an **increase of 61.27 total dollars spent**.
# + [markdown] id="thzJbbYPyXTo"
# **focus more on their mobile app or on their website?**
# + [markdown] id="surzolnmyXTo"
#
# This is tricky, there are two ways to think about this: Develop the Website to catch up to the performance of the mobile app, or develop the app more since that is what is working better. This sort of answer really depends on the other factors going on at the company, you would probably want to explore the relationship between Length of Membership and the App or the Website before coming to a conclusion!
#
| ecomerce_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import Statements
import pandas as pd
import numpy as np
# ## Define functions
# +
def rand_0_1():
    # fair coin flip: returns 0 or 1 with equal probability
    return np.random.randint(0, 2)
def rand_0_6():
    # build a uniform value in 0..7 from three coin flips (binary digits),
    # then reject 0 and 7 so the result is uniform on 1..6 (like a die roll)
    sum = 4 * rand_0_1() + 2 * rand_0_1() + rand_0_1()
    while sum == 0 or sum == 7:
        sum = 4 * rand_0_1() + 2 * rand_0_1() + rand_0_1()
    return sum
# +
a = []
for i in range(100000):
a.append(rand_0_6())
a = pd.Series(a)
print(a.value_counts() / a.size)
| Uniform random distributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
# +
df1 = pd.read_csv('letter-ngrams/ngrams1.csv').set_index('1-gram', drop=True)
df2 = pd.read_csv('letter-ngrams/ngrams2.csv').set_index('2-gram', drop=True)
col = df2.columns
print(col)
ngrams2_any = df2['*/*']
ngrams2 = df2.T[col.str.contains('\*/') & (~ col.str.contains('-')) & (col != '*/*')].T
# sort by total occurrence
sort = np.argsort(ngrams2.sum(axis = 1))[::-1]
ngrams2 = ngrams2.iloc[sort]
# normalize
ngrams2 = (ngrams2.T /ngrams2.sum(axis = 1)).T
ngrams2.head()
# -
from decrypt import Decrypt
# +
s = """Zvd zvpvyb lzvdc imcq dktat qkt pvmzysab ptqdttz qkt jhsccgjsh
szy omszqme yvesgz gc cgqmsqty. Evcq utth qksq trwtagetzqsh
cdgqjk ctqqgznc szy wvgzqta atsygznc sat vz qkgc cgyt. Pmq cvet
dvmhy qkgzl qkt pvmzysab ztsata, vqktac dvmhy qkgzl gq usaqkta,
szy eszb dvmhy watuta zvq qv qkgzl spvmq gq. Gz usjq, qkt esqqta
gc vu ftab hgqqht gewvaqszjt gz wasjqgjt. Qkgc gc ptjsmct vu qkt
geetzct yguutatzjt gz cjsht ptqdttz qkgznc uva dkgjk
omszqme-etjkszgjsh ytcjagwqgvz gc zmetagjshhb tcctzqgsh szy qkvct
vaygagzsaghb wtajtwqgpht pb kmesz ptgznc. Ztftaqkthtcc, qkt
evfspghgqb vu qkt pvmzysab gc vu vzhb swwavrgesqt fshgygqb;
ytevzcqasqgvzc vu gq ytwtzy vz ztnhtjqgzn zmeptac dkgjk sat
ceshh, pmq zvq xtav, dkgjk egnkq qtzy qv xtav uva gzugzgqthb
hsant cbcqtec, pmq sat vzhb ftab ceshh uva atsh ugzgqt cbcqtec.
S qktvab uvmzyty gz qkgc dsb vz sanmetzqc vu eszgutcqhb
swwavrgesqt jksasjqta, kvdtfta nvvy qkt swwavrgesqgvz, gc cmathb
vu wavfgcgvzsh zsqmat. Gq cttec htngqgesqt qv cwtjmhsqt vz kvd
qkt qktvab egnkq tfvhft. Pmq vu jvmact zv vzt gc vphgnty qv ivgz
gz cmjk cwtjmhsqgvz.
Ivkz Pthh""".lower()
for symb in ';., \n-':
s = s.replace(symb,' ')
# my_df = pd.DataFrame(index = ngrams2.columns)
from collections import defaultdict
my_df = {k:defaultdict(int) for k in ngrams2.columns}
for word in s.split():
n = len(word)
if n > 10:
continue
for i in range(n-1):
key = ''
two_gram = word[i:i+2]
key = '*/{}:{}'.format(i+1, i+2)
my_df[key][two_gram] += 1
my_df = pd.DataFrame(my_df)
my_df
# axis0 = 2gram, axis1 = pos
sort = np.argsort(my_df.sum(axis = 1))[::-1]
my_df = my_df.iloc[sort]
# -
#normalize
my_df = (my_df.T/my_df.sum(axis = 1)).T
my_df.head()
# +
# my_df[my_df.index.str[0] == 't']# .head()
# -
ngrams2.head()#[ngrams2.index.str[0] == 'H'].head()
# +
word_lengths = pd.Series(s.split()).apply(len)
h,bins, _ = plt.hist(word_lengths,bins = np.max(word_lengths)-1 )
plt.show()
# h*np.arange(np.max(word_lengths)-1)
plt.bar(bins[:-1], h*np.arange(1, np.max(word_lengths)), width = 0.9)
# +
fig,ax =plt.subplots(1)
x, y = df2.index, df2['*/*']  # '2-gram' is the index after set_index above
sort = np.argsort(y)
x_temp = np.arange(x.size)
ax.scatter(x_temp,y)
ax.set_xticks(x_temp)
ax.set_xticklabels(x)
1
ax.set_yscale('log')
# -
# ? plt.bar
df2
| decryption/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="becoming-match"
# format code with "black" formatter. optional
# %load_ext nb_black
# + [markdown] id="fewer-anchor"
# # recreate "representation learning" paper
#
# <NAME>, <NAME> (2019). Representation learning of genomic sequence motifs with convolutional neural networks. _PLOS Computational Biology_ 15(12): e1007560. https://doi.org/10.1371/journal.pcbi.1007560
#
# Also at https://www.biorxiv.org/content/10.1101/362756v4.full
# + [markdown] id="clinical-curve"
# ## install python dependencies
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5350, "status": "ok", "timestamp": 1612204356625, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="round-record" outputId="c65681a8-9bdd-4da8-8b59-710d4cc2467b"
# %pip install --no-cache-dir https://github.com/p-koo/tfomics/tarball/master
# + [markdown] id="numerical-block"
# ## load data
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2023, "status": "ok", "timestamp": 1612204413835, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="aggressive-logistics" outputId="ce166bb4-05e3-4f97-b58d-5197efffadb9"
# !wget --timestamping https://www.dropbox.com/s/c3umbo5y13sqcfp/synthetic_dataset.h5
# + executionInfo={"elapsed": 284, "status": "ok", "timestamp": 1612204415906, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="assumed-mapping"
from pathlib import Path
import h5py
import numpy as np
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 712, "status": "ok", "timestamp": 1612204417184, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="institutional-chance" outputId="6730a268-4869-4397-cf24-77fe8774606e"
data_path = Path("synthetic_dataset.h5")
with h5py.File(data_path, "r") as dataset:
x_train = dataset["X_train"][:].astype(np.float32)
y_train = dataset["Y_train"][:].astype(np.float32)
x_valid = dataset["X_valid"][:].astype(np.float32)
y_valid = dataset["Y_valid"][:].astype(np.int32)
x_test = dataset["X_test"][:].astype(np.float32)
y_test = dataset["Y_test"][:].astype(np.int32)
x_train = x_train.transpose([0, 2, 1])
x_valid = x_valid.transpose([0, 2, 1])
x_test = x_test.transpose([0, 2, 1])
N, L, A = x_train.shape
print(f"{N} sequences, {L} nts per sequence, {A} nts in alphabet")
# + [markdown] id="activated-purchase"
# ## Max-pooling influences ability to build hierarchical motif representations
#
# >The goal of this computational task is to simultaneously make 12 binary predictions for the presence or absence of each transcription factor motif in the sequence.
#
# + [markdown] id="tested-dimension"
# ### make CNN models
#
# from methods > cnn models
#
# >All CNNs take as input a 1-dimensional one-hot-encoded sequence with 4 channels (one for each nucleotide: A, C, G, T), then processes the sequence with two convolutional layers, a fully-connected hidden layer, and a fully-connected output layer with 12 output neurons that have sigmoid activations for binary predictions. Each convolutional layer consists of a 1D cross-correlation operation, which calculates a running sum between convolution filters and the inputs to the layer, followed by batch normalization (Ioffe and Szegedy, 2015), which independently scales the features learned by each convolution filter, and a non-linear activation with a rectified linear unit (ReLU), which replaces negative values with zero.
# >
# >The first convolutional layer employs 30 filters each with a size of 19 and a stride of 1. The second convolutional layer employs 128 filters each with a size of 5 and a stride of 1. All convolutional layers incorporate zero-padding to achieve the same output length as the inputs. Each convolutional layer is followed by max-pooling with a window size and stride that are equal, unless otherwise stated. The product of the two max-pooling window sizes is equal to 100. Thus, if the first max-pooling layer has a window size of 2, then the second max-pooling window size is 50. This constraint ensures that the number of inputs to the fully-connected hidden layer is the same across all models. The fully-connected hidden layer employs 512 units with ReLU activations.
# >
# >Dropout (Srivastava et al, 2014), a common regularization technique for neural networks, is applied during training after each convolutional layer, with a dropout probability set to 0.1 for convolutional layers and 0.5 for fully-connected hidden layers. During training, we also employed L2-regularization with a strength equal to 1e-6. The parameters of each model were initialized according to (He et al, 2015), commonly known as He initialization.
# >
# >All models were trained with mini-batch stochastic gradient descent (mini-batch size of 100 sequences) for 100 epochs, updating the parameters after each mini-batch with Adam updates (Kingma and Ba, 2014), using recommended default parameters with a constant learning rate of 0.0003. Training was performed on a NVIDIA GTX Titan X Pascal graphical processing unit with acceleration provided by cuDNN libraries (Chetlur et al, 2014). All reported performance metrics and saliency logos are drawn strictly from the test set using the model parameters which yielded the lowest binary cross-entropy loss on the validation set, a technique known as early stopping.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1693, "status": "ok", "timestamp": 1612204420504, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="varied-indonesian" outputId="5ad9a143-4428-4932-805f-1816e9ee453c"
import pandas as pd
import tensorflow as tf
print("tensorflow version", tf.__version__)
tfk = tf.keras
tfkl = tf.keras.layers
def get_model(
pool1: int, pool2: int, n_classes: int = 12, batch_size: int = None
) -> tfk.Sequential:
"""Return a Model object with two convolutional layers, a
fully-connected hidden layer, and output. Sigmoid activation is
applied to logits.
Parameters
----------
pool1 : int
Size of pooling window in the max-pooling operation after the first
convolution.
pool2 : int
Size of pooling window in the max-pooling operation after the second
convolution.
n_classes : int
Number of output units.
batch_size : int
Batch size of input. If `None`, batch size can be variable.
Returns
-------
Instance of `tf.keras.Sequential`. This model is not compiled.
"""
if pool1 * pool2 != 100:
raise ValueError("product of pool sizes must be 100")
l2_reg = tfk.regularizers.l2(1e-6)
return tfk.Sequential(
[
tfkl.Input(shape=(L, A), batch_size=batch_size),
# layer 1
tfkl.Conv1D(
filters=30,
kernel_size=19,
strides=1,
padding="same",
use_bias=False,
kernel_regularizer=l2_reg,
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.MaxPool1D(pool_size=pool1, strides=pool1),
tfkl.Dropout(0.1),
# layer 2
tfkl.Conv1D(
filters=128,
kernel_size=5,
strides=1,
padding="same",
use_bias=False,
kernel_regularizer=l2_reg,
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.MaxPool1D(pool_size=pool2, strides=pool2),
tfkl.Dropout(0.1),
# layer 3
tfkl.Flatten(),
tfkl.Dense(
units=512, activation=None, use_bias=None, kernel_regularizer=l2_reg
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.Dropout(0.5),
# layer 4 (output). do not use activation (ie linear activation) so we can inspect
# the logits later.
tfkl.Dense(
units=n_classes,
activation=None,
use_bias=True,
kernel_initializer=tfk.initializers.GlorotNormal(),
bias_initializer=tfk.initializers.Zeros(),
name="logits",
),
tfkl.Activation(tf.nn.sigmoid, name="predictions"),
]
)
# + [markdown] id="adapted-leonard"
# ### train models
# -
save_dir = Path("models")
pool_pairs = [(1, 100), (2, 50), (4, 25), (10, 10), (25, 4), (50, 2), (100, 1)]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 559313, "status": "ok", "timestamp": 1612204982870, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="colonial-discretion" outputId="37533dc5-04b2-4f60-c8f9-1fa7dafaabe0"
for pool1, pool2 in pool_pairs:
print(f"++ training model with pool sizes {pool1}, {pool2}")
model = get_model(pool1=pool1, pool2=pool2)
metrics = [
tfk.metrics.AUC(curve="ROC", name="auroc"),
tfk.metrics.AUC(curve="PR", name="aupr"), # precision-recall
]
model.compile(
optimizer=tfk.optimizers.Adam(learning_rate=0.001),
loss=tfk.losses.BinaryCrossentropy(from_logits=False),
metrics=metrics,
)
callbacks = [
tfk.callbacks.EarlyStopping(
monitor="val_aupr",
patience=20,
verbose=1,
mode="max",
restore_best_weights=False,
),
tfk.callbacks.ReduceLROnPlateau(
monitor="val_aupr",
factor=0.2,
patience=5,
min_lr=1e-7,
mode="max",
verbose=1,
),
]
# train
history: tfk.callbacks.History = model.fit(
x=x_train,
y=y_train,
batch_size=100,
epochs=100,
shuffle=True,
validation_data=(x_valid, y_valid),
callbacks=callbacks,
verbose=2,
)
# save
save_dir.mkdir(exist_ok=True)
filepath = save_dir / f"model-{pool1:03d}-{pool2:03d}.h5"
model.save(filepath)
# cannot save directly with json standard lib because numpy datatypes
# will cause an error. pandas converts things for us.
df_hist = pd.DataFrame(history.history)
df_hist.to_json(filepath.with_suffix(".json"))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 342, "status": "ok", "timestamp": 1612205081750, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="PX0E1vw26Q-p" outputId="af343159-b22a-44ae-a524-c24a71c06343"
# !ls $save_dir
# + [markdown] id="developed-harvey"
# ### evaluate models
#
# End goal is to get percent matches with JASPAR data.
# -
# Download JASPAR database.
# !wget --timestamping https://www.dropbox.com/s/ha1sryrxfhx7ex7/JASPAR_CORE_2016_vertebrates.meme
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 145740, "status": "ok", "timestamp": 1612205282517, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "00715572159212236562"}, "user_tz": 300} id="split-basics" outputId="692b5c65-9af4-47c4-e524-39fde6013602" language="bash"
# # only run this if tomtom program not found
# if command -v tomtom; then
# echo "tomtom program installed"
# exit
# fi
# mkdir meme-src
# cd meme-src
# curl -fL https://meme-suite.org/meme/meme-software/5.3.1/meme-5.3.1.tar.gz | tar xz --strip-components 1
# ./configure --prefix=$HOME/meme --with-url=http://meme-suite.org --enable-build-libxml2 --enable-build-libxslt
# make
# make test
# make install
# +
# add meme programs to PATH
import os
os.environ["PATH"] += f'{os.pathsep}{Path.home() / "meme" / "bin"}'
# +
from collections import namedtuple
import matplotlib.pyplot as plt
import subprocess
import tfomics
import tfomics.impress
# +
# Container for comparison between motifs and filters for one model.
meme_entry = namedtuple(
"meme_entry",
"match_fraction match_any filter_match filter_qvalue min_qvalue num_counts",
)
outputs = {}
for pool1, pool2 in pool_pairs:
print("\n++++ evaluating cnn", pool1, pool2)
# Load model.
model = tfk.models.load_model(save_dir / f"model-{pool1:03d}-{pool2:03d}.h5")
_ = model.evaluate(x_test, y_test)
# layers: (0)conv -> (1)batchnorm -> (2)relu
W = tfomics.moana.filter_activations(
x_test=x_test, model=model, layer=2, window=20, threshold=0.5
)
# Create meme file
W_clipped = tfomics.moana.clip_filters(W, threshold=0.5, pad=3)
meme_file = save_dir / f"filters-{pool1:03d}-{pool2:03d}.meme"
tfomics.moana.meme_generate(W_clipped, output_file=meme_file, prefix="filter")
print("++ saved motifs to", meme_file)
# Use tomtom to determine which motifs our filters are similar to.
print("++ running tomtom")
output_path = "filters"
jaspar_path = "JASPAR_CORE_2016_vertebrates.meme"
args = [
"tomtom",
"-thresh",
"0.5",
"-dist",
"pearson",
"-evalue",
"-oc",
output_path,
meme_file,
jaspar_path,
]
ret = subprocess.run(args, check=True)
# See which motifs the filters are similar to.
    num_filters = tfomics.moana.count_meme_entries(meme_file)
    out = tfomics.evaluate.motif_comparison_synthetic_dataset(
Path(output_path) / "tomtom.tsv", num_filters=num_filters
)
# Save comparisons to dict.
this_meme_entry = meme_entry(*out)
outputs[f"cnn-{pool1:03d}-{pool2:03d}"] = this_meme_entry
# Plot logos with motif names.
fig = plt.figure(figsize=(25, 4))
tfomics.impress.plot_filters(W, fig, num_cols=6, names=this_meme_entry.filter_match, fontsize=14)
fig.suptitle(f"filters - cnn {pool1} x {pool2}")
plt.savefig(save_dir / f"filter-logos-{pool1:03d}-{pool2:03d}.pdf")
plt.show()
# -
print("match fractions")
for k, v in outputs.items():
print(f"{k}: {v.match_fraction:0.3f}")
# ## Sensitivity of motif representations to the number of filters
# ## Motif representations are not very sensitive to 1st layer filter size
# ## Motif representations are affected by the ability to assemble whole motifs in deeper layers
l2_reg = tfk.regularizers.l2(1e-6)
cnn_50_2 = tfk.Sequential(
[
tfkl.Input(shape=(L, A)),
# layer 1
tfkl.Conv1D(
filters=30,
kernel_size=19,
strides=1,
padding="same",
use_bias=False,
kernel_regularizer=l2_reg,
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.MaxPool1D(pool_size=50, strides=2),
tfkl.Dropout(0.1),
# layer 2
tfkl.Conv1D(
filters=128,
kernel_size=5,
strides=1,
padding="same",
use_bias=False,
kernel_regularizer=l2_reg,
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.MaxPool1D(pool_size=50, strides=50),
tfkl.Dropout(0.1),
# layer 3
tfkl.Flatten(),
tfkl.Dense(
units=512, activation=None, use_bias=None, kernel_regularizer=l2_reg
),
tfkl.BatchNormalization(),
tfkl.Activation(tf.nn.relu),
tfkl.Dropout(0.5),
# layer 4 (output). do not use activation (ie linear activation) so we can inspect
# the logits later.
tfkl.Dense(
units=12,
activation=None,
use_bias=True,
kernel_initializer=tfk.initializers.GlorotNormal(),
bias_initializer=tfk.initializers.Zeros(),
name="logits",
),
tfkl.Activation(tf.nn.sigmoid, name="predictions"),
]
)
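# +
# The cell above only defines `cnn_50_2`. A minimal training sketch, assuming the same
# compile/fit settings as the earlier training loop; the exact setup for this section is
# not spelled out here, so treat these choices as placeholders.
cnn_50_2.compile(
    optimizer=tfk.optimizers.Adam(learning_rate=0.001),
    loss=tfk.losses.BinaryCrossentropy(from_logits=False),
    metrics=[
        tfk.metrics.AUC(curve="ROC", name="auroc"),
        tfk.metrics.AUC(curve="PR", name="aupr"),
    ],
)
history_50_2 = cnn_50_2.fit(
    x=x_train,
    y=y_train,
    batch_size=100,
    epochs=100,
    shuffle=True,
    validation_data=(x_valid, y_valid),
    verbose=2,
)
# -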
# ## Distributed representations build whole motif representations in deeper layers
# + id="resistant-premiere"
| representation-learning/01-representation-learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kishanRaj10/18CSE044-DMDW-LAB/blob/main/DMDW_Assignment_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="JG77FONDT5Ps"
# **1. Downloading a dataset from kaggle and uploading it in jupyter notebook or google colab & Performing any random operation on the dataset.**
# + colab={"base_uri": "https://localhost:8080/", "height": 915} id="F_FvPuKeUSp-" outputId="d47436bc-0f36-4ffd-8252-f0e35b78f734"
import pandas as pd
url="https://raw.githubusercontent.com/kishanRaj10/18CSE044-DMDW-LAB/main/Tweets.csv"
df=pd.read_csv(url)
df
# + colab={"base_uri": "https://localhost:8080/", "height": 445} id="rFjaxWLLUbHu" outputId="b19596a7-4918-4a48-eb67-cf40ecfbedf4"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 479} id="AdHN3OpuWUHp" outputId="9f0e39d3-a0e0-42d0-cb20-4953c33b8e4c"
df.tail()
# + [markdown] id="_djn39_uWbsh"
# **2.Practice 10 python programs using numpy libraries.**
# + colab={"base_uri": "https://localhost:8080/"} id="1BqOWi4YWiZH" outputId="d1ada4c5-b6a7-4502-ba0b-e1a1411b7738"
#1.Program to create a 5x5 identity matrix, i.e. diagonal elements are 1, the rest are 0.
import numpy as np
x = np.eye(5,dtype=int)
print("x =\n",x)
# + colab={"base_uri": "https://localhost:8080/"} id="JLUElt0uXLQi" outputId="a3611703-ce74-42e0-9ec7-6ea3223a3133"
#2.Program to convert a given list into an array and then convert it back into a list.
import numpy as np
a = [[1, 2], [3, 4]]
x = np.array(a)
print("list to array:\n",x)
a2 = x.tolist()
print("array to list:\n",a2)
print(a == a2)
# + colab={"base_uri": "https://localhost:8080/"} id="qcJOeZcoXYwd" outputId="52bbfa0a-2fcf-4938-d15d-1fbcf41388ab"
#3.Program to convert the Fahrenheit degree values into Centigrade degrees. Fahrenheit values are stored into a NumPy array.
import numpy as np
N=int(input())# not more than N inputs will be considered
fvalues=list(map(float, input().split(' ')[:N]))
F = np.array(fvalues)
print("Values in Fahrenheit degrees:")
print(F)
print("Values in Centigrade degrees:")
print(5*F/9 - 5*32/9)
# + colab={"base_uri": "https://localhost:8080/"} id="oXP9dxUTXq04" outputId="c037465d-b03c-46d6-e34e-8fa9865b8d91"
#4.Program to compute the determinant of a given square array.
import numpy as np
from numpy import linalg as la
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
if R==C:
print("Enter the entries in a single line (separated by space): ")
entries = list(map(int, input().split()))
matrix = np.array(entries).reshape(R, C)
print("Original ",R,"x",C,"-D matrix")
print(matrix)
print("Determinant of the said ",R,"x",C,"-D matrix")
print(np.linalg.det(matrix))
else:
print("Invalid Dimensional Array Input")
# + colab={"base_uri": "https://localhost:8080/"} id="sFdYTN_aYqp7" outputId="14b936a8-eb90-4b7c-9843-a14ee6ef53d5"
#5.Program to create a 5x5 array with random values and find the minimum and maximum values.
import numpy as np
x = np.random.random((5,5))
print("Original Array:")
print(x)
xmin, xmax = x.min(), x.max()
print("Minimum and Maximum Values:")
print(xmin, xmax)
# + colab={"base_uri": "https://localhost:8080/"} id="6WOb47EhYyKL" outputId="e71aaedf-4130-41f3-f408-07f49cc56ba4"
#6.Program to sort the specified number of elements from beginning of a given array.
import numpy as np
nums = np.random.rand(15)
print("Original array:")
print(nums)
print("\nSorted first 8 elements:")
print(nums[np.argpartition(nums,range(8))])
# + colab={"base_uri": "https://localhost:8080/"} id="aHSHk8PbY4FF" outputId="62f06e7e-e973-4945-ca9d-90de5c82a3da"
#7.Program to calculate cumulative sum of the elements along a given axis, sum over rows for each of the 3 columns and sum over columns for each of the 2 rows of a given 2x3 array.
import numpy as np
x = np.array([[1,2,3], [4,5,6]])
print("Original array: ")
print(x)
print("Cumulative sum of the elements along a given axis:")
r = np.cumsum(x)
print(r)
print("\nSum over rows for each of the 3 columns:")
r = np.cumsum(x,axis=0)
print(r)
print("\nSum over columns for each of the 2 rows:")
r = np.cumsum(x,axis=1)
print(r)
# + colab={"base_uri": "https://localhost:8080/"} id="WQvDWWYAZCT2" outputId="11a59d2f-4986-4b7c-95ec-0b915985b70e"
#8.Program to test element-wise of a given array for finiteness (not infinity or not Not a Number), positive or negative infinity, for NaN, for NaT (not a time), for negative infinity, for positive infinity.
import numpy as np
print("\nTest element-wise for finiteness (not infinity or not Not a Number):")
print(np.isfinite(1))
print(np.isfinite(0))
print(np.isfinite(np.nan))
print("\nTest element-wise for positive or negative infinity:")
print(np.isinf(np.inf))
print(np.isinf(np.nan))
print(np.isinf(np.NINF))
print("Test element-wise for NaN:")
print(np.isnan([np.log(-1.),1.,np.log(0)]))
print("Test element-wise for NaT (not a time):")
print(np.isnat(np.array(["NaT", "2016-01-01"], dtype="datetime64[ns]")))
print("Test element-wise for negative infinity:")
x = np.array([-np.inf, 0., np.inf])
y = np.array([2, 2, 2])
print(np.isneginf(x, y))
print("Test element-wise for positive infinity:")
x = np.array([-np.inf, 0., np.inf])
y = np.array([2, 2, 2])
print(np.isposinf(x, y))
# + colab={"base_uri": "https://localhost:8080/"} id="gicSJTb4ZLmU" outputId="0f616d98-52fa-4998-c447-c3ba3f765982"
#9.Program to get the dates of yesterday, today and tomorrow.
import numpy as np
yesterday = np.datetime64('today', 'D') - np.timedelta64(1, 'D')
print("Yesterday: ",yesterday)
today = np.datetime64('today', 'D')
print("Today: ",today)
tomorrow = np.datetime64('today', 'D') + np.timedelta64(1, 'D')
print("Tomorrow: ",tomorrow)
# + colab={"base_uri": "https://localhost:8080/"} id="8s880mPWZSXC" outputId="d921a722-ee18-4507-abde-3b0eaf4cf7ec"
#10.Write a NumPy program to remove the leading and trailing whitespaces of all the elements of a given array.
import numpy as np
x = np.array([' python exercises ', ' PHP ', ' java ', ' C++'], dtype=str)
print("Original Array:")
print(x)
stripped = np.char.strip(x)
print("\nRemove the leading and trailing whitespaces: ", stripped)
# + [markdown] id="0kPgm_28ZaHC"
# **3.Practice 10 python programs using pandas libraries.**
# + colab={"base_uri": "https://localhost:8080/", "height": 488} id="7orHvGpgZfk0" outputId="b56b7189-8759-4f3f-c01b-5b01a532049b"
#1.Create a dataframe of ten rows, four columns with random values. Write a Pandas program to display bar charts in dataframe on specified columns.
import pandas as pd
import numpy as np
np.random.seed(24)
df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))],
axis=1)
df.iloc[0, 2] = np.nan
df.iloc[3, 3] = np.nan
df.iloc[4, 1] = np.nan
df.iloc[9, 4] = np.nan
print("Original array:")
print(df)
print("\nBar charts in dataframe:")
df.style.bar(subset=['B', 'C'], color='#d65f5f')
# + colab={"base_uri": "https://localhost:8080/"} id="3MRygxMVZpb3" outputId="521f873f-f986-46e1-908d-4c3c7fa797da"
#2
"""Write a Pandas program to split the following dataset using group by on 'salesman_id' and find the first order date for each group.
Test Data:
ord_no purch_amt ord_date customer_id salesman_id
0 70001 150.50 2012-10-05 3002 5002
1 70009 270.65 2012-09-10 3001 5003
2 70002 65.26 2012-10-05 3001 5001
3 70004 110.50 2012-08-17 3003 5003
4 70007 948.50 2012-09-10 3002 5002
5 70005 2400.60 2012-07-27 3002 5001
6 70008 5760.00 2012-09-10 3001 5001
7 70010 1983.43 2012-10-10 3004 5003
8 70003 2480.40 2012-10-10 3003 5003
9 70012 250.45 2012-06-27 3002 5002
10 70011 75.29 2012-08-17 3003 5003
11 70013 3045.60 2012-04-25 3001 5001"""
import pandas as pd
pd.set_option('display.max_rows', None)
#pd.set_option('display.max_columns', None)
df = pd.DataFrame({
'ord_no':[70001,70009,70002,70004,70007,70005,70008,70010,70003,70012,70011,70013],
'purch_amt':[150.5,270.65,65.26,110.5,948.5,2400.6,5760,1983.43,2480.4,250.45, 75.29,3045.6],
'ord_date': ['2012-10-05','2012-09-10','2012-10-05','2012-08-17','2012-09-10','2012-07-27','2012-09-10','2012-10-10','2012-10-10','2012-06-27','2012-08-17','2012-04-25'],
'customer_id':[3005,3001,3002,3009,3005,3007,3002,3004,3009,3008,3003,3002],
'salesman_id': [5002,5005,5001,5003,5002,5001,5001,5004,5003,5002,5004,5001]})
print("Original Orders DataFrame:")
print(df)
print("\nGroupby to find first order date for each group(salesman_id):")
result = df.groupby('salesman_id')['ord_date'].min()
print(result)
# + colab={"base_uri": "https://localhost:8080/"} id="qh5PwDQuZ23w" outputId="5cc78371-4b03-43ba-a7b6-3b5052c01903"
#3.Write a Pandas program to create
#a) Datetime object for Jan 11 2012.
#b) Specific date and time of 9:20 pm.
#c) Local date and time.
#d) A date without time.
#e) Current date.
#f) Time from a datetime.
#g) Current local time.
import datetime
from datetime import datetime
print("Datetime object for Jan 11 2012:")
print(datetime(2012, 1, 11))
print("\nSpecific date and time of 9:20 pm")
print(datetime(2011, 1, 11, 21, 20))
print("\nLocal date and time:")
print(datetime.now())
print("\nA date without time: ")
print(datetime.date(datetime(2012, 5, 22)))
print("\nCurrent date:")
print(datetime.now().date())
print("\nTime from a datetime:")
print(datetime.time(datetime(2012, 12, 15, 18, 12)))
print("\nCurrent local time:")
print(datetime.now().time())
# + colab={"base_uri": "https://localhost:8080/"} id="b3R3ambEZ_SI" outputId="eb1cac2f-2734-44cd-af05-0691913c6ffa"
#4.Write a Pandas program to extract only phone number from the specified column of a given DataFrame.
import pandas as pd
import re as re
pd.set_option('display.max_columns', 10)
df = pd.DataFrame({
'company_code': ['c0001','c0002','c0003', 'c0003', 'c0004'],
'company_phone_no': ['Company1-Phone no. 4695168357','Company2-Phone no. 8088729013','Company3-Phone no. 6204658086', 'Company4-Phone no. 5159530096', 'Company5-Phone no. 9037952371']
})
print("Original DataFrame:")
print(df)
def find_phone_number(text):
ph_no = re.findall(r"\b\d{10}\b",text)
return "".join(ph_no)
df['number']=df['company_phone_no'].apply(lambda x: find_phone_number(x))
print("\nExtracting numbers from dataframe columns:")
print(df)
# + id="269-H3YRaQiM" outputId="81c02c06-12db-43b2-f8d1-304c89ae0b0f" colab={"base_uri": "https://localhost:8080/"}
#5.Program to iterate over rows in a DataFrame.
import pandas as pd
import numpy as np
exam_data = [{'name':'Anastasia', 'score':12.5}, {'name':'Dima','score':9}, {'name':'Katherine','score':16.5}]
df = pd.DataFrame(exam_data)
for index, row in df.iterrows():
print(row['name'], row['score'])
# + id="S2IeejTLaXcz" outputId="d79b6d37-172c-4a4a-c3cf-3362360fa384" colab={"base_uri": "https://localhost:8080/"}
#6.Program to add, subtract, multiple and divide two Pandas Series.
import pandas as pd
ds1 = pd.Series([2, 4, 6, 8, 10])
ds2 = pd.Series([1, 3, 5, 7, 9])
ds = ds1 + ds2
print("Add two Series:")
print(ds)
print("Subtract two Series:")
ds = ds1 - ds2
print(ds)
print("Multiply two Series:")
ds = ds1 * ds2
print(ds)
print("Divide Series1 by Series2:")
ds = ds1 / ds2
print(ds)
# + id="bQ8fYOlqagGh" outputId="a5d615cd-7df0-4c5c-e872-21156f34278b" colab={"base_uri": "https://localhost:8080/"}
#7
"""Write a Pandas program to find and replace the missing values in a given DataFrame which do not have any valuable information.
Example:
Missing values: ?, --
Replace those values with NaN
Test Data:
ord_no purch_amt ord_date customer_id salesman_id
0 70001 150.5 ? 3002 5002
1 NaN 270.65 2012-09-10 3001 5003
2 70002 65.26 NaN 3001 ?
3 70004 110.5 2012-08-17 3003 5001
4 NaN 948.5 2012-09-10 3002 NaN
5 70005 2400.6 2012-07-27 3001 5002
6 -- 5760 2012-09-10 3001 5001
7 70010 ? 2012-10-10 3004 ?
8 70003 12.43 2012-10-10 -- 5003
9 70012 2480.4 2012-06-27 3002 5002
10 NaN 250.45 2012-08-17 3001 5003
11 70013 3045.6 2012-04-25 3001 -- """
import numpy as np
pd.set_option('display.max_rows', None)
#pd.set_option('display.max_columns', None)
df = pd.DataFrame({
'ord_no':[70001,np.nan,70002,70004,np.nan,70005,"--",70010,70003,70012,np.nan,70013],
'purch_amt':[150.5,270.65,65.26,110.5,948.5,2400.6,5760,"?",12.43,2480.4,250.45, 3045.6],
'ord_date': ['?','2012-09-10',np.nan,'2012-08-17','2012-09-10','2012-07-27','2012-09-10','2012-10-10','2012-10-10','2012-06-27','2012-08-17','2012-04-25'],
'customer_id':[3002,3001,3001,3003,3002,3001,3001,3004,"--",3002,3001,3001],
'salesman_id':[5002,5003,"?",5001,np.nan,5002,5001,"?",5003,5002,5003,"--"]})
print("Original Orders DataFrame:")
print(df)
print("\nReplace the missing values with NaN:")
result = df.replace({"?": np.nan, "--": np.nan})
print(result)
# + id="s12goU1FauKM" outputId="b539d27d-a45e-45cc-b164-07fa6799bceb" colab={"base_uri": "https://localhost:8080/"}
#8.Write a Pandas program to find out the alcohol consumption details in the year '1986' or '1989' where WHO region is 'Americas' or 'Europe' from the world alcohol consumption dataset.
import pandas as pd
url = "https://github.com/kishanRaj10/18CSE044-DMDW-LAB/blob/main/Tweets.csv"
w_a_con =pd.read_csv(url)
print("World alcohol consumption sample data:")
print(w_a_con.head())
print("\nThe world alcohol consumption details in the year ‘1986’ or ‘1989’ where WHO region is ‘Americas’ or 'Europe':")
print(w_a_con[((w_a_con['Year']==1986) | (w_a_con['Year']==1989)) & ((w_a_con['WHO region']=='Americas') | (w_a_con['WHO region']=='Europe'))].head(10))
# + id="Ts7ZdCJWa2oP" outputId="f9b3bc22-d13f-4864-a322-55a9b56aa910" colab={"base_uri": "https://localhost:8080/"}
#9.Program to Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame.
import pandas as pd
df1 = pd.DataFrame({'A': [None, 0, None], 'B': [3, 4, 5]})
df2 = pd.DataFrame({'A': [1, 1, 3], 'B': [3, None, 3]})
df1.combine_first(df2)
print("Original DataFrames:")
print(df1)
print("--------------------")
print(df2)
print("\nMerge two dataframes with different columns:")
result = df1.combine_first(df2)
print(result)
# + id="AAnV5UEebCLU" outputId="626b5aca-edb7-4a0b-d1e4-02b1c822edc8" colab={"base_uri": "https://localhost:8080/"}
#10.Program to start index with different value rather than 0 in a given DataFrame.
import pandas as pd
df = pd.DataFrame({
'school_code': ['s001','s002','s003','s001','s002','s004'],
'class': ['V', 'V', 'VI', 'VI', 'V', 'VI'],
'name': ['<NAME>','<NAME>','<NAME>', '<NAME>', '<NAME>', '<NAME>'],
'date_of_birth': ['15/05/2002','17/05/2002','16/02/1999','25/09/1998','11/05/2002','15/09/1997'],
'weight': [35, 37, 33, 30, 31, 32]})
print("Original DataFrame:")
print(df)
print("\nDefault Index Range:")
print(df.index)
df.index += 10
print("\nNew Index Range:")
print(df.index)
print("\nDataFrame with new index:")
print(df)
# + id="glPTiCPNbMMv"
| DMDW_Assignment_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df_cust_data = pd.read_excel('Customer_Data.xlsx')
df_plant = pd.read_excel('Plant Master.xlsx')
df_invoice = pd.read_csv('Final_invoice.csv',error_bad_lines=False, index_col=False, dtype='unicode')
# +
remove_cols = []
for column in df_invoice.columns:
if ((df_invoice[column].isnull().sum()*100)/len(df_invoice)) > 10:
remove_cols.append(column)
df_invoice.drop(columns=remove_cols, axis = 1,inplace=True)
df_invoice.columns = df_invoice.columns.str.replace(r'[^\w\s]', '')
df_invoice.columns = df_invoice.columns.str.replace(' ', '')
df_invoice.columns = df_invoice.columns.str.upper()
# -
df_invoice.columns
df_invoice.info()
# +
num_columns = ['LABOURTOTAL','MISCTOTAL','OSLTOTAL','PARTSTOTAL','TOTALAMTWTDTAX']
for col in num_columns:
df_invoice[col].fillna(0)
df_invoice[col] = df_invoice[col].astype(float)
df_invoice['UNNAMED0'] = df_invoice['UNNAMED0'].astype(int)
# +
df_invoice.drop(index=df_invoice[df_invoice['TOTALAMTWTDTAX'] == 0].index,inplace=True)
df_invoice.drop(index=df_invoice[df_invoice['TOTALAMTWTDTAX'] < 0].index,inplace=True)
df_invoice.drop(index=df_invoice[df_invoice['LABOURTOTAL'] < 0].index,inplace=True)
# -
df_invoice.shape
df_pincodes = pd.read_csv('Pincode.csv',error_bad_lines=False, index_col=False, dtype='unicode', encoding= 'unicode_escape')
df_pincodes = df_pincodes[['Pincode','District','StateName']]
df_pincodes.rename(columns={"District": "PO_District", "StateName": "PO_StateName"},inplace=True)
df_pincodes.sort_values(by=['Pincode','PO_District'],inplace=True)
df_pincodes.drop_duplicates(subset='Pincode', keep='first',inplace=True)
df_pincodes['PO_StateName'] = df_pincodes['PO_StateName'].str.upper()
df_pincodes['StateName'] = df_pincodes['PO_StateName']
df_invoice['DISTRICT'] = df_invoice['DISTRICT'].str.upper()
# +
df_invalid_state = df_invoice[df_invoice['DISTRICT'] != df_invoice['StateName']].copy()
df_invalid_state['PO_District'] = df_invalid_state['CITY']
df_invalid_state['StateName'] = df_invalid_state['DISTRICT']
df_invalid_city = df_invalid_state[['CITY','PLANT']][df_invalid_state['CITY'].str.len() < 3].copy()
df_invalid_city = df_invalid_city.join(df_plant.set_index('Plant'), on='PLANT')
for index, row in df_invalid_city.iterrows():
df_invalid_state.loc[index,'PO_District'] = row['City']
# -
df_invoice.shape
#Drop the records with invalid state info from the invoice data
df_invoice.drop(index=df_invalid_state.index,inplace=True)
#Append the corrected records back to the invoice data
df_final= pd.concat([df_invoice,df_invalid_state])
#Sort on column UNNAMED0
df_final.sort_values(by='UNNAMED0',inplace=True)
#Drop Old City name / District
df_final.drop(columns=['CITY','DISTRICT'],axis=1,inplace=True)
#Rename the city/state columns coming from the pincode data
df_final.rename(columns={"PO_District": "CITY", "StateName": "STATE"},inplace=True)
df_final['CITY'] = df_final['CITY'].str.upper()
df_final['STATE'] = df_final['STATE'].str.upper()
df_final.shape
df_final.columns
#Combine date time for job card and invoice date time to calculate difference
df_final['JOBCARDDATETIME'] = pd.to_datetime(df_invoice['JOBCARDDATE'] + ' ' + df_invoice['JOBCARDTIME'])
df_final['INVOICEDATETIME'] = pd.to_datetime(df_invoice['INVOICEDATE'] + ' ' + df_invoice['INVOICETIME'])
df_final['SERVICETIME'] =(df_final['INVOICEDATETIME']-df_final['JOBCARDDATETIME']).astype('timedelta64[D]')
#For each car we have multiple entries in the invoice data; remove duplicate records
df_car_regn = df_final[['REGNNO' , 'STATE','CITY','CUSTOMERNO','MAKE']].copy()
df_car_regn.sort_values(by=['REGNNO'],inplace=True)
df_car_regn.drop_duplicates(subset=['REGNNO'], keep='first',inplace=True)
#Which areas have the most cars?
total_cars = df_car_regn['STATE'].value_counts()
plot = total_cars.plot(kind='bar', title = "Total no. of cars across all states", figsize=(15,6))
plt.xticks(fontsize =10 , rotation=90);
#Which make/car is the most popular?
total_cars = df_car_regn['MAKE'].value_counts()
plot = total_cars.plot(kind='bar', title = "Total no. of cars Make wise", figsize=(15,15))
plt.xticks(fontsize =10 , rotation=90);
#Which type of service is popular in a certain area?
res = df_final.groupby(['STATE','ORDERTYPE']).size().unstack()
#print(res)
# Plot stacked bar chart
res.plot(kind='bar', stacked=True, figsize=(15,15))
plt.xlabel('State')
plt.ylabel('Service Type')
# Display plot
plt.show()
#What is the service structure for a particular make/car?
res = df_final.groupby(['MAKE','ORDERTYPE']).size().unstack()
#print(res)
# Plot stacked bar chart
res.plot(kind='bar', stacked=True, figsize=(15,15))
plt.ylabel('Service Type')
plt.xlabel('Make')
# Display plot
plt.show()
res = df_invoice.groupby(['ORDERTYPE']).count()['CUSTOMERNO'].to_frame()
#print(res)
# Plot stacked bar chart
res.plot(kind='bar', stacked=True, figsize=(10,5))
plt.xlabel('Service Type')
plt.ylabel('Counts of Services')
# Display plot
plt.show()
df_final.describe()
#Check outlier for service time
import seaborn as sns
plt.figure(figsize=(15,8))
plt.xticks(rotation = 45)
sns.boxplot('ORDERTYPE', 'SERVICETIME', data=df_final)
#Drop services that took more than 30 days
res = df_final[df_final['SERVICETIME'] <= 30]
plt.figure(figsize=(15,8))
plt.xticks(rotation = 45)
sns.boxplot('ORDERTYPE', 'SERVICETIME', data=res)
from sklearn.cluster import KMeans
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot as plt
# %matplotlib inline
df_final.columns
# +
#Get the revenue by State/City/Make/Customer type/Order type: build one dataframe with the summed spend columns and another with the count of invoices, then merge them
df_sum = pd.DataFrame(df_final.groupby(['STATE','CITY','MAKE','CUSTTYPE','ORDERTYPE']) \
['LABOURTOTAL','MISCTOTAL','OSLTOTAL','PARTSTOTAL','TOTALAMTWTDTAX'].sum().reset_index())
df_count = pd.DataFrame(df_final.groupby(['STATE','CITY','MAKE','CUSTTYPE','ORDERTYPE'])['TOTALAMTWTDTAX'].count().reset_index())
df_count.rename(columns={"TOTALAMTWTDTAX": "COUNT"},inplace=True)
df = df_sum.merge(df_count, left_on=['STATE','CITY','MAKE','CUSTTYPE','ORDERTYPE'], \
right_on=['STATE','CITY','MAKE','CUSTTYPE','ORDERTYPE'])
plt.scatter(x=df['COUNT'],y=df['TOTALAMTWTDTAX'])
plt.xlabel('Count of Servies')
plt.ylabel('Total Amount With Tax')
plt.show()
# -
df.to_csv('Spending_analysis.csv')
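# +
# The cells below select rows by a 'cluster' column. A minimal sketch to create it,
# assuming KMeans (imported above) on min-max scaled service counts and revenue;
# the number of clusters (4) and which label ends up being the high-spend group are
# arbitrary assumptions, not part of the original analysis.
scaler = MinMaxScaler()
scaled_features = scaler.fit_transform(df[['COUNT', 'TOTALAMTWTDTAX']])
km = KMeans(n_clusters=4, random_state=42)
df['cluster'] = km.fit_predict(scaled_features)
df['cluster'].value_counts()
# -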
#Break down spending in cluster 3 by state and customer type
res = df[df.cluster == 3]
print(res.shape)
res = res.pivot_table(index='STATE', columns='CUSTTYPE',values='TOTALAMTWTDTAX').reset_index()
res.plot(kind='bar', title = "State Wise Spending Cluster 3", figsize=(10,5),x='STATE')
plt.xticks(fontsize =10 , rotation=90);
#Break down spending in cluster 1 by state and customer type
res = df[df.cluster == 1]
print(res.shape)
res = res.pivot_table(index='STATE', columns='CUSTTYPE',values='TOTALAMTWTDTAX').reset_index()
res.plot(kind='bar', title = "State Wise Spending Cluster 1", figsize=(10,5),x='STATE')
plt.xticks(fontsize =10 , rotation=90);
#Break down spending in cluster 0 by state and customer type
res = df[df.cluster == 0]
res = res.pivot_table(index='STATE', columns='CUSTTYPE',values='TOTALAMTWTDTAX').reset_index()
res.plot(kind='bar', title = "State Wise Spending Cluster 0", figsize=(10,5),x='STATE')
plt.xticks(fontsize =10 , rotation=90);
| The customer ownership across the states.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import boto3
import numpy as np
import pandas as pd
# -
client = boto3.client('s3')
resource = boto3.resource('s3')
my_bucket = resource.Bucket('my-bucket')
path = "s3://daanmatchdatafiles/from Shekhar/Final_Data_ngoimpact.com.xlsx"
xl = pd.ExcelFile(path)
print(xl.sheet_names)
NGOIMPACT = xl.parse(xl.sheet_names[0])
NGOIMPACT.head()
| [DIR] from Shekhar/NGO Impact/NGO_Impact.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sos
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SoS
# language: sos
# name: sos
# ---
# + [markdown] kernel="SoS"
# # Extending SoS
# + [markdown] kernel="SoS"
# SoS can be easily extended with new actions, targets, converters, file previewers. To make the extension available to other users, you can either create and distribute a separate package, or extend SoS and send us a [pull request](https://help.github.com/articles/about-pull-requests/). Please open a ticket and discuss the idea with us before you send a pull request.
# + [markdown] kernel="SoS"
# ## Understanding `entry_points`
# + [markdown] kernel="SoS"
# SoS makes extensive use of [**entry points**](http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins), which allows external modules to register their features in the file system to make them available to other modules. It can be confusing initially but [this stack overflow ticket](http://stackoverflow.com/questions/774824/explain-python-entry-points) explains the `entry_points` mechanism quite well.
#
# To register additional feature with SoS, your package should define one or more sos-recognizable `entry_points` such as `sos-languages`, `sos-targets`, and `sos-actions`, with a syntax similar to
#
# ```
# entry_points='''
# [sos-language]
# ruby = sos_ruby.kernel:sos_ruby
#
# [sos-targets]
# Ruby_Library = sos_ruby.target:Ruby-Library
# '''
# ```
#
# With the installation of this package, `sos` would be able to obtain a class `sos_ruby` from module `sos_ruby.kernel`, and use it to work with the `ruby` language.
# + [markdown] kernel="SoS"
# ## Defining your own actions
# + [markdown] kernel="SoS"
# Under the hood an action is a normal Python function decorated with `SoS_Action`. The decorator defines the common interface of actions and calls the actual function. To define your own action, you generally need to
#
# ```
# from sos.actions import SoS_Action
#
# @SoS_Action()
# def my_action(*args, **kwargs):
# pass
# ```
#
# The decorator accepts an optional parameter `acceptable_args=['*']` which can be used to specify a list of acceptable parameter (`*` matches all keyword args). An exception will be raised if an action is defined with a list of `acceptable_args` and is called with an unrecognized argument.
# + [markdown] kernel="SoS"
# You then need to add an entry to `entry_points` in your `setup.py` file as
#
# ```
# [sos-actions]
# my_action = mypackage.mymodule:my_action
# ```
# + [markdown] kernel="SoS"
# The most important feature of SoS actions is that they can behave differently in different `run_mode`s, which can be `dryrun`, `run`, or `interactive` (for SoS Notebook). Depending on the nature of your action, you might want to do nothing in `dryrun` mode and give more visual feedback in `interactive` mode. The relevant code would usually look like
#
# ```
# if env.config['run_mode'] == 'dryrun':
# return None
# ```
#
# Because actions are often used in script format with ignored return value, actions usually return `None` for success, and raise an exception when error happens.
# + [markdown] kernel="SoS"
# If the execution of action depends on some other targets, you can raise an `UnknownTarget` with the target so that the target can be obtained, and the SoS step and the action will be re-executed after the target is obtained. For example, if your action depends on a particular `R_library`, you can test the existence of the target as follows:
#
# ```
# from sos.targets import UnknownTarget
# from sos.targets_r import R_library
#
# @SoS_Action()
# def my_action(script, *args, **kwargs):
# if not R_library('somelib').target_exists():
# raise UnknownTarget(R_library('somelib'))
# # ...
# ```
# + [markdown] kernel="SoS"
# ## Additional targets
# + [markdown] kernel="SoS"
# Additional target should be derived from [`BaseTarget`](https://github.com/vatlab/SoS/blob/master/src/sos/targets.py).
#
# ```
# from sos.targets import BaseTarget
#
# class my_target(BaseTarget):
# def __init__(self, *args, **kwargs):
# super(my_target, self).__init__(self)
#
# def target_name(self):
# ...
#
# def target_exists(self, mode='any'):
# ...
#
# def target_signature(self):
# ...
#
# ```
#
# Any target type should define the three functions:
#
# * `target_name`: name of the target for reporting purpose.
# * `target_exists`: check if the target exists. This function accepts a parameter `mode` which can `target`, `signature`, or `any`, which you can safely ignore.
# * `target_signature`: returns any immutable Python object (usually a string) that uniquely identifies the target so that two targets can be considered the same (different) if their signatures are the same (different). The signature is used to detect if a target has been changed.
#
# The details of this class can be found at the source code of [`BaseTarget`](https://github.com/vatlab/SoS/blob/master/src/sos/targets.py). The [`R_Library`](https://github.com/vatlab/SoS/blob/master/src/sos/targets_r.py) provides a good example of a **virtual target** that does not have a fixed corresponding file, can be checked for existence, and actually attempts to obtain (install a R library) the target when it is checked.
#
# After you defined your target, you will need to add an appropriate entry point to make it available to SoS:
#
# ```
# [sos-targets]
# my_target = mypackage.targets:my_target
# ```
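#
# As an illustration only (the class below and its name are made up, not part of SoS), a target that treats an environment variable as a dependency could fill in the three methods like this:
#
# ```
# import hashlib
# import os
# from sos.targets import BaseTarget
#
# class my_env_variable(BaseTarget):
#     def __init__(self, name):
#         super(my_env_variable, self).__init__(self)
#         self._name = name
#
#     def target_name(self):
#         return self._name
#
#     def target_exists(self, mode='any'):
#         return self._name in os.environ
#
#     def target_signature(self):
#         # the signature changes whenever the variable's value changes
#         return hashlib.md5(os.environ.get(self._name, '').encode()).hexdigest()
# ```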
# + [markdown] kernel="SoS"
# ## File format conversion
# + [markdown] kernel="SoS"
# To convert between sos and another file format, you would need to define two functions: one returning an [`argparse.ArgumentParser`](https://docs.python.org/3/library/argparse.html) that parses converter arguments, and one performing the actual file conversion.
#
# Suppose you would like to convert `.sos` to a `.xp` format, you can define these two functions as follows
#
# ```
# import argparse
# from sos.parser import SoS_Script
#
# def get_my_converter_parser():
# parser = argparse.ArgumentParser('sos_xp')
# parser.add_argument('--theme',
# help='Style of output format')
# return parser
#
# def my_converter(source_file, dest_file, args=None, unknown_args=[]):
# # parse additional_args to obtain converter-specific options
# # then convert from source_file to dest_file
# script = SoS_Script(source_file)
# for section in script.sections:
# # do something
#
# if __name__ == '__main__':
# parser = get_my_converter_parser()
# args, unknown_args = parser.parse_known_args(sys.argv[3:])
# my_converter(sys.argv[1], sys.argv[2], args, unknown_args)
#
# ```
#
# You can then register the converter in `setup.py` as
#
# ```
# [sos-converters]
# fromExt-toExt.parser: mypackage.mymodule:get_my_converter_parser
# fromExt-toExt.func: mypackage.mymodule:my_converter
# ```
#
# Here `fromExt` is file extension without leading dot, `toExt` is destination file extension without leading dot, or a format specified by the `--to` parameter of command `sos convert`. If `dest_file` is unspecified, the output should be written to standard output.
#
# This example uses an `if __name__ == '__main__'` section so that the converter can be used as a standalone program, which is not required but is a great way to test it. Note that the input and output files are handled by `sos convert`, so the parser only needs to parse converter-specific options.
# + [markdown] kernel="SoS"
# ## Preview additional formats
#
# Adding a preview function is very simple. All you need to do is define a function that returns preview information, and add an entry point to link the function to certain file format.
#
# More specifically, a previewer should be specified as
#
# ```
# pattern,priority = preview_module:func
# ```
#
# or
#
# ```
# module:func,priority = preview_module:func
# ```
#
# where
#
# 1. `pattern` is a pattern that matches incoming filename (see module fnmatch.fnmatch for details)
# 2. `module:func` specifies a function in module that detects the type of input file.
# 3. `priority` is an integer number that indicates the priority of previewer in case multiple pattern or function matches the same file. Developers of third-party previewer can override an existing previewer by specifying a higher priority number.
# 4. `preview_module:func` points to a function in a module. The function should accept a filename as the only parameter, and returns either
#
# * A string that will be displayed as plain text to standard output.
# * A dictionary that will be returned as `data` field of `display_data` (see [Jupyter documentation](http://jupyter-client.readthedocs.io/en/latest/messaging.html) for details). The dictionary typically has `text/html` for HTML output, "text/plain" for plain text, and "text/png" for image presentation of the file.
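#
# As a minimal sketch (the file extension, module and function names here are made up for illustration), a previewer that shows the beginning of a hypothetical `.xp` file as plain text could be:
#
# ```
# # mypackage/preview.py
# def preview_xp(filename):
#     with open(filename) as infile:
#         head = infile.read(2000)
#     return {'text/plain': head}
# ```
#
# It would then be registered with a `pattern,priority` entry under the previewer entry point group in `setup.py` (named in the same style as the groups above):
#
# ```
# [sos-previewers]
# *.xp,0 = mypackage.preview:preview_xp
# ```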
# + [markdown] kernel="SoS"
# ## Adding a subcommad (addon)
# + [markdown] kernel="SoS"
# If you would like to add a complete subcommand as an addon to SoS, you will need to define two functions and add them to `setup.py` as two entry points, one with suffix `.args` and one with suffix `.func`.
#
# ```
# [sos_addons]
# myaddon.args = yourpackage.module:addon_parser
# myaddon.func = yourpackage.module:addon_func
# ```
#
# The `addon_parser` function should use the `argparse` module to return an `ArgumentParser` object. SoS would obtain this parser and add it as a subparser of the SoS main parser so that the options can be parsed as
#
# ```
# sos myaddon options
# ```
#
# The `addon_func` should be defined as
#
# ```
# def addon_func(args, unknown_args)
# ```
#
# with `args` being the parsed known arguments, and `unknown_args` being a list of unknown arguments that you can process by yourself.
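#
# Putting the two pieces together, a toy addon (all names below are illustrative, not part of SoS) that simply echoes a message could look like:
#
# ```
# # yourpackage/module.py
# import argparse
#
# def addon_parser():
#     parser = argparse.ArgumentParser('myaddon',
#         description='A toy addon that prints a message')
#     parser.add_argument('--message', default='hello from myaddon')
#     return parser
#
# def addon_func(args, unknown_args):
#     if unknown_args:
#         print('ignoring unrecognized arguments:', unknown_args)
#     print(args.message)
# ```
#
# so that `sos myaddon --message hi` would print `hi`.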
| src/user_guide/extending_sos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # boolog
# ___
#
#
# ### Messing with Data Analysis
#
#
# I got off on a tangent of logging, reading CSV files, and it convinced me to try and learn `pandas` and `matplotlib` better.
#
#
#
# 2019.10.31
# - Using Fifa FUT data to experiment with `pandas` and `matplotlib`
# ### Bash Commands
# ---
# Essentially a graveyard of things I forgot to pip install before opening up `jupyter`...
# !pip3 install --upgrade pip
# # !pip3 install torch torchvision
# ### Magic Commands
# ---
# %matplotlib inline
# %reload_ext autoreload
# %autoreload 2
# ### Imports
# ---
# +
# System
import os
import sys
from pathlib import Path
from datetime import datetime, timedelta
# from itertools import count
# Web
import requests
# Data
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Computer Vision
import cv2
# Machine Learning
import torch
import torchvision
# -
# #### Logger
# ---
# [`loguru`](https://github.com/Delgan/loguru) is my go-to source for all my logging needs.
# +
from loguru import logger
logger.add("output.log",
format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}",
backtrace=True,
diagnose=True) # Set 'False' to not leak sensitive data in prod
logger.info(f"{'_'*25} Logger Start {'_'*25}")
# -
# ### Constants
# ---
# Paths
PATH = Path()
# #### Data
# ---
# data = pd.read_csv('data.csv')
data1 = [1, 5, 7, 8, 10]
data2 = [0, 0.1, 0.5, 0.4, 0.6]
# ### Matplotlib
# ---
# +
plt.style.use('seaborn')
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
# Plot
ax1.plot(data1, data2, label="ax 1")
ax2.plot(data1, data1, label="ax 2")
# Titles
ax1.set_title('Testing Matplotlib')
ax1.set_xlabel('X Label')
ax1.set_ylabel('Y Label')
ax1.legend(loc='upper left')
ax2.legend(loc='upper left')
# call tight_layout after the axes are populated so it actually adjusts this figure
fig.tight_layout()
# -
| blog/2019/10_31/fut_scrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Replacing ligand parameters in an already-parametrized system
#
# This example applies SMIRNOFF-format parameters to a BRD4 inhibitor from the [living review on binding free energy benchmark systems](https://www.annualreviews.org/doi/abs/10.1146/annurev-biophys-070816-033654) by <NAME> Gilson. The BRD4 system comes from the [accompanying GitHub repository](https://github.com/MobleyLab/benchmarksets/tree/master/input_files/BRD4).
#
# This example uses [ParmEd](http://parmed.github.io) to take a protein-ligand system parameterized with an alternate force field, and replace the force field used for the ligand with an OpenFF force field. This example is meant to illustrate how to apply parameters to a single ligand, but it's also easy to process many ligands.
#
# ### Loading the already-parametrized system
# Retrieve protein and ligand files for BRD4 and a docked inhibitor from the benchmark systems GitHub repository
# https://github.com/MobleyLab/benchmarksets
import requests
repo_url = 'https://raw.githubusercontent.com/MobleyLab/benchmarksets/master/input_files/'
sources = {
'system.prmtop' : repo_url + 'BRD4/prmtop-coords/BRD4-1.prmtop',
'system.crd' : repo_url + 'BRD4/prmtop-coords/BRD4-1.crds',
'ligand.sdf' : repo_url + 'BRD4/sdf/ligand-1.sdf',
'ligand.pdb' : repo_url + 'BRD4/pdb/ligand-1.pdb'
}
for (filename, url) in sources.items():
r = requests.get(url)
open(filename, 'w').write(r.text)
#Read AMBER to ParmEd Structure object
import parmed
in_prmtop = 'system.prmtop'
in_crd = 'system.crd'
orig_structure = parmed.amber.AmberParm(in_prmtop, in_crd)
# Let's inspect the unique molecules in the system
pieces = orig_structure.split()
for piece in pieces:
print(f"There are {len(piece[1])} instance(s) of {piece[0]}")
# * The first molecule species has 2035 atoms, so it's probably the protein
# * The second molecule species has 26 atoms, which is the size of our ligand
# * The third and fourth molecule species have 32 and 35 copies, respectively, and one atom each. They are probably counterions
# * The fifth molecule species has 11,000 copies with three atoms each, so these are our waters.
#
# We could drill into the ParmEd objects to find more about these if needed.
#
# **It's important to note that `pieces[1]` is the parameterized ligand, as we will be replacing it further down in this example.** If you apply this notebook to a system with a different number of components, or with objects in a different order, you may need to change some of the code below accordingly.
#
# ### Generating an Open Force Field Toolkit `Topology` for the ligand
#
# Here we assume a complicated scenario -- we have an SDF of our ligand available (`ligand.sdf`), containing bond orders and enough detail about the molecule for us to parameterize the ligand. However, this SDF does not necessarily have the same atom indexing or coordinates as the original ligand in `system.prmtop` and `system.crd`. If we mix up the ligand atom indices and try to use the original ligand coordinates, the ligand's initial geometry will be nonsense. So, we've also got a copy of the ligand as `ligand.pdb` (which we could have extracted from a dump of our system to PDB format, if desired), and we're going to use that as a reference to get the atom indexing right.
#
# This example will use the `simtk.openmm.app.PDBFile` class to read `ligand.pdb` and then use `Topology.from_openmm` to create an OpenFF Topology that contains the ligand in the correct atom ordering.
#
# If you **know** that this indexing mismatch will never occur for your data sources, and that your SDFs always contain the correct ordering, you can skip this step by simply running `ligand_off_topology = ligand_off_molecule.to_topology()`
#
#
# +
from openff.toolkit.topology import Molecule, Topology
from simtk.openmm.app import PDBFile
ligand_off_molecule = Molecule('ligand.sdf')
ligand_pdbfile = PDBFile('ligand.pdb')
ligand_off_topology = Topology.from_openmm(ligand_pdbfile.topology,
unique_molecules=[ligand_off_molecule])
# -
# ### Parametrizing the ligand
#
# <div class="alert alert-block alert-warning">
# <b>Note:</b> Even though we plan to constrain bond lengths to hydrogen, we load "openff_unconstrained-1.0.0.offxml". This is because our workflow will involve loading the OFF-parametrized ligand using ParmEd, which <a href="https://github.com/openforcefield/openff-toolkit/issues/444#issuecomment-547211377"> applies its own hydrogen bonds at a later time, and will fail if it attempts to manipulate an OpenMM system that already contains them.</a>
# </div>
#
#
# Here we begin by loading a SMIRNOFF force field -- in this case, the OpenFF-1.0 force field, "Parsley".
#
# Once loaded, we create a new OpenMM system containing the ligand, then use ParmEd to create a `Structure` from that system. We'll re-combine this `Structure` object with those for the protein, ions, etc. later.
# +
# Load the SMIRNOFF-format Parsley force field
from openff.toolkit.typing.engines.smirnoff import ForceField
force_field = ForceField('openff_unconstrained-1.0.0.offxml')
ligand_system = force_field.create_openmm_system(ligand_off_topology)
new_ligand_structure = parmed.openmm.load_topology(ligand_off_topology.to_openmm(),
ligand_system,
xyz=pieces[1][0].positions)
# -
# It's possible to save out ligand parameters at this point, if desired; here we do so to AMBER and GROMACS format just for inspection.
new_ligand_structure.save('tmp.prmtop', overwrite=True)
new_ligand_structure.save('tmp.inpcrd', overwrite=True)
new_ligand_structure.save('tmp.gro', overwrite=True)
new_ligand_structure.save('tmp.top', overwrite=True)
# ### Check for discrepancies between the original ligand and its replacement
#
# Here we check that the number of atoms is the same and that the same elements occur in the same order. This will catch many (but not all) errors where someone provided an SDF file for a different ligand than the one present in the system. It will miss errors where they happen to provide a different ligand with the same number of atoms and the same elements in the same order -- which is unlikely to happen, but not impossible.
# +
# Check how many atoms and which order elements are in the new ligand
n_atoms_new = len(new_ligand_structure.atoms)
elements_new = [atom.element for atom in new_ligand_structure.atoms]
# Check how many atoms and which order elements are in the old ligand
old_ligand_structure, n_copies = pieces[1]
n_atoms_old = len(old_ligand_structure.atoms)
elements_old = [atom.element for atom in old_ligand_structure.atoms]
print(f"There are {n_atoms_old} in the old ligand structure and {n_atoms_new} atoms "
f"in the new ligand structure")
# Print out error message if number of atoms doesn't match
if n_atoms_new != n_atoms_old:
print("Error: Number of atoms in input ligand doesn't match number extracted "
"from prmtop file.")
if elements_new != elements_old:
print("Error: Elements in input ligand don't match elements in the ligand "
"from the prmtop file.")
print(f"Old elements: {elements_old}")
print(f"New elements: {elements_new}")
# -
# That looks OK -- we're seeing a consistent number of atoms in both structures, and no errors about inconsistent elements. That means we're OK to proceed and start combining our ParmEd `Structure` objects.
#
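# Before combining, an optional extra sanity check (an illustrative sketch, not part of the original workflow) is to compare bond counts between the old and new ligand structures, which would catch some of the remaining mismatch cases mentioned above:
# +
# Optional: compare bond counts as an additional (still not exhaustive) consistency check
n_bonds_old = len(old_ligand_structure.bonds)
n_bonds_new = len(new_ligand_structure.bonds)
if n_bonds_new != n_bonds_old:
    print(f"Error: {n_bonds_old} bonds in the original ligand vs. {n_bonds_new} in the replacement.")
else:
    print(f"Both ligand structures contain {n_bonds_new} bonds.")
# -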
# ### Combine receptor and ligand structures
#
# Now, we make a new ParmEd `Structure` for the complex, and begin adding the pieces of our system back together. Recall that above, we used ParmEd to split the system into a list called `pieces`, where each item pairs the `Structure` of one unique species with a record of its occurrences, so `len(pieces[i][1])` gives the number of copies of that species. We have just one protein, for example, but many water molecules.
#
# **Here, we begin by combining our original protein with our new ligand**.
#
# We also print out a lot of info as we do so just to check that we're ending up with the number of atom types we expect.
# +
# Create a new, empty system
complex_structure = parmed.Structure()
# Add the protein
complex_structure += pieces[0][0]
print("BEFORE SYSTEM COMBINATION (just protein)")
print("Unique atom names:", sorted(list(set([atom.atom_type.name for atom in complex_structure]))))
print("Number of unique atom types:", len(set([atom.atom_type for atom in complex_structure])))
print("Number of unique epsilons:", len(set([atom.epsilon for atom in complex_structure])))
print("Number of unique sigmas:", len(set([atom.sigma for atom in complex_structure])))
print()
print("BEFORE SYSTEM COMBINATION (just ligand)")
print("Unique atom names:", sorted(list(set([atom.atom_type.name for atom in new_ligand_structure]))))
print("Number of unique atom types:", len(set([atom.atom_type for atom in new_ligand_structure])))
print("Number of unique epsilons:", len(set([atom.epsilon for atom in new_ligand_structure])))
print("Number of unique sigmas:", len(set([atom.sigma for atom in new_ligand_structure])))
print()
# Add the ligand
complex_structure += new_ligand_structure
print("AFTER LIGAND ADDITION (protein+ligand)")
print("Unique atom names:", sorted(list(set([atom.atom_type.name for atom in complex_structure]))))
print("Number of unique atom types:", len(set([atom.atom_type for atom in complex_structure])))
print("Number of unique epsilons:", len(set([atom.epsilon for atom in complex_structure])))
print("Number of unique sigmas:", len(set([atom.sigma for atom in complex_structure])))
# -
# This looks good. We see that the protein alone has 33 atom types, which have 14 unique sigma/epsilon values, and the ligand has six atom types with five unique sigma/epsilon values. After combining, we end up with 39 atom types having 19 unique sigma and epsilon values, which is correct.
#
# If you're astute, you'll notice the number of atom names doesn't add up. That's OK -- the atom names are just cosmetic attributes and don't affect the assigned parameters.
#
# ### Add the ions and water back into the system
#
# Remember, we split our system into protein + ligand + ions + water, and then we took out and replaced the ligand, generating a new `Structure` of the complex. Now we need to re-insert the ions and the water. First we'll handle the ions.
#
# Here, ParmEd has a convenient overload of the multiplication operator, so that if we want a `Structure` with N copies of an ion, we just ask it to multiply the `Structure` of an individual ion by the number of occurrences of that ion.
# +
# Add ions
just_ion1_structure = parmed.Structure()
just_ion1_structure += pieces[2][0]
just_ion1_structure *= len(pieces[2][1])
just_ion2_structure = parmed.Structure()
just_ion2_structure += pieces[3][0]
just_ion2_structure *= len(pieces[3][1])
complex_structure += just_ion1_structure
complex_structure += just_ion2_structure
print("AFTER ION ADDITION (protein+ligand+ions)")
print("Unique atom names:", sorted(list(set([atom.atom_type.name for atom in complex_structure]))))
print("Number of unique atom types:", len(set([atom.atom_type for atom in complex_structure])))
print("Number of unique epsilons:", len(set([atom.epsilon for atom in complex_structure])))
print("Number of unique sigmas:", len(set([atom.sigma for atom in complex_structure])))
# -
# Finally, we do the same thing for the water present in our system:
# +
# Add waters
just_water_structure = parmed.Structure()
just_water_structure += pieces[4][0]
just_water_structure *= len(pieces[4][1])
complex_structure += just_water_structure
print("AFTER WATER ADDITION (protein+ligand+ions+water)")
print("Unique atom names:", sorted(list(set([atom.atom_type.name for atom in complex_structure]))))
print("Number of unique atom types:", len(set([atom.atom_type for atom in complex_structure])))
print("Number of unique epsilons:", len(set([atom.epsilon for atom in complex_structure])))
print("Number of unique sigmas:", len(set([atom.sigma for atom in complex_structure])))
# -
# ### Now that we've re-combined the system, handle the coordinates and box vectors
#
# The above dealt with the chemical topology and parameters for the system, which is most of what we need -- but not quite all. We still have to handle the coordinates and the information about the simulation box. This final stage of setup is straightforward: we just copy the original coordinates and box vectors over to the new `Structure`. Nothing fancy is needed:
# Copy over the original coordinates and box vectors
complex_structure.coordinates = orig_structure.coordinates
complex_structure.box_vectors = orig_structure.box_vectors
# ### Export to AMBER and GROMACS formats
#
# We started off in AMBER format, and presumably may want to continue in that format -- so let's write out to AMBER and GROMACS format:
# +
# Export the Structure to AMBER files
complex_structure.save('complex.prmtop', overwrite=True)
complex_structure.save('complex.inpcrd', overwrite=True)
# Export the Structure to Gromacs files
complex_structure.save('complex.gro', overwrite=True)
complex_structure.save('complex.top', overwrite=True)
# -
# That should conclude our work in this example. However, let's double-check by ensuring we can actually run some dynamics on the combined system without any trouble.
#
#
# ## As a test, run some dynamics on the combined system
#
# First, we create an OpenMM system, as we've done in other examples here. In this case we can use ParmEd's built-in `createSystem` functionality already attached to the combined `Structure`. We ask for a reasonable cutoff, constrained bonds to hydrogen (note that **this keyword argument overrides the unconstrained force field we loaded above**; the ligand, and every other molecule in the system, **will** have its covalent bonds to hydrogen constrained), PME, and rigid water:
# +
from simtk import unit
from simtk.openmm import app, LangevinIntegrator
import numpy as np
from parmed.openmm import NetCDFReporter
system = complex_structure.createSystem(nonbondedMethod=app.PME,
nonbondedCutoff=9*unit.angstrom,
constraints=app.HBonds,
rigidWater=True)
# -
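# To confirm that the `constraints=app.HBonds` setting took effect even though we loaded the unconstrained force field (a quick illustrative check, not part of the original example), we can count the constraints in the resulting OpenMM `System`:
system.getNumConstraints()  # number of constrained bonds to hydrogen, plus rigid-water constraints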
# Next we'll set up the integrator and a reporter to write the trajectory, pick the timestep, and then minimize the energy and run a short stretch of dynamics after setting the temperature to 300 K:
# +
integrator = LangevinIntegrator(300*unit.kelvin,
1/unit.picosecond,
0.001*unit.picoseconds)
simulation = app.Simulation(complex_structure.topology, system, integrator)
# Depending on where your system came from, you may want to
# add something like (30, 30, 30)*Angstrom to center the protein
# (no functional effect, just visualizes better)
#simulation.context.setPositions(complex_structure.positions + np.array([30, 30, 30])*unit.angstrom)
simulation.context.setPositions(complex_structure.positions)
nc_reporter = NetCDFReporter('trajectory.nc', 10)
simulation.reporters.append(nc_reporter)
# -
simulation.minimizeEnergy()
minimized_coords = simulation.context.getState(getPositions=True).getPositions()
simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
simulation.step(1000)
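# As a final sanity check (an illustrative addition, not part of the original example), we can query the context for the final potential energy to confirm that the short simulation stayed stable:
final_state = simulation.context.getState(getEnergy=True)
print("Final potential energy:",
      final_state.getPotentialEnergy().in_units_of(unit.kilocalorie_per_mole))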
| examples/swap_amber_parameters/swap_existing_ligand_parameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cflows
# language: python
# name: cflows
# ---
# ## Config
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# +
# %load_ext autoreload
# %autoreload 2
from pathlib import Path
from experiment import data_path, device
m = 512 # Manifold dimension
model_name = f'cifar10-boat-manifold-{m}-cef-joint'
checkpoint_path = data_path / 'cef_models' / model_name
gen_path = data_path / 'generated' / model_name
# -
# ## Load data
# +
import torchvision
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
transform = transforms.Compose([
transforms.ToTensor(),
])
image_folder = data_path / f'cifar10-manifold-{m}-boat'
train_data = torchvision.datasets.ImageFolder(root=str(image_folder), transform=transform)
held_out = len(train_data) // 10
train_data, val_data = random_split(train_data, [len(train_data) - held_out, held_out])
# -
# ## Define model
# +
from nflows import cef_models
flow = cef_models.Cifar10CEFlow(m).to(device)
# -
# ## Train
# +
import torch.optim as opt
from experiment import train_injective_flow
optim = opt.Adam(flow.parameters(), lr=0.001)
scheduler = opt.lr_scheduler.CosineAnnealingLR(optim, 1000)
def weight_schedule():
'''Yield epoch weights for likelihood and recon loss, respectively'''
for _ in range(1000):
yield 0.01, 100000
scheduler.step()
train_loader = DataLoader(train_data, batch_size=512, shuffle=True, num_workers=30)
val_loader = DataLoader(val_data, batch_size=512, shuffle=True, num_workers=30)
train_injective_flow(flow, optim, scheduler, weight_schedule, train_loader, val_loader,
model_name, checkpoint_path=checkpoint_path, checkpoint_frequency=25)
# -
# ## Generate some samples
# +
from experiment import save_samples
save_samples(flow, num_samples=10000, gen_path=gen_path, checkpoint_epoch=-1, batch_size=512)
| experiments/cifar10-boat-manifold-512-cef-joint.ipynb |