# Soccerstats Predictions v1.2 The changelog from v1.1: * Train on `train` data, and validate using `test` data. ## A. Data Cleaning & Preparation ### 1. Read csv file ``` # load and cache data stat_df = sqlContext.read\ .format("com.databricks.spark.csv")\ .options(header = True)\ .load("data/teamFixtures.csv")\ .cache() # from pyspark.sql.functions import isnan, when, count, col # count hyphen nulls ("-") per column # stat_df.select([count(when(stat_df[c] == "-", c)).alias(c) for c in stat_df.columns]).show() ``` ### 2. Filter-out "gameFtScore" column values ``` from pyspark.sql.functions import udf from pyspark.sql.types import StringType # replace non-"-" values with null: gameFtScore nullify_ft_scores = udf( lambda row_value: None if row_value != "-" else row_value, StringType() ) # replace "-" values with null: HTS_teamAvgOpponentPPG, ATS_teamAvgOpponentPPG nullify_hyphen_cols = udf( lambda row_value: None if row_value == "-" else row_value, StringType() ) stat_df = (stat_df.withColumn("gameFtScore", nullify_ft_scores(stat_df.gameFtScore))) stat_df = (stat_df.withColumn("HTS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.HTS_teamAvgOpponentPPG)) .withColumn("ATS_teamAvgOpponentPPG", nullify_hyphen_cols(stat_df.ATS_teamAvgOpponentPPG)) ) # drop Null values stat_df = stat_df.dropna() stat_df.select("gameFtScore", "HTS_teamAvgOpponentPPG", "ATS_teamAvgOpponentPPG").show(5) print("Total rows: {}".format(stat_df.count())) ``` ### 3. Write-out new dataframe to Json ``` # optional: save to file # stat_df.coalesce(1).write.format('json').save('sstats_fixtures.json') ``` ### 4. Read fixtures Json to dataframe ``` fx_df = spark.read.json('data/fixtures1.json') fx_df.printSchema() ``` ### 5. Encode "fixture_id" on stat_df dataframe ``` import hashlib from pyspark.sql.functions import array def encode_string(value): return hashlib.sha1( value.encode("utf-8") ).hexdigest() # add an encoded col to "stat_df"; fixture_id fxcol_df = udf( lambda row_value: encode_string(u"".join([x for x in row_value])), StringType() ) stat_df = (stat_df.withColumn("fixture_id", fxcol_df(array( "leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName" )))) # display some encoded fixtures stat_df.select("fixture_id").show(5, False) ``` ### 6. Concat the two dataframes: "stat_df" and "fx_df" ``` from pyspark.sql.functions import col # use "left-outer-join" to concat full_df = stat_df.alias("a")\ .join(fx_df, stat_df.fixture_id == fx_df.fixture_id, "left_outer")\ .select(*[col("a."+c) for c in stat_df.columns] + [fx_df.ft_score]) full_df.select("leagueName", "leagueDivisionName", "gamePlayDate", "gameHomeTeamName", "gameAwayTeamName", "ft_score").show(5, False) ``` ### 7. Assess damage on "ft_score " nulls ``` # count nulls per column def count_null(df, col): return df.where(df[col].isNull()).count() print("Total rows: {}".format(full_df.count())) print("Ft_score nulls: {}".format(count_null(full_df, "ft_score"))) # drop null values in ft_Score full_df = full_df.dropna() print("Total rows: {}".format(full_df.count())) print("Ft_score nulls: {}".format(count_null(full_df, "ft_score"))) ``` ## B. Deep Learning ### 1. 
Clean data ``` # drop unnecessary columns ml_df = full_df.drop( "gameID", "gamePlayDate", "gamePlayTime", "gameHomeTeamName", "gameAwayTeamName", "gameHomeTeamID","gameAwayTeamID", "leagueName", "leagueDivisionName", "gameFtScore", "fixture_id" ) # separate col types: double & string # double type features dtype_features = [ "leagueCompletion", "HTS_teamPosition", "HTS_teamGamesPlayed", "HTS_teamGamesWon", "HTS_teamGamesDraw", "HTS_teamGamesLost", "HTS_teamGoalsScored", "HTS_teamGoalsConceded", "HTS_teamPoints", "HTS_teamPointsPerGame", "HTS_teamPPGlast8", "HTS_homeGamesWon", "HTS_homeGamesDraw", "HTS_homeGamesLost", "HTS_homeGamesPlayed", "HTS_awayGamesWon", "HTS_awayGamesDraw", "HTS_awayGamesLost", "HTS_awayGamesPlayed", "HTS_teamPPGHome", "HTS_teamPPGAway", "HTS_teamAvgOpponentPPG", "HTS_homeGoalMargin_by1_wins", "HTS_homeGoalMargin_by1_losses", "HTS_homeGoalMargin_by2_wins", "HTS_homeGoalMargin_by2_losses", "HTS_homeGoalMargin_by3_wins", "HTS_homeGoalMargin_by3_losses", "HTS_homeGoalMargin_by4p_wins", "HTS_homeGoalMargin_by4p_losses", "HTS_awayGoalMargin_by1_wins", "HTS_awayGoalMargin_by1_losses", "HTS_awayGoalMargin_by2_wins", "HTS_awayGoalMargin_by2_losses", "HTS_awayGoalMargin_by3_wins", "HTS_awayGoalMargin_by3_losses", "HTS_awayGoalMargin_by4p_wins", "HTS_awayGoalMargin_by4p_losses", "HTS_totalGoalMargin_by1_wins", "HTS_totalGoalMargin_by1_losses", "HTS_totalGoalMargin_by2_wins", "HTS_totalGoalMargin_by2_losses", "HTS_totalGoalMargin_by3_wins", "HTS_totalGoalMargin_by3_losses", "HTS_totalGoalMargin_by4p_wins", "HTS_totalGoalMargin_by4p_losses", "HTS_homeGoalsScored", "HTS_homeGoalsConceded", "HTS_homeGoalsScoredPerMatch", "HTS_homeGoalsConcededPerMatch", "HTS_homeScored_ConcededPerMatch", "HTS_awayGoalsScored", "HTS_awayGoalsConceded", "HTS_awayGoalsScoredPerMatch", "HTS_awayGoalsConcededPerMatch", "HTS_awayScored_ConcededPerMatch", "ATS_teamPosition", "ATS_teamGamesPlayed", "ATS_teamGamesWon", "ATS_teamGamesDraw", "ATS_teamGamesLost", "ATS_teamGoalsScored", "ATS_teamGoalsConceded", "ATS_teamPoints", "ATS_teamPointsPerGame", "ATS_teamPPGlast8", "ATS_homeGamesWon", "ATS_homeGamesDraw", "ATS_homeGamesLost", "ATS_homeGamesPlayed", "ATS_awayGamesWon", "ATS_awayGamesDraw", "ATS_awayGamesLost", "ATS_awayGamesPlayed", "ATS_teamPPGHome", "ATS_teamPPGAway", "ATS_teamAvgOpponentPPG", "ATS_homeGoalMargin_by1_wins", "ATS_homeGoalMargin_by1_losses", "ATS_homeGoalMargin_by2_wins", "ATS_homeGoalMargin_by2_losses", "ATS_homeGoalMargin_by3_wins", "ATS_homeGoalMargin_by3_losses", "ATS_homeGoalMargin_by4p_wins", "ATS_homeGoalMargin_by4p_losses", "ATS_awayGoalMargin_by1_wins", "ATS_awayGoalMargin_by1_losses", "ATS_awayGoalMargin_by2_wins", "ATS_awayGoalMargin_by2_losses", "ATS_awayGoalMargin_by3_wins", "ATS_awayGoalMargin_by3_losses", "ATS_awayGoalMargin_by4p_wins", "ATS_awayGoalMargin_by4p_losses", "ATS_totalGoalMargin_by1_wins", "ATS_totalGoalMargin_by1_losses", "ATS_totalGoalMargin_by2_wins", "ATS_totalGoalMargin_by2_losses", "ATS_totalGoalMargin_by3_wins", "ATS_totalGoalMargin_by3_losses", "ATS_totalGoalMargin_by4p_wins", "ATS_totalGoalMargin_by4p_losses", "ATS_homeGoalsScored", "ATS_homeGoalsConceded", "ATS_homeGoalsScoredPerMatch", "ATS_homeGoalsConcededPerMatch", "ATS_homeScored_ConcededPerMatch", "ATS_awayGoalsScored", "ATS_awayGoalsConceded", "ATS_awayGoalsScoredPerMatch", "ATS_awayGoalsConcededPerMatch", "ATS_awayScored_ConcededPerMatch" ] # string type features stype_features = [ "HTS_teamGoalsDifference", "HTS_teamCleanSheetPercent", "HTS_homeOver1_5GoalsPercent", 
"HTS_homeOver2_5GoalsPercent", "HTS_homeOver3_5GoalsPercent", "HTS_homeOver4_5GoalsPercent", "HTS_awayOver1_5GoalsPercent", "HTS_awayOver2_5GoalsPercent", "HTS_awayOver3_5GoalsPercent", "HTS_awayOver4_5GoalsPercent", "HTS_homeCleanSheets", "HTS_homeWonToNil", "HTS_homeBothTeamsScored", "HTS_homeFailedToScore", "HTS_homeLostToNil", "HTS_awayCleanSheets", "HTS_awayWonToNil", "HTS_awayBothTeamsScored", "HTS_awayFailedToScore", "HTS_awayLostToNil", "HTS_homeScored_ConcededBy_0", "HTS_homeScored_ConcededBy_1", "HTS_homeScored_ConcededBy_2", "HTS_homeScored_ConcededBy_3", "HTS_homeScored_ConcededBy_4", "HTS_homeScored_ConcededBy_5p", "HTS_homeScored_ConcededBy_0_or_1", "HTS_homeScored_ConcededBy_2_or_3", "HTS_homeScored_ConcededBy_4p", "HTS_awayScored_ConcededBy_0", "HTS_awayScored_ConcededBy_1", "HTS_awayScored_ConcededBy_2", "HTS_awayScored_ConcededBy_3", "HTS_awayScored_ConcededBy_4", "HTS_awayScored_ConcededBy_5p", "HTS_awayScored_ConcededBy_0_or_1", "HTS_awayScored_ConcededBy_2_or_3", "HTS_awayScored_ConcededBy_4p", "ATS_teamGoalsDifference", "ATS_teamCleanSheetPercent", "ATS_homeOver1_5GoalsPercent", "ATS_homeOver2_5GoalsPercent", "ATS_homeOver3_5GoalsPercent", "ATS_homeOver4_5GoalsPercent", "ATS_awayOver1_5GoalsPercent", "ATS_awayOver2_5GoalsPercent", "ATS_awayOver3_5GoalsPercent", "ATS_awayOver4_5GoalsPercent", "ATS_homeCleanSheets", "ATS_homeWonToNil", "ATS_homeBothTeamsScored", "ATS_homeFailedToScore", "ATS_homeLostToNil", "ATS_awayCleanSheets", "ATS_awayWonToNil", "ATS_awayBothTeamsScored", "ATS_awayFailedToScore", "ATS_awayLostToNil", "ATS_homeScored_ConcededBy_0", "ATS_homeScored_ConcededBy_1", "ATS_homeScored_ConcededBy_2", "ATS_homeScored_ConcededBy_3", "ATS_homeScored_ConcededBy_4", "ATS_homeScored_ConcededBy_5p", "ATS_homeScored_ConcededBy_0_or_1", "ATS_homeScored_ConcededBy_2_or_3", "ATS_homeScored_ConcededBy_4p", "ATS_awayScored_ConcededBy_0", "ATS_awayScored_ConcededBy_1", "ATS_awayScored_ConcededBy_2", "ATS_awayScored_ConcededBy_3", "ATS_awayScored_ConcededBy_4", "ATS_awayScored_ConcededBy_5p", "ATS_awayScored_ConcededBy_0_or_1", "ATS_awayScored_ConcededBy_2_or_3", "ATS_awayScored_ConcededBy_4p" ] # cast types to columns: doubles ml_df = ml_df.select(*[col(c).cast("double").alias(c) for c in dtype_features] + stype_features + [ml_df.ft_score]) # add extra column; over/under over_under_udf = udf( lambda r: "over" if (int(r.split("-")[0]) + int(r.split("-")[1])) > 2 else "under", StringType() ) ml_df = (ml_df.withColumn("over_under", over_under_udf(ml_df.ft_score))) ml_df.select("ft_score", "over_under").show(5) # drop "ft_score" ml_df = ml_df.drop("ft_score") from pyspark.sql.types import DoubleType # convert percent cols to float percent_udf = udf( lambda r: float(r.split("%")[0])/100, DoubleType() ) ml_df = ml_df.select(*[percent_udf(col(col_name)).name(col_name) for col_name in stype_features] + dtype_features + [ml_df.over_under]) ``` ### 2. 
Some featurization ``` from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorIndexer, VectorAssembler from pyspark.sql import Row from pyspark.ml import Pipeline # index the label; "over_under" si = StringIndexer(inputCol = "over_under", outputCol = "over_under_indx") df_indexed = si\ .fit(ml_df)\ .transform(ml_df)\ .drop("over_under")\ .withColumnRenamed("over_under_indx", "over_under") from pyspark.ml.feature import Normalizer from pyspark.sql.functions import mean, stddev # normalize feature columns; [(x - mean)/std_dev] def normalize_col(df, cols): # find mean & std for each column aggExpr = [] aggStd = [] for col in cols: aggExpr.append(mean(df[col]).alias(col)) aggStd.append(stddev(df[col]).alias(col + "_stddev")) averages = df.agg(*aggExpr).collect()[0] std_devs = df.agg(*aggStd).collect()[0] # standardize dataframe for col in cols: df = df.withColumn(col + "_norm", ((df[col] - averages[col]) / std_devs[col + "_stddev"])) return df, averages, std_devs # normalize dataframe feature_cols = dtype_features + stype_features df_indexed, averages, std_devs = normalize_col(df_indexed, feature_cols) # # display some normalized column # df_indexed.select("HTS_teamPosition", "HTS_teamPosition_norm").show(5) from pyspark.ml.linalg import Vectors from pyspark.sql import Row feature_cols = [col+"_norm" for col in feature_cols] df_indexed = df_indexed[feature_cols + ["over_under"]] # # vectorize labels and features # row = Row("label", "features") # label_fts = df_indexed.rdd.map( # lambda r: (row(r[-1], Vectors.dense(r[:-1]))) # ).toDF() # label_fts.show(5) # label_fts.select("features").take(1) # split train/test values train, test = df_indexed.randomSplit([0.8, 0.2]) # split train/validate values train, validate = train.randomSplit([0.9, 0.1]) print("Train shape: '{}, {}'".format(train.count(), len(train.columns))) print("Test shape: '{}, {}'".format(test.count(), len(test.columns))) print("Validate shape: '{}, {}'".format(validate.count(), len(validate.columns))) ``` ### 3. Compose Neural-network ``` import numpy as np X = np.array(train.select(feature_cols).collect()) y = np.array(train.select("over_under").collect()) print("train features shape: '{}'".format(X.shape)) print("train labels shape: '{}'".format(y.shape)) X_test = np.array(test.select(feature_cols).collect()) y_test = np.array(test.select("over_under").collect()) print("test features shape: '{}'".format(X_test.shape)) print("test labels shape: '{}'".format(y_test.shape)) # get some Keras essentials from keras.models import Sequential from keras.layers import Dense, Dropout # build model model = Sequential() model.add(Dense(60, activation="relu", input_dim = 187)) model.add(Dropout(0.4)) model.add(Dense(50, activation="relu")) # output layer model.add(Dense(1, activation="sigmoid")) # compile & evaluate training model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(X, y, epochs=1000, batch_size=60) # evaluate the model scores = model.evaluate(X_test, y_test) print("{}: {}%".format(model.metrics_names[1], scores[1]*100)) print("Loss: {}".format(scores[0])) ```
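The `validate` split created above is never actually scored. A minimal sketch of how it could be used with the trained model (assuming the `validate` DataFrame, `feature_cols`, and the fitted `model` from the cells above):

```
import numpy as np

# collect the held-out validation split into numpy arrays, mirroring the train/test cells above
X_val = np.array(validate.select(feature_cols).collect())
y_val = np.array(validate.select("over_under").collect())

# loss and accuracy on data the network has never seen during fitting
val_scores = model.evaluate(X_val, y_val)
print("Validation {}: {:.2f}%".format(model.metrics_names[1], val_scores[1] * 100))

# the sigmoid output is the probability of the positive class;
# threshold at 0.5 to recover hard over/under predictions
val_probs = model.predict(X_val)
val_preds = (val_probs > 0.5).astype(int).ravel()
```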
github_jupyter
# Credits Updated to detectwaste by: * Sylwia Majchrowska ``` %matplotlib inline import sys from pycocotools.coco import COCO import json import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns; sns.set() import os import skimage import skimage.io as io import copy def show_values_on_bars(axs, h_v="v", space=0.4): def _show_on_single_plot(ax): if h_v == "v": for p in ax.patches: _x = p.get_x() + p.get_width() / 2 _y = p.get_y() + p.get_height() value = int(p.get_height()) ax.text(_x, _y, value, ha="center") elif h_v == "h": for p in ax.patches: _x = p.get_x() + p.get_width() + float(space) _y = p.get_y() + p.get_height() value = int(p.get_width()) ax.text(_x, _y, value, ha="left") if isinstance(axs, np.ndarray): for idx, ax in np.ndenumerate(axs): _show_on_single_plot(ax) else: _show_on_single_plot(axs) ``` ## TrashCan 1.0 - background: under watter - classes: 8 - comment: captured frames of 3 videos (very similiar photos of the same objects) - annotation: inastance masks ``` dataDir='/dih4/dih4_2/wimlds/data/TrashCan_v1/material_version' dataType='all' annFile='{}/instances_{}_trashcan.json'.format(dataDir,dataType) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['trash_wood']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) # select only trash #trash_categories = [item for item in dataset['categories'] if item['name'].startswith('trash')] cat_names = [item['name'] for item in dataset['categories'] if item['name'].startswith('trash')] #trash_categories # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(dataset['categories']),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) print(len(dataset['images']), len([ann for ann in dataset['annotations'] if ann['image_id'] in [i['id'] for i in dataset['images']]])) def trashcan_to_detectwaste(label): metals_and_plastics = ['trash_plastic', 'trash_metal'] non_recyclable = ['trash_fabric', 'trash_rubber', 'trash_paper'] other = 
['trash_fishing_gear'] bio = ['trash_wood'] unknown = ['trash_etc'] if (label in metals_and_plastics): label="metals_and_plastics" elif(label in non_recyclable): label="non-recyclable" elif(label in other): label="other" elif(label in bio): label="bio" elif(label in unknown): label="unknown" else: print(label, "is non-trashcan label") label = "unknown" return label ``` ## Trash-ICRA19 - background: under watter - classes: 7 - comment: captured frames of 3 videos (very similiar photos of the same objects) - annotation: bboxes ``` dataDir='/dih4/dih4_2/wimlds/data/trash_icra19/' dataType='all' annFile='{}/{}_icra_coco.json'.format(dataDir,dataType) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['rubber']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) # select only trash allowed_items = ['plastic', 'unknown', 'cloth', 'rubber', 'metal', 'wood', 'platstic', 'paper', 'papper'] cat_names = [item['name'] for item in dataset['categories'] if item['name'] in allowed_items] trash_categories = [item for item in dataset['categories'] if item['name'] in allowed_items] print(trash_categories) # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) print(len(dataset['images']), len([ann for ann in dataset['annotations'] if ann['image_id'] in [i['id'] for i in dataset['images']]])) def trashicra_to_detectwaste(label): metals_and_plastics = ['plastic', 'metal', 'rubber'] non_recyclable = ['cloth', 'paper'] bio = ['wood'] unknown = ['unknown'] if (label in metals_and_plastics): label="metals_and_plastics" elif(label in non_recyclable): label="non-recyclable" elif(label in bio): label="bio" elif(label in unknown): label="unknown" else: print(label, "is non-trashicra label") label = "unknown" return label ``` ## UAVVaste - background: outside - classes: 1 - comment: very distance trash (from dron) - annotation: instance masks ``` 
dataDir='/dih4/dih4_2/wimlds/data/uavvaste' dataType='images' annFile='{}/annotations.json'.format(dataDir,dataType) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['rubber']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) ``` ## Drink waste - background: indoor - classes: 4 - comment: very similiar photos of the same objects - annotation: bboxes ``` dataDir='/dih4/dih4_2/wimlds/data/' dataType='drinking-waste/YOLO_imgs' annFile='{}/drinkwaste_coco.json'.format(dataDir) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Glass']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = 
dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) def drinkingwaste_to_detectwaste(label): metals_and_plastics = ['PET', 'HDPEM', 'AluCan'] glass = ['Glass'] if (label in metals_and_plastics): label="metals_and_plastics" elif(label in glass): label="glass" else: print(label, "is non-drinkingwaste label") label = "unknown" return label ``` ## MJU-Waste v1.0 - background: indoor, in hand - classes: 1 - comment: such simply background, labolatroy - annotation: instance masks (and depth - RGBD images) ``` dataDir='/dih4/dih4_2/wimlds/data/mju-waste-v1' dataType='JPEGImages' type_ann='all' annFile='{}/mju-waste/{}.json'.format(dataDir, type_ann) # initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Rubbish']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) ``` ## wade-ai - background: outside, google maps - classes: 1 - comment: roads and pavements - annotation: instance masks ``` dataDir='/dih4/dih4_2/wimlds/data/wade-ai' dataType='wade-ai_images' type_ann='all' annFile='{}/{}_wade_ai.json'.format(dataDir, type_ann) # 
initialize COCO api for instance annotations coco=COCO(annFile) # display COCO categories and supercategories cats = coco.loadCats(coco.getCatIds()) nms=[cat['name'] for cat in cats] print('COCO categories: \n{}\n'.format(', '.join(nms))) nms = set([cat['supercategory'] for cat in cats]) print('COCO supercategories: \n{}'.format(', '.join(nms))) # load and display image catIds = coco.getCatIds(catNms=['Rubbish']); imgIds = coco.getImgIds(catIds=catIds); img_id = imgIds[np.random.randint(0,len(imgIds))] print('Image n°{}'.format(img_id)) img = coco.loadImgs(img_id)[0] img_name = '%s/%s/%s'%(dataDir, dataType, img['file_name']) #img_name = '%s/%s'%(dataDir, img['file_name']) print('Image name: {}'.format(img_name)) I = io.imread(img_name) plt.figure() plt.imshow(I) plt.axis('off') # load and display instance annotations plt.imshow(I); plt.axis('off') annIds = coco.getAnnIds(imgIds=img['id'], catIds=catIds) anns = coco.loadAnns(annIds) coco.showAnns(anns)#, draw_bbox=True) with open(annFile, 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: ann['category_id'] = 1 cat_histogram[ann['category_id'] - 1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) with open('/dih4/dih4_home/smajchrowska/detect-waste/annotations/annotations_binary_train.json', 'r') as f: dataset = json.loads(f.read()) cat_names = [item['name'] for item in dataset['categories'] if item['name']] trash_categories = dataset['categories'] # define variables categories = dataset['categories'] anns = dataset['annotations'] imgs = dataset['images'] nr_cats = len(categories) nr_annotations = len(anns) nr_images = len(imgs) # Count annotations cat_histogram = np.zeros(len(trash_categories),dtype=int) for ann in dataset['annotations']: cat_histogram[ann['category_id']-1] += 1 # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(5,15)) # Convert to DataFrame df = pd.DataFrame({'Categories': cat_names, 'Number of annotations': cat_histogram}) df = df.sort_values('Number of annotations', 0, False) # Plot the histogram sns.set_color_codes("pastel") sns.set(style="whitegrid") plot_1 = sns.barplot(x="Number of annotations", y="Categories", data=df, label="Total", color="b") show_values_on_bars(plot_1, "h", 0.3) len(imgs) ```
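The load-count-plot block above is repeated almost verbatim for every dataset. A small helper along these lines (a sketch, assuming the COCO-style JSON layout and the imports plus `show_values_on_bars` defined at the top of this notebook) would remove the duplication:

```
def plot_annotation_histogram(ann_file, figsize=(5, 15)):
    """Load a COCO-style annotation file and plot the number of annotations per category."""
    with open(ann_file, 'r') as f:
        dataset = json.loads(f.read())

    cat_names = [cat['name'] for cat in dataset['categories']]
    cat_histogram = np.zeros(len(cat_names), dtype=int)
    for ann in dataset['annotations']:
        cat_histogram[ann['category_id'] - 1] += 1

    df = pd.DataFrame({'Categories': cat_names,
                       'Number of annotations': cat_histogram})
    df = df.sort_values('Number of annotations', ascending=False)

    f, ax = plt.subplots(figsize=figsize)
    sns.set_color_codes("pastel")
    sns.set(style="whitegrid")
    plot_ = sns.barplot(x="Number of annotations", y="Categories",
                        data=df, label="Total", color="b")
    show_values_on_bars(plot_, "h", 0.3)
    return df

# example usage (path taken from the wade-ai section above)
# plot_annotation_histogram('/dih4/dih4_2/wimlds/data/wade-ai/all_wade_ai.json')
```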
github_jupyter
## In this notebook we are going to Predict the Growth of Google Stock using an LSTM Model and CRISP-DM.

```
# importing the libraries
import math
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')

"""For the LSTM model please use NumPy version 1.19 or lower,
because the latest TensorFlow does not accept the newer NumPy arrays as tensors.
"""
```

# Data Understanding

The data is already processed to price-split values, so it is easy to analyze, but we create new tables to optimize our model.

```
# importing price-split data
data = pd.read_csv('prices-split-adjusted.csv')
data

# checking data for null values
data.isnull().sum()
```

# Data Preprocessing

Creating a table for a specific stock.

```
# initializing the dataset for the stock to be analyzed
data = data.loc[(data['symbol'] == 'GOOG')]
data = data.drop(columns=['symbol'])
data = data[['date','open','close','low','volume','high']]
data

# number of rows and columns we are working with
data.shape
```

Plotting the closing price of the stock.

```
plt.figure(figsize=(16,8))
plt.title('Closing Price of the Stock Historically')
plt.plot(data['close'])
plt.xlabel('Year', fontsize=20)
plt.ylabel('Closing Price Historically ($)', fontsize=20)
plt.show()
```

#### Here we can see that there is long-term growth in this stock.

# Preparing Data for LSTM

Here we are going to use an LSTM for a more accurate prediction of the stock value change, and we check its accuracy on a particular stock. First we create a separate dataframe with only the "close" column.

```
# getting the rows and columns we need
data = data.filter(['close'])
dataset = data.values

# find out the number of rows that are present in this dataset in order to train our model
training_data_len = math.ceil(len(dataset) * .8)
training_data_len
```

Scaling the data to make better predictions.

```
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data

# creating the training dataset
train_data = scaled_data[0:training_data_len , :]
x_train = []
y_train = []
for j in range(60, len(train_data)):
    x_train.append(train_data[j-60:j,0])
    y_train.append(train_data[j,0])
    if j<=60:
        print(x_train)
        print(y_train)
        print()

x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
x_train.shape
```

# Building LSTM Model

```
model = Sequential()
model.add(LSTM(50, return_sequences=True, input_shape = (x_train.shape[1], 1)))
model.add(LSTM(50, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
```

##### Training the Model

```
model.fit(x_train, y_train, batch_size=1, epochs=1)

test_data = scaled_data[training_data_len - 60: , :]
x_test = []
y_test = dataset[training_data_len:, :]
for j in range(60, len(test_data)):
    x_test.append(test_data[j-60:j, 0])

x_test = np.array(x_test)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)

# finding the root mean squared error for the stock
rmse = np.sqrt(np.mean((predictions - y_test)**2))
rmse
```

# Visualization

### Plotting Actual Close Values vs Predicted Values of the LSTM Model

```
# building a close-value and prediction-value table for comparison
train = data[:training_data_len]
val = data[training_data_len:]
val['Predictions'] = predictions

plt.figure(figsize=(16,8))
plt.title('LSTM Model Data')
plt.xlabel('Date', fontsize=16)
plt.ylabel('Close Price', fontsize=16)
plt.plot(train['close'])
plt.plot(val[['close', 'Predictions']])
plt.legend(['Trained Dataset', 'Actual Value', 'Predictions'])
plt.show()
```

# Evaluation of the model

Making a table of actual prices and predicted prices.

```
# actual close values against predictions
val

new_data = pd.read_csv('prices-split-adjusted.csv')
new_data = data.filter(['close'])
last_60_days = new_data[-60:].values
last_60_scaled = scaler.transform(last_60_days)
X_test = []
X_test.append(last_60_scaled)
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted_price = model.predict(X_test)
predicted_price = scaler.inverse_transform(predicted_price)
print('The predicted price of the final value of the dataset', predicted_price)

new_data.tail(1)
```

#### The predicted price is USD 122.0, whereas the actual observed value is USD 115.82

```
# check predicted values
predictions = model.predict(x_test)
# undo scaling
predictions = scaler.inverse_transform(predictions)

# calculate RMSE score
rmse = np.sqrt(np.mean(((predictions - y_test)**2)))
rmse

neww_data = pd.read_csv('prices-split-adjusted.csv')

val.describe()

x = val.close.mean()
y = val.Predictions.mean()
Accuracy = x/y*100
print("The accuracy of the model is " , Accuracy)
```

The LSTM model accuracy is 99.39%. As we can see, the predictions made by the LSTM model show greater accuracy than the LR model, so we can finally conclude that the stock is going to grow over the long term.
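The accuracy figure above only compares the mean of the actual closes with the mean of the predictions; a per-row error metric such as the mean absolute percentage error gives a more direct read on prediction quality. A short sketch reusing the `val` table built above:

```
# mean absolute percentage error over the test window
mape = np.mean(np.abs((val['close'] - val['Predictions']) / val['close'])) * 100
print("MAPE: {:.2f}%  (i.e. roughly {:.2f}% average accuracy)".format(mape, 100 - mape))
```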
github_jupyter
# Training a dense neural network The handwritten digit recognition is a classification problem. We will start with the simplest possible approach for image classification - a fully-connected neural network (which is also called a *perceptron*). We use `pytorchcv` helper to load all data we have talked about in the previous unit. ``` !wget https://raw.githubusercontent.com/MicrosoftDocs/pytorchfundamentals/main/computer-vision-pytorch/pytorchcv.py import torch import torch.nn as nn import torchvision import matplotlib.pyplot as plt import pytorchcv pytorchcv.load_mnist() ``` ## Fully-connected dense neural networks A basic **neural network** in PyTorch consists of a number of **layers**. The simplest network would include just one fully-connected layer, which is called **Linear** layer, with 784 inputs (one input for each pixel of the input image) and 10 outputs (one output for each class). ![A graph showing how an image is broken into layers based on the pixels.](./images/dense-onelayer-network.png) As we discussed above, the dimension of our digit images is $1\times28\times28$. Because the input dimension of a fully-connected layer is 784, we need to insert another layer into the network, called **Flatten**, to change tensor shape from $1\times28\times28$ to $784$. We want $n$-th output of the network to return the probability of the input digit being equal to $n$. Because the output of a fully-connected layer is not normalized to be between 0 and 1, it cannot be thought of as probability. To turn it into a probability we need to apply another layer called **Softmax**. In PyTorch, it is easier to use **LogSoftmax** function, which will also compute logarithms of output probabilities. To turn the output vector into the actual probabilities, we need to take **torch.exp** of the output. Thus, the architecture of our network can be represented by the following sequence of layers: ![An image showing the architecture of the network broken into a sequence of layers.](./images/onelayer-network-layers.png) It can be defined in PyTorch in the following way, using `Sequential` syntax: ``` net = nn.Sequential( nn.Flatten(), nn.Linear(784,10), # 784 inputs, 10 outputs nn.LogSoftmax()) ``` ## Training the network A network defined this way can take any digit as input and produce a vector of probabilities as an output. Let's see how this network performs by giving it a digit from our dataset: ``` print('Digit to be predicted: ',data_train[0][1]) torch.exp(net(data_train[0][0])) ``` As you can see the network predicts similar probabilities for each digit. This is because it has not been trained on how to recognize the digits. We need to give it our training data to train it on our dataset. To train the model we will need to create **batches** of our datasets of a certain size, let's say 64. PyTorch has an object called **DataLoader** that can create batches of our data for us automatically: ``` train_loader = torch.utils.data.DataLoader(data_train,batch_size=64) test_loader = torch.utils.data.DataLoader(data_test,batch_size=64) # we can use larger batch size for testing ``` The training process steps are as follows: 1. We take a minibatch from the input dataset, which consists of input data (features) and expected result (label). 2. We calculate the predicted result for this minibatch. 3. The difference between this result and expected result is calculated using a special function called the **loss function** 4. 
We calculate the gradients of this loss function with respect to model weights (parameters), which are then used to adjust the weights to optimize the performance of the network. The amount of adjustment is controlled by a parameter called **learning rate**, and the details of optimization algorithm are defined in the **optimizer** object. 5. We repeat those steps until the whole dataset is processed. One complete pass through the dataset is called **an epoch**. Here is a function that performs one epoch training: ``` def train_epoch(net,dataloader,lr=0.01,optimizer=None,loss_fn = nn.NLLLoss()): optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr) net.train() total_loss,acc,count = 0,0,0 for features,labels in dataloader: optimizer.zero_grad() out = net(features) loss = loss_fn(out,labels) #cross_entropy(out,labels) loss.backward() optimizer.step() total_loss+=loss _,predicted = torch.max(out,1) acc+=(predicted==labels).sum() count+=len(labels) return total_loss.item()/count, acc.item()/count train_epoch(net,train_loader) ``` Since this function is pretty generic we will be able to use it later in our other examples. The function takes the following parameters: * **Neural network** * **DataLoader**, which defines the data to train on * **Loss Function**, which is a function that measures the difference between the expected result and the one produced by the network. In most of the classification tasks `NLLLoss` is used, so we will make it a default. * **Optimizer**, which defined an *optimization algorithm*. The most traditional algorithm is *stochastic gradient descent*, but we will use a more advanced version called **Adam** by default. * **Learning rate** defines the speed at which the network learns. During learning, we show the same data multiple times, and each time weights are adjusted. If the learning rate is too high, new values will overwrite the knowledge from the old ones, and the network would perform badly. If the learning rate is too small it results in a very slow learning process. Here is what we do when training: * Switch the network to training mode (`net.train()`) * Go over all batches in the dataset, and for each batch do the following: - compute predictions made by the network on this batch (`out`) - compute `loss`, which is the discrepancy between predicted and expected values - try to minimize the loss by adjusting weights of the network (`optimizer.step()`) - compute the number of correctly predicted cases (**accuracy**) The function calculates and returns the average loss per data item, and training accuracy (percentage of cases guessed correctly). By observing this loss during training we can see whether the network is improving and learning from the data provided. It is also important to control the accuracy on the test dataset (also called **validation accuracy**). A good neural network with a lot of parameters can predict with decent accuracy on any training dataset, but it may poorly generalize to other data. That's why in most cases we set aside part of our data, and then periodically check how well the model performs on them. 
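In this notebook the MNIST test set plays that role; for your own data you could instead carve a validation subset out of the training set, for example with `torch.utils.data.random_split` (a sketch only — the 80/20 ratio is arbitrary and these loaders are not used in the cells below):

```
from torch.utils.data import DataLoader, random_split

# hold back 20% of the training set for validation
n_val = len(data_train) // 5
train_subset, val_subset = random_split(data_train, [len(data_train) - n_val, n_val])

train_dl = DataLoader(train_subset, batch_size=64, shuffle=True)
val_dl = DataLoader(val_subset, batch_size=64)
```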
Here is the function to evaluate the network on test dataset: ``` def validate(net, dataloader,loss_fn=nn.NLLLoss()): net.eval() count,acc,loss = 0,0,0 with torch.no_grad(): for features,labels in dataloader: out = net(features) loss += loss_fn(out,labels) pred = torch.max(out,1)[1] acc += (pred==labels).sum() count += len(labels) return loss.item()/count, acc.item()/count validate(net,test_loader) ``` We train the model for several epochs observing training and validation accuracy. If training accuracy increases while validation accuracy decreases that would be an indication of **overfitting**. Meaning it will do well on your dataset but not on new data. Below is the training function that can be used to perform both training and validation. It prints the training and validation accuracy for each epoch, and also returns the history that can be used to plot the loss and accuracy on the graph. ``` def train(net,train_loader,test_loader,optimizer=None,lr=0.01,epochs=10,loss_fn=nn.NLLLoss()): optimizer = optimizer or torch.optim.Adam(net.parameters(),lr=lr) res = { 'train_loss' : [], 'train_acc': [], 'val_loss': [], 'val_acc': []} for ep in range(epochs): tl,ta = train_epoch(net,train_loader,optimizer=optimizer,lr=lr,loss_fn=loss_fn) vl,va = validate(net,test_loader,loss_fn=loss_fn) print(f"Epoch {ep:2}, Train acc={ta:.3f}, Val acc={va:.3f}, Train loss={tl:.3f}, Val loss={vl:.3f}") res['train_loss'].append(tl) res['train_acc'].append(ta) res['val_loss'].append(vl) res['val_acc'].append(va) return res # Re-initialize the network to start from scratch net = nn.Sequential( nn.Flatten(), nn.Linear(784,10), # 784 inputs, 10 outputs nn.LogSoftmax()) hist = train(net,train_loader,test_loader,epochs=5) ``` This function logs messages with the accuracy on training and validation data from each epoch. It also returns this data as a dictionary (called **history**). We can then visualize this data to better understand our model training. ``` plt.figure(figsize=(15,5)) plt.subplot(121) plt.plot(hist['train_acc'], label='Training acc') plt.plot(hist['val_acc'], label='Validation acc') plt.legend() plt.subplot(122) plt.plot(hist['train_loss'], label='Training loss') plt.plot(hist['val_loss'], label='Validation loss') plt.legend() ``` The diagram on the left shows the `training accuracy` increasing (which corresponds to the network learning to classify our training data better and better), while `validation accuracy` starts to fall. The diagram on the right show the `training loss` and `validation loss`, you can see the `training loss` decreasing (meaning its performing better) and the `validation loss` increasing (meaning its performing worse). These graphs would indicate the model is **overfitted**. ## Visualizing network weights Now lets visualize our weights of our neural network and see what they look like. When the network is more complex than just one layer it can be a difficult to visulize the results like this. However, in our case (classification of a digit) it happens by multiplying the initial image by a weight matrix allowing us to visualize the network weights with a bit of added logic. Let's create a `weight_tensor` which will have a dimension of 784x10. This tensor can be obtained by calling the `net.parameters()` method. In this example, if we want to see if our number is 0 or not, we will multiply input digit by `weight_tensor[0]` and pass the result through a softmax normalization to get the answer. 
This results in the weight tensor elements somewhat resembling the average shape of the digit it classifies: ``` weight_tensor = next(net.parameters()) fig,ax = plt.subplots(1,10,figsize=(15,4)) for i,x in enumerate(weight_tensor): ax[i].imshow(x.view(28,28).detach()) ``` ## Takeaway Training a neural network in PyTorch can be programmed with a training loop. It may seem like a complicated process, but in real life we need to write it once, and we can then re-use this training code later without changing it. We can see that a single-layer dense neural network shows relatively good performance, but we definitely want to get higher than 91% on accuracy! In the next unit, we will try to use multi-level perceptrons.
github_jupyter
![data-x](http://oi64.tinypic.com/o858n4.jpg) --- # Pandas Introduction ### with Stock Data and Correlation Examples **Author list:** Alexander Fred-Ojala & Ikhlaq Sidhu **References / Sources:** Includes examples from Wes McKinney and the 10min intro to Pandas **License Agreement:** Feel free to do whatever you want with this code ___ ## What Does Pandas Do? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p1.jpg"> ## What is a Pandas Table Object? <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/pandas-p2.jpg"> # Import packages ``` # import packages import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # for plotting import seaborn as sns # for plotting and styling # jupyter notebook magic to display plots in output %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # make the plots bigger ``` # Part 1 ### Simple creation and manipulation of Pandas objects **Key Points:** Pandas has two / three main data types: * Series (similar to numpy arrays, but with index) * DataFrames (table or spreadsheet with Series in the columns) * Panels (3D version of DataFrame, not as common) ### It is easy to create a DataFrame ### We use `pd.DataFrame(**inputs**)` and can insert almost any data type as an argument **Function:** `pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)` Input data can be a numpy ndarray (structured or homogeneous), dict, or DataFrame. Dict can contain Series, arrays, constants, or list-like objects as the values. ``` # Try it with an array np.random.seed(0) # set seed for reproducibility a1 = np.array(np.random.randn(3)) a2 = np.array(np.random.randn(3)) a3 = np.array(np.random.randn(3)) print (a1) print (a2) print (a3) # Create our first DataFrame w/ an np.array - it becomes a column df0 = pd.DataFrame(a1) print(type(df0)) df0 # DataFrame from list of np.arrays df0 = pd.DataFrame([a1, a2, a3]) df0 # notice that there is no column label, only integer values, # and the index is set automatically # DataFrame from 2D np.array ax = np.random.randn(9).reshape(3,3) ax df0 = pd.DataFrame(ax,columns=['rand_normal_1','Random Again','Third'], index=[100,200,99]) # we can also assign columns and indices, sizes have to match df0 # DataFrame from a Dictionary dict1 = {'A':a1, 'B':a2} df1 = pd.DataFrame(dict1) df1 # note that we now have columns without assignment # We can easily add another column (just as you add values to a dictionary) df1['C']=a3 df1 # We can add a list with strings and ints as a column df1['L'] = ["Something", 3, "words"] df1 ``` # Pandas Series object ### Like an np.array, but we can combine data types and it has its own index Note: Every column in a DataFrame is a Series ``` print(df1[['L','A']]) print(type(df1['L'])) df1 # We can also rename columns df1 = df1.rename(columns = {'L':'Renamed'}) df1 # We can delete columns del df1['C'] df1 # or drop columns df1.drop('A',axis=1,inplace=True) # does not change df1 if we don't set inplace=True df1 df1 # or drop rows df1.drop(0) # Example: view only one column df1['B'] # Or view several column df1[['B','Renamed']] ``` # Other ways of slicing In the 10 min Pandas Guide, you will see many ways to view, slice a dataframe * view/slice by rows, eg `df[1:3]`, etc. 
* view by index location, see `df.iloc` (iloc) * view by ranges of labels, ie index label 2 to 5, or dates feb 3 to feb 25, see `df.loc` (loc) * view a single row by the index `df.xs` (xs) or `df.ix` (ix) * filtering rows that have certain conditions * add column * add row * How to change the index and more... ``` print (df1[0:2]) # ok df1 df1.iloc[1,1] df1 ``` # Part 2 ## Finance example: Large Data Frames ### Now, lets get some data in CSV format. See https://www.quantshare.com/sa-43-10-ways-to-download-historical-stock-quotes-data-for-free ``` !ls data/ # We can download data from the web by using pd.read_csv # A CSV file is a comma seperated file # We can use this 'pd.read_csv' method with urls that host csv files base_url = 'https://google.com/finance?output=csv&q=' dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) dfg dfg.head() # show first five values dfg.tail(3) # last three dfg.columns # returns columns, can be used to loop over dfg.index # return ``` # Convert the index to pandas datetime object ``` dfg['Date'][0] type(dfg['Date'][0]) dfg.index = pd.to_datetime(dfg['Date']) # set index dfg.drop(['Date'],axis=1,inplace=True) dfg.head() print(type(dfg.index[0])) dfg.index[0] dfg.index dfg['2017-08':'2017-06'] ``` # Attributes & general statitics of a Pandas DataFrame ``` dfg.shape # 251 business days last year dfg.columns dfg.size # Some general statistics dfg.describe() # Boolean indexing dfg['Open'][dfg['Open']>1130] # check what dates the opening # Check where Open, High, Low and Close where greater than 1130 dfg[dfg>1000].drop('Volume',axis=1) # If you want the values in an np array dfg.values ``` ## .loc() ``` # Getting a cross section with .loc - BY VALUES of the index and columns # df.loc[a:b, x:y], by rows and column location # Note: You have to know indices and columns dfg.loc['2017-08-31':'2017-08-21','Open':'Low'] ``` ## .iloc() ``` # .iloc slicing at specific location - BY POSITION in the table # Recall: # dfg[a:b] by rows # dfg[[col]] or df[[col1, col2]] by columns # df.loc[a:b, x:y], by index and column values + location # df.iloc[3:5,0:2], numeric position in table dfg.iloc[1:4,3:5] # 2nd to 4th row, 4th to 5th column ``` ### More Basic Statistics ``` # We can change the index sorting dfg.sort_index(axis=0, ascending=True).head() # starts a year ago # sort by value dfg.sort_values(by='Open')[0:10] ``` # Boolean ``` dfg[dfg>1115].head(10) # we can also drop all NaN values dfg[dfg>1115].head(10).dropna() dfg2 = dfg # make a copy and not a view dfg2 is dfg ``` ### Setting Values ``` # Recall dfg.head(4) # All the ways to view # can also be used to set values # good for data normalization dfg['Volume'] = dfg['Volume']/100000.0 dfg.head(4) ``` ### More Statistics and Operations ``` # mean by column, also try var() for variance dfg.mean() dfg[0:5].mean(axis = 1) # row means of first five rows ``` # PlotCorrelation ### Load several stocks ``` # Reload dfg = pd.read_csv('data/googl.csv').drop('Unnamed: 0',axis=1) # Google stock data dfa = pd.read_csv('data/apple.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfm = pd.read_csv('data/microsoft.csv').drop('Unnamed: 0',axis=1) # Google stock data dfn = pd.read_csv('data/nike.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb = pd.read_csv('data/boeing.csv').drop('Unnamed: 0',axis=1) # Apple stock data dfb.head() # Rename columns dfg = dfg.rename(columns = {'Close':'GOOG'}) #print (dfg.head()) dfa = dfa.rename(columns = 
{'Close':'AAPL'}) #print (dfa.head()) dfm = dfm.rename(columns = {'Close':'MSFT'}) #print (dfm.head()) dfn = dfn.rename(columns = {'Close':'NKE'}) #print (dfn.head()) dfb = dfb.rename(columns = {'Close':'BA'}) dfb.head(2) # Lets merge some tables # They will all merge on the common column Date df = dfg[['Date','GOOG']].merge(dfa[['Date','AAPL']]) df = df.merge(dfm[['Date','MSFT']]) df = df.merge(dfn[['Date','NKE']]) df = df.merge(dfb[['Date','BA']]) df.head() df['Date'] = pd.to_datetime(df['Date']) df = df.set_index('Date') df.head() df.plot() df['2017'][['NKE','BA']].plot() # show a correlation matrix (pearson) crl = df.corr() crl crl.sort_values(by='GOOG',ascending=False) s = crl.unstack() so = s.sort_values(ascending=False) so[so<1] df.mean() sim=df-df.mean() sim.tail() sim[['MSFT','BA']].plot() ```
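Since seaborn is already imported, the correlation matrix can also be rendered as a heatmap, which makes the strongly and weakly correlated pairs easier to spot. A small sketch using the `crl` matrix computed above:

```
plt.figure(figsize=(8,6))
sns.heatmap(crl, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
plt.title('Correlation of closing prices')
plt.show()
```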
github_jupyter
| Name | Surname | Student No | Department | |---|---|---|---| | Emin | Kartci | S014877 | EE Engineering | ## Emin Kartci #### Student ID: S014877 #### Department : Electrical & Electronics Engineering --- ### Semester Project - Foursquare & Restaurant Report --- #### This module is prepared for GUI --- ``` # To interact with user use ipywidgets library - Generate a simple GUI from __future__ import print_function from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets import matplotlib.pyplot as plt import numpy as np ################################-- Function Description --################################# # Purpose: # This class represents a company. For other modules we will need its values. # Moreover, creating a class makes simple our code. # PROPERTIES: # # From constructor: # name -> Name of the company (String) # longitude -> To represent at the map (String) # latitude -> To represent at the map (String) # servicesList -> To compare with others (List) # averagePrice -> For income statement - Simulation (Float) # averageUnitCost -> For incoma statement - Simulation (Float) # salesVolume -> For incoma statement - Simulation (Float) # fixedCost -> For incoma statement - Simulation (Float) # taxRate -> For incoma statement - Simulation (Float) # # Calculate: # # contributionMargin -> For incoma statement - Simulation (Float) # revenue -> For incoma statement - Simulation (Float) # costOfGoodSold -> For incoma statement - Simulation (Float) # grossMargin -> For incoma statement - Simulation (Float) # taxes -> For incoma statement - Simulation (Float) # netIncome -> For incoma statement - Simulation (Float) # # BEHAVIOUR: # # print_company_description -> prints the company inputs to the console # print_income_statement -> prints the income statemnt to the console #################################-- END Function Description --############################## # Create a Company class class Company(): # Constuctor def __init__(self, name,longitude,latitude,servicesList,averagePrice,averageUnitCost,salesVolume,fixedCost,taxRate): self.name = name self.longitude = longitude self.latitude = latitude self.servicesList = servicesList self.averagePrice = averagePrice self.averageUnitCost = averageUnitCost self.salesVolume = salesVolume self.fixedCost = fixedCost self.taxRate = taxRate/100 # calculate remain properties self.contributionMargin = self.calculate_contribution_margin() self.revenue = self.calculate_revenue() self.costOfGoodSold = self.calculate_COGS() self.totalCost = self.calculate_total_cost() self.grossMargin = self.calculate_gross_margin() self.taxes = self.calculate_taxes() self.netIncome = self.calculate_net_income() def calculate_contribution_margin(self): return self.averagePrice - self.averageUnitCost def calculate_revenue(self): return self.averagePrice * self.salesVolume def calculate_COGS(self): return self.salesVolume * self.averageUnitCost def calculate_gross_margin(self): return self.revenue - self.costOfGoodSold def calculate_taxes(self): return self.grossMargin * self.taxRate def calculate_net_income(self): return self.grossMargin - self.taxes def calculate_total_cost(self): return self.costOfGoodSold + self.fixedCost ######################################################################## def print_company_description(self): companyDescription = """ Company Name: {} Location: - Longitude : {}° N - Latitude : {}° E Services: {} Average Price : {} Average Unit Cost : {} Sales Volume : {} Fixed Cost : {} Tax Rate : {} 
""".format(self.name,self.longitude,self.latitude,self.set_services_string(),self.averagePrice,self.averageUnitCost,self.salesVolume,self.fixedCost,self.taxRate) print(companyDescription) def set_services_string(self): serviesString = "" for index in range(1,len(self.servicesList)+1): serviesString += "{} - {}\n\t\t".format(index,self.servicesList[index-1]) return serviesString def print_income_statement(self): incomeStatementStr = """ ========== {}'s MONTHLY INCOME STATEMENT ========== +------------------------------------------------------ | Unit Price : {} | Unit Cost : {} +------------------ | Contribution Margin : {} | Sales Volume : {} | Revenue : {} (Monthly) +------------------ | Cost of Goods Sold : {} (Monthly) | Total Fixed Cost : {} (Monthly) | Total Cost : {} +------------------ | Gross Margin : {} | Taxes : {} +------------------ | NET INCOME : {} +------------------------------------------------------ """.format(self.name,self.averagePrice,self.averageUnitCost,self.contributionMargin,self.salesVolume,self.revenue ,self.costOfGoodSold,self.fixedCost,self.totalCost,self.grossMargin,self.taxes,self.netIncome) print(incomeStatementStr) programLabel = widgets.Label('--------------------------> RESTAURANT SIMULATOR PROGRAM <--------------------------', layout=widgets.Layout(width='100%')) companyName = widgets.Text(description="Comp. Name",value="Example LTD",layout=widgets.Layout(width="50%")) longitude = widgets.Text(description="Longitude",value="48.8566",layout=widgets.Layout(width="30%")) latitude = widgets.Text(description="Latitude",value="2.3522",layout=widgets.Layout(width="30%")) br1Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) servicesLabel = widgets.Label('Select Services:', layout=widgets.Layout(width='100%')) Dessertbox = widgets.Checkbox(False, description='Dessert') Saladbox = widgets.Checkbox(False, description='Salad') Drinkbox = widgets.Checkbox(False, description='Drink') br2Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) expectedPriceLabel = widgets.Label('Expected Average Price:', layout=widgets.Layout(width='100%')) expectedAveragePrice = widgets.IntSlider(min=0, max=100, step=1, description='(Euro): ',value=0) expectedUnitCostLabel = widgets.Label('Expected Average Unit Cost:', layout=widgets.Layout(width='100%')) expectedUnitCost = widgets.IntSlider(min=0, max=100, step=1, description='(Euro): ',value=0) expectedSalesLabel = widgets.Label('Expected Sales Monthly:', layout=widgets.Layout(width='100%')) expectedSales = widgets.IntSlider(min=0, max=10000, step=1, description='(Euro): ',value=0) fixedCostLabel = widgets.Label('Fixed Costs:', layout=widgets.Layout(width='100%')) fixedCost = widgets.FloatText(value=10000, description='(Euro): ',color = 'blue') taxRateLabel = widgets.Label('Tax Rate:', layout=widgets.Layout(width='100%')) taxRate = widgets.FloatSlider(min=0, max=100, step=1, description='%: ',value=0) br3Label = widgets.Label('-----------------------------------------------------------------------------------------------------', layout=widgets.Layout(width='100%')) # create a string list bu considering checkbox widgets def set_service_list(): # create an empty list serviceList = [] # if it is checked if Dessertbox.value: # add to the list serviceList.append('Dessert') # if it is checked if Saladbox.value: # add to 
the list serviceList.append('Salad') # if it is checked if Drinkbox.value: # add to the list serviceList.append('Drink') # return the list return serviceList # display the widgets display(programLabel) display(companyName) display(longitude) display(latitude) display(br1Label) display(servicesLabel) display(Dessertbox) display(Saladbox) display(Drinkbox) display(br2Label) display(expectedPriceLabel) display(expectedAveragePrice) display(expectedUnitCostLabel) display(expectedUnitCost) display(expectedSalesLabel) display(expectedSales) display(fixedCostLabel) display(fixedCost) display(taxRateLabel) display(taxRate) display(br3Label) # create a company object company = Company(companyName.value,longitude.value,latitude.value,set_service_list(),expectedAveragePrice.value,expectedUnitCost.value,expectedSales.value,fixedCost.value,taxRate.value) # print income statement company.print_income_statement() # plot price vs. cost (priceList and costList are illustrative series derived from the company's average figures) priceList = [company.averagePrice * f for f in (0.8, 0.9, 1.0, 1.1, 1.2)] costList = [company.averageUnitCost * f for f in (0.8, 0.9, 1.0, 1.1, 1.2)] plt.plot(priceList, "g--") plt.plot(costList, "o--") plt.axhline(y=0, color='r', linewidth=0.5, linestyle='-') plt.axvline(x=0, color='r', linewidth=0.5, linestyle='-') plt.xlabel("Price"); plt.ylabel("Cost") plt.legend(["Price","Corresponding Cost"]) plt.title("Price vs. Cost") plt.grid() plt.show() x_labels = ["PROFIT", "Avg Price", "Avg Cost", "Contribution Margin", "Sales Vol"] plt.bar(x_labels, [96, 21.31, 10.53, 10.78, 899], color = "g") plt.legend(["Profit is shown as %, e.g., 96%"]) plt.show() ```
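For reference, the income-statement arithmetic performed by the `Company` class above can be checked by hand. The minimal sketch below repeats the same formulas on hypothetical input values (chosen only for illustration, not tied to the widget defaults), so the expected numbers are easy to verify. Note that, as in the class, the fixed cost enters only the total cost and not the net income.

```
# Worked example of the income-statement math used by the Company class,
# with hypothetical inputs chosen only for illustration.
average_price     = 12.0    # euro per unit
average_unit_cost = 7.5     # euro per unit
sales_volume      = 900     # units sold per month
fixed_cost        = 2000.0  # euro per month
tax_rate          = 0.20    # 20 %

contribution_margin = average_price - average_unit_cost   # 4.5
revenue             = average_price * sales_volume         # 10800.0
cost_of_goods_sold  = average_unit_cost * sales_volume     # 6750.0
total_cost          = cost_of_goods_sold + fixed_cost      # 8750.0
gross_margin        = revenue - cost_of_goods_sold         # 4050.0
taxes               = gross_margin * tax_rate              # 810.0
net_income          = gross_margin - taxes                 # 3240.0

print(contribution_margin, revenue, cost_of_goods_sold,
      total_cost, gross_margin, taxes, net_income)
```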
github_jupyter
``` import argparse import glob import io import os import random import numpy from PIL import Image, ImageFont, ImageDraw from scipy.ndimage.interpolation import map_coordinates from scipy.ndimage.filters import gaussian_filter SCRIPT_PATH = os.path.dirname(os.path.abspath('./hangul-WR')) # Default data paths. DEFAULT_LABEL_FILE = os.path.join(SCRIPT_PATH, './labels/2350-common-hangul.txt') DEFAULT_FONTS_DIR = os.path.join(SCRIPT_PATH, './fonts') DEFAULT_OUTPUT_DIR = os.path.join(SCRIPT_PATH, './image-data') # Number of random distortion images to generate per font and character. DISTORTION_COUNT = 3 # Width and height of the resulting image. IMAGE_WIDTH = 64 IMAGE_HEIGHT = 64 def generate_hangul_images(label_file, fonts_dir, output_dir): """Generate Hangul image files. This will take in the passed in labels file and will generate several images using the font files provided in the font directory. The font directory is expected to be populated with *.ttf (True Type Font) files. The generated images will be stored in the given output directory. Image paths will have their corresponding labels listed in a CSV file. """ with io.open(label_file, 'r', encoding='utf-8') as f: labels = f.read().splitlines() image_dir = os.path.join(output_dir, 'hangul-images') if not os.path.exists(image_dir): os.makedirs(os.path.join(image_dir)) # Get a list of the fonts. fonts = glob.glob(os.path.join(fonts_dir, '*.ttf')) labels_csv = io.open(os.path.join(output_dir, 'labels-map.csv'), 'w', encoding='utf-8') total_count = 0 prev_count = 0 for character in labels: # Print image count roughly every 5000 images. if total_count - prev_count > 5000: prev_count = total_count print('{} images generated...'.format(total_count)) for font in fonts: total_count += 1 image = Image.new('L', (IMAGE_WIDTH, IMAGE_HEIGHT), color=0) font = ImageFont.truetype(font, 48) drawing = ImageDraw.Draw(image) w, h = drawing.textsize(character, font=font) drawing.text( ((IMAGE_WIDTH-w)/2, (IMAGE_HEIGHT-h)/2), character, fill=(255), font=font ) file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) for i in range(DISTORTION_COUNT): total_count += 1 file_string = 'hangul_{}.jpeg'.format(total_count) file_path = os.path.join(image_dir, file_string) arr = numpy.array(image) distorted_array = elastic_distort( arr, alpha=random.randint(30, 36), sigma=random.randint(5, 6) ) distorted_image = Image.fromarray(distorted_array) distorted_image.save(file_path, 'JPEG') labels_csv.write(u'{},{}\n'.format(file_path, character)) print('Finished generating {} images.'.format(total_count)) labels_csv.close() def elastic_distort(image, alpha, sigma): """Perform elastic distortion on an image. Here, alpha refers to the scaling factor that controls the intensity of the deformation. The sigma variable refers to the Gaussian filter standard deviation. 
""" random_state = numpy.random.RandomState(None) shape = image.shape dx = gaussian_filter( (random_state.rand(*shape) * 2 - 1), sigma, mode="constant" ) * alpha dy = gaussian_filter( (random_state.rand(*shape) * 2 - 1), sigma, mode="constant" ) * alpha x, y = numpy.meshgrid(numpy.arange(shape[0]), numpy.arange(shape[1])) indices = numpy.reshape(y+dy, (-1, 1)), numpy.reshape(x+dx, (-1, 1)) return map_coordinates(image, indices, order=1).reshape(shape) # label_file = './labels/2350-common-hangul.txt' label_file = DEFAULT_LABEL_FILE # fonts_dir = './fonts' fonts_dir = DEFAULT_FONTS_DIR # output_dir = './image-data' output_dir = DEFAULT_OUTPUT_DIR generate_hangul_images(label_file, fonts_dir, output_dir) ```
github_jupyter
``` from os import environ environ['optimizer'] = 'Adam' environ['num_workers']= '2' environ['batch_size']= str(2048) environ['n_epochs']= '1000' environ['batch_norm']= 'True' environ['loss_func']='MAPE' environ['layers'] = '600 350 200 180' environ['dropouts'] = '0.1 '* 4 environ['log'] = 'False' environ['weight_decay'] = '0.01' environ['cuda_device'] ='cuda:3' environ['dataset'] = 'data/speedup_dataset2.pkl' %run utils.ipynb train_dl, val_dl, test_dl = train_dev_split(dataset, batch_size, num_workers, log=log) db = fai.basic_data.DataBunch(train_dl, val_dl, test_dl, device=device) input_size = train_dl.dataset.X.shape[1] output_size = train_dl.dataset.Y.shape[1] model = None if batch_norm: model = Model_BN(input_size, output_size, hidden_sizes=layers_sizes, drops=drops) else: model = Model(input_size, output_size) if loss_func == 'MSE': criterion = nn.MSELoss() else: criterion = smape_criterion # criterion = mape_criterion l = fai.Learner(db, model, loss_func=criterion, metrics=[mape_criterion, rmse_criterion]) if optimizer == 'SGD': l.opt_func = optim.SGD l = l.load(f"r_speedup_{optimizer}_batch_norm_{batch_norm}_{loss_func}_nlayers_{len(layers_sizes)}_log_{log}") l.lr_find() l.recorder.plot() lr = 1e-03 l.fit_one_cycle(450, lr) l.recorder.plot_losses() l.save(f"r_speedup_{optimizer}_batch_norm_{batch_norm}_{loss_func}_nlayers_{len(layers_sizes)}_log_{log}") !ls models val_df = get_results_df(val_dl, l.model) train_df = get_results_df(train_dl, l.model) df = val_df df[:][['prediction','target', 'abs_diff','APE']].describe() df = train_df df[:][['prediction','target', 'abs_diff','APE']].describe() df[:][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 0)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 1)][['prediction','target', 'abs_diff','APE']].describe() df[(df.interchange + df.tile + df.unroll != 0)][['prediction','target', 'abs_diff','APE']].describe() df1 = df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 0) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 0) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} 
loss") df1 = df[(df.interchange==0) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange==1) & (df.unroll == 1) & (df.tile == 1)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df1 = df[(df.interchange + df.tile + df.unroll != 0)] joint_plot(df1, f"Validation dataset, {loss_func} loss") df2 = df joint_plot(df2, f"Validation dataset, {loss_func} loss") ```
github_jupyter
# Hyperparameter Optimization (HPO) of Machine Learning Models

L. Yang and A. Shami, “On hyperparameter optimization of machine learning algorithms: Theory and practice,” Neurocomputing, vol. 415, pp. 295–316, 2020, doi: https://doi.org/10.1016/j.neucom.2020.07.061.

### **Sample code for regression problems**

**Dataset used:** &nbsp; Boston Housing dataset from sklearn

**Machine learning algorithms used:** &nbsp; Random forest (RF), support vector machine (SVM), k-nearest neighbor (KNN), artificial neural network (ANN)

**HPO algorithms used:** &nbsp; Grid search, random search, hyperband, Bayesian Optimization with Gaussian Processes (BO-GP), Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE), particle swarm optimization (PSO), genetic algorithm (GA).

**Performance metric:** &nbsp; Mean square error (MSE)

```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split,cross_val_score
from sklearn.ensemble import RandomForestClassifier,RandomForestRegressor
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.neighbors import KNeighborsClassifier,KNeighborsRegressor
from sklearn.svm import SVC,SVR
from sklearn import datasets
import scipy.stats as stats
```

## Load Boston Housing dataset

We will use the Boston Housing dataset, which contains information about different houses in Boston. There are 506 samples and 13 feature variables in this dataset. The goal is to predict house prices from the given features. You can read more about the data and the variables [[1]](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) [[2]](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html).

```
X, y = datasets.load_boston(return_X_y=True)
datasets.load_boston()
```

## Baseline Machine Learning models: Regressors with Default Hyperparameters

```
#Random Forest
clf = RandomForestRegressor()
scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error') # 3-fold cross-validation
print("MSE:"+ str(-scores.mean()))

#SVM
clf = SVR(gamma='scale')
scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error')
print("MSE:"+ str(-scores.mean()))

#KNN
clf = KNeighborsRegressor()
scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error')
print("MSE:"+ str(-scores.mean()))

#ANN
from keras.models import Sequential, Model
from keras.layers import Dense, Input
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import EarlyStopping

def ANN(optimizer = 'adam',neurons=32,batch_size=32,epochs=50,activation='relu',patience=5,loss='mse'):
    model = Sequential()
    model.add(Dense(neurons, input_shape=(X.shape[1],), activation=activation))
    model.add(Dense(neurons, activation=activation))
    model.add(Dense(1))
    model.compile(optimizer = optimizer, loss=loss)
    early_stopping = EarlyStopping(monitor="loss", patience = patience)  # early stop patience
    history = model.fit(X, y, batch_size=batch_size, epochs=epochs, callbacks = [early_stopping], verbose=0)  # verbose set to 1 will show the training process
    return model

clf = KerasRegressor(build_fn=ANN, verbose=0)
scores = cross_val_score(clf, X, y, cv=3,scoring='neg_mean_squared_error')
print("MSE:"+ str(-scores.mean()))
```

## HPO Algorithm 1: Grid Search

Search all the given hyper-parameter configurations.

**Advantages:**
* Simple implementation.
**Disadvantages:** * Time-consuming, * Only efficient with categorical HPs. ``` #Random Forest from sklearn.model_selection import GridSearchCV # Define the hyperparameter configuration space rf_params = { 'n_estimators': [10, 20, 30], #'max_features': ['sqrt',0.5], 'max_depth': [15,20,30,50], #'min_samples_leaf': [1,2,4,8], #"bootstrap":[True,False], #"criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #SVM from sklearn.model_selection import GridSearchCV rf_params = { 'C': [1,10, 100], "kernel":['poly','rbf','sigmoid'], "epsilon":[0.01,0.1,1] } clf = SVR(gamma='scale') grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #KNN from sklearn.model_selection import GridSearchCV rf_params = { 'n_neighbors': [2, 3, 5,7,10] } clf = KNeighborsRegressor() grid = GridSearchCV(clf, rf_params, cv=3, scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) #ANN from sklearn.model_selection import GridSearchCV rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32], 'neurons':[16,32], 'epochs':[20,50], 'patience':[2,5] } clf = KerasRegressor(build_fn=ANN, verbose=0) grid = GridSearchCV(clf, rf_params, cv=3,scoring='neg_mean_squared_error') grid.fit(X, y) print(grid.best_params_) print("MSE:"+ str(-grid.best_score_)) ``` ## HPO Algorithm 2: Random Search Randomly search hyper-parameter combinations in the search space **Advantages:** * More efficient than GS. * Enable parallelization. **Disadvantages:** * Not consider previous results. * Not efficient with conditional HPs. 
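Before handing the search over to `RandomizedSearchCV` below, the idea itself fits in a few lines: sample hyperparameter values at random, score each candidate with cross-validation, and keep the best one. The sketch below is an illustration of the concept rather than the code used in this notebook; it relies on the `X`, `y` arrays loaded earlier and uses KNN only to keep it short.

```
# Library-free illustration of random search: sample, score, keep the best.
# Uses the X, y arrays loaded above; KNN is chosen only to keep the example short.
import random
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor

best_mse, best_k = float('inf'), None
for _ in range(10):                           # 10 random trials
    k = random.randint(1, 20)                 # sample a hyperparameter value uniformly
    mse = -np.mean(cross_val_score(KNeighborsRegressor(n_neighbors=k),
                                   X, y, cv=3, scoring='neg_mean_squared_error'))
    if mse < best_mse:
        best_mse, best_k = mse, k

print("best n_neighbors:", best_k, "MSE:", best_mse)
```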
``` #Random Forest from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV # Define the hyperparameter configuration space rf_params = { 'n_estimators': sp_randint(10,100), "max_features":sp_randint(1,13), 'max_depth': sp_randint(5,50), "min_samples_split":sp_randint(2,11), "min_samples_leaf":sp_randint(1,11), "criterion":['mse','mae'] } n_iter_search=20 #number of iterations is set to 20, you can increase this number if time permits clf = RandomForestRegressor(random_state=0) Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #SVM from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV rf_params = { 'C': stats.uniform(0,50), "kernel":['poly','rbf','sigmoid'], "epsilon":stats.uniform(0,1) } n_iter_search=20 clf = SVR(gamma='scale') Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #KNN from scipy.stats import randint as sp_randint from sklearn.model_selection import RandomizedSearchCV rf_params = { 'n_neighbors': sp_randint(1,20), } n_iter_search=10 clf = KNeighborsRegressor() Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) #ANN from scipy.stats import randint as sp_randint from random import randrange as sp_randrange from sklearn.model_selection import RandomizedSearchCV rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':sp_randint(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':sp_randint(3,20) } n_iter_search=10 clf = KerasRegressor(build_fn=ANN, verbose=0) Random = RandomizedSearchCV(clf, param_distributions=rf_params,n_iter=n_iter_search,cv=3,scoring='neg_mean_squared_error') Random.fit(X, y) print(Random.best_params_) print("MSE:"+ str(-Random.best_score_)) ``` ## HPO Algorithm 3: Hyperband Generate small-sized subsets and allocate budgets to each hyper-parameter combination based on its performance **Advantages:** * Enable parallelization. **Disadvantages:** * Not efficient with conditional HPs. * Require subsets with small budgets to be representative. 
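The budget-allocation idea behind Hyperband can be illustrated with plain successive halving before using the `hyperband` package below: start many configurations on a small budget, keep the better half, and double the budget for the survivors. In this sketch (an illustration, not the package's implementation) the budget is the number of trees of a random forest, and `X`, `y` are the arrays loaded earlier.

```
# Bare-bones successive halving, the budget-allocation idea underlying Hyperband.
# The "budget" here is n_estimators of a random forest; X, y come from the cells above.
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(0)
configs = [{'max_depth': int(d), 'min_samples_leaf': int(l)}
           for d, l in zip(rng.randint(5, 50, 8), rng.randint(1, 11, 8))]

budget = 10                                    # initial n_estimators per configuration
while len(configs) > 1:
    scores = [-np.mean(cross_val_score(
                  RandomForestRegressor(n_estimators=budget, random_state=0, **c),
                  X, y, cv=3, scoring='neg_mean_squared_error'))
              for c in configs]
    keep = np.argsort(scores)[:len(configs) // 2]   # lower MSE is better
    configs = [configs[i] for i in keep]            # keep the better half
    budget *= 2                                     # give survivors a larger budget

print("winner:", configs[0], "final budget (n_estimators):", budget)
```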
``` #Random Forest from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint # Define the hyperparameter configuration space rf_params = { 'n_estimators': sp_randint(10,100), "max_features":sp_randint(1,13), 'max_depth': sp_randint(5,50), "min_samples_split":sp_randint(2,11), "min_samples_leaf":sp_randint(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=10,max_iter=100,scoring='neg_mean_squared_error') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) #SVM from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'C': stats.uniform(0,50), "kernel":['poly','rbf','sigmoid'], "epsilon":stats.uniform(0,1) } clf = SVR(gamma='scale') hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=10,scoring='neg_mean_squared_error',resource_param='C') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) #KNN from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'n_neighbors': range(1,20), } clf = KNeighborsRegressor() hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=20,scoring='neg_mean_squared_error',resource_param='n_neighbors') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) #ANN from hyperband import HyperbandSearchCV from scipy.stats import randint as sp_randint rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':sp_randint(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':sp_randint(3,20) } clf = KerasRegressor(build_fn=ANN, epochs=20, verbose=0) hyper = HyperbandSearchCV(clf, param_distributions =rf_params,cv=3,min_iter=1,max_iter=10,scoring='neg_mean_squared_error',resource_param='epochs') hyper.fit(X, y) print(hyper.best_params_) print("MSE:"+ str(-hyper.best_score_)) ``` ## HPO Algorithm 4: BO-GP Bayesian Optimization with Gaussian Process (BO-GP) **Advantages:** * Fast convergence speed for continuous HPs. **Disadvantages:** * Poor capacity for parallelization. * Not efficient with conditional HPs. 
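The loop behind BO-GP can be sketched with scikit-learn's `GaussianProcessRegressor` before switching to `skopt` below: fit a GP surrogate to the (hyperparameter, MSE) pairs observed so far, then pick the next candidate with a simple lower-confidence-bound acquisition. This is a toy illustration for a single hyperparameter (KNN's `n_neighbors`) that assumes the `X`, `y` arrays loaded earlier; it is not how skopt is implemented internally.

```
# Toy BO-GP loop for one hyperparameter: GP surrogate + lower-confidence-bound acquisition.
# Illustration only; the cells below use skopt. X, y come from the cells above.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsRegressor

def mse_of(k):
    return -np.mean(cross_val_score(KNeighborsRegressor(n_neighbors=int(k)),
                                    X, y, cv=3, scoring='neg_mean_squared_error'))

observed_k   = [1, 10, 20]                      # a few initial evaluations
observed_mse = [mse_of(k) for k in observed_k]
candidates   = np.arange(1, 21).reshape(-1, 1)

for _ in range(5):                              # 5 optimization iterations
    gp = GaussianProcessRegressor(alpha=1e-6, normalize_y=True)
    gp.fit(np.array(observed_k).reshape(-1, 1), observed_mse)
    mean, std = gp.predict(candidates, return_std=True)
    next_k = int(candidates[np.argmin(mean - 1.96 * std)][0])   # explore + exploit
    observed_k.append(next_k)
    observed_mse.append(mse_of(next_k))

best = observed_k[int(np.argmin(observed_mse))]
print("best n_neighbors:", best, "MSE:", min(observed_mse))
```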
### Using skopt.BayesSearchCV ``` #Random Forest from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer # Define the hyperparameter configuration space rf_params = { 'n_estimators': Integer(10,100), "max_features":Integer(1,13), 'max_depth': Integer(5,50), "min_samples_split":Integer(2,11), "min_samples_leaf":Integer(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=20, scoring='neg_mean_squared_error') #number of iterations is set to 20, you can increase this number if time permits Bayes.fit(X, y) print(Bayes.best_params_) bclf = Bayes.best_estimator_ print("MSE:"+ str(-Bayes.best_score_)) #SVM from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'C': Real(0,50), "kernel":['poly','rbf','sigmoid'], 'epsilon': Real(0,1) } clf = SVR(gamma='scale') Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=20, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ str(-Bayes.best_score_)) #KNN from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'n_neighbors': Integer(1,20), } clf = KNeighborsRegressor() Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=10, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ str(-Bayes.best_score_)) #ANN from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':Integer(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':Integer(3,20) } clf = KerasRegressor(build_fn=ANN, verbose=0) Bayes = BayesSearchCV(clf, rf_params,cv=3,n_iter=10, scoring='neg_mean_squared_error') Bayes.fit(X, y) print(Bayes.best_params_) print("MSE:"+ str(-Bayes.best_score_)) ``` ### Using skopt.gp_minimize ``` #Random Forest from skopt.space import Real, Integer from skopt.utils import use_named_args reg = RandomForestRegressor() # Define the hyperparameter configuration space space = [Integer(10, 100, name='n_estimators'), Integer(5, 50, name='max_depth'), Integer(1, 13, name='max_features'), Integer(2, 11, name='min_samples_split'), Integer(1, 11, name='min_samples_leaf'), Categorical(['mse', 'mae'], name='criterion') ] # Define the objective function @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=20, random_state=0) #number of iterations is set to 20, you can increase this number if time permits print("MSE:%.4f" % res_gp.fun) print(res_gp.x) #SVM from skopt.space import Real, Integer from skopt.utils import use_named_args reg = SVR(gamma='scale') space = [Real(0, 50, name='C'), Categorical(['poly','rbf','sigmoid'], name='kernel'), Real(0, 1, name='epsilon'), ] @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=20, random_state=0) print("MSE:%.4f" % res_gp.fun) print(res_gp.x) #KNN from skopt.space import Real, Integer from skopt.utils import use_named_args reg = 
KNeighborsRegressor() space = [Integer(1, 20, name='n_neighbors')] @use_named_args(space) def objective(**params): reg.set_params(**params) return -np.mean(cross_val_score(reg, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) from skopt import gp_minimize res_gp = gp_minimize(objective, space, n_calls=10, random_state=0) print("MSE:%.4f" % res_gp.fun) print(res_gp.x) ``` ## HPO Algorithm 5: BO-TPE Bayesian Optimization with Tree-structured Parzen Estimator (TPE) **Advantages:** * Efficient with all types of HPs. * Keep conditional dependencies. **Disadvantages:** * Poor capacity for parallelization. ``` #Random Forest from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold # Define the objective function def objective(params): params = { 'n_estimators': int(params['n_estimators']), 'max_depth': int(params['max_depth']), 'max_features': int(params['max_features']), "min_samples_split":int(params['min_samples_split']), "min_samples_leaf":int(params['min_samples_leaf']), "criterion":str(params['criterion']) } clf = RandomForestRegressor( **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } # Define the hyperparameter configuration space space = { 'n_estimators': hp.quniform('n_estimators', 10, 100, 1), 'max_depth': hp.quniform('max_depth', 5, 50, 1), "max_features":hp.quniform('max_features', 1, 13, 1), "min_samples_split":hp.quniform('min_samples_split',2,11,1), "min_samples_leaf":hp.quniform('min_samples_leaf',1,11,1), "criterion":hp.choice('criterion',['mse','mae']) } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20) print("Random Forest: Hyperopt estimated optimum {}".format(best)) #SVM from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold def objective(params): params = { 'C': abs(float(params['C'])), "kernel":str(params['kernel']), 'epsilon': abs(float(params['epsilon'])), } clf = SVR(gamma='scale', **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { 'C': hp.normal('C', 0, 50), "kernel":hp.choice('kernel',['poly','rbf','sigmoid']), 'epsilon': hp.normal('epsilon', 0, 1), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=20) print("SVM: Hyperopt estimated optimum {}".format(best)) #KNN from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold def objective(params): params = { 'n_neighbors': abs(int(params['n_neighbors'])) } clf = KNeighborsRegressor( **params) score = -np.mean(cross_val_score(clf, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { 'n_neighbors': hp.quniform('n_neighbors', 1, 20, 1), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10) print("KNN: Hyperopt estimated optimum {}".format(best)) #ANN from hyperopt import hp, fmin, tpe, STATUS_OK, Trials from sklearn.model_selection import cross_val_score, StratifiedKFold def objective(params): params = { "optimizer":str(params['optimizer']), "activation":str(params['activation']), "loss":str(params['loss']), 'batch_size': abs(int(params['batch_size'])), 'neurons': abs(int(params['neurons'])), 'epochs': abs(int(params['epochs'])), 'patience': abs(int(params['patience'])) } clf = 
KerasRegressor(build_fn=ANN,**params, verbose=0) score = -np.mean(cross_val_score(clf, X, y, cv=3, scoring="neg_mean_squared_error")) return {'loss':score, 'status': STATUS_OK } space = { "optimizer":hp.choice('optimizer',['adam','rmsprop']), "activation":hp.choice('activation',['relu','tanh']), "loss":hp.choice('loss',['mse','mae']), 'batch_size': hp.quniform('batch_size', 16, 64, 16), 'neurons': hp.quniform('neurons', 10, 100, 10), 'epochs': hp.quniform('epochs', 20, 50, 10), 'patience': hp.quniform('patience', 3, 20, 3), } best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=10) print("ANN: Hyperopt estimated optimum {}".format(best)) ``` ## HPO Algorithm 6: PSO Partical swarm optimization (PSO): Each particle in a swarm communicates with other particles to detect and update the current global optimum in each iteration until the final optimum is detected. **Advantages:** * Efficient with all types of HPs. * Enable parallelization. **Disadvantages:** * Require proper initialization. ``` #Random Forest import optunity import optunity.metrics # Define the hyperparameter configuration space search = { 'n_estimators': [10, 100], 'max_features': [1, 13], 'max_depth': [5,50], "min_samples_split":[2,11], "min_samples_leaf":[1,11], } # Define the objective function @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,n_estimators=None, max_features=None,max_depth=None,min_samples_split=None,min_samples_leaf=None): # fit the model model = RandomForestRegressor(n_estimators=int(n_estimators), max_features=int(max_features), max_depth=int(max_depth), min_samples_split=int(min_samples_split), min_samples_leaf=int(min_samples_leaf), ) scores=-np.mean(cross_val_score(model, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #SVM import optunity import optunity.metrics search = { 'C': (0,50), 'kernel':[0,3], 'epsilon': (0, 1) } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,C=None,kernel=None,epsilon=None): # fit the model if kernel<1: ke='poly' elif kernel<2: ke='rbf' else: ke='sigmoid' model = SVR(C=float(C), kernel=ke, gamma='scale', epsilon=float(epsilon) ) scores=-np.mean(cross_val_score(model, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #KNN import optunity import optunity.metrics search = { 'n_neighbors': [1, 20], } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, y_test,n_neighbors=None): # fit the model model = KNeighborsRegressor(n_neighbors=int(n_neighbors), ) scores=-np.mean(cross_val_score(model, X, y, cv=3, n_jobs=-1, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=10, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) #ANN import optunity import optunity.metrics search = { 'optimizer':[0,2], 'activation':[0,2], 'loss':[0,2], 'batch_size': [0, 2], 'neurons': [10, 100], 'epochs': [20, 50], 'patience': [3, 20], } @optunity.cross_validated(x=X, y=y, num_folds=3) def performance(x_train, y_train, x_test, 
y_test,optimizer=None,activation=None,loss=None,batch_size=None,neurons=None,epochs=None,patience=None): # fit the model if optimizer<1: op='adam' else: op='rmsprop' if activation<1: ac='relu' else: ac='tanh' if loss<1: lo='mse' else: lo='mae' if batch_size<1: ba=16 else: ba=32 model = ANN(optimizer=op, activation=ac, loss=lo, batch_size=ba, neurons=int(neurons), epochs=int(epochs), patience=int(patience) ) clf = KerasRegressor(build_fn=ANN, verbose=0) scores=-np.mean(cross_val_score(clf, X, y, cv=3, scoring="neg_mean_squared_error")) return scores optimal_configuration, info, _ = optunity.minimize(performance, solver_name='particle swarm', num_evals=20, **search ) print(optimal_configuration) print("MSE:"+ str(info.optimum)) ``` ## HPO Algorithm 7: Genetic Algorithm Genetic algorithms detect well-performing hyper-parameter combinations in each generation, and pass them to the next generation until the best-performing combination is identified. **Advantages:** * Efficient with all types of HPs. * Not require good initialization. **Disadvantages:** * Poor capacity for parallelization. ### Using DEAP ``` #Random Forest from evolutionary_search import EvolutionaryAlgorithmSearchCV from scipy.stats import randint as sp_randint # Define the hyperparameter configuration space rf_params = { 'n_estimators': range(10,100), "max_features":range(1,13), 'max_depth': range(5,50), "min_samples_split":range(2,11), "min_samples_leaf":range(1,11), "criterion":['mse','mae'] } clf = RandomForestRegressor(random_state=0) # Set the hyperparameters of GA ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #SVM from evolutionary_search import EvolutionaryAlgorithmSearchCV rf_params = { 'C': np.random.uniform(0,50,1000), "kernel":['poly','rbf','sigmoid'], 'epsilon': np.random.uniform(0,1,100), } clf = SVR(gamma='scale') ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #KNN from evolutionary_search import EvolutionaryAlgorithmSearchCV rf_params = { 'n_neighbors': range(1,20), } clf = KNeighborsRegressor() ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) #ANN from evolutionary_search import EvolutionaryAlgorithmSearchCV # Define the hyperparameter configuration space rf_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32,64], 'neurons':range(10,100), 'epochs':[20,50], #'epochs':[20,50,100,200], 'patience':range(3,20) } clf = KerasRegressor(build_fn=ANN, verbose=0) # Set the hyperparameters of GA ga1 = EvolutionaryAlgorithmSearchCV(estimator=clf, params=rf_params, scoring="neg_mean_squared_error", cv=3, verbose=1, population_size=10, gene_mutation_prob=0.10, gene_crossover_prob=0.5, tournament_size=3, generations_number=5, n_jobs=1) ga1.fit(X, y) 
print(ga1.best_params_) print("MSE:"+ str(-ga1.best_score_)) ``` ### Using TPOT ``` #Random Forest from tpot import TPOTRegressor # Define the hyperparameter configuration space parameters = { 'n_estimators': range(20,200), "max_features":range(1,13), 'max_depth': range(10,100), "min_samples_split":range(2,11), "min_samples_leaf":range(1,11), #"criterion":['mse','mae'] } # Set the hyperparameters of GA ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.ensemble.RandomForestRegressor': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) #SVM from tpot import TPOTRegressor parameters = { 'C': np.random.uniform(0,50,1000), "kernel":['poly','rbf','sigmoid'], 'epsilon': np.random.uniform(0,1,100), 'gamma': ['scale'] } ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.svm.SVR': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) #KNN from tpot import TPOTRegressor parameters = { 'n_neighbors': range(1,20), } ga2 = TPOTRegressor(generations= 3, population_size= 10, offspring_size= 5, verbosity= 3, early_stop= 5, config_dict= {'sklearn.neighbors.KNeighborsRegressor': parameters}, cv = 3, scoring = 'neg_mean_squared_error') ga2.fit(X, y) ```
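Once a TPOT run above has finished, the fitted object can be inspected and exported. The snippet below is a small follow-up that assumes `ga2` is the last fitted `TPOTRegressor`; the output file name is arbitrary.

```
# Follow-up to the TPOT runs above (assumes ga2 has finished fitting).
print(ga2.fitted_pipeline_)           # the best sklearn pipeline found by the GA
print("MSE:", -ga2.score(X, y))       # score uses the configured neg_mean_squared_error
ga2.export('tpot_best_pipeline.py')   # write the pipeline out as a standalone script
```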
github_jupyter
## 1. Winter is Coming. Let's load the dataset ASAP! <p>If you haven't heard of <em>Game of Thrones</em>, then you must be really good at hiding. Game of Thrones is the hugely popular television series by HBO based on the (also) hugely popular book series <em>A Song of Ice and Fire</em> by George R.R. Martin. In this notebook, we will analyze the co-occurrence network of the characters in the Game of Thrones books. Here, two characters are considered to co-occur if their names appear in the vicinity of 15 words from one another in the books. </p> <p><img src="https://assets.datacamp.com/production/project_76/img/got_network.jpeg" style="width: 550px"></p> <p>This dataset constitutes a network and is given as a text file describing the <em>edges</em> between characters, with some attributes attached to each edge. Let's start by loading in the data for the first book <em>A Game of Thrones</em> and inspect it.</p> ``` # Importing modules # ... YOUR CODE FOR TASK 1 ... import pandas as pd # Reading in datasets/book1.csv book1 = pd.read_csv('datasets/book1.csv') book1.head() # Printing out the head of the dataset # ... YOUR CODE FOR TASK 1 ... ``` ## 2. Time for some Network of Thrones <p>The resulting DataFrame <code>book1</code> has 5 columns: <code>Source</code>, <code>Target</code>, <code>Type</code>, <code>weight</code>, and <code>book</code>. Source and target are the two nodes that are linked by an edge. A network can have directed or undirected edges and in this network all the edges are undirected. The weight attribute of every edge tells us the number of interactions that the characters have had over the book, and the book column tells us the book number.</p> <p>Once we have the data loaded as a pandas DataFrame, it's time to create a network. We will use <code>networkx</code>, a network analysis library, and create a graph object for the first book.</p> ``` # Importing modules # ... YOUR CODE FOR TASK 2 ... import networkx as nx # Creating an empty graph object G_book1 = nx.Graph() ``` ## 3. Populate the network with the DataFrame <p>Currently, the graph object <code>G_book1</code> is empty. Let's now populate it with the edges from <code>book1</code>. And while we're at it, let's load in the rest of the books too!</p> ``` # Iterating through the DataFrame to add edges # ... YOUR CODE FOR TASK 3 ... for _, edge in book1.iterrows(): G_book1.add_edge(edge['Source'], edge['Target'], weight=edge['weight']) # Creating a list of networks for all the books books = [G_book1] book_fnames = ['datasets/book2.csv', 'datasets/book3.csv', 'datasets/book4.csv', 'datasets/book5.csv'] for book_fname in book_fnames: book = pd.read_csv(book_fname) G_book = nx.Graph() for _, edge in book.iterrows(): G_book.add_edge(edge['Source'], edge['Target'], weight=edge['weight']) books.append(G_book) ``` ## 4. The most important character in Game of Thrones <p>Is it Jon Snow, Tyrion, Daenerys, or someone else? Let's see! Network science offers us many different metrics to measure the importance of a node in a network. Note that there is no "correct" way of calculating the most important node in a network, every metric has a different meaning.</p> <p>First, let's measure the importance of a node in a network by looking at the number of neighbors it has, that is, the number of nodes it is connected to. For example, an influential account on Twitter, where the follower-followee relationship forms the network, is an account which has a high number of followers. 
This measure of importance is called <em>degree centrality</em>.</p> <p>Using this measure, let's extract the top ten important characters from the first book (<code>book[0]</code>) and the fifth book (<code>book[4]</code>).</p> ``` # Calculating the degree centrality of book 1 deg_cen_book1 = nx.degree_centrality(books[0]) # Calculating the degree centrality of book 5 deg_cen_book5 = nx.degree_centrality(books[4]) # Sorting the dictionaries according to their degree centrality and storing the top 10 sorted_deg_cen_book1 = sorted(deg_cen_book1.items(), key=lambda x: x[1], reverse=True)[0:10] # Sorting the dictionaries according to their degree centrality and storing the top 10 sorted_deg_cen_book5 = sorted(deg_cen_book5.items(), key=lambda x: x[1], reverse=True)[0:10] print(sorted_deg_cen_book1) print(sorted_deg_cen_book5) # Printing out the top 10 of book1 and book5 # ... YOUR CODE FOR TASK 4 ... ``` ## 5. The evolution of character importance <p>According to degree centrality, the most important character in the first book is Eddard Stark but he is not even in the top 10 of the fifth book. The importance of characters changes over the course of five books because, you know, stuff happens... ;)</p> <p>Let's look at the evolution of degree centrality of a couple of characters like Eddard Stark, Jon Snow, and Tyrion, which showed up in the top 10 of degree centrality in the first book.</p> ``` %matplotlib inline # Creating a list of degree centrality of all the books evol = [nx.degree_centrality(book) for book in books] # Creating a DataFrame from the list of degree centralities in all the books degree_evol_df = pd.DataFrame.from_records(evol) degree_evol_df[['Eddard-Stark','Tyrion-Lannister','Jon-Snow']].plot() # Plotting the degree centrality evolution of Eddard-Stark, Tyrion-Lannister and Jon-Snow # ... YOUR CODE FOR TASK 5 ... ``` ## 6. What's up with Stannis Baratheon? <p>We can see that the importance of Eddard Stark dies off as the book series progresses. With Jon Snow, there is a drop in the fourth book but a sudden rise in the fifth book.</p> <p>Now let's look at various other measures like <em>betweenness centrality</em> and <em>PageRank</em> to find important characters in our Game of Thrones character co-occurrence network and see if we can uncover some more interesting facts about this network. Let's plot the evolution of betweenness centrality of this network over the five books. We will take the evolution of the top four characters of every book and plot it.</p> ``` # Creating a list of betweenness centrality of all the books just like we did for degree centrality evol = [nx.betweenness_centrality(book,weight='weight') for book in books] # Making a DataFrame from the list betweenness_evol_df = pd.DataFrame.from_records(evol) # Finding the top 4 characters in every book set_of_char = set() for i in range(5): set_of_char |= set(list(betweenness_evol_df.T[i].sort_values(ascending=False)[0:4].index)) list_of_char = list(set_of_char) betweenness_evol_df[list_of_char].plot() # Plotting the evolution of the top characters # ... YOUR CODE FOR TASK 6 ... ``` ## 7. What does Google PageRank tell us about GoT? <p>We see a peculiar rise in the importance of Stannis Baratheon over the books. In the fifth book, he is significantly more important than other characters in the network, even though he is the third most important character according to degree centrality.</p> <p>PageRank was the initial way Google ranked web pages. 
It evaluates the inlinks and outlinks of webpages in the world wide web, which is, essentially, a directed network. Let's look at the importance of characters in the Game of Thrones network according to PageRank. </p> ``` # Creating a list of pagerank of all the characters in all the books evol = [nx.pagerank(book) for book in books] # Making a DataFrame from the list pagerank_evol_df = pd.DataFrame.from_records(evol) # Finding the top 4 characters in every book set_of_char = set() for i in range(5): set_of_char |= set(list(pagerank_evol_df.T[i].sort_values(ascending=False)[0:4].index)) list_of_char = list(set_of_char) pagerank_evol_df[list_of_char].plot(figsize=(13, 7)) # Plotting the top characters # ... YOUR CODE FOR TASK 7 ... ``` ## 8. Correlation between different measures <p>Stannis, Jon Snow, and Daenerys are the most important characters in the fifth book according to PageRank. Eddard Stark follows a similar curve but for degree centrality and betweenness centrality: He is important in the first book but dies into oblivion over the book series.</p> <p>We have seen three different measures to calculate the importance of a node in a network, and all of them tells us something about the characters and their importance in the co-occurrence network. We see some names pop up in all three measures so maybe there is a strong correlation between them?</p> <p>Let's look at the correlation between PageRank, betweenness centrality and degree centrality for the fifth book using Pearson correlation.</p> ``` # Creating a list of pagerank, betweenness centrality, degree centrality # of all the characters in the fifth book. measures = [nx.pagerank(books[4]), nx.betweenness_centrality(books[4], weight='weight'), nx.degree_centrality(books[4])] # Creating the correlation DataFrame cor = pd.DataFrame.from_records(measures) cor.corr() # Calculating the correlation # ... YOUR CODE FOR TASK 8 ... ``` ## 9. Conclusion <p>We see a high correlation between these three measures for our character co-occurrence network.</p> <p>So we've been looking at different ways to find the important characters in the Game of Thrones co-occurrence network. According to degree centrality, Eddard Stark is the most important character initially in the books. But who is/are the most important character(s) in the fifth book according to these three measures? </p> ``` # Finding the most important character in the fifth book, # according to degree centrality, betweenness centrality and pagerank. p_rank, b_cent, d_cent = cor.idxmax(axis=1) # Printing out the top character accoding to the three measures # ... YOUR CODE FOR TASK 9 ... ```
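One possible completion of the final task, assuming the cell above has been run, is simply to print the three names unpacked from `cor.idxmax(axis=1)`:

```
# Print the top character according to each of the three measures (possible completion of Task 9).
print("PageRank:              ", p_rank)
print("Betweenness centrality:", b_cent)
print("Degree centrality:     ", d_cent)
```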
github_jupyter
## CNN on MNIST digits classification This example is the same as the MLP for MNIST classification. The difference is we are going to use `Conv2D` layers instead of `Dense` layers. The model that will be costructed below is made of: - First 2 layers - `Conv2D-ReLU-MaxPool` - 3rd layer - `Conv2D-ReLU` - 4th layer - `Dense(10)` - Output Activation - `softmax` - Optimizer - `SGD` Let us first load the packages and perform the initial pre-processing such as loading the dataset, performing normalization and conversion of labels to one-hot. Recall that in our `3-Dense` MLP example, we achieved ~95.3% accuracy at 269k parameters. Here, we can achieve ~98.5% using 105k parameters. CNN is more parameter efficient. ``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Activation, Dense, Dropout from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten from tensorflow.keras.utils import to_categorical, plot_model from tensorflow.keras.datasets import mnist # load mnist dataset (x_train, y_train), (x_test, y_test) = mnist.load_data() # compute the number of labels num_labels = len(np.unique(y_train)) # convert to one-hot vector y_train = to_categorical(y_train) y_test = to_categorical(y_test) # input image dimensions image_size = x_train.shape[1] # resize and normalize x_train = np.reshape(x_train,[-1, image_size, image_size, 1]) x_test = np.reshape(x_test,[-1, image_size, image_size, 1]) x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 ``` ### Hyper-parameters This hyper-parameters are similar to our MLP example. The differences are `kernel_size = 3` which is a typical kernel size in most CNNs and `filters = 64`. ``` # network parameters # image is processed as is (square grayscale) input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 filters = 64 ``` ### Sequential Model Building The model is similar to our previous example in MLP. The difference is we use `Conv2D` instead of `Dense`. Note that due to mismatch in dimensions, the output of the last `Conv2D` is flattened via `Flatten()` layer to suit the input vector dimensions of the `Dense`. Note that though we use `Activation(softmax)` as the last layer, this can also be integrated within the `Dense` layer in the parameter `activation='softmax'`. Both are the same. ``` # model is a stack of CNN-ReLU-MaxPooling model = Sequential() model.add(Conv2D(filters=filters, kernel_size=kernel_size, activation='relu', padding='same', input_shape=input_shape)) model.add(MaxPooling2D()) model.add(Conv2D(filters=filters, kernel_size=kernel_size, padding='same', activation='relu')) model.add(MaxPooling2D()) model.add(Conv2D(filters=filters, kernel_size=kernel_size, padding='same', activation='relu')) model.add(Flatten()) # dropout added as regularizer # model.add(Dropout(dropout)) # output layer is 10-dim one-hot vector model.add(Dense(num_labels)) model.add(Activation('softmax')) model.summary() ``` ## Model Training and Evaluation After building the model, it is time to train and evaluate. This part is similar to MLP training and evaluation. 
```
# plot_model(model, to_file='cnn-mnist.png', show_shapes=True)

# loss function for one-hot vectors
# SGD optimizer, as stated in the model description above
# accuracy is a good metric for classification tasks
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

# train the network
model.fit(x_train, y_train, epochs=20, batch_size=batch_size)

# evaluate the model on the test set
loss, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print("\nTest accuracy: %.1f%%" % (100.0 * acc))
```
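As a quick sanity check after training (an optional addition, assuming the model above has been fit), a few test digits can be classified and compared with their ground-truth labels:

```
# Predict a handful of test digits and compare with the ground-truth labels.
import numpy as np

predicted = model.predict(x_test[:5]).argmax(axis=1)   # class with the highest softmax score
actual    = y_test[:5].argmax(axis=1)                  # labels were one-hot encoded above
print("predicted:", predicted)
print("actual:   ", actual)
```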
github_jupyter
<a href="https://colab.research.google.com/github/BNN-UPC/ignnition/blob/ignnition-nightly/notebooks/shortest_path.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # IGNNITION: Quick start tutorial ### **Problem**: Find the shortest path in graphs with a Graph Neural Network Find more details on this quick-start tutorial at: https://ignnition.net/doc/quick_tutorial/ --- # Prepare the environment #### **Note**: Follow the instructions below to finish the installation ``` #@title Installing libraries and load resources #@markdown ####Hit **"enter"** to complete the installation of libraries !add-apt-repository ppa:deadsnakes/ppa !apt-get update !apt-get install python3.7 !python -m pip install --upgrade pip !pip install jupyter-client==6.1.5 !pip install ignnition==1.2.2 !pip install ipython-autotime #@title Import libraries { form-width: "30%" } import networkx as nx import random import json from networkx.readwrite import json_graph import os import ignnition %load_ext tensorboard %load_ext autotime #@markdown #### Download three YAML files we will need after (train_options.yaml, model_description.yaml, global_variables.yaml) # Download YAML files for this tutorial !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/train_options.yaml !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/global_variables.yaml !curl -O https://raw.githubusercontent.com/BNN-UPC/ignnition/ignnition-nightly/examples/Shortest_Path/model_description.yaml #@title Generate the datasets (training and validation) import os def generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p): while True: # Create a random Erdos Renyi graph G = nx.erdos_renyi_graph(random.randint(min_nodes, max_nodes), p) complement = list(nx.k_edge_augmentation(G, k=1, partial=True)) G.add_edges_from(complement) nx.set_node_attributes(G, 0, 'src-tgt') nx.set_node_attributes(G, 0, 'sp') nx.set_node_attributes(G, 'node', 'entity') # Assign randomly weights to graph edges for (u, v, w) in G.edges(data=True): w['weight'] = random.randint(min_edge_weight, max_edge_weight) # Select a source and target nodes to compute the shortest path src, tgt = random.sample(list(G.nodes), 2) G.nodes[src]['src-tgt'] = 1 G.nodes[tgt]['src-tgt'] = 1 # Compute all the shortest paths between source and target nodes try: shortest_paths = list(nx.all_shortest_paths(G, source=src, target=tgt,weight='weight')) except: shortest_paths = [] # Check if there exists only one shortest path if len(shortest_paths) == 1: for node in shortest_paths[0]: G.nodes[node]['sp'] = 1 return nx.DiGraph(G) def generate_dataset(file_name, num_samples, min_nodes=5, max_nodes=15, min_edge_weight=1, max_edge_weight=10, p=0.3): samples = [] for _ in range(num_samples): G = generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p) G.remove_nodes_from([node for node, degree in dict(G.degree()).items() if degree == 0]) samples.append(json_graph.node_link_data(G)) with open(file_name, "w") as f: json.dump(samples, f) root_dir="./data" if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(root_dir+"/train"): os.makedirs(root_dir+"/train") if not os.path.exists(root_dir + "/validation"): os.makedirs(root_dir + "/validation") generate_dataset("./data/train/data.json", 20000) generate_dataset("./data/validation/data.json", 1000) ``` --- # GNN model training ``` #@title 
Remove all the models previously trained (CheckPoints) #@markdown (It is not needed to execute this the first time) ! rm -r ./CheckPoint ! rm -r ./computational_graphs #@title Load TensorBoard to visualize the evolution of learning metrics along training #@markdown **IMPORTANT NOTE**: Click on "settings" in the TensorBoard GUI and check the option "Reload data" to see the evolution in real time. Note you can set the reload time interval (in seconds). from tensorboard import notebook notebook.list() # View open TensorBoard instances dir="./CheckPoint" if not os.path.exists(dir): os.makedirs(dir) %tensorboard --logdir $dir # Para finalizar instancias anteriores de TensorBoard # !kill 2953 # !ps aux #@title Run the training of your GNN model #@markdown </u>**Note**</u>: You can stop the training whenever you want to continue making predictions below import ignnition model = ignnition.create_model(model_dir= './') model.computational_graph() model.train_and_validate() ``` --- # Make predictions ## (This can be only excuted once the training is finished or stopped) ``` #@title Load functions to generate random graphs and print them import os import networkx as nx import matplotlib.pyplot as plt import json from networkx.readwrite import json_graph import ignnition import numpy as np import random %load_ext autotime def generate_random_graph(min_nodes, max_nodes, min_edge_weight, max_edge_weight, p): while True: # Create a random Erdos Renyi graph G = nx.erdos_renyi_graph(random.randint(min_nodes, max_nodes), p) complement = list(nx.k_edge_augmentation(G, k=1, partial=True)) G.add_edges_from(complement) nx.set_node_attributes(G, 0, 'src-tgt') nx.set_node_attributes(G, 0, 'sp') nx.set_node_attributes(G, 'node', 'entity') # Assign randomly weights to graph edges for (u, v, w) in G.edges(data=True): w['weight'] = random.randint(min_edge_weight, max_edge_weight) # Select the source and target nodes to compute the shortest path src, tgt = random.sample(list(G.nodes), 2) G.nodes[src]['src-tgt'] = 1 G.nodes[tgt]['src-tgt'] = 1 # Compute all the shortest paths between source and target nodes try: shortest_paths = list(nx.all_shortest_paths(G, source=src, target=tgt,weight='weight')) except: shortest_paths = [] # Check if there exists only one shortest path if len(shortest_paths) == 1: if len(shortest_paths[0])>=3 and len(shortest_paths[0])<=5: for node in shortest_paths[0]: G.nodes[node]['sp'] = 1 return shortest_paths[0], nx.DiGraph(G) def print_graph_predictions(G, path, predictions,ax): predictions = np.array(predictions) node_border_colors = [] links = [] for i in range(len(path)-1): links.append([path[i], path[i+1]]) links.append([path[i+1], path[i]]) # Add colors to node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') # Add colors for predictions [0,1] node_colors = predictions # Add colors for edges edge_colors = [] for edge in G.edges(data=True): e=[edge[0],edge[1]] if e in links: edge_colors.append('red') else: edge_colors.append('black') pos= nx.shell_layout(G) vmin = node_colors.min() vmax = node_colors.max() vmin = 0 vmax = 1 cmap = plt.cm.coolwarm nx.draw_networkx_nodes(G, pos=pos, node_color=node_colors, cmap=cmap, vmin=vmin, vmax=vmax, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, edge_color=edge_colors, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 
'weight'), ax=ax) sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax)) sm.set_array([]) plt.colorbar(sm, ax=ax) def print_graph_solution(G, path, predictions,ax, pred_th): predictions = np.array(predictions) node_colors = [] node_border_colors = [] links = [] for i in range(len(path)-1): links.append([path[i], path[i+1]]) links.append([path[i+1], path[i]]) # Add colors on node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') # Add colors for predictions Blue or Red cmap = plt.cm.get_cmap('coolwarm') dark_red = cmap(1.0) for p in predictions: if p >= pred_th: node_colors.append(dark_red) else: node_colors.append('blue') # Add colors for edges edge_colors = [] for edge in G.edges(data=True): e=[edge[0],edge[1]] if e in links: edge_colors.append('red') else: edge_colors.append('black') pos= nx.shell_layout(G) nx.draw_networkx_nodes(G, pos=pos, node_color=node_colors, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, edge_color=edge_colors, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 'weight'), ax=ax) def print_input_graph(G, ax): node_colors = [] node_border_colors = [] # Add colors to node borders for source and target nodes for node in G.nodes(data=True): if node[1]['src-tgt'] == 1: node_border_colors.append('red') else: node_border_colors.append('white') pos= nx.shell_layout(G) nx.draw_networkx_nodes(G, pos=pos, edgecolors=node_border_colors, linewidths=4, ax=ax) nx.draw_networkx_edges(G, pos=pos, arrows=False, ax=ax, width=2) nx.draw_networkx_edge_labels(G, pos=pos, label_pos=0.5, edge_labels=nx.get_edge_attributes(G, 'weight'), ax=ax) #@title Make predictions on random graphs #@markdown **NOTE**: IGNNITION will automatically load the latest trained model (CheckPoint) to make the predictions dataset_samples = [] sh_path, G = generate_random_graph(min_nodes=8, max_nodes=12, min_edge_weight=1, max_edge_weight=10, p=0.3) graph = G.to_undirected() dataset_samples.append(json_graph.node_link_data(G)) # write prediction dataset root_dir="./data" if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(root_dir+"/test"): os.makedirs(root_dir+"/test") with open(root_dir+"/test/data.json", "w") as f: json.dump(dataset_samples, f) # Make predictions predictions = model.predict() # Print the results fig, axes = plt.subplots(nrows=1, ncols=3) ax = axes.flatten() # Print input graph ax1 = ax[0] ax1.set_title("Input graph") print_input_graph(graph, ax1) # Print graph with predictions (soft values) ax1 = ax[1] ax1.set_title("GNN predictions (soft values)") print_graph_predictions(graph, sh_path, predictions[0], ax1) # Print solution of the GNN pred_th = 0.5 ax1 = ax[2] ax1.set_title("GNN solution (p >= "+str(pred_th)+")") print_graph_solution(graph, sh_path, predictions[0], ax1, pred_th) # Show plot in full screen plt.rcParams['figure.figsize'] = [10, 4] plt.rcParams['figure.dpi'] = 100 plt.tight_layout() plt.show() ``` --- # Try to improve your GNN model **Optional exercise**: The previous training was executed with some parameters set by default, so the accuracy of the GNN model is far from optimal. Here, we propose an alternative configuration that defines better training parameters for the GNN model. 
For this, you can check and modify the following YAML files to configure your GNN model: * /content/model_description.yaml -> GNN model description * /content/train_options.yaml -> Configuration of training parameters Try to define an optimizer with learning rate decay and set the number of samples and epochs by adding the following lines to the train_options.yaml file: ``` optimizer: type: Adam learning_rate: # define a schedule type: ExponentialDecay initial_learning_rate: 0.001 decay_steps: 10000 decay_rate: 0.5 ... batch_size: 1 epochs: 150 epoch_size: 200 ``` Then, you can train a new model from scratch by executing all the code snippets from section "GNN model training". Please note that the training process may take quite a long time depending on the machine where it is executed. In this example, there are a total of 30,000 training samples: 1 sample/step * 200 steps/epoch * 150 epochs = 30,000 samples.
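To get an intuition for what the `ExponentialDecay` schedule above does, the short sketch below evaluates the same schedule with plain TensorFlow (which IGNNITION builds on). It is not part of the tutorial; `tf.keras.optimizers.schedules.ExponentialDecay` is used here only to preview how the learning rate would evolve over the 30,000 training steps.

```python
import tensorflow as tf

# Same values as in the train_options.yaml snippet above
schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.001,
    decay_steps=10000,
    decay_rate=0.5)

# With batch_size=1, one step is one sample, so 200 steps/epoch * 150 epochs = 30,000 steps
for step in [0, 10000, 20000, 30000]:
    print(f"step {step:>6}: learning rate = {schedule(step).numpy():.6f}")
# The learning rate is halved every 10,000 steps: 0.001 -> 0.0005 -> 0.00025 -> 0.000125
```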
github_jupyter
# Lecture 3: Optimize, print and plot [Download on GitHub](https://github.com/NumEconCopenhagen/lectures-2019) [<img src="https://mybinder.org/badge_logo.svg">](https://mybinder.org/v2/gh/NumEconCopenhagen/lectures-2019/master?urlpath=lab/tree/03/Optimize_print_and_plot.ipynb) 1. [The consumer problem](#The-consumer-problem) 2. [Numerical python (numpy)](#Numerical-python-(numpy)) 3. [Utility function](#Utility-function) 4. [Algorithm 1: Simple loops](#Algorithm-1:-Simple-loops) 5. [Algorithm 2: Use monotonicity](#Algorithm-2:-Use-monotonicity) 6. [Algorithm 3: Call a solver](#Algorithm-3:-Call-a-solver) 7. [Indifference curves](#Indifference-curves) 8. [A classy solution](#A-classy-solution) 9. [Summary](#Summary) You will learn how to work with numerical data (**numpy**) and solve simple numerical optimization problems (**scipy.optimize**) and report the results both in text (**print**) and in figures (**matplotlib**). **Links:**: - **print**: [examples](https://www.python-course.eu/python3_formatted_output.php) (very detailed) - **numpy**: [detailed tutorial](https://www.python-course.eu/numpy.php) - **matplotlib**: [examples](https://matplotlib.org/tutorials/introductory/sample_plots.html#sphx-glr-tutorials-introductory-sample-plots-py), [documentation](https://matplotlib.org/users/index.html), [styles](https://matplotlib.org/3.1.0/gallery/style_sheets/style_sheets_reference.html) - **scipy-optimize**: [documentation](https://docs.scipy.org/doc/scipy/reference/optimize.html) <a id="The-consumer-problem"></a> # 1. The consumer problem Consider the following 2-good consumer problem with * utility function $u(x_1,x_2):\mathbb{R}^2_{+}\rightarrow\mathbb{R}$, * exogenous income $I$, and * price-vector $(p_1,p_2)$, given by $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}}u(x_{1},x_{2})\\ \text{s.t.}\\ p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\ x_{1},x_{2} & \geq 0 \end{aligned} $$ **Specific example:** Let the utility function be Cobb-Douglas, $$ u(x_1,x_2) = x_1^{\alpha}x_2^{1-\alpha} $$ We then know the solution is given by $$ \begin{aligned} x_1^{\ast} &= \alpha \frac{I}{p_1} \\ x_2^{\ast} &= (1-\alpha) \frac{I}{p_2} \end{aligned} $$ which implies that $\alpha$ is the budget share of the first good and $1-\alpha$ is the budget share of the second good. <a id="Numerical-python-(numpy)"></a> # 2. Numerical python (numpy) ``` import numpy as np # import the numpy module ``` A **numpy array** is like a list, but with two important differences: 1. Elements must be of **one homogenous type** 2. 
A **slice returns a view** rather than extract content ## 2.1 Basics Numpy arrays can be **created from lists** and can be **multi-dimensional**: ``` A = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) # one dimension B = np.array([[3.4, 8.7, 9.9], [1.1, -7.8, -0.7], [4.1, 12.3, 4.8]]) # two dimensions print(type(A),type(B)) # type print(A.dtype,B.dtype) # data type print(A.ndim,B.ndim) # dimensions print(A.shape,B.shape) # shape (1d: (columns,), 2d: (row,columns)) print(A.size,B.size) # size ``` **Slicing** a numpy array returns a **view**: ``` A = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) B = A.copy() # a copy of A C = A[2:6] # a view into A C[0] = 0 C[1] = 0 print(A) # changed print(B) # not changed ``` Numpy array can also be created using numpy functions: ``` print(np.ones((2,3))) print(np.zeros((4,2))) print(np.linspace(0,1,6)) # linear spacing ``` **Tip 1:** Try pressing <kbd>Shift</kbd>+<kbd>Tab</kbd> inside a function.<br> **Tip 2:** Try to write `?np.linspace` in a cell ``` ?np.linspace ``` ## 2.2 Math Standard **mathematical operations** can be applied: ``` A = np.array([[1,0],[0,1]]) B = np.array([[2,2],[2,2]]) print(A+B) print(A-B) print(A*B) # element-by-element product print(A/B) # element-by-element division print(A@B) # matrix product ``` If arrays does not fit together **broadcasting** is applied. Here is an example with multiplication: ``` A = np.array([ [10, 20, 30], [40, 50, 60] ]) # shape = (2,3) B = np.array([1, 2, 3]) # shape = (3,) = (1,3) C = np.array([[1],[2]]) # shape = (2,1) print(A) print(A*B) # every row is multiplied by B print(A*C) # every column is multiplied by C ``` **General rule:** Numpy arrays can be added/substracted/multiplied/divided if they in all dimensions have the same size or one of them has a size of one. If the numpy arrays differ in number of dimensions, this only has to be true for the (inner) dimensions they share. **More on broadcasting:** [Documentation](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html). A lot of **mathematical procedures** can easily be performed on numpy arrays. ``` A = np.array([3.1, 2.3, 9.1, -2.5, 12.1]) print(np.min(A)) # find minimum print(np.argmin(A)) # find index for minimum print(np.mean(A)) # calculate mean print(np.sort(A)) # sort (ascending) ``` **Note:** Sometimes a method can be used instead of a function, e.g. ``A.mean()``. Personally, I typically stick to functions because that always works. ## 2.3 Indexing **Multi-dimensional** indexing is done as: ``` X = np.array([ [11, 12, 13], [21, 22, 23] ]) print(X) print(X[0,0]) # first row, first column print(X[0,1]) # first row, second column print(X[1,2]) # second row, third column X[0] ``` Indexes can be **logical**. Logical 'and' is `&` and logical 'or' is `|`. ``` A = np.array([1,2,3,4,1,2,3,4]) B = np.array([3,3,3,3,2,2,2,2]) I = (A < 3) & (B == 3) # note & instead of 'and' print(type(I),I.dtype) print(I) print(A[I]) I = (A < 3) | (B == 3) # note | instead of 'or' print(A[I]) ``` ## 2.4 List of good things to know **Attributes and methods** to know: - size / ndim / shape - ravel / reshape / sort - copy **Functions** to know: - array / empty / zeros / ones / linspace - mean / median / std / var / sum / percentile - min/max, argmin/argmax / fmin / fmax / sort / clip - meshgrid / hstack / vstack / concatenate / tile / insert - allclose / isnan / isinf / isfinite / any / all **Concepts** to know: - view vs. 
copy - broadcasting - logical indexing **Question:** Consider the following code: ``` A = np.array([1,2,3,4,5]) B = A[3:] B[:] = 0 ``` What is `np.sum(A)` equal to? - **A:** 15 - **B:** 10 - **C:** 6 - **D:** 0 - **E:** Don't know ## 2.5 Extra: Memory Memory is structured in **rows**: ``` A = np.array([[3.1,4.2],[5.7,9.3]]) B = A.ravel() # one-dimensional view of A print(A.shape,A[0,:]) print(B.shape,B) ``` <a id="Utility-function"></a> # 3. Utility function Define the utility function: ``` def u_func(x1,x2,alpha=0.50): return x1**alpha*x2**(1-alpha) # x1,x2 are positional arguments # alpha is a keyword argument with default value 0.50 ``` ## 3.1 Print to screen Print a **single evaluation** of the utility function. ``` x1 = 1 x2 = 3 u = u_func(x1,x2) # f'text' is called a "formatted string" # {x1:.3f} prints variable x1 as floating point number with 3 decimals print(f'x1 = {x1:.3f}, x2 = {x2:.3f} -> u = {u:.3f}') print(u) ``` Print **multiple evaluations** of the utility function. ``` x1_list = [2,4,6,8,10,12] x2 = 3 for x1 in x1_list: # loop through each element in x1_list u = u_func(x1,x2,alpha=0.25) print(f'x1 = {x1:.3f}, x2 = {x2:.3f} -> u = {u:.3f}') ``` And a little nicer... ``` for i,x1 in enumerate(x1_list): # i is a counter u = u_func(x1,x2,alpha=0.25) print(f'{i:2d}: x1 = {x1:<6.3f} x2 = {x2:<6.3f} -> u = {u:<6.3f}') # {i:2d}: integer a width of 2 (right-aligned) # {x1:<6.3f}: float width of 6 and 3 decimals (<, left-aligned) ``` **Task**: Write a loop printing the results shown in the answer below. ``` # write your code here ``` **Answer:** ``` for i,x1 in enumerate(x1_list): # i is a counter u = u_func(x1,x2,alpha=0.25) print(f'{i:2d}: u({x1:.2f},{x1:.2f}) = {u:.4f}') ``` **More formatting options?** See these [examples](https://www.python-course.eu/python3_formatted_output.php). ## 3.2 Print to file Open a text-file and write lines in it: ``` with open('somefile.txt', 'w') as the_file: # 'w' is for 'write' for i, x1 in enumerate(x1_list): u = u_func(x1,x2,alpha=0.25) text = f'{i+10:2d}: x1 = {x1:<6.3f} x2 = {x2:<6.3f} -> u = {u:<6.3f}' the_file.write(text + '\n') # \n gives a lineshift # note: the with clause ensures that the file is properly closed afterwards ``` Open a text-file and read the lines in it and then print them: ``` with open('somefile.txt', 'r') as the_file: # 'r' is for 'read' lines = the_file.readlines() for line in lines: print(line,end='') # end='' removes the extra lineshift print creates ``` > **Note:** You could also write tables in LaTeX format and the import them in your LaTeX document. ## 3.3 Calculate the utility function on a grid **Calculate the utility function** on a 2-dimensional grid with $N$ elements in each dimension: ``` # a. settings N = 100 # number of elements x_max = 10 # maximum value # b. allocate numpy arrays shape_tuple = (N,N) x1_values = np.empty(shape_tuple) # allocate 2d numpy array with shape=(N,N) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # c. fill numpy arrays for i in range(N): # 0,1,...,N-1 for j in range(N): # 0,1,...,N-1 x1_values[i,j] = (i/(N-1))*x_max # in [0,x_max] x2_values[i,j] = (j/(N-1))*x_max # in [0,x_max] u_values[i,j] = u_func(x1_values[i,j],x2_values[i,j],alpha=0.25) ``` **Alternatively:** Use internal numpy functions: ``` x_vec = np.linspace(0,x_max,N) x1_values_alt,x2_values_alt = np.meshgrid(x_vec,x_vec,indexing='ij') u_values_alt = u_func(x1_values_alt,x2_values_alt,alpha=0.25) ``` Test whether the results are the same: ``` # a. 
maximum absolute difference max_abs_diff = np.max(np.abs(u_values-u_values_alt)) print(max_abs_diff) # very close to zero # b. test if all values are "close" print(np.allclose(u_values,u_values_alt)) ``` **Note:** The results are not exactly the same due to floating point arithmetics. ## 3.4 Plot the utility function Import modules and state that the figures should be inlined: ``` %matplotlib inline import matplotlib.pyplot as plt # baseline modul from mpl_toolkits.mplot3d import Axes3D # for 3d figures plt.style.use('seaborn-whitegrid') # whitegrid nice with 3d ``` Construct the actual plot: ``` fig = plt.figure() # create the figure ax = fig.add_subplot(1,1,1,projection='3d') # create a 3d axis in the figure ax.plot_surface(x1_values,x2_values,u_values); # create surface plot in the axis # note: fig.add_subplot(a,b,c) creates the c'th subplot in a grid of a times b plots ``` Make the figure **zoomable** and **panable** using a widget: ``` %matplotlib widget fig = plt.figure() # create the figure ax = fig.add_subplot(1,1,1,projection='3d') # create a 3d axis in the figure ax.plot_surface(x1_values,x2_values,u_values); # create surface plot in the axis ``` Turn back to normal inlining: ``` %matplotlib inline ``` **Extensions**: Use a colormap, make it pretier, and save to disc. ``` from matplotlib import cm # for colormaps # a. actual plot fig = plt.figure() ax = fig.add_subplot(1,1,1,projection='3d') ax.plot_surface(x1_values,x2_values,u_values,cmap=cm.jet) # b. add labels ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$u$') # c. invert xaxis ax.invert_xaxis() # d. save fig.tight_layout() fig.savefig('someplot.pdf') # or e.g. .png ``` **More formatting options?** See these [examples](https://matplotlib.org/tutorials/introductory/sample_plots.html#sphx-glr-tutorials-introductory-sample-plots-py). **Task**: Construct the following plot: ![wireframeplot](https://github.com/NumEconCopenhagen/lectures-2019/raw/master/03/someplot_wireframe.png) **Answer:** ``` # write your code here # a. actual plot fig = plt.figure() ax = fig.add_subplot(1,1,1,projection='3d') ax.plot_wireframe(x1_values,x2_values,u_values,edgecolor='black') # b. add labels ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_zlabel('$u$') # c. invert xaxis ax.invert_xaxis() # e. save fig.tight_layout() fig.savefig('someplot_wireframe.png') fig.savefig('someplot_wireframe.pdf') ``` ## 3.5 Summary We have talked about: 1. Print (to screen and file) 2. Figures (matplotlib) **Other plotting libraries:** [seaborn](https://seaborn.pydata.org/) and [bokeh](https://bokeh.pydata.org/en/latest/). <a id="Algorithm-1:-Simple-loops"></a> # 4. Algorithm 1: Simple loops Remember the problem we wanted to solve: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1},x_{2}}u(x_{1},x_{2})\\ & \text{s.t.}\\ p_{1}x_{1}+p_{2}x_{2} & \leq I,\,\,\,p_{1},p_{2},I>0\\ x_{1},x_{2} & \geq 0 \end{aligned} $$ **Idea:** Loop through a grid of $N_1 \times N_2$ possible solutions. This is the same as solving: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1}\in X_1,x_{2} \in X_2} x_1^{\alpha}x_2^{1-\alpha}\\ & \text{s.t.}\\ X_1 & = \left\{0,\frac{1}{N_1-1}\frac{I}{p_1},\frac{2}{N_1-1}\frac{I}{p_1},\dots,\frac{I}{p_1}\right\} \\ X_2 & = \left\{0,\frac{1}{N_2-1}\frac{I}{p_2},\frac{2}{N_2-1}\frac{ I}{p_2},\dots,\frac{ I}{p_2}\right\} \\ p_{1}x_{1}+p_{2}x_{2} & \leq I\\ \end{aligned} $$ Function doing just this: ``` def find_best_choice(alpha,I,p1,p2,N1,N2,do_print=True): # a. 
allocate numpy arrays shape_tuple = (N1,N2) x1_values = np.empty(shape_tuple) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # b. start from guess of x1=x2=0 x1_best = 0 x2_best = 0 u_best = u_func(0,0,alpha=alpha) # c. loop through all possibilities for i in range(N1): for j in range(N2): # i. x1 and x2 (chained assignment) x1_values[i,j] = x1 = (i/(N1-1))*I/p1 x2_values[i,j] = x2 = (j/(N2-1))*I/p2 # ii. utility if p1*x1+p2*x2 <= I: # u(x1,x2) if expenditures <= income u_values[i,j] = u_func(x1,x2,alpha=alpha) else: # u(0,0) if expenditures > income u_values[i,j] = u_func(0,0,alpha=alpha) # iii. check if best so far if u_values[i,j] > u_best: x1_best = x1_values[i,j] x2_best = x2_values[i,j] u_best = u_values[i,j] # d. print if do_print: print_solution(x1_best,x2_best,u_best,I,p1,p2) return x1_best,x2_best,u_best,x1_values,x2_values,u_values # function for printing the solution def print_solution(x1,x2,u,I,p1,p2): print(f'x1 = {x1:.8f}') print(f'x2 = {x2:.8f}') print(f'u = {u:.8f}') print(f'I-p1*x1-p2*x2 = {I-p1*x1-p2*x2:.8f}') ``` Call the function: ``` sol = find_best_choice(alpha=0.25,I=20,p1=1,p2=2,N1=500,N2=400) ``` Plot the solution: ``` %matplotlib widget # a. unpack solution x1_best,x2_best,u_best,x1_values,x2_values,u_values = sol # b. setup figure fig = plt.figure(dpi=100,num='') ax = fig.add_subplot(1,1,1,projection='3d') # c. plot 3d surface of utility values for different choices ax.plot_surface(x1_values,x2_values,u_values,cmap=cm.jet) ax.invert_xaxis() # d. plot optimal choice ax.scatter(x1_best,x2_best,u_best,s=50,color='black'); %matplotlib inline ``` **Task**: Can you find a better solution with higher utility and lower left-over income, $I-p_1 x_1-p_2 x_2$? ``` # write your code here # sol = find_best_choice() ``` **Answer:** ``` sol = find_best_choice(alpha=0.25,I=10,p1=1,p2=2,N1=1000,N2=1000) ``` <a id="Algorithm-2:-Use-monotonicity"></a> # 5. Algorithm 2: Use monotonicity **Idea:** Loop through a grid of $N$ possible solutions for $x_1$ and assume the remainder is spent on $x_2$. This is the same as solving: $$ \begin{aligned} V(p_{1},p_{2},I) & = \max_{x_{1}\in X_1} x_1^{\alpha}x_2^{1-\alpha}\\ \text{s.t.}\\ X_1 & = \left\{0,\frac{1}{N-1}\frac{I}{p_1},\frac{2}{N-1}\frac{I}{p_1},\dots,\frac{I}{p_1}\right\} \\ x_{2} & = \frac{I-p_{1}x_{1}}{p_2}\\ \end{aligned} $$ Function doing just this: ``` def find_best_choice_monotone(alpha,I,p1,p2,N,do_print=True): # a. allocate numpy arrays shape_tuple = (N,) x1_values = np.empty(shape_tuple) x2_values = np.empty(shape_tuple) u_values = np.empty(shape_tuple) # b. start from guess of x1=x2=0 x1_best = 0 x2_best = 0 u_best = u_func(0,0,alpha) # c. loop through all possibilities for i in range(N): # i. x1 x1_values[i] = x1 = i/(N-1)*I/p1 # ii. implied x2 x2_values[i] = x2 = (I-p1*x1)/p2 # iii. utility u_values[i] = u_func(x1,x2,alpha) if u_values[i] >= u_best: x1_best = x1_values[i] x2_best = x2_values[i] u_best = u_values[i] # d. print if do_print: print_solution(x1_best,x2_best,u_best,I,p1,p2) return x1_best,x2_best,u_best,x1_values,x2_values,u_values sol_monotone = find_best_choice_monotone(alpha=0.25,I=10,p1=1,p2=2,N=1000) ``` Plot the solution: ``` plt.style.use("seaborn") # a. create the figure fig = plt.figure(figsize=(10,4)) # figsize is in inches... # b. unpack solution x1_best,x2_best,u_best,x1_values,x2_values,u_values = sol_monotone # c.
left plot ax_left = fig.add_subplot(1,2,1) ax_left.plot(x1_values,u_values) ax_left.scatter(x1_best,u_best) ax_left.set_title('value of choice, $u(x_1,x_2)$') ax_left.set_xlabel('$x_1$') ax_left.set_ylabel('$u(x_1,(I-p_1 x_1)/p_2)$') ax_left.grid(True) # c. right plot ax_right = fig.add_subplot(1,2,2) ax_right.plot(x1_values,x2_values) ax_right.scatter(x1_best,x2_best) ax_right.set_title('implied $x_2$') ax_right.set_xlabel('$x_1$') ax_right.set_ylabel('$x_2$') ax_right.grid(True) ``` <a id="Algorithm-3:-Call-a-solver"></a> # 6. Algorithm 3: Call a solver ``` from scipy import optimize ``` Choose paramters: ``` alpha = 0.25 # preference parameter I = 10 # income p1 = 1 # price 1 p2 = 2 # price 2 ``` **Case 1**: Scalar solver using monotonicity. ``` # a. objective funciton (to minimize) def value_of_choice(x1,alpha,I,p1,p2): x2 = (I-p1*x1)/p2 return -u_func(x1,x2,alpha) # b. call solver sol_case1 = optimize.minimize_scalar( value_of_choice,method='bounded', bounds=(0,I/p1),args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case1.x x2 = (I-p1*x1)/p2 u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Case 2**: Multi-dimensional constrained solver. ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # note: x is a vector x1 = x[0] x2 = x[1] return -u_func(x1,x2,alpha) # b. constraints (violated if negative) and bounds constraints = ({'type': 'ineq', 'fun': lambda x: I-p1*x[0]-p2*x[1]}) bounds = ((0,I/p1),(0,I/p2)) # c. call solver initial_guess = [I/p1/2,I/p2/2] sol_case2 = optimize.minimize( value_of_choice,initial_guess,args=(alpha,I,p1,p2), method='SLSQP',bounds=bounds,constraints=constraints) # d. unpack solution x1 = sol_case2.x[0] x2 = sol_case2.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Case 3**: Multi-dimensional unconstrained solver with constrains implemented via penalties. ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # i. unpack x1 = x[0] x2 = x[1] # ii. penalty penalty = 0 E = p1*x1+p2*x2 # total expenses if E > I: # expenses > income -> not allowed fac = I/E penalty += 1000*(E-I) # calculate penalty x1 *= fac # force E = I x2 *= fac # force E = I return -u_func(x1,x2,alpha) # b. call solver initial_guess = [I/p1/2,I/p2/2] sol_case3 = optimize.minimize( value_of_choice,initial_guess,method='Nelder-Mead', args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case3.x[0] x2 = sol_case3.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` **Task:** Find the error in the code in the previous cell. ``` # write your code here ``` **Answer:** ``` # a. objective function (to minimize) def value_of_choice(x,alpha,I,p1,p2): # i. unpack x1 = x[0] x2 = x[1] # ii. penalty penalty = 0 E = p1*x1+p2*x2 # total expenses if E > I: # expenses > income -> not allowed fac = I/E penalty += 1000*(E-I) # calculate penalty x1 *= fac # force E = I x2 *= fac # force E = I return -u_func(x1,x2,alpha) + penalty # the error # b. call solver initial_guess = [I/p1/2,I/p2/2] sol_case3 = optimize.minimize( value_of_choice,initial_guess,method='Nelder-Mead', args=(alpha,I,p1,p2)) # c. unpack solution x1 = sol_case3.x[0] x2 = sol_case3.x[1] u = u_func(x1,x2,alpha) print_solution(x1,x2,u,I,p1,p2) ``` <a id="Indifference-curves"></a> # 7. 
Indifference curves Remember that the indifference curve through the point $(y_1,y_2)$ is given by $$ \big\{(x_1,x_2) \in \mathbb{R}^2_+ \,|\, u(x_1,x_2) = u(y_1,y_2)\big\} $$ To find the indifference curve, we can fix a grid for $x_2$, and then find the corresponding $x_1$ which solves $u(x_1,x_2) = u(y_1,y_2)$ for each value of $x_2$. ``` def objective(x1,x2,alpha,u): return u_func(x1,x2,alpha)-u # = 0 then on indifference curve with utility = u def find_indifference_curve(y1,y2,alpha,N,x2_max): # a. utility in (y1,y2) u_y1y2 = u_func(y1,y2,alpha) # b. allocate numpy arrays x1_vec = np.empty(N) x2_vec = np.linspace(1e-8,x2_max,N) # c. loop through x2 for i,x2 in enumerate(x2_vec): x1_guess = 0 # initial guess sol = optimize.root(objective, x1_guess, args=(x2,alpha,u_y1y2)) # optimize.root -> solve objective = 0 starting from x1 = x1_guess x1_vec[i] = sol.x[0] return x1_vec,x2_vec ``` Find and plot an indifference curve: ``` # a. find indifference curve through (2,2) for x2 in [0,10] x2_max = 10 x1_vec,x2_vec = find_indifference_curve(y1=2,y2=2,alpha=0.25,N=100,x2_max=x2_max) # b. plot indifference curve fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) ax.plot(x1_vec,x2_vec) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_xlim([0,x2_max]) ax.set_ylim([0,x2_max]) ax.grid(True) ``` **Task:** Find the indifference curve through $x_1 = 15$ and $x_2 = 3$ with $\alpha = 0.5$. ``` # write your code here x2_max = 20 x1_vec,x2_vec = find_indifference_curve(y1=15,y2=3,alpha=0.5,N=100,x2_max=x2_max) fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) ax.plot(x1_vec,x2_vec) ax.set_xlabel('$x_1$') ax.set_ylabel('$x_2$') ax.set_xlim([0,x2_max]) ax.set_ylim([0,x2_max]) ax.grid(True) ``` <a id="A-classy-solution"></a> # 8. A classy solution > **Note:** This section is advanced due to the use of a module with a class. It is, however, a good example of how to structure code for solving and illustrating a model (a minimal sketch of such a class is shown at the end of this notebook). **Load module** I have written (consumer_module.py in the same folder as this notebook). ``` from consumer_module import consumer ``` ## 8.1 Jeppe Give birth to a consumer called **jeppe**: ``` jeppe = consumer() # create an instance of the consumer class called jeppe print(jeppe) ``` Solve **jeppe**'s problem. ``` jeppe.solve() print(jeppe) ``` ## 8.2 Mette Create a new consumer, called Mette, and solve her problem. ``` mette = consumer(alpha=0.25) mette.solve() mette.find_indifference_curves() print(mette) ``` Make an illustration of Mette's problem and its solution: ``` fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) mette.plot_indifference_curves(ax) mette.plot_budgetset(ax) mette.plot_solution(ax) mette.plot_details(ax) ``` <a id="Summary"></a> # 9. Summary **This lecture:** We have talked about: 1. Numpy (view vs. copy, indexing, broadcasting, functions, methods) 2. Print (to screen and file) 3. Figures (matplotlib) 4. Optimization (using loops or scipy.optimize) 5. Advanced: Consumer class Most economic models contain optimizing agents solving a constrained optimization problem. The tools applied in this lecture are not specific to the consumer problem in any way. **Your work:** Before solving Problem Set 1, read through this notebook and play around with the code. To solve the problem set, you only need to modify the code used here slightly. **Next lecture:** Random numbers and simulation.
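The `consumer_module.py` used in Section 8 is not reproduced in this notebook. As a rough orientation only, here is a minimal sketch (my own, not the actual module) of how such a class could be structured, reusing the monotonicity trick from Algorithm 2; the real module also implements the plotting methods used above.

```python
import numpy as np
from scipy import optimize

class ConsumerSketch:
    """ Minimal illustration of a consumer class; NOT the actual consumer_module.consumer. """

    def __init__(self, alpha=0.5, I=10, p1=1, p2=2):
        self.alpha, self.I, self.p1, self.p2 = alpha, I, p1, p2
        self.x1 = self.x2 = self.u = np.nan  # solution not computed yet

    def u_func(self, x1, x2):
        return x1**self.alpha * x2**(1 - self.alpha)

    def solve(self):
        # use monotonicity: spend everything, so x2 = (I - p1*x1)/p2
        obj = lambda x1: -self.u_func(x1, (self.I - self.p1 * x1) / self.p2)
        res = optimize.minimize_scalar(obj, method='bounded', bounds=(0, self.I / self.p1))
        self.x1 = res.x
        self.x2 = (self.I - self.p1 * self.x1) / self.p2
        self.u = self.u_func(self.x1, self.x2)

    def __str__(self):
        return f'alpha = {self.alpha:.2f}: x1 = {self.x1:.2f}, x2 = {self.x2:.2f}, u = {self.u:.2f}'

mette_sketch = ConsumerSketch(alpha=0.25)
mette_sketch.solve()
print(mette_sketch)  # should reproduce x1 = alpha*I/p1 = 2.50 and x2 = (1-alpha)*I/p2 = 3.75
```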
github_jupyter
# Step 1) Data Preparation ``` %run data_prep.py INTC import pandas as pd df = pd.read_csv("../1_Data/INTC.csv",infer_datetime_format=True, parse_dates=['dt'], index_col=['dt']) trainCount=int(len(df)*0.4) dfTrain = df.iloc[:trainCount] dfTest = df.iloc[trainCount:] dfTest.to_csv('local_test/test_dir/input/data/training/data.csv') dfTest.head() %matplotlib notebook dfTest["close"].plot() ``` # Step 2) Modify Strategy Configuration In the following cell, you can adjust the parameters for the strategy. * `user` = Name for Leaderboard (optional) * `go_long` = Go Long for Breakout (true or false) * `go_short` = Go Short for Breakout (true or false) * `period` = Length of window for previous high and low * `size` = The number of shares for a transaction `Tip`: A good starting point for improving the strategy is to lengthen the period of the previous high and low. Equity Markets tend to have a long bias and if you only consider long trades this might improve the performance. ``` %%writefile model/algo_config { "user" : "user", "go_long" : true, "go_short" : true, "period" : 9, "size" : 1000 } %run update_config.py daily_breakout ``` # Step 3) Modify Strategy Code `Tip`: A good starting point for improving the strategy is to add additional indicators like ATR (Average True Range) before placing a trade. You want to avoid false signals if there is not enough volatility. Here are some helpful links: * Backtrader Documentation: https://www.backtrader.com/docu/strategy/ * TA-Lib Indicator Reference: https://www.backtrader.com/docu/talibindautoref/ * Backtrader Indicator Reference: https://www.backtrader.com/docu/indautoref/ ``` %%writefile model/algo_daily_breakout.py import backtrader as bt from algo_base import * import pytz from pytz import timezone class MyStrategy(StrategyTemplate): def __init__(self): # Initiation super(MyStrategy, self).__init__() self.highest = bt.ind.Highest(period=self.config["period"]) self.lowest = bt.ind.Lowest(period=self.config["period"]) self.size = self.config["size"] def next(self): # Processing super(MyStrategy, self).next() dt=self.datas[0].datetime.datetime(0) if not self.position: if self.config["go_long"] and self.datas[0] > self.highest[-1]: self.buy(size=self.size) # Go long elif self.config["go_short"] and self.datas[0] < self.lowest[-1]: self.sell(size=self.size) # Go short elif self.position.size>0 and self.datas[0] < self.highest[-1]: self.close() elif self.position.size<0 and self.datas[0] > self.lowest[-1]: self.close() ``` # Step 4) Backtest Locally (historical data) **Please note that the initial docker image build may take up to 5 min. Subsequent runs are fast.** ``` #Build Local Algo Image !docker build -t algo_$(cat model/algo_name) . !docker run -v $(pwd)/local_test/test_dir:/opt/ml --rm algo_$(cat model/algo_name) train from IPython.display import Image Image(filename='local_test/test_dir/model/chart.png') ``` ## Refine your trading strategy (step 2 to 4). Once you are ready to test the performance of your strategy in a forwardtest, move on to the next step. # Step 5) Forwardtest on SageMaker (simulated data) and submit performance **Please note that the forwardtest in SageMaker runs each time with a new simulated dataset to validate the performance of the strategy. 
Feel free to run it multiple times to compare performance.** ``` #Deploy Algo Image to ECS !./build_and_push.sh #Run Remote Forwardtest via SageMaker import sagemaker as sage from sagemaker import get_execution_role from sagemaker.estimator import Estimator role = get_execution_role() sess = sage.Session() WORK_DIRECTORY = 'local_test/test_dir/input/data/training' data_location = sess.upload_data(WORK_DIRECTORY, key_prefix='data') print(data_location) with open('model/algo_config', 'r') as f: config = json.load(f) algo_name=config['algo_name'] config['sim_data']=True prefix='algo_'+algo_name job_name=prefix.replace('_','-') account = sess.boto_session.client('sts').get_caller_identity()['Account'] region = sess.boto_session.region_name image = f'{account}.dkr.ecr.{region}.amazonaws.com/{prefix}:latest' algo = sage.estimator.Estimator( image_name=image, role=role, train_instance_count=1, train_instance_type='ml.m4.xlarge', output_path="s3://{}/output".format(sess.default_bucket()), sagemaker_session=sess, base_job_name=job_name, hyperparameters=config, metric_definitions=[ { "Name": "algo:pnl", "Regex": "Total PnL:(.*?)]" }, { "Name": "algo:sharpe_ratio", "Regex": "Sharpe Ratio:(.*?)," } ]) algo.fit(data_location) #Get Algo Metrics from sagemaker.analytics import TrainingJobAnalytics latest_job_name = algo.latest_training_job.job_name metrics_dataframe = TrainingJobAnalytics(training_job_name=latest_job_name).dataframe() metrics_dataframe #Get Algo Chart from S3 model_name=algo.model_data.replace('s3://'+sess.default_bucket()+'/','') import boto3 s3 = boto3.resource('s3') my_bucket = s3.Bucket(sess.default_bucket()) my_bucket.download_file(model_name,'model.tar.gz') !tar -xzf model.tar.gz !rm model.tar.gz from IPython.display import Image Image(filename='chart.png') ``` ### Congratulations! You've completed this strategy. Verify your submission on the leaderboard. ``` %run leaderboard.py ```
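If you want to iterate further on Step 3, the tip there suggests filtering breakouts with ATR (Average True Range). The sketch below is one hedged way to do that with Backtrader's built-in `bt.ind.ATR`; it assumes the same `StrategyTemplate` and `algo_config` keys used above, and the volatility condition (ATR above its own moving average) is only an illustrative choice, not a tested rule.

```python
import backtrader as bt
from algo_base import *  # assumes the same StrategyTemplate as in Step 3

class MyStrategy(StrategyTemplate):
    def __init__(self):  # Initiation
        super(MyStrategy, self).__init__()
        self.highest = bt.ind.Highest(period=self.config["period"])
        self.lowest = bt.ind.Lowest(period=self.config["period"])
        # Illustrative volatility filter: ATR compared to its own moving average
        self.atr = bt.ind.ATR(period=self.config["period"])
        self.atr_avg = bt.ind.SMA(self.atr, period=self.config["period"])
        self.size = self.config["size"]

    def next(self):  # Processing
        super(MyStrategy, self).next()
        volatile_enough = self.atr[0] > self.atr_avg[0]
        if not self.position:
            if volatile_enough and self.config["go_long"] and self.datas[0] > self.highest[-1]:
                self.buy(size=self.size)   # Go long only on volatile breakouts
            elif volatile_enough and self.config["go_short"] and self.datas[0] < self.lowest[-1]:
                self.sell(size=self.size)  # Go short only on volatile breakouts
        elif self.position.size > 0 and self.datas[0] < self.highest[-1]:
            self.close()
        elif self.position.size < 0 and self.datas[0] > self.lowest[-1]:
            self.close()
```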
github_jupyter
This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # Installation Note that some packages may be out of date. You can always get the newest `nbsphinx` release from [PyPI](https://pypi.org/project/nbsphinx) (using `pip`). If you want to try the latest development version, have a look at the file [CONTRIBUTING.rst](https://github.com/spatialaudio/nbsphinx/blob/master/CONTRIBUTING.rst). ## nbsphinx Packages [![Anaconda Badge](https://anaconda.org/conda-forge/nbsphinx/badges/version.svg)](https://anaconda.org/conda-forge/nbsphinx) If you are using the `conda` package manager (e.g. with [Anaconda](https://www.anaconda.com/distribution/) for Linux/macOS/Windows), you can install `nbsphinx` from the [conda-forge](https://conda-forge.org/) channel: conda install -c conda-forge nbsphinx If you are using Linux, there are packages available for many distributions. [![Packaging status](https://repology.org/badge/vertical-allrepos/python:nbsphinx.svg)](https://repology.org/project/python:nbsphinx/versions) [![PyPI version](https://badge.fury.io/py/nbsphinx.svg)](https://pypi.org/project/nbsphinx) On any platform, you can also install `nbsphinx` with `pip`, Python's own package manager: python3 -m pip install nbsphinx --user If you want to install it system-wide for all users (assuming you have the necessary rights), just drop the `--user` flag. To upgrade an existing `nbsphinx` installation to the newest release, use the `--upgrade` flag: python3 -m pip install nbsphinx --upgrade --user If you suddenly change your mind, you can un-install it with: python3 -m pip uninstall nbsphinx Depending on your Python installation, you may have to use `python` instead of `python3`. ## nbsphinx Prerequisites Some of the aforementioned packages will install some of these prerequisites automatically, some of the things may be already installed on your computer anyway. ### Python Of course you'll need Python, because both Sphinx and `nbsphinx` are implemented in Python. There are many ways to get Python. If you don't know which one is best for you, you can try [Anaconda](https://www.anaconda.com/distribution/). ### Sphinx You'll need [Sphinx](https://www.sphinx-doc.org/) as well, because `nbsphinx` is just a Sphinx extension and doesn't do anything on its own. If you use `conda`, you can get [Sphinx from the conda-forge channel](https://anaconda.org/conda-forge/sphinx): conda install -c conda-forge sphinx Alternatively, you can install it with `pip` (see below): python3 -m pip install Sphinx --user ### pip Recent versions of Python already come with `pip` pre-installed. If you don't have it, you can [install it manually](https://pip.pypa.io/en/latest/installing/). ### pandoc The stand-alone program [pandoc](https://pandoc.org/) is used to convert Markdown content to something Sphinx can understand. You have to install this program separately, ideally with your package manager. If you are using `conda`, you can install [pandoc from the conda-forge channel](https://anaconda.org/conda-forge/pandoc): conda install -c conda-forge pandoc If that doesn't work out for you, have a look at `pandoc`'s [installation instructions](https://pandoc.org/installing.html). <div class="alert alert-info"> **Note:** The use of `pandoc` in `nbsphinx` is temporary, but will likely stay that way for a long time, see [issue #36](https://github.com/spatialaudio/nbsphinx/issues/36). 
</div> ### Pygments Lexer for Syntax Highlighting To get proper syntax highlighting in code cells, you'll need an appropriate *Pygments lexer*. This of course depends on the programming language of your Jupyter notebooks (more specifically, the `pygments_lexer` metadata of your notebooks). For example, if you use Python in your notebooks, you'll have to have the `IPython` package installed, e.g. with conda install -c conda-forge ipython or python3 -m pip install IPython --user <div class="alert alert-info"> **Note:** If you are using Anaconda with the default channel and syntax highlighting in code cells doesn't seem to work, you can try to install IPython from the `conda-forge` channel or directly with `pip`, or as a work-around, add `'IPython.sphinxext.ipython_console_highlighting'` to `extensions` in your `conf.py`. For details, see [Anaconda issue #1430](https://github.com/ContinuumIO/anaconda-issues/issues/1430) and [nbsphinx issue #24](https://github.com/spatialaudio/nbsphinx/issues/24). </div> ### Jupyter Kernel If you want to execute your notebooks during the Sphinx build process (see [Controlling Notebook Execution](executing-notebooks.ipynb)), you need an appropriate [Jupyter kernel](https://jupyter.readthedocs.io/en/latest/projects/kernels.html) installed. For example, if you use Python, you should install the `ipykernel` package, e.g. with conda install -c conda-forge ipykernel or python3 -m pip install ipykernel --user If you created your notebooks yourself with Jupyter, it's very likely that you have the right kernel installed already.
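Once the prerequisites are in place, enabling `nbsphinx` is just a matter of adding it to the `extensions` list in your Sphinx `conf.py`. A minimal sketch (project name and paths are placeholders, adjust to your project):

```python
# conf.py -- minimal sketch of a Sphinx configuration using nbsphinx

project = 'My Project'

extensions = [
    'nbsphinx',
    # Work-around mentioned above if syntax highlighting fails with Anaconda:
    # 'IPython.sphinxext.ipython_console_highlighting',
]

# Don't let Sphinx pick up the build directory or notebook checkpoints
exclude_patterns = ['_build', '**.ipynb_checkpoints']
```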
github_jupyter
# Building your Deep Neural Network: Step by Step Welcome to your week 4 assignment (part 1 of 2)! You have previously trained a 2-layer Neural Network (with a single hidden layer). This week, you will build a deep neural network, with as many layers as you want! - In this notebook, you will implement all the functions required to build a deep neural network. - In the next assignment, you will use these functions to build a deep neural network for image classification. **After this assignment you will be able to:** - Use non-linear units like ReLU to improve your model - Build a deeper neural network (with more than 1 hidden layer) - Implement an easy-to-use neural network class **Notation**: - Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters. - Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. - Example: $x^{(i)}$ is the $i^{th}$ training example. - Lowerscript $i$ denotes the $i^{th}$ entry of a vector. - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations). Let's get started! ## 1 - Packages Let's first import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the main package for scientific computing with Python. - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python. - dnn_utils provides some necessary functions for this notebook. - testCases provides some test cases to assess the correctness of your functions - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work. Please don't change the seed. ``` import numpy as np import h5py import matplotlib.pyplot as plt from testCases_v3 import * from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward %matplotlib inline plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' %load_ext autoreload %autoreload 2 np.random.seed(1) ``` ## 2 - Outline of the Assignment To build your neural network, you will be implementing several "helper functions". These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. Each small helper function you will implement will have detailed instructions that will walk you through the necessary steps. Here is an outline of this assignment, you will: - Initialize the parameters for a two-layer network and for an $L$-layer neural network. - Implement the forward propagation module (shown in purple in the figure below). - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$). - We give you the ACTIVATION function (relu/sigmoid). - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function. - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function. - Compute the loss. - Implement the backward propagation module (denoted in red in the figure below). - Complete the LINEAR part of a layer's backward propagation step. - We give you the gradient of the ACTIVATE function (relu_backward/sigmoid_backward) - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function. 
- Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function - Finally update the parameters. <img src="images/final outline.png" style="width:800px;height:500px;"> <caption><center> **Figure 1**</center></caption><br> **Note** that for every forward function, there is a corresponding backward function. That is why at every step of your forward module you will be storing some values in a cache. The cached values are useful for computing gradients. In the backpropagation module you will then use the cache to calculate the gradients. This assignment will show you exactly how to carry out each of these steps. ## 3 - Initialization You will write two helper functions that will initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one will generalize this initialization process to $L$ layers. ### 3.1 - 2-layer Neural Network **Exercise**: Create and initialize the parameters of the 2-layer neural network. **Instructions**: - The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. - Use random initialization for the weight matrices. Use `np.random.randn(shape)*0.01` with the correct shape. - Use zero initialization for the biases. Use `np.zeros(shape)`. ``` # GRADED FUNCTION: initialize_parameters def initialize_parameters(n_x, n_h, n_y): """ Argument: n_x -- size of the input layer n_h -- size of the hidden layer n_y -- size of the output layer Returns: parameters -- python dictionary containing your parameters: W1 -- weight matrix of shape (n_h, n_x) b1 -- bias vector of shape (n_h, 1) W2 -- weight matrix of shape (n_y, n_h) b2 -- bias vector of shape (n_y, 1) """ np.random.seed(1) ### START CODE HERE ### (≈ 4 lines of code) W1 = np.random.randn(n_h,n_x) * 0.01 b1 = np.zeros((n_h,1)) W2 = np.random.randn(n_y,n_h) * 0.01 b2 = np.zeros((n_y,1)) ### END CODE HERE ### assert(W1.shape == (n_h, n_x)) assert(b1.shape == (n_h, 1)) assert(W2.shape == (n_y, n_h)) assert(b2.shape == (n_y, 1)) parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2} return parameters parameters = initialize_parameters(3,2,1) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** </td> <td> [[ 0.01624345 -0.00611756 -0.00528172] [-0.01072969 0.00865408 -0.02301539]] </td> </tr> <tr> <td> **b1**</td> <td>[[ 0.] [ 0.]]</td> </tr> <tr> <td>**W2**</td> <td> [[ 0.01744812 -0.00761207]]</td> </tr> <tr> <td> **b2** </td> <td> [[ 0.]] </td> </tr> </table> ### 3.2 - L-layer Neural Network The initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep`, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. 
Thus for example if the size of our input $X$ is $(12288, 209)$ (with $m=209$ examples) then: <table style="width:100%"> <tr> <td> </td> <td> **Shape of W** </td> <td> **Shape of b** </td> <td> **Activation** </td> <td> **Shape of Activation** </td> <tr> <tr> <td> **Layer 1** </td> <td> $(n^{[1]},12288)$ </td> <td> $(n^{[1]},1)$ </td> <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> <td> $(n^{[1]},209)$ </td> <tr> <tr> <td> **Layer 2** </td> <td> $(n^{[2]}, n^{[1]})$ </td> <td> $(n^{[2]},1)$ </td> <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> <td> $(n^{[2]}, 209)$ </td> <tr> <tr> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$ </td> <td> $\vdots$</td> <td> $\vdots$ </td> <tr> <tr> <td> **Layer L-1** </td> <td> $(n^{[L-1]}, n^{[L-2]})$ </td> <td> $(n^{[L-1]}, 1)$ </td> <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> <td> $(n^{[L-1]}, 209)$ </td> <tr> <tr> <td> **Layer L** </td> <td> $(n^{[L]}, n^{[L-1]})$ </td> <td> $(n^{[L]}, 1)$ </td> <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td> <td> $(n^{[L]}, 209)$ </td> <tr> </table> Remember that when we compute $W X + b$ in python, it carries out broadcasting. For example, if: $$ W = \begin{bmatrix} j & k & l\\ m & n & o \\ p & q & r \end{bmatrix}\;\;\; X = \begin{bmatrix} a & b & c\\ d & e & f \\ g & h & i \end{bmatrix} \;\;\; b =\begin{bmatrix} s \\ t \\ u \end{bmatrix}\tag{2}$$ Then $WX + b$ will be: $$ WX + b = \begin{bmatrix} (ja + kd + lg) + s & (jb + ke + lh) + s & (jc + kf + li)+ s\\ (ma + nd + og) + t & (mb + ne + oh) + t & (mc + nf + oi) + t\\ (pa + qd + rg) + u & (pb + qe + rh) + u & (pc + qf + ri)+ u \end{bmatrix}\tag{3} $$ **Exercise**: Implement initialization for an L-layer Neural Network. **Instructions**: - The model's structure is *[LINEAR -> RELU] $ \times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function. - Use random initialization for the weight matrices. Use `np.random.rand(shape) * 0.01`. - Use zeros initialization for the biases. Use `np.zeros(shape)`. - We will store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for the "Planar Data classification model" from last week would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. Thus means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! - Here is the implementation for $L=1$ (one layer neural network). It should inspire you to implement the general case (L-layer neural network). 
```python if L == 1: parameters["W" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01 parameters["b" + str(L)] = np.zeros((layer_dims[1], 1)) ``` ``` # GRADED FUNCTION: initialize_parameters_deep def initialize_parameters_deep(layer_dims): """ Arguments: layer_dims -- python array (list) containing the dimensions of each layer in our network Returns: parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL": Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1]) bl -- bias vector of shape (layer_dims[l], 1) """ np.random.seed(3) parameters = {} L = len(layer_dims) # number of layers in the network for l in range(1, L): ### START CODE HERE ### (≈ 2 lines of code) parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01 parameters['b' + str(l)] = np.zeros((layer_dims[l], 1)) ### END CODE HERE ### assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])) assert(parameters['b' + str(l)].shape == (layer_dims[l], 1)) return parameters parameters = initialize_parameters_deep([5,4,3]) print("W1 = " + str(parameters["W1"])) print("b1 = " + str(parameters["b1"])) print("W2 = " + str(parameters["W2"])) print("b2 = " + str(parameters["b2"])) ``` **Expected output**: <table style="width:80%"> <tr> <td> **W1** </td> <td>[[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388] [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218] [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034] [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]</td> </tr> <tr> <td>**b1** </td> <td>[[ 0.] [ 0.] [ 0.] [ 0.]]</td> </tr> <tr> <td>**W2** </td> <td>[[-0.01185047 -0.0020565 0.01486148 0.00236716] [-0.01023785 -0.00712993 0.00625245 -0.00160513] [-0.00768836 -0.00230031 0.00745056 0.01976111]]</td> </tr> <tr> <td>**b2** </td> <td>[[ 0.] [ 0.] [ 0.]]</td> </tr> </table> ## 4 - Forward propagation module ### 4.1 - Linear Forward Now that you have initialized your parameters, you will do the forward propagation module. You will start by implementing some basic functions that you will use later when implementing the model. You will complete three functions in this order: - LINEAR - LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID (whole model) The linear forward module (vectorized over all the examples) computes the following equations: $$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\tag{4}$$ where $A^{[0]} = X$. **Exercise**: Build the linear part of forward propagation. **Reminder**: The mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help. ``` # GRADED FUNCTION: linear_forward def linear_forward(A, W, b): """ Implement the linear part of a layer's forward propagation. 
Arguments: A -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) Returns: Z -- the input of the activation function, also called pre-activation parameter cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently """ ### START CODE HERE ### (≈ 1 line of code) Z = np.dot(W,A) + b ### END CODE HERE ### assert(Z.shape == (W.shape[0], A.shape[1])) cache = (A, W, b) return Z, cache A, W, b = linear_forward_test_case() Z, linear_cache = linear_forward(A, W, b) print("Z = " + str(Z)) ``` **Expected output**: <table style="width:35%"> <tr> <td> **Z** </td> <td> [[ 3.26295337 -1.23429987]] </td> </tr> </table> ### 4.2 - Linear-Activation Forward In this notebook, you will use two activation functions: - **Sigmoid**: $\sigma(Z) = \sigma(W A + b) = \frac{1}{ 1 + e^{-(W A + b)}}$. We have provided you with the `sigmoid` function. This function returns **two** items: the activation value "`a`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = sigmoid(Z) ``` - **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. We have provided you with the `relu` function. This function returns **two** items: the activation value "`A`" and a "`cache`" that contains "`Z`" (it's what we will feed in to the corresponding backward function). To use it you could just call: ``` python A, activation_cache = relu(Z) ``` For more convenience, you are going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you will implement a function that does the LINEAR forward step followed by an ACTIVATION forward step. **Exercise**: Implement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation "g" can be sigmoid() or relu(). Use linear_forward() and the correct activation function. ``` # GRADED FUNCTION: linear_activation_forward def linear_activation_forward(A_prev, W, b, activation): """ Implement the forward propagation for the LINEAR->ACTIVATION layer Arguments: A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples) W -- weights matrix: numpy array of shape (size of current layer, size of previous layer) b -- bias vector, numpy array of shape (size of the current layer, 1) activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: A -- the output of the activation function, also called the post-activation value cache -- a python dictionary containing "linear_cache" and "activation_cache"; stored for computing the backward pass efficiently """ if activation == "sigmoid": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". ### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = sigmoid(Z) ### END CODE HERE ### elif activation == "relu": # Inputs: "A_prev, W, b". Outputs: "A, activation_cache". 
### START CODE HERE ### (≈ 2 lines of code) Z, linear_cache = linear_forward(A_prev, W, b) A, activation_cache = relu(Z) ### END CODE HERE ### assert (A.shape == (W.shape[0], A_prev.shape[1])) cache = (linear_cache, activation_cache) return A, cache A_prev, W, b = linear_activation_forward_test_case() A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid") print("With sigmoid: A = " + str(A)) A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu") print("With ReLU: A = " + str(A)) ``` **Expected output**: <table style="width:35%"> <tr> <td> **With sigmoid: A ** </td> <td > [[ 0.96890023 0.11013289]]</td> </tr> <tr> <td> **With ReLU: A ** </td> <td > [[ 3.43896131 0. ]]</td> </tr> </table> **Note**: In deep learning, the "[LINEAR->ACTIVATION]" computation is counted as a single layer in the neural network, not two layers. ### d) L-Layer Model For even more convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID. <img src="images/model_architecture_kiank.png" style="width:600px;height:300px;"> <caption><center> **Figure 2** : *[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br> **Exercise**: Implement the forward propagation of the above model. **Instruction**: In the code below, the variable `AL` will denote $A^{[L]} = \sigma(Z^{[L]}) = \sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\hat{Y}$.) **Tips**: - Use the functions you had previously written - Use a for loop to replicate [LINEAR->RELU] (L-1) times - Don't forget to keep track of the caches in the "caches" list. To add a new value `c` to a `list`, you can use `list.append(c)`. ``` # GRADED FUNCTION: L_model_forward def L_model_forward(X, parameters): """ Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation Arguments: X -- data, numpy array of shape (input size, number of examples) parameters -- output of initialize_parameters_deep() Returns: AL -- last post-activation value caches -- list of caches containing: every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2) the cache of linear_sigmoid_forward() (there is one, indexed L-1) """ caches = [] A = X L = len(parameters) // 2 # number of layers in the neural network # Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list. for l in range(1, L): A_prev = A ### START CODE HERE ### (≈ 2 lines of code) A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu") caches.append(cache) ### END CODE HERE ### # Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list. ### START CODE HERE ### (≈ 2 lines of code) AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid") caches.append(cache) ### END CODE HERE ### assert(AL.shape == (1,X.shape[1])) return AL, caches X, parameters = L_model_forward_test_case_2hidden() AL, caches = L_model_forward(X, parameters) print("AL = " + str(AL)) print("Length of caches list = " + str(len(caches))) ``` <table style="width:50%"> <tr> <td> **AL** </td> <td > [[ 0.03921668 0.70498921 0.19734387 0.04728177]]</td> </tr> <tr> <td> **Length of caches list ** </td> <td > 3 </td> </tr> </table> Great! 
Now you have a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in "caches". Using $A^{[L]}$, you can compute the cost of your predictions. ## 5 - Cost function Now you will implement forward and backward propagation. You need to compute the cost, because you want to check if your model is actually learning. **Exercise**: Compute the cross-entropy cost $J$, using the following formula: $$-\frac{1}{m} \sum\limits_{i = 1}^{m} (y^{(i)}\log\left(a^{[L] (i)}\right) + (1-y^{(i)})\log\left(1- a^{[L](i)}\right)) \tag{7}$$ ``` # GRADED FUNCTION: compute_cost def compute_cost(AL, Y): """ Implement the cost function defined by equation (7). Arguments: AL -- probability vector corresponding to your label predictions, shape (1, number of examples) Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples) Returns: cost -- cross-entropy cost """ m = Y.shape[1] # Compute loss from aL and y. ### START CODE HERE ### (≈ 1 lines of code) logprobs = np.multiply(np.log(AL),Y) + np.multiply(np.log(1-AL),1-Y) cost = - np.sum(logprobs) / m ### END CODE HERE ### cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17). assert(cost.shape == ()) return cost Y, AL = compute_cost_test_case() print("cost = " + str(compute_cost(AL, Y))) ``` **Expected Output**: <table> <tr> <td>**cost** </td> <td> 0.41493159961539694</td> </tr> </table> ## 6 - Backward propagation module Just like with forward propagation, you will implement helper functions for backpropagation. Remember that back propagation is used to calculate the gradient of the loss function with respect to the parameters. **Reminder**: <img src="images/backprop_kiank.png" style="width:650px;height:250px;"> <caption><center> **Figure 3** : Forward and Backward propagation for *LINEAR->RELU->LINEAR->SIGMOID* <br> *The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.* </center></caption> <!-- For those of you who are expert in calculus (you don't need to be to do this assignment), the chain rule of calculus can be used to derive the derivative of the loss $\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows: $$\frac{d \mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \frac{d\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\frac{{da^{[2]}}}{{dz^{[2]}}}\frac{{dz^{[2]}}}{{da^{[1]}}}\frac{{da^{[1]}}}{{dz^{[1]}}} \tag{8} $$ In order to calculate the gradient $dW^{[1]} = \frac{\partial L}{\partial W^{[1]}}$, you use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial W^{[1]}}$. During the backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted. Equivalently, in order to calculate the gradient $db^{[1]} = \frac{\partial L}{\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \times \frac{\partial z^{[1]} }{\partial b^{[1]}}$. This is why we talk about **backpropagation**. 
!--> Now, similar to forward propagation, you are going to build the backward propagation in three steps: - LINEAR backward - LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation - [LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model) ### 6.1 - Linear backward For layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation). Suppose you have already calculated the derivative $dZ^{[l]} = \frac{\partial \mathcal{L} }{\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]} dA^{[l-1]})$. <img src="images/linearback_kiank.png" style="width:250px;height:300px;"> <caption><center> **Figure 4** </center></caption> The three outputs $(dW^{[l]}, db^{[l]}, dA^{[l]})$ are computed using the input $dZ^{[l]}$.Here are the formulas you need: $$ dW^{[l]} = \frac{\partial \mathcal{L} }{\partial W^{[l]}} = \frac{1}{m} dZ^{[l]} A^{[l-1] T} \tag{8}$$ $$ db^{[l]} = \frac{\partial \mathcal{L} }{\partial b^{[l]}} = \frac{1}{m} \sum_{i = 1}^{m} dZ^{[l](i)}\tag{9}$$ $$ dA^{[l-1]} = \frac{\partial \mathcal{L} }{\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \tag{10}$$ **Exercise**: Use the 3 formulas above to implement linear_backward(). ``` # GRADED FUNCTION: linear_backward def linear_backward(dZ, cache): """ Implement the linear portion of backward propagation for a single layer (layer l) Arguments: dZ -- Gradient of the cost with respect to the linear output (of current layer l) cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ A_prev, W, b = cache m = A_prev.shape[1] ### START CODE HERE ### (≈ 3 lines of code) dW = np.dot(dZ, A_prev.T) * 1. / m db = 1. / m * np.sum(dZ, axis=1, keepdims=True) dA_prev = np.dot(W.T, dZ) ### END CODE HERE ### assert (dA_prev.shape == A_prev.shape) assert (dW.shape == W.shape) assert (db.shape == b.shape) return dA_prev, dW, db # Set up some test inputs dZ, linear_cache = linear_backward_test_case() dA_prev, dW, db = linear_backward(dZ, linear_cache) print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ``` **Expected Output**: <table style="width:90%"> <tr> <td> **dA_prev** </td> <td > [[ 0.51822968 -0.19517421] [-0.40506361 0.15255393] [ 2.37496825 -0.89445391]] </td> </tr> <tr> <td> **dW** </td> <td > [[-0.10076895 1.40685096 1.64992505]] </td> </tr> <tr> <td> **db** </td> <td> [[ 0.50629448]] </td> </tr> </table> ### 6.2 - Linear-Activation backward Next, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. To help you implement `linear_activation_backward`, we provided two backward functions: - **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows: ```python dZ = sigmoid_backward(dA, activation_cache) ``` - **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows: ```python dZ = relu_backward(dA, activation_cache) ``` If $g(.)$ is the activation function, `sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}) \tag{11}$$. 
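Although both helpers are provided for you, it can be useful to see roughly what they compute. The sketch below is only an illustration of equation (11) and assumes the activation cache simply stores the pre-activation value $Z$ from the forward pass; it is not necessarily identical to the provided implementation.

```python
import numpy as np

def relu_backward(dA, activation_cache):
    # assumes activation_cache holds Z from the forward pass
    Z = activation_cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0            # g'(Z) is 0 where Z <= 0 and 1 elsewhere
    return dZ

def sigmoid_backward(dA, activation_cache):
    # assumes activation_cache holds Z from the forward pass
    Z = activation_cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)     # g'(Z) = sigma(Z) * (1 - sigma(Z))
    return dZ
```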
**Exercise**: Implement the backpropagation for the *LINEAR->ACTIVATION* layer. ``` # GRADED FUNCTION: linear_activation_backward def linear_activation_backward(dA, cache, activation): """ Implement the backward propagation for the LINEAR->ACTIVATION layer. Arguments: dA -- post-activation gradient for current layer l cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu" Returns: dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev dW -- Gradient of the cost with respect to W (current layer l), same shape as W db -- Gradient of the cost with respect to b (current layer l), same shape as b """ linear_cache, activation_cache = cache if activation == "relu": ### START CODE HERE ### (≈ 2 lines of code) dZ = relu_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### elif activation == "sigmoid": ### START CODE HERE ### (≈ 2 lines of code) dZ = sigmoid_backward(dA, activation_cache) dA_prev, dW, db = linear_backward(dZ, linear_cache) ### END CODE HERE ### return dA_prev, dW, db AL, linear_activation_cache = linear_activation_backward_test_case() dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid") print ("sigmoid:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db) + "\n") dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu") print ("relu:") print ("dA_prev = "+ str(dA_prev)) print ("dW = " + str(dW)) print ("db = " + str(db)) ``` **Expected output with sigmoid:** <table style="width:100%"> <tr> <td > dA_prev </td> <td >[[ 0.11017994 0.01105339] [ 0.09466817 0.00949723] [-0.05743092 -0.00576154]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.10266786 0.09778551 -0.01968084]] </td> </tr> <tr> <td > db </td> <td > [[-0.05729622]] </td> </tr> </table> **Expected output with relu:** <table style="width:100%"> <tr> <td > dA_prev </td> <td > [[ 0.44090989 0. ] [ 0.37883606 0. ] [-0.2298228 0. ]] </td> </tr> <tr> <td > dW </td> <td > [[ 0.44513824 0.37371418 -0.10478989]] </td> </tr> <tr> <td > db </td> <td > [[-0.20837892]] </td> </tr> </table> ### 6.3 - L-Model Backward Now you will implement the backward function for the whole network. Recall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you will use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you will iterate through all the hidden layers backward, starting from layer $L$. On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. <img src="images/mn_backward.png" style="width:450px;height:300px;"> <caption><center> **Figure 5** : Backward pass </center></caption> ** Initializing backpropagation**: To backpropagate through this network, we know that the output is, $A^{[L]} = \sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \frac{\partial \mathcal{L}}{\partial A^{[L]}}$. 
To do so, use this formula (derived using calculus which you don't need in-depth knowledge of): ```python dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL ``` You can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). After that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : $$grads["dW" + str(l)] = dW^{[l]}\tag{15} $$ For example, for $l=3$ this would store $dW^{[l]}$ in `grads["dW3"]`. **Exercise**: Implement backpropagation for the *[LINEAR->RELU] $\times$ (L-1) -> LINEAR -> SIGMOID* model. ``` # GRADED FUNCTION: L_model_backward def L_model_backward(AL, Y, caches): """ Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group Arguments: AL -- probability vector, output of the forward propagation (L_model_forward()) Y -- true "label" vector (containing 0 if non-cat, 1 if cat) caches -- list of caches containing: every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2) the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1]) Returns: grads -- A dictionary with the gradients grads["dA" + str(l)] = ... grads["dW" + str(l)] = ... grads["db" + str(l)] = ... """ grads = {} L = len(caches) # the number of layers m = AL.shape[1] Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL # Initializing the backpropagation ### START CODE HERE ### (1 line of code) dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL ### END CODE HERE ### # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"] ### START CODE HERE ### (approx. 2 lines) current_cache = caches[-1] grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid") ### END CODE HERE ### for l in reversed(range(L-1)): # lth layer: (RELU -> LINEAR) gradients. # Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)] ### START CODE HERE ### (approx. 5 lines) current_cache = caches[l] dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu") grads["dA" + str(l + 1)] = dA_prev_temp grads["dW" + str(l + 1)] = dW_temp grads["db" + str(l + 1)] = db_temp ### END CODE HERE ### return grads AL, Y_assess, caches = L_model_backward_test_case() grads = L_model_backward(AL, Y_assess, caches) print_grads(grads) ``` **Expected Output** <table style="width:60%"> <tr> <td > dW1 </td> <td > [[ 0.41010002 0.07807203 0.13798444 0.10502167] [ 0. 0. 0. 0. ] [ 0.05283652 0.01005865 0.01777766 0.0135308 ]] </td> </tr> <tr> <td > db1 </td> <td > [[-0.22007063] [ 0. 
] [-0.02835349]] </td> </tr> <tr> <td > dA1 </td> <td > [[ 0.12913162 -0.44014127] [-0.14175655 0.48317296] [ 0.01663708 -0.05670698]] </td> </tr> </table> ### 6.4 - Update Parameters In this section you will update the parameters of the model, using gradient descent: $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{16}$$ $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{17}$$ where $\alpha$ is the learning rate. After computing the updated parameters, store them in the parameters dictionary. **Exercise**: Implement `update_parameters()` to update your parameters using gradient descent. **Instructions**: Update parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. ``` # GRADED FUNCTION: update_parameters def update_parameters(parameters, grads, learning_rate): """ Update parameters using gradient descent Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients, output of L_model_backward Returns: parameters -- python dictionary containing your updated parameters parameters["W" + str(l)] = ... parameters["b" + str(l)] = ... """ L = len(parameters) // 2 # number of layers in the neural network # Update rule for each parameter. Use a for loop. ### START CODE HERE ### (≈ 3 lines of code) for l in range(1,L+1): parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)] parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)] ### END CODE HERE ### return parameters parameters, grads = update_parameters_test_case() parameters = update_parameters(parameters, grads, 0.1) print ("W1 = "+ str(parameters["W1"])) print ("b1 = "+ str(parameters["b1"])) print ("W2 = "+ str(parameters["W2"])) print ("b2 = "+ str(parameters["b2"])) ``` **Expected Output**: <table style="width:100%"> <tr> <td > W1 </td> <td > [[-0.59562069 -0.09991781 -2.14584584 1.82662008] [-1.76569676 -0.80627147 0.51115557 -1.18258802] [-1.0535704 -0.86128581 0.68284052 2.20374577]] </td> </tr> <tr> <td > b1 </td> <td > [[-0.04659241] [-1.28888275] [ 0.53405496]] </td> </tr> <tr> <td > W2 </td> <td > [[-0.55569196 0.0354055 1.32964895]]</td> </tr> <tr> <td > b2 </td> <td > [[-0.84610769]] </td> </tr> </table> ## 7 - Conclusion Congrats on implementing all the functions required for building a deep neural network! We know it was a long assignment but going forward it will only get better. The next part of the assignment is easier. In the next assignment you will put all these together to build two models: - A two-layer neural network - An L-layer neural network You will in fact use these models to classify cat vs non-cat images!
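As a preview of how these functions fit together, here is a minimal sketch of a training loop chaining the pieces implemented above. It assumes `initialize_parameters_deep` (the initializer referenced earlier in this notebook) takes a list of layer sizes; the hyperparameter values are purely illustrative and this is not the graded model of the next assignment.

```python
def L_layer_model_sketch(X, Y, layers_dims, learning_rate=0.0075, num_iterations=2500):
    # illustrative sketch only -- assumes initialize_parameters_deep(layers_dims)
    parameters = initialize_parameters_deep(layers_dims)
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)          # forward pass
        cost = compute_cost(AL, Y)                           # cross-entropy cost
        grads = L_model_backward(AL, Y, caches)              # backward pass
        parameters = update_parameters(parameters, grads, learning_rate)
        if i % 100 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
    return parameters
```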
github_jupyter
``` import time import numpy as np import random def write_table2sql(table, engine, sql=None): def select_col_agg(mask): """ select col agg pair :return: """ col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num) * mask) sel_type = table['types'][sel_idx] if sel_type == 'text': sel_agg = random.sample([0, 3], 1) else: sel_agg = random.sample([0,1,2,3,4,5], 1) sel_agg = sel_agg[0] return sel_idx, sel_agg def select_cond_op(type): if type == 'text': return 0 else: flag = random.randint(0, 2) return flag datas = [] for j in range(1): data = {} sql = {} agg = None sel = None conds = [] data['table_id'] = table['id'] mask = np.asarray([1] * len(table['header'])) ret = None # make sure at least one condition cnt = 0 while(1): cnt += 1 col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num)) sel_type = table['types'][sel_idx] cond_op = select_cond_op(sel_type) rows = table['rows'] if len(rows) == 0: return [] row_num = len(rows) select_row = random.randint(0, row_num-1) cond_value = rows[select_row][sel_idx] if len(str(cond_value).split()) > 20 or str(cond_value) == '': continue conds.append([sel_idx, cond_op, cond_value]) start = time.time() ret = engine.execute(table['id'], 0, 0, conds, ret_rows=True) if time.time() - start > 1: mask[sel_idx] = -1 break if len(ret) != 0: mask[sel_idx] = -1 break conds.pop() if len(ret) != 0: for i in range(min(3, len(ret[0])-1)): col_num = len(table['header']) sel_idx = np.argmax(np.random.rand(col_num) * mask) sel_type = table['types'][sel_idx] cond_op = select_cond_op(sel_type) rows = ret row_num = len(rows) select_row = random.randint(0, row_num-1) cond_value = list(rows[select_row])[sel_idx] conds.append([sel_idx, cond_op, cond_value]) ret = engine.execute(table['id'], 0, 0, conds, ret_rows=True) # result doesn't change if len(ret) == row_num: conds.pop() break if len(str(cond_value).split()) > 20 or str(cond_value) == '': conds.pop() break mask[sel_idx] = -1 if len(ret) == 0: break sel_idx, sel_agg = select_col_agg(mask) sel = sel_idx agg = sel_agg sql['agg'] = agg sql['sel'] = sel sql['conds'] = conds data['sql'] = sql question = sql2qst(sql, table) data['question'] = question datas.append(data) return datas op_sql_dict = {0: "=", 1: ">", 2: "<", 3: "OP"} agg_sql_dict = {0: "", 1: "MAX", 2: "MIN", 3: "COUNT", 4: "SUM", 5: "AVG"} agg_str_dict = {0: "What is ", 1: "What is the maximum of ", 2: "What is the minimum ", 3: "What is the number of ", 4: "What is the sum of ", 5: "What is the average of "} op_str_dict = {0: "is", 1: "is more than", 2: "is less than", 3: ""} def sql2qst(sql, table): select_index = sql['sel'] aggregation_index = sql['agg'] conditions = sql['conds'] # select part select_part = "" select_str = table['header'][select_index] agg_str = agg_str_dict[aggregation_index] select_part += '{}{}'.format(agg_str, select_str) # where part where_part = [] for col_index, op, val in conditions: cond_col = table['header'][col_index] where_part.append('{} {} {}'.format(cond_col, op_str_dict[op], val)) # print('where part:', where_part) final_question = "{} that {}".format(select_part, ' and '.join(where_part)) # print('final question:', final_question) return final_question import records from sqlalchemy import * import re, time from babel.numbers import parse_decimal, NumberFormatError schema_re = re.compile(r'\((.+)\)') # group (.......) dfdf (.... )group num_re = re.compile(r'[-+]?\d*\.\d+|\d+') # ? zero or one time appear of preceding character, * zero or several time appear of preceding character. 
# Catch something like -34.34, .4543, # | is 'or' agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG'] cond_ops = ['=', '>', '<', 'OP'] class DBEngine: def __init__(self, fdb): self.db = create_engine('sqlite:///{}'.format(fdb)) self.conn = self.db.connect() self.table_id = '' self.schema_str = '' def execute_query(self, table_id, query, *args, **kwargs): return self.execute(table_id, query.sel_index, query.agg_index, query.conditions, *args, **kwargs) def execute(self, table_id, select_index, aggregation_index, conditions, lower=True, ret_rows=False): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) start = time.time() if table_id != self.table_id: self.table_id = table_id table_info = self.conn.execute('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).fetchall()[0].sql.replace('\n','') self.schema_str = schema_re.findall(table_info)[0] schema = {} for tup in self.schema_str.split(', '): c, t = tup.split() schema[c] = t select = 'col{}'.format(select_index) agg = agg_ops[aggregation_index] if agg: select = '{}({})'.format(agg, select) if ret_rows is True: select = '*' where_clause = [] where_map = {} for col_index, op, val in conditions: if lower and (isinstance(val, str) or isinstance(val, str)): val = val.lower() if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)): try: # print('!!!!!!value of val is: ', val, 'type is: ', type(val)) # val = float(parse_decimal(val)) # somehow it generates error. val = float(parse_decimal(val, locale='en_US')) # print('!!!!!!After: val', val) except NumberFormatError as e: try: val = float(num_re.findall(val)[0]) # need to understand and debug this part. except: # Although column is of number, selected one is not number. Do nothing in this case. pass where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index)) where_map['col{}'.format(col_index)] = val where_str = '' if where_clause: where_str = 'WHERE ' + ' AND '.join(where_clause) query = 'SELECT {} FROM {} {}'.format(select, table_id, where_str) out = self.conn.execute(query, **where_map) if ret_rows is False: return [o[0] for o in out] return [o for o in out] def execute_return_query(self, table_id, select_index, aggregation_index, conditions, lower=True): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) table_info = self.db.query('SELECT sql from sqlite_master WHERE tbl_name = :name', name=table_id).all()[0].sql.replace('\n','') schema_str = schema_re.findall(table_info)[0] schema = {} for tup in schema_str.split(', '): c, t = tup.split() schema[c] = t select = 'col{}'.format(select_index) agg = agg_ops[aggregation_index] if agg: select = '{}({})'.format(agg, select) where_clause = [] where_map = {} for col_index, op, val in conditions: if lower and (isinstance(val, str) or isinstance(val, str)): val = val.lower() if schema['col{}'.format(col_index)] == 'real' and not isinstance(val, (int, float)): try: # print('!!!!!!value of val is: ', val, 'type is: ', type(val)) # val = float(parse_decimal(val)) # somehow it generates error. 
val = float(parse_decimal(val, locale='en_US')) # print('!!!!!!After: val', val) except NumberFormatError as e: val = float(num_re.findall(val)[0]) where_clause.append('col{} {} :col{}'.format(col_index, cond_ops[op], col_index)) where_map['col{}'.format(col_index)] = val where_str = '' if where_clause: where_str = 'WHERE ' + ' AND '.join(where_clause) query = 'SELECT {} AS result FROM {} {}'.format(select, table_id, where_str) #print query out = self.db.query(query, **where_map) # return [o.result for o in out], query return [o[0] for o in out], query def show_table(self, table_id): if not table_id.startswith('table'): table_id = 'table_{}'.format(table_id.replace('-', '_')) rows = self.db.query('select * from ' +table_id) print(rows.dataset) import json class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) import numpy as np with open('train.tables.jsonl', 'r') as fr: tables = [] for table in fr: table = json.loads(table) if '' in table['header']: continue tables.append(table) engine = DBEngine('train.db') with open("train_augment.jsonl","w") as f: for i in range(1000): probs = np.random.rand(len(tables)) table_i = tables[np.argmax(probs)] data = write_table2sql(table_i, engine) if len(data) == 0: print('couldnt find a valid sql!') for js in data: js["phase"] = js["table_id"][0] agg_str = ['', 'max ', 'min ', 'count ', 'sum ', 'avg '] op_str = ['=', '>', '<'] js1 = {} sql_str = '' sql_str += 'select ' sql_str += agg_str[js['sql']['agg']] sql_str += table_i['header'][js['sql']['sel']].lower() + ' ' sql_str += 'where ' for j in range(len(js['sql']['conds'])): sql_str += table_i['header'][js['sql']['conds'][j][0]].lower() + ' ' sql_str += op_str[js['sql']['conds'][j][1]] + ' ' sql_str += str(js['sql']['conds'][j][2]).lower() if len(js['sql']['conds']) > 1 and j != len(js['sql']['conds']) - 1: sql_str += ' and ' src = sql_str.split(' ') trg = js['question'].lower().split(' ') while (trg[-1] == ''): trg = trg[:-1] if trg[-1][-1] == '?': trg[-1] = trg[-1][:-1] trg += ['?'] js['src'] = src js['trg'] = trg f.write(json.dumps(js, cls=NpEncoder) + '\n') print('finished!') ```
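For reference, here is a small standalone call showing how `sql2qst` verbalizes a generated query. The table and SQL dict below are made-up toy values for illustration, not entries from `train.tables.jsonl`.

```
toy_table = {'header': ['Player', 'Country']}
toy_sql = {'sel': 0, 'agg': 3, 'conds': [[1, 0, 'France']]}   # agg 3 -> COUNT, op 0 -> "is"

print(sql2qst(toy_sql, toy_table))
# What is the number of Player that Country is France
```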
github_jupyter
###### Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) Daniel Koehn based on Jupyter notebooks by Marc Spiegelman [Dynamical Systems APMA 4101](https://github.com/mspieg/dynamical-systems) and Kyle Mandli from his course [Introduction to numerical methods](https://github.com/mandli/intro-numerical-methods), notebook style sheet by L.A. Barba, N.C. Clementi [Engineering Computations](https://github.com/engineersCode) ``` # Execute this cell to load the notebook's style sheet, then ignore it from IPython.core.display import HTML css_file = '../style/custom.css' HTML(open(css_file, "r").read()) ``` # Exploring the Lorenz Equations The Lorenz Equations are a 3-D dynamical system that is a simplified model of Rayleigh-Benard thermal convection. They are derived and described in detail in Edward Lorenz' 1963 paper [Deterministic Nonperiodic Flow](http://journals.ametsoc.org/doi/pdf/10.1175/1520-0469%281963%29020%3C0130%3ADNF%3E2.0.CO%3B2) in the Journal of Atmospheric Science. In their classical form they can be written \begin{equation} \begin{split} \frac{\partial X}{\partial t} &= \sigma( Y - X)\\ \frac{\partial Y}{\partial t} &= rX - Y - XZ \\ \frac{\partial Z}{\partial t} &= XY -b Z \end{split} \tag{1} \end{equation} where $\sigma$ is the "Prandtl number", $r = \mathrm{Ra}/\mathrm{Ra}_c$ is a scaled "Rayleigh number" and $b$ is a parameter that is related to the the aspect ratio of a convecting cell in the original derivation. Here, $X(t)$, $Y(t)$ and $Z(t)$ are the time dependent amplitudes of the streamfunction and temperature fields, expanded in a highly truncated Fourier Series where the streamfunction contains one cellular mode $$ \psi(x,z,t) = X(t)\sin(a\pi x)\sin(\pi z) $$ and temperature has two modes $$ \theta(x,z,t) = Y(t)\cos(a\pi x)\sin(\pi z) - Z(t)\sin(2\pi z) $$ This Jupyter notebook, will provide some simple python routines for numerical integration and visualization of the Lorenz Equations. ## Numerical solution of the Lorenz Equations We have to solve the uncoupled ordinary differential equations (1) using the finite difference method introduced in [this lecture](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/1_fd_intro.ipynb). The approach is similar to the one used in [Exercise: How to sail without wind](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/3_fd_ODE_example_sailing_wo_wind.ipynb), except that eqs.(1) are coupled ordinary differential equations, we have an additional differential equation and the RHS are more complex. Approximating the temporal derivatives in eqs. 
(1) using the **backward FD operator** \begin{equation} \frac{df}{dt} = \frac{f(t)-f(t-dt)}{dt} \notag \end{equation} with the time sample interval $dt$ leads to \begin{equation} \begin{split} \frac{X(t)-X(t-dt)}{dt} &= \sigma(Y - X)\\ \frac{Y(t)-Y(t-dt)}{dt} &= rX - Y - XZ\\ \frac{Y(t)-Y(t-dt)}{dt} &= XY -b Z\\ \end{split} \notag \end{equation} After solving for $X(t), Y(t), Z(t)$, we get the **explicit time integration scheme** for the Lorenz equations: \begin{equation} \begin{split} X(t) &= X(t-dt) + dt\; \sigma(Y - X)\\ Y(t) &= Y(t-dt) + dt\; (rX - Y - XZ)\\ Z(t) &= Z(t-dt) + dt\; (XY -b Z)\\ \end{split} \notag \end{equation} and by introducing a temporal dicretization $t^n = n * dt$ with $n \in [0,1,...,nt]$, where $nt$ denotes the maximum time steps, the final FD code becomes: \begin{equation} \begin{split} X^{n} &= X^{n-1} + dt\; \sigma(Y^{n-1} - X^{n-1})\\ Y^{n} &= Y^{n-1} + dt\; (rX^{n-1} - Y^{n-1} - X^{n-1}Z^{n-1})\\ Z^{n} &= Z^{n-1} + dt\; (X^{n-1}Y^{n-1} - b Z^{n-1})\\ \end{split} \tag{2} \end{equation} The Python implementation is quite straightforward, because we can reuse some old codes ... ##### Exercise 1 Finish the function `Lorenz`, which computes and returns the RHS of eqs. (1) for a given $X$, $Y$, $Z$. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def Lorenz(X,Y,Z,sigma,r,b): ''' Returns the RHS of the Lorenz equations ''' # ADD RHS OF LORENZ EQUATIONS (1) HERE! X_dot_rhs = Y_dot_rhs = Z_dot_rhs = # return the state derivatives return X_dot_rhs, Y_dot_rhs, Z_dot_rhs ``` Next, we write the function to solve the Lorenz equation `SolveLorenz` based on the `sailing_boring` code from the [Exercise: How to sail without wind](https://nbviewer.jupyter.org/github/daniel-koehn/Differential-equations-earth-system/blob/master/02_finite_difference_intro/3_fd_ODE_example_sailing_wo_wind.ipynb) ##### Exercise 2 Finish the FD-code implementation `SolveLorenz` ``` def SolveLorenz(tmax, dt, X0, Y0, Z0, sigma=10.,r=28.,b=8./3.0): ''' Integrate the Lorenz equations from initial condition (X0,Y0,Z0)^T at t=0 for parameters sigma, r, b Returns: X, Y, Z, time ''' # Compute number of time steps based on tmax and dt nt = (int)(tmax/dt) # vectors for storage of X, Y, Z positions and time t X = np.zeros(nt + 1) Y = np.zeros(nt + 1) Z = np.zeros(nt + 1) t = np.zeros(nt + 1) # define initial position and time X[0] = X0 Y[0] = Y0 Z[0] = Z0 # start time stepping over time samples n for n in range(1,nt + 1): # compute RHS of Lorenz eqs. (1) at current position (X,Y,Z)^T X_dot_rhs, Y_dot_rhs, Z_dot_rhs = Lorenz(X[n-1],Y[n-1],Z[n-1],sigma,r,b) # compute new position using FD approximation of time derivative # ADD FD SCHEME OF THE LORENZ EQS. HERE! X[n] = Y[n] = Z[n] = t[n] = n * dt return X, Y, Z, t ``` Finally, we create a function to plot the solution (X,Y,Z)^T of the Lorenz eqs. ... ``` def PlotLorenzXvT(X,Y,Z,t,sigma,r,b): ''' Create time series plots of solutions of the Lorenz equations X(t),Y(t),Z(t) ''' plt.figure() ax = plt.subplot(111) ax.plot(t,X,'r',label='X') ax.plot(t,Y,'g',label='Y') ax.plot(t,Z,'b',label='Z') ax.set_xlabel('time t') plt.title('Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma,r,b)) # Shrink current axis's height by 10% on the bottom box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) # Put a legend below current axis ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),ncol=3) plt.show() ``` ... 
and a function to plot the trajectory in the **phase space portrait**: ``` def PlotLorenz3D(X,Y,Z,sigma,r,b): ''' Show 3-D Phase portrait using mplot3D ''' # do some fancy 3D plotting fig = plt.figure() ax = fig.gca(projection='3d') ax.plot(X,Y,Z) ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') plt.title('Lorenz Equations: $\sigma=${}, $r=${}, $b=${}'.format(sigma,r,b)) plt.show() ``` ##### Exercise 3 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=0.5$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 4 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=10$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 5 Solve the Lorenz equations again for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=10$. However, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(-2,-3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. How does the solution change compared to exercise 4? ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 0.01 # SET THE RAYLEIGH NUMBER HERE! 
r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 6 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=28$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. Compare with the previous results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ##### Exercise 7 In his 1963 paper Lorenz also investigated the influence of small changes of the initial conditions on the long-term evolution of the thermal convection problem for large Rayleigh numbers. Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=28$, however starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3.001,4)^T$. Plot the temporal evolution and compare with the solution of exercise 6. Describe and interpret the results. Explain why Lorenz introduced the term **Butterfly effect** based on your results. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 30 dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X1, Y1, Z1, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize differences as a time series PlotLorenzXvT(X-X1,Y-Y1,Z-Z1,t,sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X1,Y1,Z1,t,sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) ``` ##### Exercise 8 Solve the Lorenz equations for a Prandtl number $\sigma=10$, $b=8/3$ and a scaled Rayleigh number $r=350$, starting from the initial condition ${\bf{X_0}}=(X_0,Y_0,Z_0)^T=(2,3,4)^T$. Plot the temporal evolution and 3D phase potrait of the solution $(X(t),Y(t),Z(t))^T$. Mark the fix points, you derived in [Stationary Solutions of Time-Dependent Problems](http://nbviewer.ipython.org/urls/github.com/daniel-koehn/Differential-equations-earth-system/tree/master/03_Lorenz_equations/02_Stationary_solutions_of_DE.ipynb) in the 3D phase portrait. Describe and interpret the results. Compare with the previous result from exercise 8. ``` # SET THE PARAMETERS HERE! sigma= b = # SET THE INITIAL CONDITIONS HERE! X0 = Y0 = Z0 = # Set maximum integration time and sample interval dt tmax = 8. dt = 5e-4 # SET THE RAYLEIGH NUMBER HERE! r = # Solve the Equations X, Y, Z, t = SolveLorenz(tmax, dt, X0, Y0, Z0, sigma,r,b) # and Visualize as a time series PlotLorenzXvT(X,Y,Z,t,sigma,r,b) # and as a 3-D phase portrait PlotLorenz3D(X,Y,Z,sigma,r,b) ``` ## What we learned: - How to solve the Lorenz equations using a simple finite-difference scheme. 
- How to visualize the solution of ordinary differential equations using the temporal evolution and the phase portrait.
- Exploring the dynamics of non-linear differential equations and the sensitivity of the system's long-term evolution to small changes in the initial conditions.
- Why physicists can only predict the time evolution of complex dynamical systems to a limited extent.
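For readers who want to check their work after attempting the exercises above, one possible completion of the `Lorenz` right-hand side (Exercise 1) and of the explicit update scheme (2) used inside `SolveLorenz` (Exercise 2) is sketched below; the parameter and initial-condition choices remain part of the individual exercises.

```
def Lorenz(X, Y, Z, sigma, r, b):
    '''
    Returns the RHS of the Lorenz equations (1)
    '''
    X_dot_rhs = sigma * (Y - X)
    Y_dot_rhs = r * X - Y - X * Z
    Z_dot_rhs = X * Y - b * Z

    return X_dot_rhs, Y_dot_rhs, Z_dot_rhs

# inside the time loop of SolveLorenz, the explicit scheme (2) then reads:
#     X[n] = X[n-1] + dt * X_dot_rhs
#     Y[n] = Y[n-1] + dt * Y_dot_rhs
#     Z[n] = Z[n-1] + dt * Z_dot_rhs
#     t[n] = n * dt
```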
github_jupyter
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i> <i>Licensed under the MIT License.</i> # Vowpal Wabbit Deep Dive <center> <img src="https://github.com/VowpalWabbit/vowpal_wabbit/blob/master/logo_assets/vowpal-wabbits-github-logo.png?raw=true" height="30%" width="30%" alt="Vowpal Wabbit"> </center> [Vowpal Wabbit](https://github.com/VowpalWabbit/vowpal_wabbit) is a fast online machine learning library that implements several algorithms relevant to the recommendation use case. The main advantage of Vowpal Wabbit (VW) is that training is done in an online fashion typically using Stochastic Gradient Descent or similar variants, which allows it to scale well to very large datasets. Additionally, it is optimized to run very quickly and can support distributed training scenarios for extremely large datasets. VW is best applied to problems where the dataset is too large to fit into memory but can be stored on disk in a single node. Though distributed training is possible with additional setup and configuration of the nodes. The kinds of problems that VW handles well mostly fall into the supervised classification domain of machine learning (Linear Regression, Logistic Regression, Multiclass Classification, Support Vector Machines, Simple Neural Nets). It also supports Matrix Factorization approaches and Latent Dirichlet Allocation, as well as a few other algorithms (see the [wiki](https://github.com/VowpalWabbit/vowpal_wabbit/wiki) for more information). A good example of a typical deployment use case is a Real Time Bidding scenario, where an auction to place an ad for a user is being decided in a matter of milliseconds. Feature information about the user and items must be extracted and passed into a model to predict likelihood of click (or other interaction) in short order. And if the user and context features are constantly changing (e.g. user browser and local time of day) it may be infeasible to score every possible input combination before hand. This is where VW provides value, as a platform to explore various algorithms offline to train a highly accurate model on a large set of historical data then deploy the model into production so it can generate rapid predictions in real time. Of course this isn't the only manner VW can be deployed, it is also possible to use it entirely online where the model is constantly updating, or use active learning approaches, or work completely offline in a pre-scoring mode. <h3>Vowpal Wabbit for Recommendations</h3> In this notebook we demonstrate how to use the VW library to generate recommendations on the [Movielens](https://grouplens.org/datasets/movielens/) dataset. Several things are worth noting in how VW is being used in this notebook: By leveraging an Azure Data Science Virtual Machine ([DSVM](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/)), VW comes pre-installed and can be used directly from the command line. If you are not using a DSVM you must install vw yourself. There are also python bindings to allow VW use within a python environment and even a wrapper conforming to the SciKit-Learn Estimator API. However, the python bindings must be installed as an additional python package with Boost dependencies, so for simplicity's sake execution of VW is done via a subprocess call mimicking what would happen from the command line execution of the model. 
VW expects a specific [input format](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format), in this notebook to_vw() is a convenience function that converts the standard movielens dataset into the required data format. Datafiles are then written to disk and passed to VW for training. The examples shown are to demonstrate functional capabilities of VW not to indicate performance advantages of different approaches. There are several hyper-parameters (e.g. learning rate and regularization terms) that can greatly impact performance of VW models which can be adjusted using [command line options](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). To properly compare approaches it is helpful to learn about and tune these parameters on the relevant dataset. # 0. Global Setup ``` import sys sys.path.append('../..') import os from subprocess import run from tempfile import TemporaryDirectory from time import process_time import pandas as pd import papermill as pm from reco_utils.common.notebook_utils import is_jupyter from reco_utils.dataset.movielens import load_pandas_df from reco_utils.dataset.python_splitters import python_random_split from reco_utils.evaluation.python_evaluation import (rmse, mae, exp_var, rsquared, get_top_k_items, map_at_k, ndcg_at_k, precision_at_k, recall_at_k) print("System version: {}".format(sys.version)) print("Pandas version: {}".format(pd.__version__)) def to_vw(df, output, logistic=False): """Convert Pandas DataFrame to vw input format Args: df (pd.DataFrame): input DataFrame output (str): path to output file logistic (bool): flag to convert label to logistic value """ with open(output, 'w') as f: tmp = df.reset_index() # we need to reset the rating type to an integer to simplify the vw formatting tmp['rating'] = tmp['rating'].astype('int64') # convert rating to binary value if logistic: tmp['rating'] = tmp['rating'].apply(lambda x: 1 if x >= 3 else -1) # convert each row to VW input format (https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format) # [label] [tag]|[user namespace] [user id feature] |[item namespace] [movie id feature] # label is the true rating, tag is a unique id for the example just used to link predictions to truth # user and item namespaces separate the features to support interaction features through command line options for _, row in tmp.iterrows(): f.write('{rating} {index}|user {userID} |item {itemID}\n'.format_map(row)) def run_vw(train_params, test_params, test_data, prediction_path, logistic=False): """Convenience function to train, test, and show metrics of interest Args: train_params (str): vw training parameters test_params (str): vw testing parameters test_data (pd.dataFrame): test data prediction_path (str): path to vw prediction output logistic (bool): flag to convert label to logistic value Returns: (dict): metrics and timing information """ # train model train_start = process_time() run(train_params.split(' '), check=True) train_stop = process_time() # test model test_start = process_time() run(test_params.split(' '), check=True) test_stop = process_time() # read in predictions pred_df = pd.read_csv(prediction_path, delim_whitespace=True, names=['prediction'], index_col=1).join(test_data) test_df = test_data.copy() if logistic: # make the true label binary so that the metrics are captured correctly test_df['rating'] = test['rating'].apply(lambda x: 1 if x >= 3 else -1) else: # ensure results are integers in correct range pred_df['prediction'] = pred_df['prediction'].apply(lambda x: 
int(max(1, min(5, round(x))))) # calculate metrics result = dict() result['RMSE'] = rmse(test_df, pred_df) result['MAE'] = mae(test_df, pred_df) result['R2'] = rsquared(test_df, pred_df) result['Explained Variance'] = exp_var(test_df, pred_df) result['Train Time (ms)'] = (train_stop - train_start) * 1000 result['Test Time (ms)'] = (test_stop - test_start) * 1000 return result # create temp directory to maintain data files tmpdir = TemporaryDirectory() model_path = os.path.join(tmpdir.name, 'vw.model') saved_model_path = os.path.join(tmpdir.name, 'vw_saved.model') train_path = os.path.join(tmpdir.name, 'train.dat') test_path = os.path.join(tmpdir.name, 'test.dat') train_logistic_path = os.path.join(tmpdir.name, 'train_logistic.dat') test_logistic_path = os.path.join(tmpdir.name, 'test_logistic.dat') prediction_path = os.path.join(tmpdir.name, 'prediction.dat') all_test_path = os.path.join(tmpdir.name, 'new_test.dat') all_prediction_path = os.path.join(tmpdir.name, 'new_prediction.dat') ``` # 1. Load & Transform Data ``` # Select Movielens data size: 100k, 1m, 10m, or 20m MOVIELENS_DATA_SIZE = '100k' TOP_K = 10 # load movielens data (use the 1M dataset) df = load_pandas_df(MOVIELENS_DATA_SIZE) # split data to train and test sets, default values take 75% of each users ratings as train, and 25% as test train, test = python_random_split(df, 0.75) # save train and test data in vw format to_vw(df=train, output=train_path) to_vw(df=test, output=test_path) # save data for logistic regression (requires adjusting the label) to_vw(df=train, output=train_logistic_path, logistic=True) to_vw(df=test, output=test_logistic_path, logistic=True) ``` # 2. Regression Based Recommendations When considering different approaches for solving a problem with machine learning it is helpful to generate a baseline approach to understand how more complex solutions perform across dimensions of performance, time, and resource (memory or cpu) usage. Regression based approaches are some of the simplest and fastest baselines to consider for many ML problems. ## 2.1 Linear Regression As the data provides a numerical rating between 1-5, fitting those values with a linear regression model is easy approach. This model is trained on examples of ratings as the target variable and corresponding user ids and movie ids as independent features. By passing each user-item rating in as an example the model will begin to learn weights based on average ratings for each user as well as average ratings per item. This however can generate predicted ratings which are no longer integers, so some additional adjustments should be made at prediction time to convert them back to the integer scale of 1 through 5 if necessary. Here, this is done in the evaluate function. 
``` """ Quick description of command line parameters used Other optional parameters can be found here: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments VW uses linear regression by default, so no extra command line options -f <model_path>: indicates where the final model file will reside after training -d <data_path>: indicates which data file to use for training or testing --quiet: this runs vw in quiet mode silencing stdout (for debugging it's helpful to not use quiet mode) -i <model_path>: indicates where to load the previously model file created during training -t: this executes inference only (no learned updates to the model) -p <prediction_path>: indicates where to store prediction output """ train_params = 'vw -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) # save these results for later use during top-k analysis test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = pd.DataFrame(result, index=['Linear Regression']) comparison ``` ## 2.2 Linear Regression with Interaction Features Previously we treated the user features and item features independently, but taking into account interactions between features can provide a mechanism to learn more fine grained preferences of the users. To generate interaction features use the quadratic command line argument and specify the namespaces that should be combined: '-q ui' combines the user and item namespaces based on the first letter of each. Currently the userIDs and itemIDs used are integers which means the feature ID is used directly, for instance when user ID 123 rates movie 456, the training example puts a 1 in the values for features 123 and 456. However when interaction is specified (or if a feature is a string) the resulting interaction feature is hashed into the available feature space. Feature hashing is a way to take a very sparse high dimensional feature space and reduce it into a lower dimensional space. This allows for reduced memory while retaining fast computation of feature and model weights. The caveat with feature hashing, is that it can lead to hash collisions, where separate features are mapped to the same location. In this case it can be beneficial to increase the size of the space to support interactions between features of high cardinality. The available feature space is dictated by the --bit_precision (-b) <N> argument. Where the total available space for all features in the model is 2<sup>N</sup>. See [Feature Hashing and Extraction](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Feature-Hashing-and-Extraction) for more details. 
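As a toy illustration of the hashing idea (separate from the actual VW run below), the snippet sketches how an interaction feature collapses to a single index in a bounded weight table of size 2<sup>N</sup>. The `toy_hash` function is a hypothetical stand-in; VW's real hashing (MurmurHash-based) and its feature-name encoding differ.

```
b = 26                    # corresponds to --bit_precision / -b
table_size = 2 ** b       # 67,108,864 available weight slots

def toy_hash(feature_name, bits=b):
    # hypothetical stand-in for VW's internal hash function;
    # Python's built-in hash is salted per process, so the index changes between runs
    return hash(feature_name) % (2 ** bits)

# a user-item interaction maps to one slot; unrelated features may collide
print(toy_hash('user123^item456'))
```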
``` """ Quick description of command line parameters used -b <N>: sets the memory size to 2<sup>N</sup> entries -q <ab>: create quadratic feature interactions between features in namespaces starting with 'a' and 'b' """ train_params = 'vw -b 26 -q ui -f {model} -d {data} --quiet'.format(model=saved_model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=saved_model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) saved_result = result comparison = comparison.append(pd.DataFrame(result, index=['Linear Regression w/ Interaction'])) comparison ``` ## 2.3 Multinomial Logistic Regression An alternative to linear regression is to leverage multinomial logistic regression, or multiclass classification, which treats each rating value as a distinct class. This avoids any non integer results, but also reduces the training data for each class which could lead to poorer performance if the counts of different rating levels are skewed. Basic multiclass logistic regression can be accomplished using the One Against All approach specified by the '--oaa N' option, where N is the number of classes and proving the logistic option for the loss function to be used. ``` """ Quick description of command line parameters used --loss_function logistic: sets the model loss function for logistic regression --oaa <N>: trains N separate models using One-Against-All approach (all models are captured in the single model file) This expects the labels to be contiguous integers starting at 1 --link logistic: converts the predicted output from logit to probability The predicted output is the model (label) with the largest likelihood """ train_params = 'vw --loss_function logistic --oaa 5 -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw --link logistic -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Multinomial Regression'])) comparison ``` ## 2.4 Logistic Regression Additionally, one might simply be interested in whether the user likes or dislikes an item and we can adjust the input data to represent a binary outcome, where ratings in (1,3] are dislikes (negative results) and (3,5] are likes (positive results). This framing allows for a simple logistic regression model to be applied. To perform logistic regression the loss_function parameter is changed to 'logistic' and the target label is switched to [0, 1]. Also, be sure to set '--link logistic' during prediction to convert the logit output back to a probability value. ``` train_params = 'vw --loss_function logistic -f {model} -d {data} --quiet'.format(model=model_path, data=train_logistic_path) test_params = 'vw --link logistic -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_logistic_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path, logistic=True) comparison = comparison.append(pd.DataFrame(result, index=['Logistic Regression'])) comparison ``` # 3. Matrix Factorization Based Recommendations All of the above approaches train a regression model, but VW also supports matrix factorization with two different approaches. 
As opposed to learning direct weights for specific users, items and interactions when training a regression model, matrix factorization attempts to learn latent factors that determine how a user rates an item. An example of how this might work is if you could represent user preference and item categorization by genre. Given a smaller set of genres we can associate how much each item belongs to each genre class, and we can set weights for a user's preference for each genre. Both sets of weights could be represented as a vectors where the inner product would be the user-item rating. Matrix factorization approaches learn low rank matrices for latent features of users and items such that those matrices can be combined to approximate the original user item matrix. ## 3.1. Singular Value Decomposition Based Matrix Factorization The first approach performs matrix factorization based on Singular Value Decomposition (SVD) to learn a low rank approximation for the user-item rating matix. It is is called using the '--rank' command line argument. See the [Matrix Factorization Example](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example) for more detail. ``` """ Quick description of command line parameters used --rank <N>: sets the number of latent factors in the reduced matrix """ train_params = 'vw --rank 5 -q ui -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Matrix Factorization (Rank)'])) comparison ``` ## 3.2. Factorization Machine Based Matrix Factorization An alternative approach based on [Rendel's factorization machines](https://cseweb.ucsd.edu/classes/fa17/cse291-b/reading/Rendle2010FM.pdf) is called using '--lrq' (low rank quadratic). More LRQ details in this [demo](https://github.com/VowpalWabbit/vowpal_wabbit/tree/master/demo/movielens). This learns two lower rank matrices which are multiplied to generate an approximation of the user-item rating matrix. Compressing the matrix in this way leads to learning generalizable factors which avoids some of the limitations of using regression models with extremely sparse interaction features. This can lead to better convergence and smaller on-disk models. An additional term to improve performance is --lrqdropout which will dropout columns during training. This however tends to increase the optimal rank size. Other parameters such as L2 regularization can help avoid overfitting. ``` """ Quick description of command line parameters used --lrq <abN>: learns approximations of rank N for the quadratic interaction between namespaces starting with 'a' and 'b' --lrqdroupout: performs dropout during training to improve generalization """ train_params = 'vw --lrq ui7 -f {model} -d {data} --quiet'.format(model=model_path, data=train_path) test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=model_path, data=test_path, pred=prediction_path) result = run_vw(train_params=train_params, test_params=test_params, test_data=test, prediction_path=prediction_path) comparison = comparison.append(pd.DataFrame(result, index=['Matrix Factorization (LRQ)'])) comparison ``` # 4. Conclusion The table above shows a few of the approaches in the VW library that can be used for recommendation prediction. 
The relative performance can change when applied to different datasets and properly tuned, but it is useful to note the rapid speed at which all approaches are able to train (75,000 examples) and test (25,000 examples). # 5. Scoring After training a model with any of the above approaches, the model can be used to score potential user-pairs in offline batch mode, or in a real-time scoring mode. The example below shows how to leverage the utilities in the reco_utils directory to generate Top-K recommendations from offline scored output. ``` # First construct a test set of all items (except those seen during training) for each user users = df[['userID']].drop_duplicates() users['key'] = 1 items = df[['itemID']].drop_duplicates() items['key'] = 1 all_pairs = pd.merge(users, items, on='key').drop(columns=['key']) # now combine with training data and filter only those entries that don't match merged = pd.merge(train, all_pairs, on=["userID", "itemID"], how="outer") all_user_items = merged[merged['rating'].isnull()].copy() all_user_items['rating'] = 0 # save in vw format (this can take a while) to_vw(df=all_user_items, output=all_test_path) # run the saved model (linear regression with interactions) on the new dataset test_start = process_time() test_params = 'vw -i {model} -d {data} -t -p {pred} --quiet'.format(model=saved_model_path, data=all_test_path, pred=prediction_path) run(test_params.split(' '), check=True) test_stop = process_time() test_time = test_stop - test_start # load predictions and get top-k from previous saved results pred_data = pd.read_csv(prediction_path, delim_whitespace=True, names=['prediction'], index_col=1).join(test) pred_data['prediction'] = pred_data['prediction'].apply(lambda x: int(max(1, min(5, round(x))))) top_k = get_top_k_items(pred_data, col_rating='prediction', k=TOP_K)[['prediction', 'userID', 'itemID', 'rating']] # convert dtypes of userID and itemID columns. for col in ['userID', 'itemID']: top_k[col] = top_k[col].astype(int) top_k.head() # get ranking metrics args = [test, top_k] kwargs = dict(col_user='userID', col_item='itemID', col_rating='rating', col_prediction='prediction', relevancy_method='top_k', k=TOP_K) rank_metrics = {'MAP': map_at_k(*args, **kwargs), 'NDCG': ndcg_at_k(*args, **kwargs), 'Precision': precision_at_k(*args, **kwargs), 'Recall': recall_at_k(*args, **kwargs)} # final results all_results = ['{k}: {v}'.format(k=k, v=v) for k, v in saved_result.items()] all_results += ['{k}: {v}'.format(k=k, v=v) for k, v in rank_metrics.items()] print('\n'.join(all_results)) ``` # 6. Cleanup ``` # record results for testing if is_jupyter(): pm.record('rmse', saved_result['RMSE']) pm.record('mae', saved_result['MAE']) pm.record('rsquared', saved_result['R2']) pm.record('exp_var', saved_result['Explained Variance']) pm.record("train_time", saved_result['Train Time (ms)']) pm.record("test_time", test_time) pm.record('map', rank_metrics['MAP']) pm.record('ndcg', rank_metrics['NDCG']) pm.record('precision', rank_metrics['Precision']) pm.record('recall', rank_metrics['Recall']) tmpdir.cleanup() ``` ## References 1. John Langford, et. al. Vowpal Wabbit Wiki. URL: https://github.com/VowpalWabbit/vowpal_wabbit/wiki 2. Steffen Rendel. Factorization Machines. 2010 IEEE International Conference on Data Mining. 3. Jake Hoffman. Matrix Factorization Example. URL: https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Matrix-factorization-example 4. Paul Minero. Low Rank Quadratic Example. URL: https://github.com/VowpalWabbit/vowpal_wabbit/tree/master/demo/movielens
github_jupyter
# Peak Detection Feature detection, also referred to as peak detection, is the process by which local maxima that fulfill certain criteria (such as sufficient signal-to-noise ratio) are located in the signal acquired by a given analytical instrument. This process results in “features” associated with the analysis of molecular analytes from the sample under study or from chemical, instrument, or random noise. Typically, feature detection involves a mass dimension (*m/z*) as well as one or more separation dimensions (e.g. drift and/or retention time), the latter offering distinction among isobaric/isotopic features. DEIMoS implements an N-dimensional maximum filter from [scipy.ndimage](https://docs.scipy.org/doc/scipy/reference/ndimage.html) that convolves the instrument signal with a structuring element, also known as a kernel, and compares the result against the input array to identify local maxima as candidate features or peaks. To demonstrate, we will operate on a subset of 2D data to minimize memory usage and computation time. ``` import deimos import numpy as np import matplotlib.pyplot as plt # load data, excluding scanid column ms1 = deimos.load('example_data.h5', key='ms1', columns=['mz', 'drift_time', 'retention_time', 'intensity']) # sum over retention time ms1_2d = deimos.collapse(ms1, keep=['mz', 'drift_time']) # take a subset in m/z ms1_2d = deimos.slice(ms1_2d, by='mz', low=200, high=400) %%time # perform peak detection ms1_peaks = deimos.peakpick.local_maxima(ms1_2d, dims=['mz', 'drift_time'], bins=[9.5, 4.25]) ``` ## Selecting Kernel Size Key to this process is the selection of kernel size, which can vary by instrument, dataset, and even compound. For example, in LC-IMS-MS/MS data, peak width increases with increasing *m/z* and drift time, and also varies in retention time. Ideally, the kernel would be the same size as the N-dimensional peak (i.e. wavelets), though computational efficiency considerations for high-dimensional data currently limit the ability to dynamically adjust kernel size. Thus, the selected kernel size should be representative of likely features of interest. This process is exploratory, and selections can be further refined pending an initial processing of the data. To start, we will get a sense of our data by visualizing a high-intensity feature. ``` # get maximal data point mz_i, dt_i, rt_i, intensity_i = ms1.loc[ms1['intensity'] == ms1['intensity'].max(), :].values[0] # subset the raw data feature = deimos.slice(ms1, by=['mz', 'drift_time', 'retention_time'], low=[mz_i - 0.1, dt_i - 1, rt_i - 1], high=[mz_i + 0.2, dt_i + 1, rt_i + 2]) # visualize deimos.plot.multipanel(feature, dpi=150) plt.tight_layout() plt.show() print('{}:\t\t{}'.format('mz', len(feature['mz'].unique()))) print('{}:\t{}'.format('drift_time', len(feature['drift_time'].unique()))) print('{}:\t{}'.format('retention_time', len(feature['retention_time'].unique()))) ``` The number of sampled data points in each dimension informs selection of suitable peak detection parameters, in this case 38 values in *m/z*, 17 values in drift time, and 74 values in retention time. For the kernel to be centered on each "voxel", however, selections must be odd. Due to the multidimensional nature of the data, kernel size need not be exact: two features need only be separated in one dimension, not all dimensions simultaneously. ## Partitioning This dataset is comprised of almost 200,000 unique *m/z* values, 416 unique drift times, and 568 unique retention times. 
In order to process the data by N-dimensional filter convolution, the data frame-based coordinate format must be converted into a dense array. In this case, a dense array would comprise roughly 4.7E10 cells and, for 32-bit intensities, would require approximately 174 GB of memory.

```
print('{}:\t\t{}'.format('mz', len(ms1['mz'].unique())))
print('{}:\t{}'.format('drift_time', len(ms1['drift_time'].unique())))
print('{}:\t{}'.format('retention_time', len(ms1['retention_time'].unique())))
```

This is of course not tenable for many workstations, necessitating a partitioning utility by which the input may be split along a given dimension so that each partition is processed separately. Here, we create a `Partitions` object to divide the *m/z* dimension into chunks of 500 unique values, with a partition overlap of 0.2 Da to ameliorate artifacts arising from artificial partition "edges". Next, its `map` method is invoked to apply peak detection to each partition. The `processes` flag may also be specified to spread the computational load over multiple cores. Memory footprint scales linearly with the number of processes.

```
%%time
# partition the data
partitions = deimos.partition(ms1_2d, split_on='mz', size=500, overlap=0.2)

# map peak detection over partitions
ms1_peaks_partitioned = partitions.map(deimos.peakpick.local_maxima,
                                       dims=['mz', 'drift_time'],
                                       bins=[9.5, 4.25],
                                       processes=4)
```

With `overlap` selected appropriately, the partitioned result should be identical to the previous result.

```
all(ms1_peaks_partitioned == ms1_peaks)
```

## Kernel Scaling

Peak widths in *m/z* and drift time increase with *m/z*. In the example data used here, the sample interval in *m/z* also increases with increasing *m/z*. This means that our kernel effectively "grows" as *m/z* increases, since the kernel is specified in numbers of such intervals rather than as an *m/z* range.

```
# unique m/z values
mz_unq = np.unique(ms1_2d['mz'])

# m/z sample intervals
mz_diff = np.diff(mz_unq)

# visualize
plt.figure(dpi=150)
plt.plot(mz_unq[1:], mz_diff)
plt.xlabel('m/z', fontweight='bold')
plt.ylabel('Interval', fontweight='bold')
plt.show()
```

However, the drift time sample interval is constant throughout the acquisition. To accommodate increasing peak width in drift time, we can scale the kernel in that dimension by the *m/z* per partition, relative to a reference resolution (i.e. the minimum interval in the above). Thus, the drift time kernel size of the first partition will be scaled by a factor of 1 (no change), the last by a factor of ~1.4. This represents an advanced usage scenario and should only be considered with sufficient justification: that is, knowledge of the sample intervals in each dimension, of peak widths as a function of these sample intervals, and of whether the relationship(s) scale linearly.

```
%%time
# partition the data
partitions = deimos.partition(ms1_2d, split_on='mz', size=500, overlap=0.2)

# map peak detection over partitions
ms1_peaks_partitioned = partitions.map(deimos.peakpick.local_maxima,
                                       dims=['mz', 'drift_time'],
                                       bins=[9.5, 4.25],
                                       scale_by='mz',
                                       ref_res=mz_diff.min(),
                                       scale=['drift_time'],
                                       processes=4)
```

Note that, though we have ignored retention time, its sample interval in these data is also constant. However, there is no discernible relationship with *m/z*, thus barring use of this scaling functionality. In such cases, simply determining an average, representative kernel size is typically sufficient.
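For intuition, the maximum-filter comparison described above can be reproduced in a few lines of `scipy.ndimage`. The sketch below is illustrative only and is not the DEIMoS implementation: the random array stands in for a dense 2D intensity grid, and the kernel size and intensity threshold are arbitrary choices.

```
# Minimal sketch of maximum-filter peak picking: a cell is a candidate peak when its
# value equals the maximum within the kernel centred on it (and passes a threshold).
import numpy as np
from scipy import ndimage

rng = np.random.default_rng(0)
signal = rng.random((50, 50))    # stand-in for a dense (m/z x drift_time) intensity array
kernel = (9, 5)                  # odd kernel size per dimension

local_max = ndimage.maximum_filter(signal, size=kernel)
peaks = (signal == local_max) & (signal > 0.99)   # arbitrary intensity threshold

print('candidate peaks:', np.argwhere(peaks).shape[0])
```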
github_jupyter
``` import os import numpy as np from glob import glob from deformation_functions import * from menpo_functions import * from logging_functions import * from data_loading_functions import * from time import time from scipy.misc import imsave %matplotlib inline dataset='training' img_dir='/Users/arik/Dropbox/a_mac_thesis/face_heatmap_networks/conventional_landmark_detection_dataset/' train_crop_dir = 'crop_gt_margin_0.25' img_dir_ns=os.path.join(img_dir,train_crop_dir+'_ns') bb_dir = os.path.join(img_dir, 'Bounding_Boxes') bb_type='gt' gt = bb_type=='gt' margin = 0.25 image_size = 256 mode='TRAIN' augment_basic=True augment_texture=True augment_geom=True bb_dictionary = load_bb_dictionary(bb_dir, mode=mode, test_data=dataset) def augment_menpo_img_ns(img, img_dir_ns, p_ns=0, ns_ind=None): """texture style image augmentation using stylized copies in *img_dir_ns*""" img = img.copy() if p_ns > 0.5: ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*')) num_augs = len(ns_augs) if num_augs > 0: if ns_ind is None or ns_ind >= num_augs: ns_ind = np.random.randint(0, num_augs) ns_aug = mio.import_image(ns_augs[ns_ind]) ns_pixels = ns_aug.pixels img.pixels = ns_pixels return img def augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=0, ns_ind=None): """texture style image augmentation using stylized copies in *img_dir_ns*""" img = img.copy() if p_ns > 0.5: ns_augs = glob(os.path.join(img_dir_ns, img.path.name.split('.')[0] + '*')) num_augs = len(ns_augs) if num_augs > 0: if ns_ind is None or ns_ind >= num_augs: ns_ind = np.random.randint(0, num_augs) ns_aug = mio.import_image(ns_augs[ns_ind]) ns_pixels = ns_aug.pixels return img def augment_menpo_img_geom_dont_apply(img, p_geom=0): """geometric style image augmentation using random face deformations""" img = img.copy() if p_geom > 0.5: lms_geom_warp = deform_face_geometric_style(img.landmarks['PTS'].points.copy(), p_scale=p_geom, p_shift=p_geom) return img def load_menpo_image_list( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom(img, p_geom=1. 
* (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_geom( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom_dont_apply(img, p_geom=1. 
* (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_texture( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom(img, p_geom=1. 
* (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list def load_menpo_image_list_no_artistic( img_dir, train_crop_dir, img_dir_ns, mode, bb_dictionary=None, image_size=256, margin=0.25, bb_type='gt', test_data='full', augment_basic=True, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0, verbose=False,ns_ind=None): def crop_to_face_image_gt(img): return crop_to_face_image(img, bb_dictionary, gt=True, margin=margin, image_size=image_size) def crop_to_face_image_init(img): return crop_to_face_image(img, bb_dictionary, gt=False, margin=margin, image_size=image_size) def augment_menpo_img_ns_rand(img): return augment_menpo_img_ns_dont_apply(img, img_dir_ns, p_ns=1. * (np.random.rand() < p_texture),ns_ind=ns_ind) def augment_menpo_img_geom_rand(img): return augment_menpo_img_geom_dont_apply(img, p_geom=1. 
* (np.random.rand() < p_geom)) if mode is 'TRAIN': if train_crop_dir is None: img_set_dir = os.path.join(img_dir, 'training_set') out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: img_set_dir = os.path.join(img_dir, train_crop_dir) out_image_list = mio.import_images(img_set_dir, verbose=verbose) if augment_texture: out_image_list = out_image_list.map(augment_menpo_img_ns_rand) if augment_geom: out_image_list = out_image_list.map(augment_menpo_img_geom_rand) if augment_basic: out_image_list = out_image_list.map(augment_face_image) else: img_set_dir = os.path.join(img_dir, test_data + '_set') if test_data in ['full', 'challenging', 'common', 'training', 'test']: out_image_list = mio.import_images(img_set_dir, verbose=verbose, normalize=False) if bb_type is 'gt': out_image_list = out_image_list.map(crop_to_face_image_gt) elif bb_type is 'init': out_image_list = out_image_list.map(crop_to_face_image_init) else: out_image_list = mio.import_images(img_set_dir, verbose=verbose) return out_image_list plt.figure(figsize=[10,10]) num_augs=9 ns_inds = np.arange(num_augs) for i in range(16): if i % num_augs == 0: np.random.shuffle(ns_inds) print ns_inds img_list = load_menpo_image_list( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) plt.subplot(4,4,i +1) img_list[0].view() # plt.savefig('g.png',bbox='tight') outdir = os.path.join('/Users/arik/Desktop/test_art_data3') if not os.path.exists(outdir): os.mkdir(outdir) aug_geom_dir = os.path.join(outdir,'aug_geom') aug_texture_dir = os.path.join(outdir,'aug_texture') aug_geom_texture_dir = os.path.join(outdir,'aug_geom_texture') aug_basic_dir = os.path.join(outdir,'aug_basic') if not os.path.exists(aug_texture_dir): os.mkdir(aug_texture_dir) if not os.path.exists(aug_geom_dir): os.mkdir(aug_geom_dir) if not os.path.exists(aug_geom_texture_dir): os.mkdir(aug_geom_texture_dir) if not os.path.exists(aug_basic_dir): os.mkdir(aug_basic_dir) num_train_images = 3148. train_iter=100000 batch_size = 6 num_epochs = int(np.ceil((1. * train_iter) / (1. 
* num_train_images / batch_size)))+1 num_augs=9 num_epochs = 10 debug_data_size =5 debug=True aug_geom = True aug_texture = True np.random.seed(1234) ns_inds = np.arange(num_augs) if not aug_geom and aug_texture: save_aug_path = aug_texture_dir elif aug_geom and not aug_texture: save_aug_path = aug_geom_dir elif aug_geom and aug_texture: save_aug_path = aug_geom_texture_dir else: save_aug_path = aug_basic_dir print ('saving augmented images: aug_geom='+str(aug_geom)+' aug_texture='+str(aug_texture)+' : '+str(save_aug_path)) for i in range(num_epochs): print ('saving augmented images of epoch %d/%d'%(i+1,num_epochs)) if not os.path.exists(os.path.join(save_aug_path,str(i))): os.mkdir(os.path.join(save_aug_path,str(i))) if i % num_augs == 0: np.random.shuffle(ns_inds) if not aug_geom and aug_texture: img_list = load_menpo_image_list_no_geom( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) elif aug_geom and not aug_texture: img_list = load_menpo_image_list_no_texture( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) elif aug_geom and aug_texture: img_list = load_menpo_image_list( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) else: img_list = load_menpo_image_list_no_artistic( img_dir=img_dir, train_crop_dir=train_crop_dir, img_dir_ns=img_dir_ns, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, augment_basic=augment_basic, augment_texture=augment_texture, p_texture=1., augment_geom=augment_geom, p_geom=1.,ns_ind=ns_inds[i % num_augs]) if debug: img_list=img_list[:debug_data_size] for im in img_list: if im.pixels.shape[0] == 1: im_pixels = gray2rgb(np.squeeze(im.pixels)) else: im_pixels = np.rollaxis(im.pixels,0,3) imsave( os.path.join(os.path.join(save_aug_path,str(i)),im.path.name.split('.')[0]+'.png'),im_pixels) mio.export_landmark_file(im.landmarks['PTS'],os.path.join(os.path.join(save_aug_path,str(i)),im.path.name.split('.')[0]+'.pts'),overwrite=True) ```
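Optionally (this check is not part of the original pipeline), the saved output can be verified by confirming that every epoch directory contains a matching landmark file for each exported image; the snippet assumes `save_aug_path` and `num_epochs` from the cell above are in scope.

```
# Optional sanity check: each epoch folder should hold one .pts file per saved .png
import os
from glob import glob

for epoch in range(num_epochs):
    epoch_dir = os.path.join(save_aug_path, str(epoch))
    if not os.path.isdir(epoch_dir):
        continue
    pngs = sorted(os.path.basename(p).rsplit('.', 1)[0] for p in glob(os.path.join(epoch_dir, '*.png')))
    pts = sorted(os.path.basename(p).rsplit('.', 1)[0] for p in glob(os.path.join(epoch_dir, '*.pts')))
    print('epoch {}: {} images, {} landmark files, matched={}'.format(epoch, len(pngs), len(pts), pngs == pts))
```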
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import cython import timeit import math %load_ext cython ``` # Native code compilation We will see how to convert Python code to native compiled code. We will use the example of calculating the pairwise distance between a set of vectors, a $O(n^2)$ operation. For native code compilation, it is usually preferable to use explicit for loops and minimize the use of `numpy` vectorization and broadcasting because - It makes it easier for the `numba` JIT to optimize - It is easier to "cythonize" - It is easier to port to C++ However, use of vectors and matrices is fine especially if you will be porting to use a C++ library such as Eigen. ## Timing code ### Manual ``` import time def f(n=1): start = time.time() time.sleep(n) elapsed = time.time() - start return elapsed f(1) ``` ### Clock time ``` %%time time.sleep(1) ``` ### Using `timeit` The `-r` argument says how many runs to average over, and `-n` says how many times to run the function in a loop per run. ``` %timeit time.sleep(0.01) %timeit -r3 time.sleep(0.01) %timeit -n10 time.sleep(0.01) %timeit -r3 -n10 time.sleep(0.01) ``` ### Time unit conversions ``` 1 s = 1,000 ms 1 ms = 1,000 µs 1 µs = 1,000 ns ``` ## Profiling If you want to identify bottlenecks in a Python script, do the following: - First make sure that the script is modular - i.e. it consists mainly of function calls - Each function should be fairly small and only do one thing - Then run a profiler to identify the bottleneck function(s) and optimize them See the Python docs on [profiling Python code](https://docs.python.org/3/library/profile.html) Profiling can be done in a notebook with %prun, with the following readouts as column headers: - ncalls - for the number of calls, - tottime - for the total time spent in the given function (and excluding time made in calls to sub-functions), - percall - is the quotient of tottime divided by ncalls - cumtime - is the total time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions. - percall - is the quotient of cumtime divided by primitive calls - filename:lineno(function) - provides the respective data of each function ``` def foo1(n): return np.sum(np.square(np.arange(n))) def foo2(n): return sum(i*i for i in range(n)) def foo3(n): [foo1(n) for i in range(10)] foo2(n) def foo4(n): return [foo2(n) for i in range(100)] def work(n): foo1(n) foo2(n) foo3(n) foo4(n) %%time work(int(1e5)) %prun -q -D work.prof work(int(1e5)) import pstats p = pstats.Stats('work.prof') p.print_stats() pass p.sort_stats('time', 'cumulative').print_stats('foo') pass p.sort_stats('ncalls').print_stats(5) pass ``` ## Optimizing a function Our example will be to optimize a function that calculates the pairwise distance between a set of vectors. We first use a built-in function from`scipy` to check that our answers are right and also to benchmark how our code compares in speed to an optimized compiled routine. 
``` from scipy.spatial.distance import squareform, pdist n = 100 p = 100 xs = np.random.random((n, p)) sol = squareform(pdist(xs)) %timeit -r3 -n10 squareform(pdist(xs)) ``` ## Python ### Simple version ``` def pdist_py(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) return A ``` Note that we - first check that the output is **right** - then check how fast the code is ``` func = pdist_py print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Exploiting symmetry ``` def pdist_sym(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) A += A.T return A func = pdist_sym print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Vectorizing inner loop ``` def pdist_vec(xs): """Vectorize inner loop.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): A[i,j] = np.sqrt(np.sum((xs[i] - xs[j])**2)) A += A.T return A func = pdist_vec print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Broadcasting and vectorizing Note that the broadcast version does twice as much work as it does not exploit symmetry. ``` def pdist_numpy(xs): """Fully vectroized version.""" return np.sqrt(np.square(xs[:, None] - xs[None, :]).sum(axis=-1)) func = pdist_numpy print(np.allclose(func(xs), sol)) %timeit -r3 -n10 squareform(func(xs)) ``` ## JIT with `numba` We use the `numba.jit` decorator which will trigger generation and execution of compiled code when the function is first called. ``` from numba import jit ``` ### Using `jit` as a function ``` pdist_numba_py = jit(pdist_py, nopython=True, cache=True) func = pdist_numba_py print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Using `jit` as a decorator ``` @jit(nopython=True, cache=True) def pdist_numba_py_1(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): for k in range(p): A[i,j] += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(A[i,j]) return A func = pdist_numba_py_1 print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Can we make the code faster? Note that in the inner loop, we are updating a matrix when we only need to update a scalar. Let's fix this. ``` @jit(nopython=True, cache=True) def pdist_numba_py_2(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(n): d = 0.0 for k in range(p): d += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(d) return A func = pdist_numba_py_2 print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Can we make the code even faster? We can also try to exploit symmetry. ``` @jit(nopython=True, cache=True) def pdist_numba_py_sym(xs): """Unvectorized Python.""" n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i, k] - xs[j, k])**2 A[i,j] = np.sqrt(d) A += A.T return A func = pdist_numba_py_sym print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Does `jit` work with vectorized code? ``` pdist_numba_vec = jit(pdist_vec, nopython=True, cache=True) %timeit -r3 -n10 pdist_vec(xs) func = pdist_numba_vec print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Does `jit` work with broadcasting? 
``` pdist_numba_numpy = jit(pdist_numpy, nopython=True, cache=True) %timeit -r3 -n10 pdist_numpy(xs) func = pdist_numba_numpy try: print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) except Exception as e: print(e) ``` #### We need to use `reshape` to broadcast ``` def pdist_numpy_(xs): """Fully vectroized version.""" return np.sqrt(np.square(xs.reshape(n,1,p) - xs.reshape(1,n,p)).sum(axis=-1)) pdist_numba_numpy_ = jit(pdist_numpy_, nopython=True, cache=True) %timeit -r3 -n10 pdist_numpy_(xs) func = pdist_numba_numpy_ print(np.allclose(func(xs), sol)) %timeit -r3 -n10 func(xs) ``` ### Summary - `numba` appears to work best with converting fairly explicit Python code - This might change in the future as the `numba` JIT compiler becomes more sophisticated - Always check optimized code for correctness - We can use `timeit` magic as a simple way to benchmark functions ## Cython Cython is an Ahead Of Time (AOT) compiler. It compiles the code and replaces the function invoked with the compiled version. In the notebook, calling `%cython -a` magic shows code colored by how many Python C API calls are being made. You want to reduce the yellow as much as possible. ``` %%cython -a import numpy as np def pdist_cython_1(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A def pdist_base(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A %timeit -r3 -n1 pdist_base(xs) func = pdist_cython_1 print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ``` ## Cython with static types - We provide types for all variables so that Cython can optimize their compilation to C code. - Note `numpy` functions are optimized for working with `ndarrays` and have unnecessary overhead for scalars. We therefor replace them with math functions from the C `math` library. 
``` %%cython -a import cython import numpy as np cimport numpy as np from libc.math cimport sqrt, pow @cython.boundscheck(False) @cython.wraparound(False) def pdist_cython_2(double[:, :] xs): cdef int n, p cdef int i, j, k cdef double[:, :] A cdef double d n = xs.shape[0] p = xs.shape[1] A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += pow(xs[i,k] - xs[j,k],2) A[i,j] = sqrt(d) for i in range(1, n): for j in range(i): A[i, j] = A[j, i] return A func = pdist_cython_2 print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ``` ## Wrapping C++ cdoe ### Function to port ```python def pdist_base(xs): n, p = xs.shape A = np.zeros((n, n)) for i in range(n): for j in range(i+1, n): d = 0.0 for k in range(p): d += (xs[i,k] - xs[j,k])**2 A[i,j] = np.sqrt(d) A += A.T return A ``` ### First check that the function works as expected ``` %%file main.cpp #include <iostream> #include <Eigen/Dense> #include <cmath> using std::cout; // takes numpy array as input and returns another numpy array Eigen::MatrixXd pdist(Eigen::MatrixXd xs) { int n = xs.rows() ; int p = xs.cols(); Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n); for (int i=0; i<n; i++) { for (int j=i+1; j<n; j++) { double d = 0; for (int k=0; k<p; k++) { d += std::pow(xs(i,k) - xs(j,k), 2); } A(i, j) = std::sqrt(d); } } A += A.transpose().eval(); return A; } int main() { using namespace Eigen; MatrixXd A(3,2); A << 0, 0, 3, 4, 5, 12; std::cout << pdist(A) << "\n"; } %%bash g++ -o main.exe main.cpp -I./eigen3 %%bash ./main.exe A = np.array([ [0, 0], [3, 4], [5, 12] ]) squareform(pdist(A)) ``` ### Now use the boiler plate for wrapping ``` %%file wrap.cpp <% cfg['compiler_args'] = ['-std=c++11'] cfg['include_dirs'] = ['./eigen3'] setup_pybind11(cfg) %> #include <pybind11/pybind11.h> #include <pybind11/eigen.h> // takes numpy array as input and returns another numpy array Eigen::MatrixXd pdist(Eigen::MatrixXd xs) { int n = xs.rows() ; int p = xs.cols(); Eigen::MatrixXd A = Eigen::MatrixXd::Zero(n, n); for (int i=0; i<n; i++) { for (int j=i+1; j<n; j++) { double d = 0; for (int k=0; k<p; k++) { d += std::pow(xs(i,k) - xs(j,k), 2); } A(i, j) = std::sqrt(d); } } A += A.transpose().eval(); return A; } PYBIND11_PLUGIN(wrap) { pybind11::module m("wrap", "auto-compiled c++ extension"); m.def("pdist", &pdist); return m.ptr(); } import cppimport import numpy as np code = cppimport.imp("wrap") print(code.pdist(A)) func = code.pdist print(np.allclose(func(xs), sol)) %timeit -r3 -n1 func(xs) ```
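As a closing convenience (not part of the original notebook), the correctness checks and timings scattered above can be gathered into one helper; it assumes the corresponding cells have been run so that the listed `pdist_*` functions, `xs` and `sol` are in scope.

```
# Benchmark several of the implementations defined above in one pass.
import timeit
import numpy as np

def benchmark(funcs, xs, sol, repeat=3, number=1):
    """Check each implementation against `sol` and report its best wall-clock time."""
    for f in funcs:
        ok = np.allclose(f(xs), sol)
        best = min(timeit.repeat(lambda: f(xs), repeat=repeat, number=number)) / number
        print('{:25s} correct={}  best={:.2f} ms'.format(f.__name__, ok, best * 1e3))

benchmark([pdist_py, pdist_sym, pdist_vec, pdist_numpy, pdist_numba_py_2, pdist_cython_2], xs, sol)
```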
github_jupyter
# MSOA Mapping - England

```
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Point
from sklearn.neighbors import KNeighborsRegressor
import rasterio as rst
from rasterstats import zonal_stats

%matplotlib inline

path = r"[CHANGE THIS PATH]\England\\"

data = pd.read_csv(path + "final_data.csv", index_col = 0)
```

# Convert to GeoDataFrame

```
geo_data = gpd.GeoDataFrame(data = data, crs = {'init':'epsg:27700'},
                            geometry = data.apply(lambda geom: Point(geom['oseast1m'],geom['osnrth1m']),axis=1))
geo_data.head()

f, (ax1, ax2, ax3) = plt.subplots(1,3, figsize = (16,6), sharex = True, sharey = True)
geo_data[geo_data['Year'] == 2016].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax1);
geo_data[geo_data['Year'] == 2017].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax2);
geo_data[geo_data['Year'] == 2018].plot(column = 'loneills', scheme = 'quantiles', cmap = 'Reds', marker = '.', ax = ax3);
```

## k-nearest neighbour interpolation

Non-parametric interpolation of loneliness based on the local set of _k_ nearest neighbours for each cell in our evaluation grid. This effectively becomes an inverse distance weighted (idw) interpolation when the weights are set to be distance based.

```
def idw_model(k, p):
    def _inv_distance_index(weights, index=p):
        # exact matches (zero distance) get weight 1 and all others 0;
        # otherwise use inverse-distance-to-the-power-index weights
        return (weights == 0).astype(int) if np.any(weights == 0) else 1. / weights**index
    return KNeighborsRegressor(k, weights=_inv_distance_index)

def grid(xmin, xmax, ymin, ymax, cellsize):
    # Set x and y ranges to accommodate cellsize
    xmin = (xmin // cellsize) * cellsize
    xmax = -(-xmax // cellsize) * cellsize  # ceiling division
    ymin = (ymin // cellsize) * cellsize
    ymax = -(-ymax // cellsize) * cellsize
    # Make meshgrid
    x = np.linspace(xmin, xmax, int((xmax-xmin)/cellsize))
    y = np.linspace(ymin, ymax, int((ymax-ymin)/cellsize))
    return np.meshgrid(x,y)

def reshape_grid(xx,yy):
    return np.append(xx.ravel()[:,np.newaxis],yy.ravel()[:,np.newaxis],1)

def reshape_image(z, xx):
    return np.flip(z.reshape(np.shape(xx)),0)

def idw_surface(locations, values, xmin, xmax, ymin, ymax, cellsize, k=5, p=2):
    # Make and fit the idw model
    idw = idw_model(k,p).fit(locations, values)
    # Make the grid to estimate over
    xx, yy = grid(xmin, xmax, ymin, ymax, cellsize)
    # reshape the grid for estimation
    xy = reshape_grid(xx,yy)
    # Predict the grid values
    z = idw.predict(xy)
    # reshape to image array
    z = reshape_image(z, xx)
    return z
```

## 2016 data

```
# Get point locations and values from data
points = geo_data[geo_data['Year'] == 2016][['oseast1m','osnrth1m']].values
vals = geo_data[geo_data['Year'] == 2016]['loneills'].values

surface2016 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2)

# Look at surface
f, ax = plt.subplots(figsize = (8,10))
ax.imshow(surface2016, cmap='Reds')
ax.set_aspect('equal')
```

## 2017 Data

```
# Get point locations and values from data
points = geo_data[geo_data['Year'] == 2017][['oseast1m','osnrth1m']].values
vals = geo_data[geo_data['Year'] == 2017]['loneills'].values

surface2017 = idw_surface(points, vals, 90000,656000,10000,654000,250,7,2)

# Look at surface
f, ax = plt.subplots(figsize = (8,10))
ax.imshow(surface2017, cmap='Reds')
ax.set_aspect('equal')
```

## 2018 Data

Get the minimum and maximum bounds from the data.
Round these down (in case of the 'min's) and up (in case of the 'max's) to get the values for `idw_surface()` ``` print("xmin = ", geo_data['oseast1m'].min(), "\n\r", "xmax = ", geo_data['oseast1m'].max(), "\n\r", "ymin = ", geo_data['osnrth1m'].min(), "\n\r", "ymax = ", geo_data['osnrth1m'].max()) xmin = 90000 xmax = 656000 ymin = 10000 ymax = 654000 # Get point locations and values from data points = geo_data[geo_data['Year'] == 2018][['oseast1m','osnrth1m']].values vals = geo_data[geo_data['Year'] == 2018]['loneills'].values surface2018 = idw_surface(points, vals, xmin,xmax,ymin,ymax,250,7,2) # Look at surface f, ax = plt.subplots(figsize = (8,10)) ax.imshow(surface2018, cmap='Reds') ax.set_aspect('equal') ``` # Extract Values to MSOAs Get 2011 MSOAs from the Open Geography Portal: http://geoportal.statistics.gov.uk/ ``` # Get MSOAs which we use to aggregate the loneills variable. #filestring = './Data/MSOAs/Middle_Layer_Super_Output_Areas_December_2011_Full_Clipped_Boundaries_in_England_and_Wales.shp' filestring = r'[CHANGE THIS PATH]\Data\Boundaries\England and Wales\Middle_Layer_Super_Output_Areas_December_2011_Super_Generalised_Clipped_Boundaries_in_England_and_Wales.shp' msoas = gpd.read_file(filestring) msoas.to_crs({'init':'epsg:27700'}) # drop the Wales MSOAs msoas = msoas[msoas['msoa11cd'].str[:1] == 'E'].copy() # Get GB countries data to use for representation #gb = gpd.read_file('./Data/GB/Countries_December_2017_Generalised_Clipped_Boundaries_in_UK_WGS84.shp') #gb = gb.to_crs({'init':'epsg:27700'}) # get England #eng = gb[gb['ctry17nm'] == 'England'].copy() # Make affine transform for raster trans = rst.Affine.from_gdal(xmin-125,250,0,ymax+125,0,-250) # NB This process is slooow - write bespoke method? # 2016 #msoa_zones = zonal_stats(msoas['geometry'], surface2016, affine = trans, stats = 'mean', nodata = np.nan) #msoas['loneills_2016'] = list(map(lambda x: x['mean'] , msoa_zones)) # 2017 #msoa_zones = zonal_stats(msoas['geometry'], surface2017, affine = trans, stats = 'mean', nodata = np.nan) #msoas['loneills_2017'] = list(map(lambda x: x['mean'] , msoa_zones)) # 2018 msoa_zones = zonal_stats(msoas['geometry'], surface2018, affine = trans, stats = 'mean', nodata = np.nan) msoas['loneills_2018'] = list(map(lambda x: x['mean'] , msoa_zones)) # Check out the distributions of loneills by MSOA f, [ax1, ax2, ax3] = plt.subplots(1,3, figsize=(14,5), sharex = True, sharey=True) #ax1.hist(msoas['loneills_2016'], bins = 30) #ax2.hist(msoas['loneills_2017'], bins = 30) ax3.hist(msoas['loneills_2018'], bins = 30) ax1.set_title("2016") ax2.set_title("2017") ax3.set_title("2018"); bins = [-10, -5, -3, -2, -1, 1, 2, 3, 5, 10, 22] labels = ['#01665e','#35978f', '#80cdc1','#c7eae5','#f5f5f5','#f6e8c3','#dfc27d','#bf812d','#8c510a','#543005'] #msoas['loneills_2016_class'] = pd.cut(msoas['loneills_2016'], bins, labels = labels) #msoas['loneills_2017_class'] = pd.cut(msoas['loneills_2017'], bins, labels = labels) msoas['loneills_2018_class'] = pd.cut(msoas['loneills_2018'], bins, labels = labels) msoas['loneills_2018_class'] = msoas.loneills_2018_class.astype(str) # convert categorical to string f, (ax1, ax2, ax3) = plt.subplots(1,3,figsize = (16,10)) #msoas.plot(color = msoas['loneills_2016_class'], ax=ax1) #msoas.plot(color = msoas['loneills_2017_class'], ax=ax2) msoas.plot(color = msoas['loneills_2018_class'], ax=ax3) #gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax1) #gb.plot(edgecolor = 'k', linewidth = 0.5, facecolor='none', ax=ax2) #gb.plot(edgecolor = 'k', linewidth 
= 0.5, facecolor='none', ax=ax3)

# restrict to England
#ax1.set_xlim([82672,656000])
#ax1.set_ylim([5342,658000])
#ax2.set_xlim([82672,656000])
#ax2.set_ylim([5342,658000])
#ax3.set_xlim([82672,656000])
#ax3.set_ylim([5342,658000])

# Make a bespoke legend
from matplotlib.patches import Patch

handles = []
ranges = ["-10, -5","-5, -3","-3, -2","-2, -1","-1, 1","1, 2","2, 3","3, 5","5, 10","10, 22"]
for color, label in zip(labels,ranges):
    handles.append(Patch(facecolor = color, label = label))

ax1.legend(handles = handles, loc = 2);

# Save out msoa data as shapefile and geojson
msoas.to_file(path + "msoa_loneliness.shp", driver = 'ESRI Shapefile')
#msoas.to_file(path + "msoa_loneliness.geojson", driver = 'GeoJSON')

# save out msoa data as csv
msoas.to_csv(path + "msoa_loneliness.csv")
```
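As a quick, optional sanity check of the inverse-distance weighting used above (a toy 1D example with made-up values, not part of the analysis), predictions between known points should be distance-weighted blends of the neighbouring values:

```
# Toy check of idw_model: predictions are inverse-distance-weighted blends of the three values
import numpy as np

toy_locs = np.array([[0.0], [1.0], [2.0]])
toy_vals = np.array([0.0, 10.0, 0.0])

toy_idw = idw_model(k=3, p=2).fit(toy_locs, toy_vals)
print(toy_idw.predict(np.array([[0.5], [1.5]])))   # values between 0 and 10, pulled toward nearer points
```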
github_jupyter
<font size = "5"> **Chapter 4: [Spectroscopy](CH4-Spectroscopy.ipynb)** </font> <hr style="height:1px;border-top:4px solid #FF8200" /> # Analysis of Core-Loss Spectra <font size = "5"> **This notebook does not work in Google Colab** </font> [Download](https://raw.githubusercontent.com/gduscher/MSE672-Introduction-to-TEM/main/Spectroscopy/CH4_09-Analyse_Core_Loss.ipynb) part of <font size = "5"> **[MSE672: Introduction to Transmission Electron Microscopy](../_MSE672_Intro_TEM.ipynb)**</font> by Gerd Duscher, Spring 2021 Microscopy Facilities<br> Joint Institute of Advanced Materials<br> Materials Science & Engineering<br> The University of Tennessee, Knoxville Background and methods to analysis and quantification of data acquired with transmission electron microscopes. ## Content Quantitative determination of chemical composition from a core-loss EELS spectrum Please cite: [M. Tian et al. *Measuring the areal density of nanomaterials by electron energy-loss spectroscopy* Ultramicroscopy Volume 196, 2019, pages 154-160](https://doi.org/10.1016/j.ultramic.2018.10.009) as a reference of this quantification method. ## Load important packages ### Check Installed Packages ``` import sys from pkg_resources import get_distribution, DistributionNotFound def test_package(package_name): """Test if package exists and returns version or -1""" try: version = (get_distribution(package_name).version) except (DistributionNotFound, ImportError) as err: version = '-1' return version # pyTEMlib setup ------------------ if test_package('sidpy') < '0.0.5': print('installing sidpy') !{sys.executable} -m pip install --upgrade sidpy -q if test_package('pyTEMlib') < '0.2021.4.20': print('installing pyTEMlib') !{sys.executable} -m pip install --upgrade pyTEMlib -q # ------------------------------ print('done') ``` ### Import all relevant libraries Please note that the EELS_tools package from pyTEMlib is essential. ``` %pylab --no-import-all notebook %gui qt # Import libraries from pyTEMlib import pyTEMlib import pyTEMlib.file_tools as ft # File input/ output library import pyTEMlib.image_tools as it import pyTEMlib.eels_tools as eels # EELS methods import pyTEMlib.interactive_eels as ieels # Dialogs for EELS input and quantification # For archiving reasons it is a good idea to print the version numbers out at this point print('pyTEM version: ',pyTEMlib.__version__) __notebook__ = 'analyse_core_loss' __notebook_version__ = '2021_04_22' ``` ## Load and plot a spectrum As an example we load the spectrum **1EELS Acquire (high-loss).dm3** from the *example data* folder. Please see [Loading an EELS Spectrum](LoadEELS.ipynb) for details on storage and plotting. First a dialog to select a file will apear. Then the spectrum plot and ``Spectrum Info`` dialog will appear, in which we set the experimental parameters. Please use the ``Set Energy Scale`` button to change the energy scale. When pressed a new dialog and a cursor will appear in which one is able to set the energy scale based on known features in the spectrum. ``` # -----Input -------# load_example = True try: main_dataset.h5_dataset.file.close() except: pass if load_example: main_dataset = ft.open_file('../example_data/EELS_STO.dm3') else: main_dataset = ft.open_file() current_channel = main_dataset.h5_dataset.parent if 'experiment' not in main_dataset.metadata: main_dataset.metadata['experiment']= eels.read_dm3_eels_info(main_dataset.original_metadata) eels.set_previous_quantification(main_dataset) # US 200 does not set acceleration voltage correctly. 
# comment out next line for other microscopes # current_dataset.metadata['experiment']['acceleration_voltage'] = 200000 info = ieels.InfoDialog(main_dataset) ``` ## Chemical Composition The fit of the cross-section and background to the spectrum results in the chemical composition. If the calibration is correct this composition is given as areal density in atoms/nm$^2$ ### Fit of Data A dialog window will open, enter the elements first (0 will open a periodic table) and press ``Fit Composition`` button (bottom right). Adjust parameters as needed and check fit by pressing the ``Fit Composition`` button again. Select the ``Region`` checkbox to see which parts of the spectrum you choose to fit. Changing the multiplier value will make a simulation of your spectrum. The ``InfoDialog``, if open, still works to change experimental parameters and the energy scale. ``` # current_dataset.metadata['edges'] = {'0': {}, 'model': {}} composition = ieels.CompositionDialog(main_dataset) ``` ### Output of Results ``` edges = main_dataset.metadata['edges'] element = [] areal_density = [] for key, edge in edges.items(): if key.isdigit(): element.append(edge['element']) areal_density.append(edge['areal_density']) print('Relative chemical composition of ', main_dataset.title) for i in range(len(element)): print(f'{element[i]}: {areal_density[i]/np.sum(areal_density)*100:.1f} %') saved_edges_metadata = edges ``` ### Log Data We write all the data to the hdf5 file associated with our dataset. In our case that is only the ``metadata``, in which we stored the ``experimental parameters`` and the ``fitting parameters and result``. ``` current_group = main_dataset.h5_dataset.parent.parent if 'Log_000' in current_group: del current_group['Log_000'] log_group = current_group.create_group('Log_000') log_group['analysis'] = 'EELS_quantification' log_group['EELS_quantification'] = '' flat_dict = ft.flatten_dict(main_dataset.metadata) if 'peak_fit-peak_out_list' in flat_dict: del flat_dict['peak_fit-peak_out_list'] for key, item in flat_dict.items(): if not key == 'peak_fit-peak_out_list': log_group.attrs[key]= item current_group.file.flush() ft.h5_tree(main_dataset.h5_dataset.file) ``` ## ELNES The electron energy-loss near edge structure is determined by fititng the spectrum after quantification model subtraction. First smooth the spectrum (2 iterations are ususally sufficient) and then find the number of peaks you want (Can be repeated as oftern as one wants). 
``` peak_dialog = ieels.PeakFitDialog(main_dataset) ``` ### Output ``` areas = [] for p, peak in peak_dialog.peaks['peaks'].items(): area = np.sqrt(2* np.pi)* peak['amplitude'] * np.abs(peak['width'] / np.sqrt(2 *np.log(2))) areas.append(area) if 'associated_edge' not in peak: peak['associated_edge']= '' print(f"peak {p}: position: {peak['position']:7.1f}, area: {area:12.3f} associated edge: {peak['associated_edge']}") #print(f'\n M4/M5 peak 2 to peak 1 ratio: {(areas[1])/areas[0]:.2f}') ``` ### Log Data ``` current_group = main_dataset.h5_dataset.parent.parent if 'Log_001' in current_group: del current_group['Log_001'] log_group = current_group.create_group('Log_001') log_group['analysis'] = 'ELNES_fit' log_group['ELNES_fit'] = '' metadata = ft.flatten_dict(main_dataset.metadata) if 'peak_fit-peak_out_list' in flat_dict: del flat_dict['peak_fit-peak_out_list'] for key, item in metadata.items(): if not key == 'peak_fit-peak_out_list': log_group.attrs[key]= item current_group.file.flush() print('Logged Data of ', main_dataset.title) for key in current_group: if 'Log_' in key: if 'analysis' in current_group[key]: print(f" {key}: {current_group[key]['analysis'][()]}") ``` ## Close File File needs to be closed to be used with other notebooks ``` main_dataset.h5_dataset.file.close() ``` ## Navigation
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ``` # Film yorumları ile metin sınıflandırma <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/tr/r1/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Google Colab’da Çalıştır</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/tr/r1/tutorials/keras/basic_text_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />GitHub'da Kaynağı Görüntüle</a> </td> </table> Note: Bu dökümanlar TensorFlow gönüllü kullanıcıları tarafından çevirilmiştir. Topluluk tarafından sağlananan çeviriler gönüllülerin ellerinden geldiğince güncellendiği için [Resmi İngilizce dökümanlar](https://www.tensorflow.org/?hl=en) ile bire bir aynı olmasını garantileyemeyiz. Eğer bu tercümeleri iyileştirmek için önerileriniz var ise lütfen [tensorflow/docs](https://github.com/tensorflow/docs) havuzuna pull request gönderin. Gönüllü olarak çevirilere katkıda bulunmak için [[email protected]](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-tr) listesi ile iletişime geçebilirsiniz. Bu yardımcı döküman, yorum metinlerini kullanarak film yorumlarını *olumlu* veya *olumsuz* olarak sınıflandırmaktadır. Bu örnek, yoğun olarak kullanılan ve önemli bir makina öğrenmesi uygulaması olan *ikili* veya *iki kategorili sınıflandırma*' yı kapsamaktadır. Bu örnekte, [Internet Film Veritabanı](https://www.imdb.com/) sitesinde yer alan 50,000 film değerlendirme metnini içeren [IMDB veri seti](https://www.tensorflow.org/api_docs/python/tf/keras/datasets/imdb) 'ni kullancağız. Bu veri seti içerisindeki 25,000 yorum modelin eğitimi için, 25,000 yorum ise modelin testi için ayrılmıştır. 
Eğitim ve test veri setleri eşit miktarda olumlu ve olumsuz yorum içerecek şekilde dengelenmiştir. Bu yardımcı döküman, Tensorflow'da modellerin oluşturulması ve eğitilmesinde kullanına yüksek-seviye API [tf.keras](https://www.tensorflow.org/r1/guide/keras) 'ı kullanır. `tf.keras` ile ileri seviye metin sınıflandımayı öğrenmek için [MLCC Metin Sınıflandırma ](https://developers.google.com/machine-learning/guides/text-classification/)'a göz atabilirsiniz. ``` # keras.datasets.imdb is broken in 1.13 and 1.14, by np 1.16.3 !pip install tf_nightly from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf from tensorflow import keras import numpy as np print(tf.__version__) ``` ## IMDB veri setini indirelim IMDB veri seti TensorFlow ile birlikte bütünleşik olarak gelmektedir. Yorumların kelime diziliş sıraları, her bir sayının bir kelimeyi temsil ettiği sıralı bir tam sayı dizisine çevrilerek veri seti ön işlemden geçirilmiştir. Aşağıdaki kodlar, IMDB veri setini bilgisayarınıza indirir (eğer daha önceden indirme yapmışsanız, önbellekteki veri kullanılır) : ``` imdb = keras.datasets.imdb (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000) ``` `num_words=10000` değişkeni eğitim veri setinde en sık kullanılan 10,000 kelimeyi tutar, az kullanılan kelimeleri veri boyutunun yönetilebilir olması için ihmal eder. ## Veriyi inceleyelim Veri formatını aşağıdaki kodlar yardımı ile birlikte inceleyelim. Veri seti, ön işlem uygulanmış şekilde gelmektedir: tüm film yorum örnekleri, her bir sayının yorumundaki bir kelimeye denk geldiği tam sayı dizisi olarak gelmektedir. Tüm etiketler 0 veya 1 değerine sahiptir (0 olumsuz değerlendirme, 1 olumlu değerlendirme). ``` print("Training entries: {}, labels: {}".format(len(train_data), len(train_labels))) ``` Yorum metinleri, her bir sayının sözlükte yer alan bir kelimeye denk geldiği sayı dizisine çevrilmiştir. İlk yorum metni, aşağıdaki gibidir: ``` print(train_data[0]) ``` Farklı film yorumlarının uzunlukları farklı olabilir. Aşağıdaki kod, ilk ve ikinci yorumda yer alan kelime sayılarını göstermektedir. Sinir ağlarında girdi boyutlarının aynı olması gerekmektedir, bu problemi daha sonra çözeceğiz. ``` len(train_data[0]), len(train_data[1]) ``` ### Tam sayıları kelimelere geri çevirelerim Tam sayıları metin'e çevirme işlemini bilmemiz, bazı durumlarda işimize yarayabilir. Bunun için bir yardımcı fonksiyon oluşturacağız. Bu fonksiyon, tam sayı-karakter eşleştirmesi içeren bir sözlük nesnesini sorguyabilmemizi sağlayacak: ``` # A dictionary mapping words to an integer index word_index = imdb.get_word_index() # İlk indisler rezervedir word_index = {k:(v+3) for k,v in word_index.items()} word_index["<PAD>"] = 0 word_index["<START>"] = 1 word_index["<UNK>"] = 2 # unknown word_index["<UNUSED>"] = 3 reverse_word_index = dict([(value, key) for (key, value) in word_index.items()]) def decode_review(text): return ' '.join([reverse_word_index.get(i, '?') for i in text]) ``` 'decode_review' fonksiyonunu kullanarak ilk yorum metnini şimdi ekranda gösterebiliriz: ``` decode_review(train_data[0]) ``` ## Veriyi hazırlayalım Yorumlar -tam sayı dizileri- sinir ağına beslenmeden önce ilk olarak tensor yapısına çevrilmelidir. Bu çevirme işlemi birkaç farklı şekilde yapabilir: * Bu ilk yöntemde, one-hot encoding işlemine benzer şekilde, tam sayı dizileri kelimelerin mevcut olup olmamasına göre 0 ve 1 ler içeren, vektörlere çevrilir. 
Örnek olarak, [3, 5] dizisini vektör'e dönüştürdüğümüzde, bu dizi 3üncü ve 5inci indeksleri dışında tüm değerleri 0 olan 10,000 boyutlu bir vektor'e dönüşür. Sonrasında, ağımızın ilk katmanını floating point vektor verisini işleyebilen yoğun katman (dense layer) olarak oluşturabiliriz. Bu yöntem, 'num_words * num_reviews' boyutlu bir matris oluşturduğumuz için, yoğun hafıza kullanımına ihtiyaç duyar. * Alternatif olarak, tüm dizileri aynı boyutta olacak şekilde doldurabiliriz. Sonrasında 'max_length * max_review' boyutlu bir tam sayı vektorü oluşturabiliriz. Son olarak, bu boyuttaki vektörleri işleyebilen gömülü katmanı, ağımızın ilk katmanı olarak oluşturabiliriz. Bu örnekte ikinci yöntem ile ilerleyeceğiz. Film yorumlarımızın aynı boyutta olması gerektiği için, yorum boyutlarını standart uzunluğa dönüştüren [pad_sequences](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences) fonksiyonunu kullanacağız: ``` train_data = keras.preprocessing.sequence.pad_sequences(train_data, value=word_index["<PAD>"], padding='post', maxlen=256) test_data = keras.preprocessing.sequence.pad_sequences(test_data, value=word_index["<PAD>"], padding='post', maxlen=256) ``` Şimdi, ilk yorum örneklerinin uzunluklarına birlikte bakalım: ``` len(train_data[0]), len(train_data[1]) ``` Ve ilk yorumu (doldurulmuş şekliyle) inceleyelim: ``` print(train_data[0]) ``` ## Modeli oluşturalım Sinir ağları, katmanların birleştirilmesiyle oluşturulur. Bu noktada, modelin yapısıyla ilgili iki temel karar vermemiz gerekmektedir: * Modeli oluşturuken kaç adet katman kullanacağız? * Her bir katmanda kaç adet *gizli birim* (hidden units) kullanacağız? Bu örnekte modelimizin girdi verisi, kelime indekslerini kapsayan bir tam sayı dizisidir. Tahmin edilecek etiket değerleri 0 ve 1'dir. Problemimiz için modelimizi oluşturalım: ``` # Girdiler film yorumları için kullanılan kelime sayısıdır (10,000 kelime) vocab_size = 10000 model = keras.Sequential() model.add(keras.layers.Embedding(vocab_size, 16)) model.add(keras.layers.GlobalAveragePooling1D()) model.add(keras.layers.Dense(16, activation=tf.nn.relu)) model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid)) model.summary() ``` Sınıflandırıcı modelimizi oluşturmak için katmanlar sıralı bir şekilde birleştirilmiştir: 1. İlk katmanımız 'gömülü-embedding' katmandır. Bu katman tam sayı olarak şifrelenmiş sözcük grubu içerisinden kelime değerlerini alıp, her bir kelime indeksi için bu değeri gömülü vektör içerisinde arar. Bu vektörler modelin eğitimi sırasında öğrenilirler ve çıktı dizisine bir boyut eklerler. Sonuç olarak boyutlar '(batch, sequence, embedding)' şeklinde oluşur: 2. Sonrasında, `GlobalAveragePooling1D` katmanı, her bir yorum örneği için, ardaşık boyutların ortalamasını alarak sabit uzunlukta bir çıktı vektörü oluştur. Bu işlem, en basit şekliyle, modelimizin faklı boyutlardaki girdileri işleyebilmesini sağlar. 3. Bu sabit boyutlu çıktı vektörü, 16 gizli birim (hidden units) içeren tam-bağlı (fully-connected) yoğun katman'a beslenir. 4. Son katman, tek bir çıktı düğümü içeren yoğun bağlı bir katmandır. 'sigmoid' aktivasyon fonksiyonunu kullanarak, bu düğümün çıktısı 0 ile 1 arasında, olasılık veya güven değerini temsil eden bir değer alır. ### Gizli birimler (Hidden units) Yukarıdaki model, girdi ve çıktı arasında iki adet ara veya "gizli" katman içerir. Çıktıların sayısı (birimler, düğümler veya neronlar), mevcut katman içerisinde yapılan çıkarımların boyutudur. 
In other words, the output size of a layer determines how much internal representation the network is allowed to learn. If a model has more hidden units (a higher-dimensional representation space) or more layers, it can learn more complex representations, at the cost of more computation. However, a model that learns more than it needs to solve the problem may do very well on the training data and yet not perform as well on the test data. This is called *overfitting*, and we will come back to it later.

### Loss function and optimizer

A model needs a loss function and an optimizer for training. Since our problem is to classify movie reviews as positive or negative (that is, a binary classification problem), we will use the `binary_crossentropy` loss function. It is not the only choice; we could use `mean_squared_error`, for example. But `binary_crossentropy` is generally better suited to working with probabilities, since it measures the "distance" between probability distributions (here, between the ground truth and the predictions). Later, when we look at regression problems (say, predicting the price of a house), we will see how to use other loss functions such as mean squared error.

Now, configure the model with the optimizer and the loss function:

```
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])
```

## Create a validation set

During training, we want to check the accuracy of the model on data it has not seen before. Create a *validation set* by setting apart 10,000 reviews from the training data. (Why not use the test set now? Because our goal is to build and tune the model using only the training data, and then use the test data just once to evaluate its accuracy.)

```
x_val = train_data[:10000]
partial_x_train = train_data[10000:]

y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
```

## Train the model

Train the model for 40 epochs in mini-batches of 512 samples. This is 40 iterations over all samples in the `x_train` and `y_train` tensors. While training, monitor the model's loss and accuracy on the validation set:

```
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)
```

## Evaluate the model

Let's see how the model performs. Two values are returned: the loss (a number that represents the error, so lower is better) and the accuracy.

```
results = model.evaluate(test_data, test_labels)

print(results)
```

This fairly naive approach achieves an accuracy of about 87%. With more advanced approaches, the model could get closer to 95%.

## Create a graph of accuracy and loss over time

`model.fit()` returns a `History` object that contains a dictionary with everything that happened during training:

```
history_dict = history.history
history_dict.keys()
```

There are four entries: one for each monitored metric (loss and accuracy), for both training and validation.
We will plot the training and validation loss against each other, and likewise the training and validation accuracy, for comparison:

```
import matplotlib.pyplot as plt

acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']

epochs = range(1, len(acc) + 1)

# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# "b" is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

plt.show()

plt.clf()   # clear the figure

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()
```

In the plots, the dots represent the training loss and accuracy, while the solid lines represent the validation loss and accuracy.

Notice that the training loss *decreases* with each epoch while the training accuracy *increases*. This is expected with gradient-descent optimization, which reduces the loss a little further on every iteration.

The same is not true for the validation loss and accuracy: they appear to peak around the 20th epoch. This is an example of overfitting: the model performs better on the training data than on data it has never seen before. After this point, the model over-optimizes and learns representations that are specific to the training data and do not generalize to the test data.

For this particular case, we could avoid the overfitting we observed simply by stopping the training after roughly 20 epochs. Later we will see how to do this automatically; a sketch follows below.
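A minimal sketch of that idea (not part of the original tutorial): Keras provides an `EarlyStopping` callback that halts training when the monitored validation metric stops improving. The `patience` value below is an arbitrary assumption, and in practice you would rebuild and recompile the model before calling `fit` again, since another `fit` call continues from the current weights.

```
# Assumed sketch: stop training automatically once val_loss stops improving
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                           patience=2,
                                           restore_best_weights=True)

history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    callbacks=[early_stop],
                    verbose=1)
```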
github_jupyter
## Problem

Given a sorted list of integers of length N, determine if an element x is in the list without performing any multiplication, division, or bit-shift operations.

Do this in `O(log N)` time.

## Solution

We can't use binary search to locate the element because it involves dividing by two to get the middle element. We can use Fibonacci search to get around this limitation. The idea is that Fibonacci numbers are used to locate indices to check in the array, and by cleverly updating these indices, we can efficiently locate our element.

Let `p` and `q` be the indices of consecutive Fibonacci numbers, where `fibs[q]` is the smallest Fibonacci number that is **greater than or equal to** the size of the array. We compare `x` with `array[fibs[p]]` (shifted by an offset, described below) and perform the following logic:

1. If `x == array[fibs[p]]`, we have found the element. Return true.
2. If `x < array[fibs[p]]`, move p and q down two indices each, cutting the two largest Fibonacci numbers out of the search.
3. If `x > array[fibs[p]]`, move p and q down one index each, and set the offset to the index we just probed so that it is added to the next search position.

If we have exhausted our list of Fibonacci numbers, we can be assured that the element is not in our array.

Let's go through an example. First, we need a helper function to generate the Fibonacci numbers, given the length of the array, N.

```
def get_fib_sequence(n):
    a, b = 0, 1
    sequence = [a]
    while a < n:
        a, b = b, a + b
        sequence.append(a)
    return sequence
```

Suppose we have array

```
[2, 4, 10, 16, 25, 45, 55, 65, 80, 100]
```

Since there are 10 elements in the array, the generated sequence of Fibonacci numbers will be

```
[0, 1, 1, 2, 3, 5, 8, 13]
```

So the values of p and q are: `p == 6, q == 7` (the second-to-last and last indices in the sequence).

Now suppose we are searching for `45`; we'll carry out the following steps:

- Compare 45 with `array[fibs[p]] => array[8]`. Since 45 < 80, we move p and q down two indices each, so p = 4 and q = 5.
- Next, compare 45 with `array[fibs[p]] => array[3]`. Since 45 > 16, we move p and q down one index each (p = 3, q = 4) and set the offset to 3, the index we just probed. The next index to check is offset + fibs[p] = 3 + 2 = 5.
- Finally, we compare 45 with array[5]. Since array[5] == 45, we have found x.

```
def fibo_search(array, x):
    n = len(array)
    fibs = get_fib_sequence(n)
    p, q = len(fibs) - 2, len(fibs) - 1
    offset = 0
    while q > 0:
        index = min(offset + fibs[p], n - 1)
        if x == array[index]:
            return True
        elif x < array[index]:
            p -= 2
            q -= 2
        else:
            p -= 1
            q -= 1
            offset = index
    return False

fibo_search([2, 4, 10, 16, 25, 45, 55, 65, 80, 100], 45)
```
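One way to build confidence in an index-juggling implementation like this (and to hunt for edge cases) is to cross-check it against Python's built-in membership test on many random sorted arrays. A small sketch, not part of the original solution:

```
import random

# Compare fibo_search with the built-in `in` operator on random sorted arrays
# and report how many inputs the two disagree on.
random.seed(0)
mismatches = []
for _ in range(2000):
    arr = sorted(random.sample(range(100), random.randint(1, 20)))
    x = random.randint(-5, 105)
    if fibo_search(arr, x) != (x in arr):
        mismatches.append((arr, x))

print(len(mismatches), "disagreements found")
```

If the check ever reports disagreements, the index bookkeeping around the loop's termination is the first place to look.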
github_jupyter
# Feature processing with Spark, training with BlazingText and deploying as Inference Pipeline

Typically a Machine Learning (ML) process consists of a few steps: gathering data with various ETL jobs, pre-processing the data, featurizing the dataset by incorporating standard techniques or prior knowledge, and finally training an ML model using an algorithm.

In many cases, when the trained model is used for processing real time or batch prediction requests, the model receives data in a format which needs to be pre-processed (e.g. featurized) before it can be passed to the algorithm. In the following notebook, we will demonstrate how you can build your ML Pipeline leveraging Spark Feature Transformers and the SageMaker BlazingText algorithm and, after the model is trained, deploy the Pipeline (Feature Transformer and BlazingText) as an Inference Pipeline behind a single Endpoint for real-time inference and for batch inferences using Amazon SageMaker Batch Transform.

In this notebook, we use AWS Glue to run serverless Spark. Though the notebook demonstrates the end-to-end flow on a small dataset, the setup can be seamlessly used to scale to larger datasets.

## Objective: Text Classification on DBPedia dataset

In this example, we will train the text classification model using the SageMaker `BlazingText` algorithm on the [DBPedia Ontology Dataset](https://wiki.dbpedia.org/services-resources/dbpedia-data-set-2014#2) as done by [Zhang et al](https://arxiv.org/pdf/1509.01626.pdf). The DBpedia ontology dataset is constructed by picking 14 non-overlapping classes from DBpedia 2014. It has 560,000 training samples and 70,000 testing samples. The fields we used for this dataset contain the title and abstract of each Wikipedia article.

Before passing the input data to `BlazingText`, we need to process this dataset into white-space separated tokens, have the label field in every line prefixed with `__label__`, and have all input data in a single file.

## Methodologies

The Notebook consists of a few high-level steps:

* Using AWS Glue for executing the SparkML feature processing job.
* Using SageMaker BlazingText to train on the processed dataset produced by the SparkML job.
* Building an Inference Pipeline consisting of SparkML & BlazingText models for a realtime inference endpoint.
* Building an Inference Pipeline consisting of SparkML & BlazingText models for a single Batch Transform job.

## Using AWS Glue for executing Spark jobs

We'll be running the SparkML job using [AWS Glue](https://aws.amazon.com/glue). AWS Glue is a serverless ETL service which can be used to execute standard Spark/PySpark jobs. Glue currently only supports `Python 2.7`, hence we'll write the script in `Python 2.7`.

## Permission setup for invoking AWS Glue from this Notebook

In order to enable this Notebook to run AWS Glue jobs, we need to add one additional permission to the default execution role of this notebook. We will be using SageMaker Python SDK to retrieve the default execution role and then you have to go to [IAM Dashboard](https://console.aws.amazon.com/iam/home) to edit the Role to add AWS Glue specific permission.

### Finding out the current execution role of the Notebook

We are using SageMaker Python SDK to retrieve the current role for this Notebook which needs to be enhanced.
``` # Import SageMaker Python SDK to get the Session and execution_role import sagemaker from sagemaker import get_execution_role sess = sagemaker.Session() role = get_execution_role() print(role[role.rfind('/') + 1:]) ``` ### Adding AWS Glue as an additional trusted entity to this role This step is needed if you want to pass the execution role of this Notebook while calling Glue APIs as well without creating an additional **Role**. If you have not used AWS Glue before, then this step is mandatory. If you have used AWS Glue previously, then you should have an already existing role that can be used to invoke Glue APIs. In that case, you can pass that role while calling Glue (later in this notebook) and skip this next step. On the IAM dashboard, please click on **Roles** on the left sidenav and search for this Role. Once the Role appears, click on the Role to go to its **Summary** page. Click on the **Trust relationships** tab on the **Summary** page to add AWS Glue as an additional trusted entity. Click on **Edit trust relationship** and replace the JSON with this JSON. ``` { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": [ "sagemaker.amazonaws.com", "glue.amazonaws.com" ] }, "Action": "sts:AssumeRole" } ] } ``` Once this is complete, click on **Update Trust Policy** and you are done. ## Downloading dataset and uploading to S3 SageMaker team has downloaded the dataset and uploaded to one of the S3 buckets in our account. In this notebook, we will download from that bucket and upload to your bucket so that AWS Glue can access the data. The default AWS Glue permissions we just added expects the data to be present in a bucket with the string `aws-glue`. Hence, after we download the dataset, we will create an S3 bucket in your account with a valid name and then upload the data to S3. ``` !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/dbpedia/train.csv !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/dbpedia/test.csv ``` ### Creating an S3 bucket and uploading this dataset Next we will create an S3 bucket with the `aws-glue` string in the name and upload this data to the S3 bucket. In case you want to use some existing bucket to run your Spark job via AWS Glue, you can use that bucket to upload your data provided the `Role` has access permission to upload and download from that bucket. Once the bucket is created, the following cell would also update the `train.csv` and `test.csv` files downloaded locally to this bucket under the `input/dbpedia` prefix. 
```
import boto3
import botocore
from botocore.exceptions import ClientError

boto_session = sess.boto_session
s3 = boto_session.resource('s3')
account = boto_session.client('sts').get_caller_identity()['Account']
region = boto_session.region_name
default_bucket = 'aws-glue-{}-{}'.format(account, region)

try:
    if region == 'us-east-1':
        s3.create_bucket(Bucket=default_bucket)
    else:
        s3.create_bucket(Bucket=default_bucket, CreateBucketConfiguration={'LocationConstraint': region})
except ClientError as e:
    error_code = e.response['Error']['Code']
    message = e.response['Error']['Message']
    if error_code == 'BucketAlreadyOwnedByYou':
        print ('A bucket with the same name already exists in your account - using the same bucket.')
        pass

# Uploading the training data to S3
sess.upload_data(path='train.csv', bucket=default_bucket, key_prefix='input/dbpedia')
sess.upload_data(path='test.csv', bucket=default_bucket, key_prefix='input/dbpedia')
```

## Writing the feature processing script using SparkML

The code for feature transformation using SparkML can be found in the `dbpedia_processing.py` file written in the same directory. You can go through the code itself to see how it is using standard SparkML feature transformers to define the Pipeline for featurizing and processing the data.

Once the Spark ML Pipeline `fit` and `transform` is done, we are transforming the `train` and `test` files and writing them in the format `BlazingText` expects before uploading to S3.

### Serializing the trained Spark ML Model with [MLeap](https://github.com/combust/mleap)

Apache Spark is best suited for batch processing workloads. In order to use the Spark ML model we trained for low latency inference, we need to use the MLeap library to serialize it to an MLeap bundle and later use the [SageMaker SparkML Serving](https://github.com/aws/sagemaker-sparkml-serving-container) to perform realtime and batch inference.

By using the `SerializeToBundle()` method from MLeap in the script, we are serializing the ML Pipeline into an MLeap bundle and uploading it to S3 in `tar.gz` format as SageMaker expects.

## Uploading the code and other dependencies to S3 for AWS Glue

Unlike SageMaker, in order to run your code in AWS Glue, you do not need to prepare a Docker image. You can upload your code and dependencies directly to S3 and pass those locations while invoking the Glue job.

### Upload the featurizer script to S3

We will be uploading the `dbpedia_processing.py` script to S3 now so that Glue can use it to run the PySpark job. You can replace it with your own script if needed. If your code has multiple files, you need to zip those files and upload to S3 instead of uploading a single file like it's being done here.

```
script_location = sess.upload_data(path='dbpedia_processing.py', bucket=default_bucket, key_prefix='codes')
```

### Upload MLeap dependencies to S3

For our job, we will also have to pass MLeap dependencies to Glue. MLeap is an additional library we are using which does not come bundled with default Spark. Similar to most of the packages in the Spark ecosystem, MLeap is also implemented as a Scala package with a front-end wrapper written in Python so that it can be used from PySpark. We need to make sure that the MLeap Python library as well as the JAR is available within the Glue job environment. In the following cell, we will download the MLeap Python dependency & JAR from a SageMaker hosted bucket and upload to the S3 bucket we created above in your account.
If you are using some other Python libraries like `nltk` in your code, you need to download the wheel file from PyPI and upload to S3 in the same way. At this point, Glue only supports passing pure Python libraries in this way (e.g. you can not pass `Pandas` or `OpenCV`). However, you can use `NumPy` & `SciPy` without having to pass these as packages because these are pre-installed in the Glue environment.

```
!wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/0.9.6/python/python.zip
!wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/0.9.6/jar/mleap_spark_assembly.jar

python_dep_location = sess.upload_data(path='python.zip', bucket=default_bucket, key_prefix='dependencies/python')
jar_dep_location = sess.upload_data(path='mleap_spark_assembly.jar', bucket=default_bucket, key_prefix='dependencies/jar')
```

## Defining output locations for the data and model

Next we define the output location where the transformed dataset should be uploaded. We also specify a model location where the MLeap serialized model will be uploaded. These locations should be consumed as part of the Spark script using the `getResolvedOptions` method of the AWS Glue library (see `dbpedia_processing.py` for details).

By designing our code in this way, we can re-use these variables as part of the SageMaker training job (details below).

```
from time import gmtime, strftime
import time

timestamp_prefix = strftime("%Y-%m-%d-%H-%M-%S", gmtime())

# Input location of the data; we uploaded our train.csv file to the input key previously
s3_input_bucket = default_bucket
s3_input_key_prefix = 'input/dbpedia'

# Output location of the data. The input data will be split, transformed, and
# uploaded to output/train and output/validation
s3_output_bucket = default_bucket
s3_output_key_prefix = timestamp_prefix + '/dbpedia'

# the MLeap serialized SparkML model will be uploaded to output/mleap
s3_model_bucket = default_bucket
s3_model_key_prefix = s3_output_key_prefix + '/mleap'
```

### Calling Glue APIs

Next we'll create a Glue client via Boto so that we can invoke the `create_job` API of Glue. The `create_job` API will create a job definition which can be used to execute your jobs in Glue. The job definition created here is mutable. While creating the job, we are also passing the code location as well as the dependencies location to Glue. The `AllocatedCapacity` parameter controls the hardware resources that Glue will use to execute this job. It is measured in units of `DPU`. For more information on `DPU`, please see [here](https://docs.aws.amazon.com/glue/latest/dg/add-job.html).

```
glue_client = boto_session.client('glue')
job_name = 'sparkml-dbpedia-' + timestamp_prefix
response = glue_client.create_job(
    Name=job_name,
    Description='PySpark job to featurize the DBPedia dataset',
    Role=role, # you can pass your existing AWS Glue role here if you have used Glue before
    ExecutionProperty={
        'MaxConcurrentRuns': 1
    },
    Command={
        'Name': 'glueetl',
        'ScriptLocation': script_location
    },
    DefaultArguments={
        '--job-language': 'python',
        '--extra-jars' : jar_dep_location,
        '--extra-py-files': python_dep_location
    },
    AllocatedCapacity=10,
    Timeout=60,
)
glue_job_name = response['Name']
print(glue_job_name)
```

The aforementioned job will be executed now by calling the `start_job_run` API. This API creates an immutable run/execution corresponding to the job definition created above. We will require the `job_run_id` for the particular job execution to check for status. We'll pass the data and model locations as part of the job execution parameters.
``` job_run_id = glue_client.start_job_run(JobName=job_name, Arguments = { '--S3_INPUT_BUCKET': s3_input_bucket, '--S3_INPUT_KEY_PREFIX': s3_input_key_prefix, '--S3_OUTPUT_BUCKET': s3_output_bucket, '--S3_OUTPUT_KEY_PREFIX': s3_output_key_prefix, '--S3_MODEL_BUCKET': s3_model_bucket, '--S3_MODEL_KEY_PREFIX': s3_model_key_prefix })['JobRunId'] print(job_run_id) ``` ### Checking Glue job status Now we will check for the job status to see if it has `succeeded`, `failed` or `stopped`. Once the job is succeeded, we have the transformed data into S3 in CSV format which we can use with `BlazingText` for training. If the job fails, you can go to [AWS Glue console](https://us-west-2.console.aws.amazon.com/glue/home), click on **Jobs** tab on the left, and from the page, click on this particular job and you will be able to find the CloudWatch logs (the link under **Logs**) link for these jobs which can help you to see what exactly went wrong in the `spark-submit` call. ``` job_run_status = glue_client.get_job_run(JobName=job_name,RunId=job_run_id)['JobRun']['JobRunState'] while job_run_status not in ('FAILED', 'SUCCEEDED', 'STOPPED'): job_run_status = glue_client.get_job_run(JobName=job_name,RunId=job_run_id)['JobRun']['JobRunState'] print (job_run_status) time.sleep(30) ``` ## Using SageMaker BlazingText to train on the processed dataset produced by SparkML job Now we will use SageMaker `BlazingText` algorithm to train a text classification model this dataset. We already know the S3 location where the preprocessed training data was uploaded as part of the Glue job. ### We need to retrieve the BlazingText algorithm image ``` from sagemaker.amazon.amazon_estimator import get_image_uri training_image = get_image_uri(sess.boto_region_name, 'blazingtext', repo_version="latest") print (training_image) ``` ### Next BlazingText model parameters and dataset details will be set properly We have parameterized the notebook so that the same data location which was used in the PySpark script can now be passed to `BlazingText` Estimator as well. ``` s3_train_data = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'train') s3_validation_data = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'validation') s3_output_location = 's3://{}/{}/{}'.format(s3_output_bucket, s3_output_key_prefix, 'bt_model') bt_model = sagemaker.estimator.Estimator(training_image, role, train_instance_count=1, train_instance_type='ml.c4.xlarge', train_volume_size = 20, train_max_run = 3600, input_mode= 'File', output_path=s3_output_location, sagemaker_session=sess) bt_model.set_hyperparameters(mode="supervised", epochs=10, min_count=2, learning_rate=0.05, vector_dim=10, early_stopping=True, patience=4, min_epochs=5, word_ngrams=2) train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated', content_type='text/plain', s3_data_type='S3Prefix') data_channels = {'train': train_data, 'validation': validation_data} ``` ### Finally BlazingText training will be performed ``` bt_model.fit(inputs=data_channels, logs=True) ``` # Building an Inference Pipeline consisting of SparkML & BlazingText models for a realtime inference endpoint Next we will proceed with deploying the models in SageMaker to create an Inference Pipeline. You can create an Inference Pipeline with upto five containers. 
Deploying a model in SageMaker requires two components:

* Docker image residing in ECR.
* Model artifacts residing in S3.

**SparkML**

For SparkML, the Docker image for MLeap based SparkML serving is provided by the SageMaker team. For more information on this, please see [SageMaker SparkML Serving](https://github.com/aws/sagemaker-sparkml-serving-container). The MLeap-serialized SparkML model was uploaded to S3 as part of the SparkML job we executed in AWS Glue.

**BlazingText**

For BlazingText, we will use the same Docker image we used for training. The model artifacts for BlazingText were uploaded as part of the training job we just ran.

### Creating the Endpoint with both containers

Next we'll create a SageMaker inference endpoint with both the `sagemaker-sparkml-serving` & `BlazingText` containers. For this, we will first create a `PipelineModel` which will consist of both the `SparkML` model as well as the `BlazingText` model in the right sequence.

### Passing the schema of the payload via environment variable

The SparkML serving container needs to know the schema of the request that'll be passed to it while calling the `predict` method. To spare you from having to pass the schema with every request, `sagemaker-sparkml-serving` allows you to pass it via an environment variable while creating the model definitions. This schema definition will be required in our next step for creating a model. We will see later that you can overwrite this schema on a per request basis by passing it as part of the individual request payload as well.

```
import json

schema = {
    "input": [
        {
            "name": "abstract",
            "type": "string"
        }
    ],
    "output": {
        "name": "tokenized_abstract",
        "type": "string",
        "struct": "array"
    }
}
schema_json = json.dumps(schema)
print(schema_json)
```

### Creating a `PipelineModel` which comprises the SparkML and BlazingText model in the right order

Next we'll create a SageMaker `PipelineModel` with SparkML and BlazingText. The `PipelineModel` will ensure that both the containers get deployed behind a single API endpoint in the correct order. The same model would later be used for Batch Transform as well to ensure that a single job is sufficient to do prediction against the Pipeline.

Here, during the `Model` creation for SparkML, we will pass the schema definition that we built in the previous cell.

### Controlling the output format from `sagemaker-sparkml-serving` to the next container

By default, `sagemaker-sparkml-serving` returns an output in `CSV` format. However, BlazingText does not understand CSV; it expects a different format. In order for `sagemaker-sparkml-serving` to emit the output with the right format, we need to pass a second environment variable `SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT` with the value `application/jsonlines;data=text` to ensure that the `sagemaker-sparkml-serving` container emits its response in the proper format which BlazingText can parse. For more information on the different output formats `sagemaker-sparkml-serving` supports, please check the documentation pointed above.
``` from sagemaker.model import Model from sagemaker.pipeline import PipelineModel from sagemaker.sparkml.model import SparkMLModel sparkml_data = 's3://{}/{}/{}'.format(s3_model_bucket, s3_model_key_prefix, 'model.tar.gz') # passing the schema defined above by using an environment variable that sagemaker-sparkml-serving understands sparkml_model = SparkMLModel(model_data=sparkml_data, env={'SAGEMAKER_SPARKML_SCHEMA' : schema_json, 'SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT': "application/jsonlines;data=text"}) bt_model = Model(model_data=bt_model.model_data, image=training_image) model_name = 'inference-pipeline-' + timestamp_prefix sm_model = PipelineModel(name=model_name, role=role, models=[sparkml_model, bt_model]) ``` ### Deploying the `PipelineModel` to an endpoint for realtime inference Next we will deploy the model we just created with the `deploy()` method to start an inference endpoint and we will send some requests to the endpoint to verify that it works as expected. ``` endpoint_name = 'inference-pipeline-ep-' + timestamp_prefix sm_model.deploy(initial_instance_count=1, instance_type='ml.c4.xlarge', endpoint_name=endpoint_name) ``` ### Invoking the newly created inference endpoint with a payload to transform the data Now we will invoke the endpoint with a valid payload that `sagemaker-sparkml-serving` can recognize. There are three ways in which input payload can be passed to the request: * Pass it as a valid CSV string. In this case, the schema passed via the environment variable will be used to determine the schema. For CSV format, every column in the input has to be a basic datatype (e.g. int, double, string) and it can not be a Spark `Array` or `Vector`. * Pass it as a valid JSON string. In this case as well, the schema passed via the environment variable will be used to infer the schema. With JSON format, every column in the input can be a basic datatype or a Spark `Vector` or `Array` provided that the corresponding entry in the schema mentions the correct value. * Pass the request in JSON format along with the schema and the data. In this case, the schema passed in the payload will take precedence over the one passed via the environment variable (if any). #### Passing the payload in CSV format We will first see how the payload can be passed to the endpoint in CSV format. ``` from sagemaker.predictor import json_serializer, csv_serializer, json_deserializer, RealTimePredictor from sagemaker.content_types import CONTENT_TYPE_CSV, CONTENT_TYPE_JSON payload = "Convair was an american aircraft manufacturing company which later expanded into rockets and spacecraft." predictor = RealTimePredictor(endpoint=endpoint_name, sagemaker_session=sess, serializer=csv_serializer, content_type=CONTENT_TYPE_CSV, accept='application/jsonlines') print(predictor.predict(payload)) ``` #### Passing the payload in JSON format We will now pass a different payload in JSON format. ``` payload = {"data": ["Berwick secondary college is situated in the outer melbourne metropolitan suburb of berwick ."]} predictor = RealTimePredictor(endpoint=endpoint_name, sagemaker_session=sess, serializer=json_serializer, content_type=CONTENT_TYPE_JSON) print(predictor.predict(payload)) ``` ### [Optional] Deleting the Endpoint If you do not plan to use this endpoint, then it is a good practice to delete the endpoint so that you do not incur the cost of running it. 
``` sm_client = boto_session.client('sagemaker') sm_client.delete_endpoint(EndpointName=endpoint_name) ``` # Building an Inference Pipeline consisting of SparkML & BlazingText models for a single Batch Transform job SageMaker Batch Transform also supports chaining multiple containers together when deploying an Inference Pipeline and performing a single Batch Transform job to transform your data for a batch use-case similar to the real-time use-case we have seen above. ### Preparing data for Batch Transform Batch Transform requires data in the same format described above, with one CSV or JSON being per line. For this notebook, SageMaker team has created a sample input in CSV format which Batch Transform can process. The input is a simple CSV file with one input string per line. Next we will download a sample of this data from one of the SageMaker buckets (named `batch_input_dbpedia.csv`) and upload to your S3 bucket. We will also inspect first five rows of the data post downloading. ``` !wget https://s3-us-west-2.amazonaws.com/sparkml-mleap/data/batch_input_dbpedia.csv !printf "\n\nShowing first two lines\n\n" !head -n 3 batch_input_dbpedia.csv !printf "\n\nAs we can see, it is just one input string per line.\n\n" batch_input_loc = sess.upload_data(path='batch_input_dbpedia.csv', bucket=default_bucket, key_prefix='batch') ``` ### Invoking the Transform API to create a Batch Transform job Next we will create a Batch Transform job using the `Transformer` class from Python SDK to create a Batch Transform job. ``` input_data_path = 's3://{}/{}/{}'.format(default_bucket, 'batch', 'batch_input_dbpedia.csv') output_data_path = 's3://{}/{}/{}'.format(default_bucket, 'batch_output/dbpedia', timestamp_prefix) transformer = sagemaker.transformer.Transformer( model_name = model_name, instance_count = 1, instance_type = 'ml.m4.xlarge', strategy = 'SingleRecord', assemble_with = 'Line', output_path = output_data_path, base_transform_job_name='serial-inference-batch', sagemaker_session=sess, accept = CONTENT_TYPE_CSV ) transformer.transform(data = input_data_path, content_type = CONTENT_TYPE_CSV, split_type = 'Line') transformer.wait() ```
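Once the Transform job completes, the predictions land under `output_data_path`. A rough sketch of fetching them for inspection (this assumes SageMaker's default naming, where each input file gets a corresponding output object with a `.out` suffix; adjust the key if your output layout differs):

```
# Assumed sketch: list and read the Batch Transform output written to S3
s3_client = boto_session.client('s3')
output_prefix = '{}/{}'.format('batch_output/dbpedia', timestamp_prefix)

response = s3_client.list_objects_v2(Bucket=default_bucket, Prefix=output_prefix)
for obj in response.get('Contents', []):
    print(obj['Key'])

# Read the (assumed) output object for our single input file
body = s3_client.get_object(Bucket=default_bucket,
                            Key=output_prefix + '/batch_input_dbpedia.csv.out')['Body']
print(body.read().decode('utf-8')[:500])
```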
github_jupyter
# Polynomial Regression ``` import numpy as np import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['axes.titlesize'] = 14 plt.rcParams['legend.fontsize'] = 12 plt.rcParams['figure.figsize'] = (8, 5) %config InlineBackend.figure_format = 'retina' ``` ### Linear models $y = \beta_0 + \beta_1 x_1 + \beta_2 x_2 + \dots + \beta_n x_n + \epsilon$ $\begin{bmatrix} \vdots \\ y \\ \vdots \end{bmatrix} = \beta_0 + \beta_1 \begin{bmatrix} \vdots \\ x_1 \\ \vdots \end{bmatrix} + \beta_2 \begin{bmatrix} \vdots \\ x_2 \\ \vdots \end{bmatrix} + \dots + \beta_n \begin{bmatrix} \vdots \\ x_n \\ \vdots \end{bmatrix} + \begin{bmatrix} \vdots \\ \epsilon \\ \vdots \end{bmatrix}$ $X = \begin{bmatrix} \vdots & \vdots & & \vdots \\ x_1 & x_2 & \dots & x_n \\ \vdots & \vdots & & \vdots \end{bmatrix}$ ### A simple linear model $y = \beta_1 x_1 + \beta_2 x_2 + \epsilon$ ### Extending this to a $2^{nd}$ degree polynomial model $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_1^2 + \beta_4 x_1 x_2 + \beta_5 x_2^2 + \epsilon$ $x_1 x_2$ is an interaction term between $x_1$ and $x_2$ ### Reparameterize the model $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_1^2 + \beta_4 x_1 x_2 + \beta_5 x_2^2 + \epsilon$ $\begin{matrix} x_3 & \rightarrow & x_1^2 \\ x_4 & \rightarrow & x_1 x_2 \\ x_5 & \rightarrow & x_2^2 \end{matrix}$ $y = \beta_1 x_1 + \beta_2 x_2 + \beta_3 x_3 + \beta_4 x_4 + \beta_5 x_5 + \epsilon$ ### !!! But that's just a linear model ### Given the matrix of measured features $X$: $X = \begin{bmatrix} \vdots & \vdots \\ x_1 & x_2 \\ \vdots & \vdots \end{bmatrix}$ ### All we need to do is fit a linear model using the following feature matrix $X_{poly}$: $X_{poly} = \begin{bmatrix} \vdots & \vdots & \vdots & \vdots & \vdots \\ x_1 & x_2 & x_1^2 & x_1 x_2 & x_2^2 \\ \vdots & \vdots & \vdots & \vdots & \vdots \end{bmatrix}$ ## Some experimental data: Temperature vs. Yield ``` temperature = np.array([50, 50, 50, 70, 70, 70, 80, 80, 80, 90, 90, 90, 100, 100, 100]) experimental_yield = np.array([3.3, 2.8, 2.9, 2.3, 2.6, 2.1, 2.5, 2.9, 2.4, 3, 3.1, 2.8, 3.3, 3.5, 3]) plt.plot(temperature, experimental_yield, 'o') plt.xlabel('Temperature') plt.ylabel('Experimental Yield'); ``` ### Rearranging the data for use with sklearn ``` X = temperature.reshape([-1,1]) y = experimental_yield X ``` # Fit yield vs. temperature data with a linear model ``` from sklearn.linear_model import LinearRegression ols_model = LinearRegression() ols_model.fit(X, y) plt.plot(temperature, experimental_yield, 'o') plt.plot(temperature, ols_model.predict(X), '-', label='OLS') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` # Fit yield vs. temperature data with a $2^{nd}$ degree polynomial model ``` from sklearn.preprocessing import PolynomialFeatures poly2 = PolynomialFeatures(degree=2) X_poly2 = poly2.fit_transform(X) X.shape, X_poly2.shape poly2_model = LinearRegression() poly2_model.fit(X_poly2, y) plt.plot(temperature, experimental_yield, 'o') plt.plot(temperature, ols_model.predict(X), '-', label='OLS') plt.plot(temperature, poly2_model.predict(X_poly2), '-', label='Poly2') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ``` Note that you could very well use a regularization model such as Ridge or Lasso instead of the simple ordinary least squares LinearRegression model. In this case, it doesn't matter too much becuase we have only one feature (Temperature). 
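As a concrete illustration of that note, here is a minimal sketch of the same 2nd-degree polynomial fit with Ridge swapped in for `LinearRegression`; the `alpha` value is an arbitrary assumption rather than a tuned choice:

```
from sklearn.linear_model import Ridge

# Same polynomial features, regularized linear model
ridge2_model = Ridge(alpha=1.0)
ridge2_model.fit(X_poly2, y)

plt.plot(temperature, experimental_yield, 'o')
plt.plot(temperature, poly2_model.predict(X_poly2), '-', label='Poly2 (OLS)')
plt.plot(temperature, ridge2_model.predict(X_poly2), '--', label='Poly2 (Ridge)')
plt.xlabel('Temperature')
plt.ylabel('Experimental Yield')
plt.legend();
```

This lets you see directly how much (or how little) the regularization changes the fit for this single-feature problem.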
# Smoothing the plot of the model fit

```
X_fit = np.arange(50, 101).reshape([-1, 1])
X_fit_poly2 = poly2.fit_transform(X_fit)

plt.plot(temperature, experimental_yield, 'o')
plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS')
plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2')
plt.xlabel('Temperature')
plt.ylabel('Experimental Yield')
plt.legend();
```

# Fit yield vs. temperature data with a $3^{rd}$ degree polynomial model

```
poly3 = PolynomialFeatures(degree=3)
X_poly3 = poly3.fit_transform(X)

X.shape, X_poly3.shape

poly3_model = LinearRegression()
poly3_model.fit(X_poly3, y)

X_fit_poly3 = poly3.fit_transform(X_fit)

plt.plot(temperature, experimental_yield, 'o')
plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS')
plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2')
plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3')
plt.xlabel('Temperature')
plt.ylabel('Experimental Yield')
plt.legend();
```

### Polynomial fit is clearly better than a linear fit, but which degree polynomial should we use?

### Why not try a range of polynomial degrees, and see which one is best?

### But how do we determine which degree is best?

### We could use cross validation to determine the degree of polynomial that is most likely to best explain new data.

### Ideally, we would:

1. Split the data into training and testing sets
2. Perform cross validation on the training set to determine the best choice of polynomial degree
3. Fit the chosen model to the training set
4. Evaluate it on the withheld testing set

However, we have so little data that doing all of these splits is likely to leave individual partitions with subsets of data that are no longer representative of the relationship between temperature and yield.

```
plt.plot(temperature, experimental_yield, 'o')
plt.xlabel('Temperature')
plt.ylabel('Experimental Yield');
```

Thus, I'll forgo splitting the data into training and testing sets, and we'll train our model on the entire dataset. This is not ideal of course, and it means we'll have to simply hope that our model generalizes to new data.

I will use 5-fold cross validation to tune the polynomial degree hyperparameter. You might also want to explore 10-fold or leave one out cross validation.

```
from sklearn.model_selection import cross_validate

cv_mse = []
for degree in [2, 3]:
    poly = PolynomialFeatures(degree=degree)
    X_poly = poly.fit_transform(X)
    model = LinearRegression()
    results = cross_validate(model, X_poly, y, cv=5, scoring='neg_mean_squared_error')
    cv_mse.append(-results['test_score'])

cv_mse

np.mean(cv_mse[0]), np.mean(cv_mse[1])
```

Slightly better mean validation error for $3^{rd}$ degree polynomial.

```
plt.plot(temperature, experimental_yield, 'o')
plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS')
plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2')
plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3')
plt.xlabel('Temperature')
plt.ylabel('Experimental Yield')
plt.legend();
```

Despite the lower validation error for the $3^{rd}$ degree polynomial, we might still opt to stick with a $2^{nd}$ degree polynomial model. Why might we want to do that?

Less flexible models are more likely to generalize to new data because they are less likely to overfit noise.

Another important question to ask is whether the slight difference in mean validation error between $2^{nd}$ and $3^{rd}$ degree polynomial models is enough to really distinguish between the models?
One thing we can do is look at how variable the validation errors are across the various validation partitions. ``` cv_mse binedges = np.linspace(0, np.max(cv_mse[0]), 11) plt.hist(cv_mse[0], binedges, alpha=0.5, label='Poly2') plt.hist(cv_mse[1], binedges, alpha=0.5, label='Poly3') plt.xlabel('Validation MSE') plt.ylabel('Counts') plt.legend(); ``` Is the extra flexibility of the $3^{rd}$ degree polynomial model worth it, or is it more likely to overfit noise in our data and less likely to generalize to new measurements? How dependent are our results on how we partitioned the data? Repeat the above using 10-fold cross validation. Of course, more measurements, including measures at 60 degrees, would help you to better distinguish between these models. ``` plt.plot(temperature, experimental_yield, 'o') plt.plot(X_fit, ols_model.predict(X_fit), '-', label='OLS') plt.plot(X_fit, poly2_model.predict(X_fit_poly2), '-', label='Poly2') plt.plot(X_fit, poly3_model.predict(X_fit_poly3), '-', label='Poly3') plt.xlabel('Temperature') plt.ylabel('Experimental Yield') plt.legend(); ```
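As a starting point for that exercise, here is a minimal sketch of the same degree comparison with 10-fold cross validation (nothing here is tuned; it simply mirrors the 5-fold loop above with `cv=10`):

```
from sklearn.model_selection import cross_validate
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression

cv10_mse = {}
for degree in [2, 3]:
    X_poly = PolynomialFeatures(degree=degree).fit_transform(X)
    results = cross_validate(LinearRegression(), X_poly, y,
                             cv=10, scoring='neg_mean_squared_error')
    cv10_mse[degree] = -results['test_score']

{degree: scores.mean() for degree, scores in cv10_mse.items()}
```

With folds this small, expect the fold-to-fold variation to be even larger than in the 5-fold case.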
github_jupyter
```
import pickle
import pandas as pd
import numpy as np
import os, sys, gc
from plotnine import *
import plotnine

from tqdm import tqdm_notebook
import seaborn as sns
import warnings
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib as mpl
from matplotlib import rc
import re
from matplotlib.ticker import PercentFormatter
import datetime
from math import log  # for the IDF calculation

path = 'C:/Users/User/Documents/T아카데미/T 아카데미/input/'

# pd.read_json: load a json-format file into a dataframe
magazine = pd.read_json(path + 'magazine.json', lines=True) # lines = True : Read the file as a json object per line.
metadata = pd.read_json(path + 'metadata.json', lines=True)
users = pd.read_json(path + 'users.json', lines=True)

%%time
import itertools
from itertools import chain
import glob
import os

input_read_path = path + 'read/read/'
# os.listdir: list every file in the given directory
file_list = os.listdir(input_read_path)
exclude_file_lst = ['read.tar', '.2019010120_2019010121.un~']

read_df_list = []
for file in tqdm_notebook(file_list):
    # exception handling: skip the excluded files
    if file in exclude_file_lst:
        continue
    else:
        file_path = input_read_path + file
        df_temp = pd.read_csv(file_path, header=None, names=['raw'])
        # extract the read time window (from, to) from the file name
        df_temp['from'] = file.split('_')[0]
        df_temp['to'] = file.split('_')[1]
        read_df_list.append(df_temp)

read_df = pd.concat(read_df_list)

# preprocess the read logs so that each row is a single user - article pair
read_df['user_id'] = read_df['raw'].apply(lambda x: x.split(' ')[0])
read_df['article_id'] = read_df['raw'].apply(lambda x: x.split(' ')[1:])

def chainer(s):
    return list(itertools.chain.from_iterable(s))

read_cnt_by_user = read_df['article_id'].map(len)
read_rowwise = pd.DataFrame({'from': np.repeat(read_df['from'], read_cnt_by_user),
                             'to': np.repeat(read_df['to'], read_cnt_by_user),
                             'user_id': np.repeat(read_df['user_id'], read_cnt_by_user),
                             'article_id': chainer(read_df['article_id'])})

read_rowwise.reset_index(drop=True, inplace=True)

from datetime import datetime

metadata['reg_datetime'] = metadata['reg_ts'].apply(lambda x : datetime.fromtimestamp(x/1000.0))
metadata.loc[metadata['reg_datetime'] == metadata['reg_datetime'].min(), 'reg_datetime'] = datetime(2090, 12, 31)
metadata['reg_dt'] = metadata['reg_datetime'].dt.date
metadata['type'] = metadata['magazine_id'].apply(lambda x : '개인' if x == 0.0 else '매거진')
metadata['reg_dt'] = pd.to_datetime(metadata['reg_dt'])

read_rowwise = read_rowwise.merge(metadata[['id', 'reg_dt']], how='left', left_on='article_id', right_on='id')
read_rowwise = read_rowwise[read_rowwise['article_id'] != '']

# store the list of articles each user has read
read_total = pd.DataFrame(read_rowwise.groupby(['user_id'])['article_id'].unique()).reset_index()
read_total.columns = ['user_id', 'article_list']
```

## Content-based recommendation system

- An approach that uses the words of each article
- TF-IDF format
  - index: document id
  - column: word

However, with roughly 640,000 documents in total, and (as reading the data.0 file shows) a very large number of words as well, this matrix is too big to use directly.

### Workaround

To get around this problem, team NAFMA, the first-place team of this competition, built the embedding from each article's keywords instead.

- Reference: https://github.com/JungoKim/brunch_nafma

```
from sklearn.feature_extraction.text import TfidfVectorizer

metadata = metadata[metadata['keyword_list'].notnull()].reset_index()
metadata = metadata[metadata['reg_dt'] >= '2019-01-01']

article2idx = {}
for i, l in enumerate(metadata['id'].unique()):
    article2idx[l] = i

idx2article = {i: item for item, i in article2idx.items()}
articleidx = metadata['articleidx'] = metadata['id'].apply(lambda x: article2idx[x]).values

import scipy

docs = metadata['keyword_list'].apply(lambda x: ' '.join(x)).values
tfidv = TfidfVectorizer(use_idf=True, smooth_idf=False, norm=None).fit(docs)
tfidv_df = scipy.sparse.csr_matrix(tfidv.transform(docs))
tfidv_df = tfidv_df.astype(np.float32)
print(tfidv_df.shape)
```

We can see that the data is in sparse form.

```
from sklearn.metrics.pairwise import cosine_similarity

# this can run into memory problems
cos_sim = cosine_similarity(tfidv_df, tfidv_df)

valid = pd.read_csv(path + '/predict/predict/dev.users', header=None)

%%time
popular_rec_model = read_rowwise['article_id'].value_counts().index[0:100]

top_n = 100
with open('./recommend.txt', 'w') as f:
    for user in tqdm_notebook(valid[0].values):
        seen = chainer(read_total[read_total['user_id'] == user]['article_list'])
        for seen_id in seen:
            # articles read before 2019, or missing from the metadata, have no similarity scores
            cos_sim_sum = np.zeros(len(cos_sim))
            try:
                cos_sim_sum += cos_sim[article2idx[seen_id]]
            except:
                pass
        recs = []
        for rec in cos_sim_sum.argsort()[-(top_n+100):][::-1]:
            if (idx2article[rec] not in seen) & (len(recs) < 100):
                recs.append(idx2article[rec])
        f.write('%s %s\n' % (user, ' '.join(recs[0:100])))
```

![](https://github.com/choco9966/T-academy-Recommendation/blob/master/figure/Contents_Based_Score.PNG?raw=true)
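If the full dense similarity matrix does not fit in memory (the issue flagged in the comment above), one workaround is to compute similarities for a chunk of articles at a time and keep only the top-k neighbours per article. A sketch under that assumption; `chunk_size` and `top_k` are arbitrary values, not tuned choices:

```
from sklearn.metrics.pairwise import cosine_similarity

def top_k_similar(tfidf_matrix, top_k=100, chunk_size=1000):
    """For each article, return the indices of its top_k most similar articles
    (the article itself appears first), without building the full N x N matrix."""
    n = tfidf_matrix.shape[0]
    top_idx = np.zeros((n, top_k), dtype=np.int32)
    for start in range(0, n, chunk_size):
        end = min(start + chunk_size, n)
        sims = cosine_similarity(tfidf_matrix[start:end], tfidf_matrix)  # shape (chunk, n)
        top_idx[start:end] = np.argsort(-sims, axis=1)[:, :top_k]
    return top_idx

# top100 = top_k_similar(tfidv_df)
```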
github_jupyter
# Python Functions

```
import numpy as np
```

## Custom functions

### Anatomy

name, arguments, docstring, body, return statement

```
def func_name(arg1, arg2):
    """Docstring starts with a short description.

    May have more information here.

    arg1 = something
    arg2 = something

    Returns something

    Example usage:

    func_name(1, 2)
    """

    result = arg1 + arg2
    return result

help(func_name)
```

### Function arguments

place, keyword, keyword-only, defaults, mutable and immutable arguments

```
def f(a, b, c, *args, **kwargs):
    return a, b, c, args, kwargs

f(1, 2, 3, 4, 5, 6, x=7, y=8, z=9)

def g(a, b, c, *, x, y, z):
    return a, b, c, x, y, z

try:
    g(1,2,3,4,5,6)
except TypeError as e:
    print(e)

g(1,2,3,x=4,y=5,z=6)

def h(a=1, b=2, c=3):
    return a, b, c

h()
h(b=9)
h(7,8,9)
```

### Default mutable argument

binding is fixed at function definition, the default=None idiom

```
def f(a, x=[]):
    x.append(a)
    return x

f(1)
f(2)

def f(a, x=None):
    if x is None:
        x = []
    x.append(a)
    return x

f(1)
f(2)
```

## Pure functions

deterministic, no side effects

```
def f1(x):
    """Pure."""
    return x**2

def f2(x):
    """Pure if we ignore local state change.

    The x in the function behaves like a copy.
    """
    x = x**2
    return x

def f3(x):
    """Impure if x is mutable. Augmented assignment is an in-place operation for mutable structures."""
    x **= 2
    return x

a = 2
b = np.array([1,2,3])

f1(a), a
f1(b), b
f2(a), a
f2(b), b
f3(a), a
f3(b), b

def f4():
    """Stochastic functions are technically impure since a global seed is changed between function calls."""
    import random
    return random.randint(0,10)

f4(), f4(), f4()
```

## Recursive functions

Euclidean GCD algorithm

```
gcd(a, 0) = a
gcd(a, b) = gcd(b, a mod b)
```

```
def factorial(n):
    """Simple recursive function."""
    if n == 0:
        return 1
    else:
        return n * factorial(n-1)

factorial(4)

def factorial1(n):
    """Non-recursive version."""
    s = 1
    for i in range(1, n+1):
        s *= i
    return s

factorial1(4)

def gcd(a, b):
    if b == 0:
        return a
    else:
        return gcd(b, a % b)

gcd(16, 24)
```

## Generators

yield and laziness, infinite streams

```
def count(n=0):
    while True:
        yield n
        n += 1

for i in count(10):
    print(i)
    if i >= 15:
        break

from itertools import islice

list(islice(count(), 10, 15))

def updown(n):
    yield from range(n)
    yield from range(n, 0, -1)

updown(5)
list(updown(5))
```

## First class functions

functions as arguments, functions as return values

```
def double(x):
    return x*2

def twice(x, func):
    return func(func(x))

twice(3, double)
```

Example from the standard library

```
xs = 'banana apple guava'.split()
xs
sorted(xs)
sorted(xs, key=lambda s: s.count('a'))

def f(n):
    def g():
        print("hello")
    def h():
        print("goodbye")
    if n == 0:
        return g
    else:
        return h

g = f(0)
g()
h = f(1)
h()
```

## Function dispatch

Poor man's switch statement

```
def add(x, y):
    return x + y

def mul(x, y):
    return x * y

ops = {
    'a': add,
    'm': mul
}

items = zip('aammaammam', range(10), range(10))

for item in items:
    key, x, y = item
    op = ops[key]
    print(key, x, y, op(x, y))
```

## Closure

Capture of argument in enclosing scope

```
def f(x):
    def g(y):
        return x + y
    return g

f1 = f(0)
f2 = f(10)

f1(5), f2(5)
```

## Decorators

A timing decorator

```
def timer(f):
    import time
    def g(*args, **kwargs):
        tic = time.time()
        res = f(*args, **kwargs)
        toc = time.time()
        return res, toc-tic
    return g

def f(n):
    s = 0
    for i in range(n):
        s += i
    return s

timed_f = timer(f)
timed_f(100000)
```

Decorator syntax

```
@timer
def g(n):
    s = 0
    for i in range(n):
        s += i
    return s

g(100000)
```

## Anonymous functions

Short, one-use lambdas

```
f = lambda x: x**2
f(3)

g = lambda x, y: x+y
g(3,4)
```
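One optional refinement to the timing decorator shown earlier: wrapping the inner function with `functools.wraps` preserves the decorated function's name and docstring, which the plain version loses. A small sketch (the decorated function below is just an illustration):

```
from functools import wraps
import time

def timer2(f):
    @wraps(f)
    def g(*args, **kwargs):
        tic = time.time()
        res = f(*args, **kwargs)
        toc = time.time()
        return res, toc - tic
    return g

@timer2
def summate(n):
    """Sum the first n integers."""
    return sum(range(n))

summate.__name__, summate.__doc__, summate(100000)
```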
## Map, filter and reduce

Functional building blocks

```
xs = range(10)

list(map(lambda x: x**2, xs))
list(filter(lambda x: x%2 == 0, xs))

from functools import reduce

reduce(lambda x, y: x+y, xs)
reduce(lambda x, y: x+y, xs, 100)
```

## Functional modules in the standard library

itertools, functools and operator

```
import operator as op

reduce(op.add, range(10))

import itertools as it

list(it.islice(it.cycle([1,2,3]), 1, 10))
list(it.permutations('abc', 2))
list(it.combinations('abc', 2))

from functools import partial, lru_cache

def f(a, b, c):
    return a + b + c

g = partial(f, b = 2, c=3)
g(1)

def fib(n, trace=False):
    if trace:
        print("fib(%d)" % n, end=',')
    if n <= 2:
        return 1
    else:
        return fib(n-1, trace) + fib(n-2, trace)

fib(10, True)

%timeit -r1 -n100 fib(20)

@lru_cache(3)
def fib1(n, trace=False):
    if trace:
        print("fib(%d)" % n, end=',')
    if n <= 2:
        return 1
    else:
        return fib1(n-1, trace) + fib1(n-2, trace)

fib1(10, True)

%timeit -r1 -n100 fib1(20)
```

## Using `toolz`

functional power tools

```
import toolz as tz
import toolz.curried as c
```

Find the 5 most common sequences of length 3 in the dna variable.

```
dna = np.random.choice(list('ACTG'), (10,80), p=[.1,.2,.3,.4])
dna

tz.pipe(
    dna,
    c.map(lambda s: ''.join(s)),
    list
)

res = tz.pipe(
    dna,
    c.map(lambda s: ''.join(s)),
    lambda s: ''.join(s),
    c.sliding_window(3),
    c.map(lambda s: ''.join(s)),
    tz.frequencies
)

[(k,v) for i, (k, v) in enumerate(sorted(res.items(), key=lambda x: -x[1])) if i < 5]
```

## Function annotations and type hints

Function annotations and type hints are optional and meant for 3rd party libraries (e.g. a static type checker or JIT compiler). They are NOT enforced at runtime.

Notice the type annotation, default value and return type.

```
def f(a: str = "hello") -> bool:
    return a.islower()

f()
f("hello")
f("Hello")
```

Function annotations can be accessed through a special attribute.

```
f.__annotations__
```

Type and function annotations are NOT enforced. In fact, the Python interpreter essentially ignores them.

```
def f(x: int) -> int:
    return x + x

f("hello")
```

For more types, import from the `typing` module

```
from typing import Sequence, TypeVar
from functools import reduce
import operator as op

T = TypeVar('T')

def f(xs: Sequence[T]) -> T:
    return reduce(op.add, xs)

f([1,2,3])
f({1., 2., 3.})
f(('a', 'b', 'c'))
```
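Although the interpreter ignores annotations, they are available at runtime through `__annotations__`, so a library (or a quick decorator like the sketch below) can choose to enforce them itself. This is illustrative only; it checks plain classes, not generic types from `typing`:

```
from functools import wraps

def enforce_annotations(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        hints = func.__annotations__
        # check positional arguments against class-type annotations only
        for name, value in zip(func.__code__.co_varnames, args):
            expected = hints.get(name)
            if isinstance(expected, type) and not isinstance(value, expected):
                raise TypeError('%s should be %s, got %s'
                                % (name, expected.__name__, type(value).__name__))
        return func(*args, **kwargs)
    return wrapper

@enforce_annotations
def double(x: int) -> int:
    return x + x

double(3)          # fine
# double("hello")  # would now raise a TypeError instead of returning "hellohello"
```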
github_jupyter
``` # Require the packages require(ggplot2) library(repr) options(repr.plot.width=15, repr.plot.height=4.5) ladder_results_dir <- "../resources/results/ladder_results_sensem/140" bootstrap_results_dir <- "../resources/results/results_semisupervised_sensem_7k/140" lemma_data <- data.frame(iteration=integer(), sense=character(), count=integer(), experiment=character()) for(exp in c("bow_logreg", "wordvec_mlp_2_0", "wordvecpos_mlp_2_0")) { data <- read.csv(paste(bootstrap_results_dir, exp, "targets_distribution", sep="/"), header = F) names(data) <- c("iteration", "sense", "count") data$experiment <- exp lemma_data <- rbind(lemma_data, data) } for(exp in c("vec", "vecpos")) { data <- read.csv(paste(ladder_results_dir, exp, "population_growth", sep="/"), header = F) names(data) <- c("iteration", "sense", "count") data$experiment <- exp lemma_data <- rbind(lemma_data, data) } lemma_data$experiment <- factor(lemma_data$experiment, levels=c("bow_logreg", "wordvec_mlp_2_0", "wordvecpos_mlp_2_0", "vec", "vecpos")) levels(lemma_data$experiment) <- c("Naive Bootstrap\nBag-of-Words\n& Logistic Regression", "Naive Bootstrap\nWord Embeddings\n& Multilayer Perceptron", "Naive Bootstrap\nWord Embeddings\nand PoS\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\n& Multilayer Perceptron", "Ladder Networks\nWord Embeddings\nand PoS\n& Multilayer Perceptron") p <- ggplot(lemma_data, aes(x=iteration, y=count, fill=sense)) p <- p + facet_wrap(~ experiment, scales = 'free', ncol=5) p <- p + geom_area(position="fill") p <- p + scale_x_continuous(breaks=seq(0, 20, 2)) p <- p + scale_y_continuous(breaks=seq(0, 1, 0.1), labels=seq(0, 100, 10)) p <- p + labs(title="Population percentage per sense for lemma \"limitar\"", y="Percent", x="Iteration Number") p <- p + scale_fill_brewer(name="Sense", palette = "Accent", direction = 1, breaks=c("limitar-04", "limitar-03", "limitar-02", "limitar-01")) p <- p + theme( plot.title=element_text(size=15, face="bold", margin=margin(10, 0, 10, 0), vjust=1, lineheight=0.6), strip.text.x=element_text(size=10), axis.title.x=element_text(size=12, margin=margin(10, 0, 0, 0)), axis.title.y=element_text(size=12, margin=margin(0, 10, 0, 0)), legend.title=element_text(face="bold", size=13), legend.text=element_text(size=11), legend.key.height=unit(1.5,"line") ) p # Save the plot ggsave("~/Google Drive/Posgrado/WSD with WE/papers/esslli/plots/limitar.png", plot=p, width=15, height=4.5) library(grid) library(gridExtra) options(repr.plot.width=10.5, repr.plot.height=18) ggsave("plots/population_progres.png", plot=grid.arrange(p1, p2, p3, p4, ncol = 1), width=10.5, height=18) levels(lemma_data$experiment) ```
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/4_image_classification_zoo/Classifier%20-%20Weed%20Species%20Classification%20-%20Hyperparameter%20Tuning%20using%20Monk.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Table of contents

## Install Monk

## Using a pretrained model for classifying weather type based on images

## Training a classifier from scratch

<a id='0'></a>
# Install Monk

## Using pip (Recommended)

- colab (gpu)
    - All backends: `pip install -U monk-colab`
- kaggle (gpu)
    - All backends: `pip install -U monk-kaggle`
- cuda 10.2
    - All backends: `pip install -U monk-cuda102`
    - Gluon backend: `pip install -U monk-gluon-cuda102`
    - Pytorch backend: `pip install -U monk-pytorch-cuda102`
    - Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
    - All backends: `pip install -U monk-cuda101`
    - Gluon backend: `pip install -U monk-gluon-cuda101`
    - Pytorch backend: `pip install -U monk-pytorch-cuda101`
    - Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
    - All backends: `pip install -U monk-cuda100`
    - Gluon backend: `pip install -U monk-gluon-cuda100`
    - Pytorch backend: `pip install -U monk-pytorch-cuda100`
    - Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
    - All backends: `pip install -U monk-cuda92`
    - Gluon backend: `pip install -U monk-gluon-cuda92`
    - Pytorch backend: `pip install -U monk-pytorch-cuda92`
    - Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
    - All backends: `pip install -U monk-cuda90`
    - Gluon backend: `pip install -U monk-gluon-cuda90`
    - Pytorch backend: `pip install -U monk-pytorch-cuda90`
    - Keras backend: `pip install -U monk-keras-cuda90`
- cpu
    - All backends: `pip install -U monk-cpu`
    - Gluon backend: `pip install -U monk-gluon-cpu`
    - Pytorch backend: `pip install -U monk-pytorch-cpu`
    - Keras backend: `pip install -U monk-keras-cpu`

## Install Monk Manually (Not recommended)

### Step 1: Clone the library

- git clone https://github.com/Tessellate-Imaging/monk_v1.git

### Step 2: Install requirements

- Linux
    - Cuda 9.0
        - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
    - Cuda 9.2
        - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
    - Cuda 10.0
        - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
    - Cuda 10.1
        - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
    - Cuda 10.2
        - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
    - CPU (Non gpu system)
        - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
    - Cuda 9.0 (Experimental support)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
    - Cuda 9.2 (Experimental support)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
    - Cuda 10.0 (Experimental support)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
    - Cuda 10.1 (Experimental support)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
    - Cuda 10.2 (Experimental support)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
    - CPU (Non gpu system)
        - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
    - CPU (Non gpu system)
        - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
    - Colab (GPU)
        - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
    - Kaggle (GPU)
        - `cd 
monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` # Used trained classifier for demo ``` #Using pytorch backend # When installed using pip from monk.pytorch_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.pytorch_prototype import prototype # Download trained weights ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1MkDsHcgqtnt3ZzfwYTuEsCd4buDSe9-g" -O cls_weather_trained.zip && rm -rf /tmp/cookies.txt ! unzip -qq cls_weather_trained.zip ls workspace/Project-Weather # Load project in inference mode gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50", eval_infer=True); #Other trained models - uncomment #gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet101", eval_infer=True); # Infer img_name = "workspace/test/test1.jpg" predictions = gtf.Infer(img_name=img_name); from IPython.display import Image Image(filename=img_name) img_name = "workspace/test/test2.jpg" predictions = gtf.Infer(img_name=img_name); from IPython.display import Image Image(filename=img_name) ``` # Training custom classifier from scratch ## Dataset - Credits: https://data.mendeley.com/datasets/4drtyfjtfy/1 ## Download ``` ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ" -O weather.zip && rm -rf /tmp/cookies.txt ! unzip -qq weather.zip ``` ## Training ``` # Using mxnet-gluon backend #from monk.gluon_prototype import prototype # For pytorch backend from monk.pytorch_prototype import prototype # For Keras backend #from monk.keras_prototype import prototype # Create Project and Experiment gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50"); gtf.Default(dataset_path="weather/train", model_name="wide_resnet50_2", freeze_base_network=False, num_epochs=2); ``` ### How to change hyper parameters and models - Docs - https://github.com/Tessellate-Imaging/monk_v1#4 - Examples - https://github.com/Tessellate-Imaging/monk_v1/tree/master/study_roadmaps/1_getting_started_roadmap ``` gtf.update_batch_size(8); # Very important to reload post updates gtf.Reload(); #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` ## Validating on the same dataset ``` # Using mxnet-gluon backend #from monk.gluon_prototype import prototype # For pytorch backend from monk.pytorch_prototype import prototype # For Keras backend #from monk.keras_prototype import prototype # Create Project and Experiment gtf = prototype(verbose=1); gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50", eval_infer=True); # Load dataset for validaion gtf.Dataset_Params(dataset_path="weather/train"); gtf.Dataset(); # Run validation accuracy, class_based_accuracy = gtf.Evaluate(); ```
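Note that the validation cell above evaluates on the same `weather/train` folder the model was trained on, so it mostly confirms that the pipeline runs end to end. If a held-out split is available, the same calls can point at it instead. A minimal sketch, assuming a hypothetical `weather/test` folder laid out like `weather/train` (one sub-folder per class); it is not part of the download above:

```
# For pytorch backend
from monk.pytorch_prototype import prototype

# Reload the experiment in evaluation/inference mode
gtf = prototype(verbose=1);
gtf.Prototype("Project-Weather", "Pytorch-Wide-Resnet50", eval_infer=True);

# Point the evaluator at the hypothetical held-out folder instead of weather/train
gtf.Dataset_Params(dataset_path="weather/test");
gtf.Dataset();

# Run validation on the held-out images
accuracy, class_based_accuracy = gtf.Evaluate();
```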
github_jupyter
## $k$-means clustering: An example implementation in Python 3 with numpy and matplotlib.

The [$k$-means](https://en.wikipedia.org/wiki/K-means_clustering) algorithm is an unsupervised learning method for identifying clusters within a dataset. The $k$ represents the number of clusters to be identified, which is specified by the user before starting the algorithm.

The algorithm goes like this:

* Initialize the $k$ cluster centroids.
* Repeat:
    1. Cluster assignment: Assign each data point to the nearest cluster centroid.
    2. Cluster updating: For each cluster centroid, average the locations of its corresponding points and re-assign the centroid to that location.

The last two steps are repeated until stopping criteria are met, such as reaching a maximum number of iterations or the centroid velocity dropping below a threshold.

The results of the algorithm can be highly dependent on the cluster initialization step, especially when there are a large number of clusters and data points. Performance can be improved in a few different ways, such as running it multiple times and averaging the results, or using different initialization methods such as [$k$-means plus plus](https://en.wikipedia.org/wiki/K-means%2B%2B). Here, we will initialize the $k$ cluster centroids by selecting $k$ random data points.

Mathematically, the cluster assignment step can be written as:

$c^{(i)} = argmin_{k} \left\lVert x^{(i)} - \mu_k\right\rVert^2$

where $c^{(i)}$ is the centroid closest to sample $x^{(i)}$ and $\mu_k$ represents the $k$-th centroid.

Similarly, the cluster update step can be written as:

$\mu_k = \frac{1}{n}[x^{(k_1)}+x^{(k_2)}+...+x^{(k_n)}]$

where, again, $\mu_k$ represents the $k$-th centroid and $x^{(k_n)}$ are the training examples assigned to that centroid.

First, some imports.

```
import numpy as np
np.random.seed(0)

import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

from sklearn.datasets import make_blobs
```

Next we'll define some functions based on steps in the K-means algorithm.

```
def initialize_clusters(points, k):
    """Initializes clusters as k randomly selected points from points."""
    return points[np.random.randint(points.shape[0], size=k)]

# Function for calculating the distance between a centroid and the data points
def get_distances(centroid, points):
    """Returns the distance the centroid is from each data point in points."""
    return np.linalg.norm(points - centroid, axis=1)
```

Here we'll generate some data using [scikit-learn](http://scikit-learn.org)'s [`make_blobs`](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_blobs.html#sklearn.datasets.make_blobs) function. For this example we'll generate a dataset with three clusters. Using this function will give us access to the actual class labels for each group so we can assess accuracy later if we would like to. Normally when using K-means, you won't know the cluster assignments or the number of clusters in the dataset!

```
# Generate dataset
X, y = make_blobs(centers=3, n_samples=500, random_state=1)

# Visualize
fig, ax = plt.subplots(figsize=(4,4))
ax.scatter(X[:,0], X[:,1], alpha=0.5)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$');
```

Now let's implement K-means using k = 3.
```
k = 3
maxiter = 50

# Initialize our centroids by picking random data points
centroids = initialize_clusters(X, k)

# Initialize the vectors in which we will store the
# assigned classes of each data point and the
# calculated distances from each centroid
classes = np.zeros(X.shape[0], dtype=np.float64)
distances = np.zeros([X.shape[0], k], dtype=np.float64)

# Loop for the maximum number of iterations
for i in range(maxiter):

    # Assign all points to the nearest centroid
    # (use j for the centroid index so the outer iteration counter i is not shadowed)
    for j, c in enumerate(centroids):
        distances[:, j] = get_distances(c, X)

    # Determine class membership of each point
    # by picking the closest centroid
    classes = np.argmin(distances, axis=1)

    # Update centroid location using the newly
    # assigned data point classes
    for c in range(k):
        centroids[c] = np.mean(X[classes == c], 0)
```

Once we've finished running the algorithm, we can visualize the classified data and our calculated centroid locations.

```
group_colors = ['skyblue', 'coral', 'lightgreen']
colors = [group_colors[j] for j in classes]

fig, ax = plt.subplots(figsize=(4,4))
ax.scatter(X[:,0], X[:,1], color=colors, alpha=0.5)
ax.scatter(centroids[:,0], centroids[:,1], color=['blue', 'darkred', 'green'], marker='o', lw=2)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$');
```

Looks pretty good! In another post I'll discuss some limitations of the $k$-means algorithm and assess what happens when $k$ is chosen to be greater than or less than the actual number of clusters in your dataset.
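As mentioned above, $k$-means plus plus is one alternative to the purely random initialization used here. A minimal sketch of that seeding idea, reusing the `X` and `k` defined above (just to show the mechanics, not a tuned implementation):

```
import numpy as np

def initialize_clusters_plusplus(points, k):
    """k-means++ style seeding: pick the first centroid uniformly at random,
    then pick each subsequent centroid with probability proportional to its
    squared distance from the nearest centroid chosen so far."""
    centroids = [points[np.random.randint(points.shape[0])]]
    for _ in range(k - 1):
        # squared distance of every point to its closest already-chosen centroid
        diffs = points[:, None, :] - np.array(centroids)[None, :, :]
        d2 = np.min(np.sum(diffs ** 2, axis=2), axis=1)
        probs = d2 / d2.sum()
        centroids.append(points[np.random.choice(points.shape[0], p=probs)])
    return np.array(centroids)

# Drop-in replacement for the random initialization above, e.g.:
# centroids = initialize_clusters_plusplus(X, k)
```

Because the seeds start out spread apart, this tends to make the result less sensitive to an unlucky random draw.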
github_jupyter
# Biological question: Are there differences in the binding distance of the same TF-pair in different clusters? - PART2

This notebook can be used to analyse if there are differences in the binding distance of the same TF-pair in two different clusters. In "Outline of this notebook" the general steps in the notebook are explained. The details for each general step are described directly in the notebook at that step.

**Needed input for notebook:** .pkl file with the performed market basket analysis for the second of the two chosen clusters for comparison + .csv file with the results of the distance analysis of the first cluster (you can also have a look at the TF-COMB docs)

#### Exemplary Data: WP2 - A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts vs. A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts

### Outline of this notebook:

This notebook presents the second part of the analysis to find out if there is a difference in the binding distance of the same TF-pair in different clusters. If you **have not done PART 1** yet, **do PART 1 first**. Here the second cluster is analysed. The **results of the first cluster are then imported**, merged together, and the comparison of the binding distances is done.

1. Implementation of distance analysis for the second cluster and transferring it into a dataframe
2. Importing the results of the first cluster
3. Merging the results of first and second cluster
4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters
5. Comparing the binding distances between the same TF-pairs in the two clusters
6. Possibility to have a closer look and to compare results of different clusters

## 1. Implementation of distance analysis for the second cluster

0. Creation of folders for the structure if necessary, so the needed paths for the output exist
1. Read in the **path of the .pkl file of the second chosen cluster from the already performed market basket analysis** (alternative: perform a normal market basket analysis) **(-> adjust for cluster)**
2. Using .simplify_rules(), so the duplicates of a TF-pair (because of the two orientations TF1-TF2 or TF2-TF1) are not displayed
3. Selection of TF-pairs by cosine and zscore
4. Implementation of distance analysis with consideration of the noisiness (lower noise, "clearer/better" analysis)
5. Creation of the dataframe df_distance_clusterPART2 with the results of the distance analysis of the second cluster, so it can be easily merged with the dataframe of the first cluster
6. Reducing the TF co-occurrences by selecting the TF co-occurrences with a peak height above 2.8 (a boundary that has proven good in other applications of the distance analysis)

```
# The following lines initially check if all files/paths are available.
# If a result folder does not exist, it is created automatically
import os
import pathlib

if not os.path.exists("./results/distanceresultsfordifference/"):
    pathlib.Path("./results/distanceresultsfordifference/").mkdir(parents=True, exist_ok=True)
if not os.path.exists("./results/differencedistance_distributionplot/"):
    pathlib.Path("./results/differencedistance_distributionplot/").mkdir(parents=True, exist_ok=True)
if not os.path.exists("./results/differencedistance_plot/"):
    pathlib.Path("./results/differencedistance_plot/").mkdir(parents=True, exist_ok=True)
if not os.path.exists("./results/differencedistance_table/"):
    pathlib.Path("./results/differencedistance_table/").mkdir(parents=True, exist_ok=True)

import tfcomb.objects

clusterPART2_object = tfcomb.objects.CombObj().from_pickle("/mnt/workspace_stud/stud6/repositories/Datenanalyse-2021/wp6/analyse/results/wp2/main/A8CPH_esophagus_muscularis_mucosa/A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.pkl")
clusterPART2_object

clusterPART2_object.simplify_rules()

clusterPART2_object_selected = clusterPART2_object.select_significant_rules()

clusterPART2_object_selected.analyze_distances(threads=6)

clusterPART2_object_selected.distObj.evaluate_noise(threads=6)

clusterPART2_object_selected.distObj.rank_rules()

df_distance_clusterPART2=clusterPART2_object_selected.distObj.peaks
df_distance_clusterPART2=df_distance_clusterPART2[(df_distance_clusterPART2["Peak Heights"]>2.8)]
df_distance_clusterPART2
```

## 2. Importing the results of the first cluster

1. Import of the results of the first cluster and saving them in the df_distance_clusterPART1_csv dataframe

```
import pandas as pd

df_distance_clusterPART1_csv=pd.read_csv("./results/distanceresultsfordifference/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts.csv")
df_distance_clusterPART1_csv
```

## 3. Merging the results of first and second cluster

1. Merging the results of the distance analysis for the TF-pairs that are in both the first and the second cluster into the new dataframe df_distancedifference_2clusters, so the binding distance between the two clusters can be compared. The columns with the suffix CPART1 hold the information of the first cluster and the columns with CPART2 hold the information of the second cluster.

```
df_distancedifference_2clusters=df_distance_clusterPART1_csv.merge(df_distance_clusterPART2,suffixes=('_CPART1', '_CPART2'), left_on =["TF1","TF2"], right_on = ["TF1","TF2"])
df_distancedifference_2clusters

pd.set_option('max_columns', None)
pd.set_option('max_rows', 50)
```

## 4. Looking at the Distribution of the difference in binding distance between the same TF-pairs in the two clusters

1. Calculation of the difference between the binding distances per TF-pair
2. Calculation of the average peak height of a TF-pair as an additional assessment factor
3. Selection of TF-pairs with more than 100 counts as TF-pairs of interest, so the probability is higher that they are important for the cluster
4. Sorting the dataframe df_distancedifference_2clusters by the difference in the binding distance for plotting
5. Calculating the distribution of the difference in binding distance. This can be used to compare different pairs of clusters (2 clusters of the same celltype vs. 2 clusters of different celltypes) concerning their distribution of the difference in binding distance
6. Plotting the Distribution of the difference in binding distance and saving the **distribution plot .png file (-> adjust for cluster)**
```
df_distancedifference_2clusters['Difference between Distance'] = abs(df_distancedifference_2clusters['Distance_CPART1'] - df_distancedifference_2clusters['Distance_CPART2'])
df_distancedifference_2clusters['Average Peak Height'] = ((df_distancedifference_2clusters['Peak Heights_CPART1'] + df_distancedifference_2clusters['Peak Heights_CPART2'])/2)
df_distancedifference_2clusters['TF-pair'] = df_distancedifference_2clusters['TF1'] + " + " + df_distancedifference_2clusters['TF2']

df_distancedifference_2clusters = df_distancedifference_2clusters[(df_distancedifference_2clusters["TF1_TF2_count_CPART1"]>100) &(df_distancedifference_2clusters["TF1_TF2_count_CPART2"]>100)]

df_distancedifference_2clusters_sorted=df_distancedifference_2clusters.sort_values(by=['Difference between Distance'])

df_differencedistance_distribution=df_distancedifference_2clusters_sorted['Difference between Distance'].value_counts()
df_differencedistance_distribution

df_distancedifference_2clusters_sorted

import matplotlib.pyplot as plt

differencedistance = df_differencedistance_distribution.keys()
occurrence = df_differencedistance_distribution

plt.figure(figsize=(15, 5))
plt.scatter(differencedistance,occurrence)
plt.xticks
plt.grid(True)
plt.xlabel('Difference in Distance')
plt.ylabel('Occurrence')
plt.title('Distribution of the Difference in binding Distance')
plt.savefig("./results/differencedistance_distributionplot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png")
plt.show()
```

## 5. Comparing the binding distances between the same TF-pairs in the two clusters

1. Selecting TF-pairs with a difference in binding distance over 10, since now we only want to look at the TF-pairs that actually differ in binding distance
2. Sorting the TF-pairs by the noisiness of the first cluster and selecting the first 100. The noisiness is selected as an assessment factor since it distinguishes a clear signal from a noisy signal in the distance analysis, which was ranked as an important factor for the quality of the TF-pair binding distance. The selection of 100 TF-pairs is for the readability of the figure and can also be adjusted reasonably (as well as the other factors)
3. Sorting the dataframe df_distancedifference_2clusters_withoutlowdifference_noisinesstop100 by the difference in the binding distance for plotting and **saving the finished distance difference table in a .csv file (-> adjust for cluster)**
4. Plotting the difference in distance over the TF-pairs and saving the **difference in distance plot in a .png file (-> adjust for cluster)**
```
df_distancedifference_2clusters_withoutlowdifference = df_distancedifference_2clusters[(df_distancedifference_2clusters["Difference between Distance"]>10)]

df_distancedifference_2clusters_withoutlowdifference_noisinesstop100=df_distancedifference_2clusters_withoutlowdifference.sort_values(by=['Noisiness_CPART1']).head(100)

df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted=df_distancedifference_2clusters_withoutlowdifference_noisinesstop100.sort_values(by=['Difference between Distance'])
df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted

df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted.to_csv("./results/differencedistance_table/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.csv")

differencedistance = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Difference between Distance']
TFpairs = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['TF-pair']
averagepeakheight = df_distancedifference_2clusters_withoutlowdifference_noisinesstop100_sorted['Average Peak Height']

plt.figure(figsize=(20, 5))
plt.scatter(TFpairs,differencedistance, c=averagepeakheight, cmap = 'cividis_r')
plt.xticks(rotation='vertical')
plt.grid(True, axis = 'y')
plt.xlabel('TF-pairs')
plt.ylabel('Difference in Distance')
cbar = plt.colorbar()
cbar.set_label("average Peak Height")
plt.title('Difference in Distance over the TF-pairs')
plt.savefig("./results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__A8CPH_esophagus_muscularis_mucosa_c7_Fibroblasts.png")
plt.show()
```

## 6. Possibility to have a closer look and to compare results of different clusters

#### Possibility to import other plots from a different pair of clusters for comparison (-> adjust for cluster)

```
from PIL import Image

image = Image.open("/mnt/workspace_stud/stud7/Datenanalyse-2021/wp6/analyse/results/differencedistance_plot/A8CPH_esophagus_muscularis_mucosa_c1_Fibroblasts__ACCQ1_colon_transverse_c3_Enterocytes.png")
image.show()
```
github_jupyter
``` #!pwd import pandas as pd import os import string from nltk.corpus import stopwords from nltk import word_tokenize, WordNetLemmatizer from nltk import stem, pos_tag from nltk.corpus import wordnet as wn from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer import os import re cwd = os.getcwd() filepath = cwd+'/data/df_test_indian.csv' outputfilepath = cwd+'/data/df_test_indian_with_negation.csv' df = pd.read_csv(filepath) df.head() # ffile1 = open(filepath,"r", encoding = "ISO-8859-1") # df_standard = pd.read_csv(ffile1, encoding = "utf-8") # df_standard.drop(['restaurant_id', 'date', 'review_id'], inplace=True, axis=1) # df_standard.rename(columns={'text': 'review', 'Sentiment': 'sentiment'}, inplace=True) # df_standard.head() # mask = df_standard.sentiment == 'negative' # column_name = 'sentiment' # df_standard.loc[mask, column_name] = -1 # mask = df_standard.sentiment == 'positive' # column_name = 'sentiment' # df_standard.loc[mask, column_name] = 1 # df_standard = df_standard.rename(columns = {'stars':'rating'}) # df_standard.head() # df = pd.concat([df, df_standard], axis=0) # df = df.reset_index().drop('index', axis=1) ratings = df['rating'] sentiments = df['sentiment'] reviews = df['review'] sentiment_trans = [] for r in ratings.keys(): rating = ratings.loc[r] sentiment = sentiments.loc[r] sentiment = int(sentiment) if rating <= 3: sentiment = -1 else: sentiment = sentiment sentiment_trans.insert(r, sentiment) sentiments = pd.Series(sentiment_trans, index=ratings.keys()) df = pd.concat([ratings, reviews, sentiments], axis = 1) df.columns = ['rating', 'review', 'sentiment'] df.head() df stop = stopwords.words('english') snowball = stem.snowball.EnglishStemmer() wnl = WordNetLemmatizer() reviews = df['review'] def negation_Processing(reviews): def neg_tag(text): transformed = re.sub(r"\b(?:never|nothing|nowhere|noone|none|not|haven't|hasn't|hasnt|hadn't|hadnt|can't|cant|couldn't|couldnt|shouldn't|shouldnt|won't|wont|wouldn't|wouldnt|don't|dont|doesn't|doesnt|didn't|didnt|isnt|isn't|aren't|arent|aint|ain't|hardly|seldom)\b[\w\s]+[^\w\s]", lambda match: re.sub(r'(\s+)(\w+)', r'\1NEG_\2', match.group(0)), text, flags=re.IGNORECASE) return(transformed) negation_reviews = [] # Append elements to the list for doc in reviews: trans = neg_tag(doc) negation_reviews.append(trans) return negation_reviews # Remove all the punctuations and numbers def removePunc(reviews): comwoPunc = str.maketrans({key: None for key in string.punctuation + string.digits}) for i in reviews.keys(): comment = reviews.loc[i] reviewswoPunc = reviews.replace(comment, comment.translate(comwoPunc)) return reviewswoPunc # Convert all characters to Lower case def convToLow(words): reviewsToLow = words.apply(str.lower) return reviewsToLow # Stopwords removal def removeStop(text, stop): for i in text.keys(): comment = text.loc[i] comment_nostop = " ".join(filter(lambda word: word not in stop, comment.split())) reviewswoStop = text.replace(comment, comment_nostop) return reviewswoStop # Tokenization def token(text): reviewsToken = text.apply(word_tokenize) return reviewsToken # pos tagging def posTag(words): reviews_pos = words.apply(pos_tag) reviews_wnpos = [] for i in reviews_pos.keys(): comment = reviews_pos.loc[i] comment_wnpos = [] for t in comment: t = list(t) tag = t[1] if t[1].startswith('J'): t[1] = wn.ADJ elif t[1].startswith('V'): t[1] = wn.VERB elif t[1].startswith('N'): t[1] = wn.NOUN elif t[1].startswith('R'): t[1] = wn.ADV else: del t t = None if t is not None: comment_wnpos.append(t) 
else: pass reviews_wnpos.append(comment_wnpos) reviews_wnpos = pd.Series(reviews_wnpos, index=reviews_pos.keys()) return reviews_wnpos # Lemmatization def lemma(text, wnl): reviews_lem = [] for i in text.keys(): comment = text.loc[i] comment_lem = [] for t in comment: word = t[0] tag = t[1] t = wnl.lemmatize(word, pos=tag) comment_lem.append(t) reviews_lem.append(comment_lem) allReviews = [] for j in reviews_lem: reviews = ' '.join(j) allReviews.append(reviews) reviewsLemma = pd.Series(allReviews, index=text.keys()) return reviewsLemma if __name__ == '__main__': reviews = negation_Processing(reviews) reviews = pd.Series(reviews) reviews = removePunc(reviews) reviews = convToLow(reviews) reviews = removeStop(reviews, stop) reviews = token(reviews) reviews = posTag(reviews) final_reviews = lemma(reviews,wnl) ratings = df['rating'] sentiments = df['sentiment'] df = pd.concat([ratings, final_reviews, sentiments], axis = 1) df.columns = ['rating', 'review', 'sentiment'] df.to_csv(outputfilepath, encoding='utf-8') ```
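To make the effect of the negation tagging above concrete, here is a small standalone version of the same idea: everything between a negation word and the next punctuation mark gets a `NEG_` prefix. The sample sentence is made up, and the alternation list is abbreviated relative to the full pattern used in `negation_Processing`:

```
import re

def neg_tag_demo(text):
    # Abbreviated version of the pattern used in negation_Processing above:
    # the negation scope runs from the negation word to the next punctuation mark.
    return re.sub(
        r"\b(?:never|nothing|not|don't|dont|doesn't|doesnt|didn't|didnt|isn't|isnt|hardly|seldom)\b[\w\s]+[^\w\s]",
        lambda match: re.sub(r'(\s+)(\w+)', r'\1NEG_\2', match.group(0)),
        text,
        flags=re.IGNORECASE,
    )

print(neg_tag_demo("The curry was not spicy at all, but the naan was great."))
# -> The curry was not NEG_spicy NEG_at NEG_all, but the naan was great.
```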
github_jupyter
# TV Script Generation In this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new ,"fake" TV script, based on patterns it recognizes in this training data. ## Get the Data The data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text. >* As a first step, we'll load in this data and look at some samples. * Then, you'll be tasked with defining and training an RNN to generate a new script! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # load in data import helper data_dir = './data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) ``` ## Explore the Data Play around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\n`. ``` view_line_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) lines = text.split('\n') print('Number of lines: {}'.format(len(lines))) word_count_line = [len(line.split()) for line in lines] print('Average number of words in each line: {}'.format(np.average(word_count_line))) print('The lines {} to {}:'.format(*view_line_range)) print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]])) ``` --- ## Implement Pre-processing Functions The first thing to do to any dataset is pre-processing. Implement the following pre-processing functions below: - Lookup Table - Tokenize Punctuation ### Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call `vocab_to_int` - Dictionary to go from the id to word, we'll call `int_to_vocab` Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)` ``` import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function # return tuple return (None, None) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) ``` ### Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids. Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( **.** ) - Comma ( **,** ) - Quotation Mark ( **"** ) - Semicolon ( **;** ) - Exclamation mark ( **!** ) - Question mark ( **?** ) - Left Parentheses ( **(** ) - Right Parentheses ( **)** ) - Dash ( **-** ) - Return ( **\n** ) This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. 
This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||". ``` def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenized dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function return None """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) ``` ## Pre-process all the data and save it Running the code cell below will pre-process all the data and save it to file. You're encouraged to look at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail, but you do not need to change this code. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # pre-process training data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) ``` # Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() ``` ## Build the Neural Network In this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions. ### Check Access to GPU ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('No GPU found. Please use a GPU to train your neural network.') ``` ## Input Let's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.html#torch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions. You can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual. ``` data = TensorDataset(feature_tensors, target_tensors) data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size) ``` ### Batching Implement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes. >You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`. 
For example, say we have these as input: ``` words = [1, 2, 3, 4, 5, 6, 7] sequence_length = 4 ``` Your first `feature_tensor` should contain the values: ``` [1, 2, 3, 4] ``` And the corresponding `target_tensor` should just be the next "word"/tokenized word value: ``` 5 ``` This should continue with the second `feature_tensor`, `target_tensor` being: ``` [2, 3, 4, 5] # features 6 # target ``` ``` from torch.utils.data import TensorDataset, DataLoader def batch_data(words, sequence_length, batch_size): """ Batch the neural network data using DataLoader :param words: The word ids of the TV scripts :param sequence_length: The sequence length of each batch :param batch_size: The size of each batch; the number of sequences in a batch :return: DataLoader with batched data """ # TODO: Implement function # return a dataloader return None # there is no test for this function, but you are encouraged to create # print statements and tests of your own ``` ### Test your dataloader You'll have to modify this code to test a batching function, but it should look fairly similar. Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader. Your code should return something like the following (likely in a different order, if you shuffled your data): ``` torch.Size([10, 5]) tensor([[ 28, 29, 30, 31, 32], [ 21, 22, 23, 24, 25], [ 17, 18, 19, 20, 21], [ 34, 35, 36, 37, 38], [ 11, 12, 13, 14, 15], [ 23, 24, 25, 26, 27], [ 6, 7, 8, 9, 10], [ 38, 39, 40, 41, 42], [ 25, 26, 27, 28, 29], [ 7, 8, 9, 10, 11]]) torch.Size([10]) tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12]) ``` ### Sizes Your sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10). ### Values You should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`. ``` # test dataloader test_text = range(50) t_loader = batch_data(test_text, sequence_length=5, batch_size=10) data_iter = iter(t_loader) sample_x, sample_y = data_iter.next() print(sample_x.shape) print(sample_x) print() print(sample_y.shape) print(sample_y) ``` --- ## Build the Neural Network Implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class: - `__init__` - The initialize function. - `init_hidden` - The initialization function for an LSTM/GRU hidden state - `forward` - Forward propagation function. The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state. **The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word. ### Hints 1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)` 2. 
You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so: ``` # reshape into (batch_size, seq_length, output_size) output = output.view(batch_size, -1, self.output_size) # get last batch out = output[:, -1] ``` ``` import torch.nn as nn class RNN(nn.Module): def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5): """ Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary) :param output_size: The number of output dimensions of the neural network :param embedding_dim: The size of embeddings, should you choose to use them :param hidden_dim: The size of the hidden layer outputs :param dropout: dropout to add in between LSTM/GRU layers """ super(RNN, self).__init__() # TODO: Implement function # set class variables # define model layers def forward(self, nn_input, hidden): """ Forward propagation of the neural network :param nn_input: The input to the neural network :param hidden: The hidden state :return: Two Tensors, the output of the neural network and the latest hidden state """ # TODO: Implement function # return one batch of output word scores and the hidden state return None, None def init_hidden(self, batch_size): ''' Initialize the hidden state of an LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden state of dims (n_layers, batch_size, hidden_dim) ''' # Implement function # initialize hidden state with zero weights, and move to GPU if available return None """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_rnn(RNN, train_on_gpu) ``` ### Define forward and backpropagation Use the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows: ``` loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target) ``` And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that you can get this loss by computing it, as usual, and calling `loss.item()`. **If a GPU is available, you should move your data to that GPU device, here.** ``` def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden): """ Forward and backward propagation on the neural network :param rnn: The PyTorch Module that holds the neural network :param optimizer: The PyTorch optimizer for the neural network :param criterion: The PyTorch loss function :param inp: A batch of input to the neural network :param target: The target output for the batch of input :return: The loss and the latest hidden state Tensor """ # TODO: Implement Function # move data to GPU, if available # perform backpropagation and optimization # return the loss over a batch and the hidden state produced by our model return None, None # Note that these tests aren't completely extensive. # they are here to act as general checks on the expected outputs of your functions """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu) ``` ## Neural Network Training With the structure of the network complete and data ready to be fed in the neural network, it's time to train it. ### Train Loop The training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. 
This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100): batch_losses = [] rnn.train() print("Training for %d epoch(s)..." % n_epochs) for epoch_i in range(1, n_epochs + 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make sure you iterate over completely full batches, only n_batches = len(train_loader.dataset)//batch_size if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}\n'.format( epoch_i, n_epochs, np.average(batch_losses))) batch_losses = [] # returns a trained rnn return rnn ``` ### Hyperparameters Set and train the neural network with the following parameters: - Set `sequence_length` to the length of a sequence. - Set `batch_size` to the batch size. - Set `num_epochs` to the number of epochs to train for. - Set `learning_rate` to the learning rate for an Adam optimizer. - Set `vocab_size` to the number of unique tokens in our vocabulary. - Set `output_size` to the desired size of the output. - Set `embedding_dim` to the embedding dimension; smaller than the vocab_size. - Set `hidden_dim` to the hidden dimension of your RNN. - Set `n_layers` to the number of layers/cells in your RNN. - Set `show_every_n_batches` to the number of batches at which the neural network should print progress. If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class. ``` # Data params # Sequence Length sequence_length = # of words in a sequence # Batch Size batch_size = # data loader - do not change train_loader = batch_data(int_text, sequence_length, batch_size) # Training parameters # Number of Epochs num_epochs = # Learning Rate learning_rate = # Model parameters # Vocab size vocab_size = # Output size output_size = # Embedding Dimension embedding_dim = # Hidden Dimension hidden_dim = # Number of RNN Layers n_layers = # Show stats for every n number of batches show_every_n_batches = 500 ``` ### Train In the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train. > **You should aim for a loss less than 3.5.** You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn. ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ # create model and move to gpu if available rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5) if train_on_gpu: rnn.cuda() # defining loss and optimization functions for training optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() # training the model trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches) # saving the trained model helper.save_model('./save/trained_rnn', trained_rnn) print('Model Trained and Saved') ``` ### Question: How did you decide on your model hyperparameters? 
For example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those? **Answer:** (Write answer, here) --- # Checkpoint After running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name! ``` """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() trained_rnn = helper.load_model('./save/trained_rnn') ``` ## Generate TV Script With the network trained and saved, you'll use it to generate a new, "fake" Seinfeld TV script in this section. ### Generate Text To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores! ``` """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import torch.nn.functional as F def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): """ Generate text using the neural network :param decoder: The PyTorch Module that holds the trained neural network :param prime_id: The word id to start the first prediction :param int_to_vocab: Dict of word id keys to word values :param token_dict: Dict of puncuation tokens keys to puncuation values :param pad_value: The value used to pad a sequence :param predict_len: The length of text to generate :return: The generated text """ rnn.eval() # create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq = torch.LongTensor(current_seq).cuda() else: current_seq = torch.LongTensor(current_seq) # initialize the hidden state hidden = rnn.init_hidden(current_seq.size(0)) # get the output of the rnn output, _ = rnn(current_seq, hidden) # get the next word probabilities p = F.softmax(output, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # use top_k sampling to get the index of the next word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze() # select the likely next word index with some element of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) # retrieve that word from the dictionary word = int_to_vocab[word_i] predicted.append(word) if(train_on_gpu): current_seq = current_seq.cpu() # move to cpu # the generated word becomes the next "current sequence" and the cycle can continue if train_on_gpu: current_seq = current_seq.cpu() current_seq = np.roll(current_seq, -1, 1) current_seq[-1][-1] = word_i gen_sentences = ' '.join(predicted) # Replace punctuation tokens for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' gen_sentences = gen_sentences.replace(' ' + token.lower(), key) gen_sentences = gen_sentences.replace('\n ', '\n') gen_sentences = gen_sentences.replace('( ', '(') # return all the sentences return 
gen_sentences ``` ### Generate a New Script It's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction: - "jerry" - "elaine" - "george" - "kramer" You can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. (You can also start with any other names you find in the original text file!) ``` # run the cell multiple times to get different results! gen_length = 400 # modify the length to your preference prime_word = 'jerry' # name for starting the script """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ pad_word = helper.SPECIAL_WORDS['PADDING'] generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length) print(generated_script) ``` #### Save your favorite scripts Once you have a script that you like (or find interesting), save it to a text file! ``` # save script to a text file f = open("generated_script_1.txt","w") f.write(generated_script) f.close() ``` # The TV Script is Not Perfect It's ok if the TV script doesn't make perfect sense. It should look like alternating lines of dialogue, here is one such example of a few generated lines. ### Example generated script >jerry: what about me? > >jerry: i don't have to wait. > >kramer:(to the sales table) > >elaine:(to jerry) hey, look at this, i'm a good doctor. > >newman:(to elaine) you think i have no idea of this... > >elaine: oh, you better take the phone, and he was a little nervous. > >kramer:(to the phone) hey, hey, jerry, i don't want to be a little bit.(to kramer and jerry) you can't. > >jerry: oh, yeah. i don't even know, i know. > >jerry:(to the phone) oh, i know. > >kramer:(laughing) you know...(to jerry) you don't know. You can see that there are multiple characters that say (somewhat) complete sentences, but it doesn't have to be perfect! It takes quite a while to get good results, and often, you'll have to use a smaller vocabulary (and discard uncommon words), or get more data. The Seinfeld dataset is about 3.4 MB, which is big enough for our purposes; for script generation you'll want more than 1 MB of text, generally. # Submitting This Project When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save another copy as an HTML file by clicking "File" -> "Download as.."->"html". Include the "helper.py" and "problem_unittests.py" files in your submission. Once you download these files, compress them into one zip file for submission.
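As a point of reference for the batching scheme described in the "Batching" section earlier in this notebook, one possible sketch of `batch_data` (not necessarily the intended solution) that produces the feature/target shapes shown there:

```
import torch
from torch.utils.data import TensorDataset, DataLoader

def batch_data_sketch(words, sequence_length, batch_size):
    """Slide a window of `sequence_length` words over `words`; each window is one
    feature row and the word immediately after the window is its target."""
    words = list(words)
    n_windows = len(words) - sequence_length
    features = [words[i:i + sequence_length] for i in range(n_windows)]
    targets = [words[i + sequence_length] for i in range(n_windows)]
    data = TensorDataset(torch.LongTensor(features), torch.LongTensor(targets))
    return DataLoader(data, batch_size=batch_size, shuffle=True)

# e.g. batch_data_sketch(range(50), sequence_length=5, batch_size=10) yields
# sample_x of shape (10, 5) and sample_y of shape (10,), as in the test cell above.
```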
github_jupyter
### Requirement ``` aliyun-python-sdk-core==2.13.25 aliyun-python-sdk-ocr==1.0.8 Flask==1.1.2 imutils==0.5.3 json5==0.9.5 Keras==2.4.3 Keras-Preprocessing==1.1.2 matplotlib==3.3.0 numpy==1.18.5 opencv-python==4.4.0.40 oss2==2.12.1 Pillow==7.0.0 sklearn==0.0 tensorflow==2.3.0 trdg==1.6.0 ``` ### Import Aliyun python SDK modules - `aliyun-python-sdk-core` - `aliyun-python-sdk-ocr` - `oss2` ``` #Aliyun SDK Core from aliyunsdkcore.client import AcsClient from aliyunsdkcore.acs_exception.exceptions import ClientException from aliyunsdkcore.acs_exception.exceptions import ServerException #Aliyun SDK OSS import oss2 #Aliyun SDK OCR from aliyunsdkocr.request.v20191230.RecognizeCharacterRequest import RecognizeCharacterRequest ``` ### Configure Aliyun python SDK ``` #Access_key access_key_id = '' access_key_secret = '' #OSS endpoint = '' bucket_name = '' auth = oss2.Auth(access_key_id, access_key_secret) bucket = oss2.Bucket(auth, endpoint, bucket_name) #OCR location = '' client = AcsClient(access_key_id, access_key_secret, location) ``` ### Instruct OCR request body, set return format to `json` ``` request = RecognizeCharacterRequest() request.set_accept_format('json') ``` ### Upload local image with SHA1 hashed name to OSS - `image_path` is pointed to the local image - image format should be `.png` - image size should less than 3MB ``` import os from hashlib import sha1 image_path = '' #Upload with SHA1 hashed name filename, file_extension = os.path.splitext(image_path) key = sha1(open(image_path, 'rb').read()).hexdigest() + file_extension bucket.put_object_from_file(key, image_path) import json #Get image info from OSS info = bucket.get_object(key, process = 'image/info') info_content = info.read() decoded_info = json.loads(oss2.to_unicode(info_content)) print('Image Info ->') print(json.dumps(decoded_info, indent = 4, sort_keys = True)) #Struct image URL image_url = 'https://' + bucket_name + '.' 
+ endpoint.replace("https://","") + '/' + key print('Image URL -> ' + image_url) #Set OCR image_url request.set_ImageURL(image_url) ``` ### Send request and show OCR result - `MinHeight` is set to $\frac{1}{20}$ of the image width - `OutputProbability` is set to `true` ``` #Pre-config request min_height = int(int(decoded_info['ImageHeight']['value']) / 20) request.set_MinHeight(int(min_height)) request.set_OutputProbability(True) #Send request to OCR server and get response response = client.do_action_with_exception(request) #Delete OSS image bucket.delete_object(key) import json #Parse json response parsed = json.loads(response) print('Response ->') print(json.dumps(parsed, indent = 4, sort_keys = True)) ``` ### Parsed all `TextRectangle` and calculate the distance between image center and rect center ``` distances = [] objects = parsed['Data']['Results'] #Cal image center O(o_x0, o_y0) o_x0, o_y0 = int(decoded_info['ImageWidth']['value']) / 2.0, int(decoded_info['ImageHeight']['value']) / 2.0 import math for object in objects: #Cal TextRectangle angle A, start point A(x0, y0) and endpoint B(x1, y1) A = object['TextRectangles']['Angle'] / 180.0 x0, y0 = object['TextRectangles']['Left'], object['TextRectangles']['Top'] x1, y1 = x0 + object['TextRectangles']['Width'], y0 + object['TextRectangles']['Height'] #Cal vector AB = (v_x0, v_y0) v_x0, v_y0 = x1 - x0, y1 - y0 #Cal angle A rotated and 1/2 lenthed vector AB' = (v_x1, v_y1) v_x1, v_y1 = (v_x0 * math.cos(A) - v_y0 * math.sin(A)) / 2.0, (v_y0 * math.cos(A) + v_x0 * math.sin(A)) / 2.0 #Cal TextRectangle center point B'(x2, y2) x2, y2 = x0 + v_x1, y0 + v_y1 print('TextRectangleCtr -> ', (x2, y2)) #Cal distance between point B and O d = math.pow(x2 - o_x0, 2) + math.pow(y2 - o_y0, 2) distances.append(d) ``` ### Find the nearest `TextRectangle` index to the image center ``` index_min = distances.index(min(distances)) print('Min_Index -> ', index_min) ``` ### Draw all `TextRectangle` - ROI is **green** and others is **red** ``` from matplotlib import pyplot as plt from matplotlib import patches as patches %matplotlib inline img = plt.imread(image_path) fig, ax = plt.subplots(1) ax.imshow(img) index = 0 for object in objects: if (index == index_min): c = 'g' else: c = 'r' index += 1 ret = patches.Rectangle( (object['TextRectangles']['Left'], object['TextRectangles']['Top']), object['TextRectangles']['Width'], object['TextRectangles']['Height'], object['TextRectangles']['Angle'] / 180.0, linewidth = 2, edgecolor = c, facecolor = 'none' ) ax.add_patch(ret) plt.show() ``` ### ROI ``` import PIL from matplotlib import pyplot as plt A = - objects[index_min]['TextRectangles']['Angle'] / 180.0 roi = PIL.Image.open(image_path) roi = roi.rotate(A) def rotate(x, y, o_x, o_y, theta): x_r = math.cos(theta) * (x - o_x) - math.sin(theta) * (y - o_y) + o_x y_r = math.sin(theta) * (x - o_x) + math.cos(theta) * (y - o_y) + o_y return [x_r, y_r] #Cal start point A(x0, y0) x0, y0 = objects[index_min]['TextRectangles']['Left'], objects[index_min]['TextRectangles']['Top'] #Cal angle A rotated A'(x1, y1) x1, y1 = rotate(x0, y0, o_x0, o_y0, A) roi = roi.crop((x1, y1, (x1 + objects[index_min]['TextRectangles']['Width']), (y1 + objects[index_min]['TextRectangles']['Height']))) fig, ax = plt.subplots(1) ax.imshow(roi) plt.show() ``` ### Load image function for DeepFont - color to gray - resize to (105, 105) ``` import PIL import numpy as np def pil_image(img_path): pil_img = PIL.Image.open(img_path).convert('L') pil_img = pil_img.resize((105, 105)) return pil_img ``` 
### Preprocessing function - Noise a small Gaussian noise with 0 mean and standard deviation 3 is added to input. - Blur a random Gaussian blur with standard deviation from 2.5 to 3.5 is added to input. - Perspective Rotation a randomly-parameterized affine transformation is added to input. - Shading the input background is filled with a gradient in illumination. ``` import PIL import cv2 import numpy as np def noise_image(img): img_array = np.asarray(img) mean = 0.0 std = 3 noisy_img = img_array + np.random.normal(mean, std, img_array.shape) noisy_img_clipped = np.clip(noisy_img, 0, 255) noise_img = PIL.Image.fromarray(np.uint8(noisy_img_clipped)) noise_img = noise_img.resize((105, 105)) return noise_img def blur_image(img): blur_img = img.filter(PIL.ImageFilter.GaussianBlur(radius = 3)) blur_img = blur_img.resize((105, 105)) return blur_img def affine_rotation(img): rows, columns = img.shape point1 = np.float32([[10, 10], [30, 10], [10, 30]]) point2 = np.float32([[20, 15], [40, 10], [20, 40]]) anchor = cv2.getAffineTransform(point1, point2) output = cv2.warpAffine(img, anchor, (columns, rows)) affine_img = PIL.Image.fromarray(np.uint8(output)) affine_img = affine_img.resize((105, 105)) return affine_img def gradient_fill(img): output = cv2.Laplacian(img, cv2.CV_64F) laplacian_img = PIL.Image.fromarray(np.uint8(output)) laplacian_img = laplacian_img.resize((105, 105)) return laplacian_img ``` ### Generate Datasets - `ttf_path` is a folder contains all the font file with correct font name and `.ttf` extension - `data_path` is a folder stores or contains generated datasets Uses `TextRecognitionDataGenerator` ``` import os ttf_path = '' data_path = '' for file in os.listdir(ttf_path): if file.endswith('.ttf'): path = os.path.join(ttf_path, file) name, ext = os.path.splitext(os.path.basename(path)) out_path = data_path + '/' + name command = 'trdg -l en -c 30 -rs -let -num -r --length 1 -b 1 -e .png -fi -f 105 -ft ' + path + ' --output_dir ' + out_path os.system(command) ``` ### Import Datasets - `label_path` should be defined ``` import os import json from imutils import paths from random import seed, shuffle label_path = '' #Random image path from data_path image_paths = sorted(list(paths.list_images(data_path))) seed(10) shuffle(image_paths) #Use folder name in data_path as font name font_names = [] for f in os.listdir(data_path): if not f.startswith('.'): font_names.append(f) font_names.sort() with open(label_path, 'w') as outfile: json.dump(font_names, outfile) print('Font Names -> ', font_names) ``` ### Labeling font by the index of font name in `font_names` ``` def conv_label(label): return font_names.index(label) ``` ### Preprocessing Datasets ``` import os import itertools import numpy as np from keras.preprocessing.image import img_to_array data = [] labels = [] auguments = ["blur", "noise", "affine", "gradient"] for path in image_paths: #Labeling images label = path.split(os.path.sep)[-2] if not label.startswith('.'): label = conv_label(label) else: continue pil_img = pil_image(path) org_img = img_to_array(pil_img) data.append(org_img) labels.append(label) #Random auguments combinations for i in range(0, len(auguments)): for augument in list(itertools.combinations(auguments, i + 1)): temp_img = pil_img combinations = list(augument) for method in combinations: if method == 'noise': temp_img = noise_image(temp_img) elif method == 'blur': temp_img = blur_image(temp_img) elif method == 'affine': open_cv_affine = np.array(pil_img) temp_img = affine_rotation(open_cv_affine) elif method 
== 'gradient': open_cv_gradient = np.array(pil_img) temp_img = gradient_fill(open_cv_gradient) temp_img = img_to_array(temp_img) data.append(temp_img) labels.append(label) ``` ### Partition Datasets and transform - $\frac{3}{4}$ for training - $\frac{1}{4}$ for testing ``` import numpy as np from sklearn.model_selection import train_test_split from keras.utils import to_categorical #Partition data = np.asarray(data, dtype = "float") / 255.0 labels = np.array(labels) (trainX, testX, trainY, testY) = train_test_split(data, labels, test_size = 0.25, random_state = 10) #Converting labels from integers to vectors trainY = to_categorical(trainY, num_classes = len(font_names)) testY = to_categorical(testY, num_classes = len(font_names)) ``` ### Additional Datasets process - **Variable Character Spacing** when rendering each synthetic image, set the character spacing (by pixel) to be a Gaussian random variable of mean 10 and standard deviation 40, bounded by [0, 50]. - **Variable Aspect Ratio** Before cropping each image into an input patch, the image, with height fixed, is squeezed in width by a random ratio, drawn from a uniform distribution between $\frac{5}{6}$ and $\frac{7}{6}$. ``` from keras.preprocessing.image import ImageDataGenerator augmented_images = ImageDataGenerator( rotation_range = 30, width_shift_range = 0.1, height_shift_range = 0.1, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True ) ``` ### Re-arrange Datasets channels ``` from keras import backend as K K.set_image_data_format('channels_last') ``` ### Create model - **Unsupervised cross-domain sub-network ${C_u}$**, which consists of the first *K* layers of *CNN*. It accounts for extracting low-level visual features shared by both synthetic and real-world data domains. ${C_u}$ will be trained in an unsupervised way, using unlabeled data from both domains. It constitutes the crucial step that further minimizes the low-level feature gap, beyond the previous data augmentation efforts. - **Supervised domain-specific sub-network ${C_s}$**, which consists of the remaining *N − K* layers. It accounts for learning higher-level discriminative features for classification, based on the shared features from ${C_u}$. ${C_s}$ will be trained in a supervised way, using labeled data from the synthetic domain only. 
``` from keras.models import Sequential from keras.layers.normalization import BatchNormalization from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D , UpSampling2D ,Conv2DTranspose def create_model(): model = Sequential() #Cu Layers model.add(Conv2D(64, kernel_size = (48, 48), activation = 'relu', input_shape = (105, 105, 1))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Conv2D(128, kernel_size = (24, 24), activation = 'relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size = (2, 2))) model.add(Conv2DTranspose(128, (24, 24), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform')) model.add(UpSampling2D(size = (2, 2))) model.add(Conv2DTranspose(64, (12, 12), strides = (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'uniform')) model.add(UpSampling2D(size = (2, 2))) #Cs Layers model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Conv2D(256, kernel_size=(12, 12), activation = 'relu')) model.add(Flatten()) model.add(Dense(4096, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(4096, activation = 'relu')) model.add(Dropout(0.5)) model.add(Dense(2383, activation = 'relu')) model.add(Dense(len(font_names), activation = 'softmax')) return model ``` ### Compile Model ``` from keras import optimizers batch_size = 128 epochs = 50 model= create_model() opt = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True) model.compile(loss = 'mean_squared_error', optimizer = opt, metrics = ['accuracy']) ``` ### Fit and store Model - `model_path` should be defined ``` from keras import callbacks model_path = '' my_callbacks = [ callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, patience = 10, verbose = 0, mode = 'min'), callbacks.ModelCheckpoint(model_path, monitor = 'val_loss', verbose = 1, save_best_only = True, mode = 'min') ] model.fit( trainX, trainY, shuffle = True, batch_size = batch_size, epochs = epochs, verbose = 1, validation_data = (testX, testY), callbacks = my_callbacks ) ``` ### Evaluate ``` from keras.models import load_model model_path = '' model = load_model(model_path) score = model.evaluate(testX, testY, verbose = 0) print('Test loss ->', score[0]) print('Test accuracy ->', score[1]) ``` ### Revert font name from labels ``` def rev_conv_label(label): return font_names[label] ``` ### Verify ``` import PIL import numpy as np import matplotlib.cm as cm import matplotlib.pylab as plt from keras.preprocessing.image import img_to_array #Load image and de-noisy tmp_img = roi.copy().convert('L') tmp_img = blur_image(tmp_img) arr_img = img_to_array(tmp_img) #Predict using trained model data = [] data.append(arr_img) data = np.asarray(data, dtype = "float") / 255.0 y = np.argmax(model.predict(data), axis = -1) #Display result label = rev_conv_label(int(y[0])) fig, ax = plt.subplots(1) ax.imshow(roi, interpolation = 'nearest', cmap = cm.gray) ax.text(5, 5, label, bbox = {'facecolor': 'white', 'pad': 8}) plt.show() ```
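Note that the "Verify" cell above relies on a PIL image `roi` that is never created in this notebook; it is presumably a cropped text region taken from a test image. A minimal sketch of how such a region might be prepared is shown below; the file name and crop box are placeholders, not values from the original notebook.

```
from PIL import Image

# Hypothetical test image containing rendered text; the path is a placeholder.
test_img = Image.open('sample_text.png')

# Crop a text region and resize it to the 105x105 input size used throughout.
roi = test_img.crop((0, 0, 105, 105)).resize((105, 105))
```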
github_jupyter
``` %matplotlib inline %run utils.ipynb import matplotlib.pyplot as plt from matplotlib import colors, ticker # import cartopy.crs as ccrs import pandas as pd import numpy as np import scipy as sp from astropy.table import Table import astropy.units as u import astropy.coordinates as coord import arviz as az import seaborn as sns import kinesis as kn import gapipes as gp plt.style.use(mystyledict) %store -r out_full df = out_full#.loc[out_full['Member_r19']!='other'] print(f"{len(df)} rows, {len(df.columns)} columns") # slices of data gdr2 = df.groupby('in_dr2').get_group(True) df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby(["in_dr2"]).sum() df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_meingast').sum() df[["in_dr2", "in_leao", "in_meingast", "in_roser"]].fillna(False).groupby('in_dr2').get_group(False).groupby('in_roser').sum() fig, ax = plt.subplots() ax.hist(df['radial_velocity_error'].dropna(), np.logspace(-1,1.2,32)); median_rv_error = df['radial_velocity_error'].median() print(median_rv_error) ax.axvline(median_rv_error, c='k',lw=1); ax.set_xscale('log'); fig, ax = plt.subplots(1, 1, figsize=(4, 2.5), subplot_kw=dict(projection=ccrs.Mollweide())) ax.gridlines( crs=ccrs.Geodetic(), xlocs=[-180, -90, 0, 90, 180], ylocs=[0, 45, 90, -45, -90], linewidth=0.5, zorder=0, ) ax.scatter(df["ra"], df["dec"], s=1, c='k', transform=ccrs.Geodetic()) ax.scatter(gdr2["ra"], gdr2["dec"], s=1, transform=ccrs.Geodetic()) ax.set_global() ax.set_title("Sky distribution") fig.tight_layout() fig.savefig('../plots/hyades-sky.pdf') fig, ax = plt.subplots(1, 1, figsize=(4, 2.5),subplot_kw=dict(projection=ccrs.Mollweide(central_longitude=180))) ax.gridlines( crs=ccrs.Geodetic(), xlocs=[-180, -90, 0, 90, 180], ylocs=[0, 45, 90, -45, -90], linewidth=0.5, zorder=0, ) ax.scatter(df["l"], df["b"], s=1, c='k', transform=ccrs.Geodetic()) ax.scatter(gdr2["l"], gdr2["b"], s=1, transform=ccrs.Geodetic()) ax.set_global() ax.set_title("Galactic (centered on $l=180$)") fig.tight_layout() fig.savefig('../plots/hyades-galactic-distribution.pdf') fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[1].scatter(cartx.x, cartx.z, s=1, c=color) for cax in ax: cax.set_xlabel("$X_\mathrm{ICRS}$") ax[0].set_ylabel("$Y_\mathrm{ICRS}$") ax[1].set_ylabel("$Z_\mathrm{ICRS}$") fig.tight_layout() fig.savefig('../plots/hyades-xyz-icrs.pdf') xlims = (-115, 42) ylims = (-145, 186) zlims = (-50, 30) totheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0]) height_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0]) fig_xsize = 3 fig_ysize = totheight * fig_xsize fig, ax = plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize), sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]}) for cax in ax: cax.set_aspect('equal'); labels = ['cl+tails ({})'.format(len(df)), 'cl ({})'.format(len(gdr2))] for dset, color, label in zip([df, gdr2], ['k', None], labels): cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity ax[0].scatter(cartx.x, cartx.y, s=1, c=color, label=label); ax[1].scatter(cartx.x, cartx.z, s=1, c=color); ax[1].set_xlabel('$X$ [pc]') ax[0].set_ylabel('$Y$ [pc]') ax[1].set_ylabel('$Z$ [pc]'); ax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False) fig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, 
right=0.98, hspace=0.01) fig.savefig('../report/plots/hyades-data-dist.pdf') gdr2_rv = df.loc[df['radial_velocity'].notnull()] harps_rv = df.loc[df['RV_HARPS_leao'].notnull()] xlims = (-115, 42) ylims = (-145, 186) zlims = (-50, 30) totheight = ((zlims[1]-zlims[0]) + (ylims[1]-ylims[0]))/(xlims[1]-xlims[0]) height_ratio = (ylims[1]-ylims[0])/(zlims[1]-zlims[0]) fig_xsize = 3 fig_ysize = totheight * fig_xsize fig, ax = plt.subplots(2, 1, figsize=(fig_xsize+1., fig_ysize), sharex=True, gridspec_kw={'height_ratios':[height_ratio, .8]}) for cax in ax: cax.set_aspect('equal'); labels = ['', 'has RV'.format(len(gdr2_rv)), 'has HARPS RV'] for dset, color, label, s in zip([df, gdr2_rv,harps_rv], ['k', None,'tab:red'], labels, [1, 4,1]): cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity ax[0].scatter(cartx.x, cartx.y, s=s, c=color, label=label); ax[1].scatter(cartx.x, cartx.z, s=s, c=color); ax[1].set_xlabel('$X$ [pc]') ax[0].set_ylabel('$Y$ [pc]') ax[1].set_ylabel('$Z$ [pc]'); ax[0].legend(loc='lower right', fontsize=12, markerscale=3, fancybox=False) fig.subplots_adjust(left=0.22, bottom=0.08,top=0.99, right=0.98, hspace=0.01) # fig.savefig('../report/plots/hyades-data-dist-rv.pdf') df[["radial_velocity", "RV_HARPS_leao", "source_id"]].notnull().groupby( ["radial_velocity", "RV_HARPS_leao"] ).agg("count") delta_rv = df["radial_velocity"] - df["RV_HARPS_leao"] delta_rv_sigma = delta_rv / np.hypot(df["radial_velocity_error"], df["eRV_HARPS_leao"]) mean_delta_rv = np.nanmean(delta_rv) mean_delta_rv_sigma = np.nanmean(delta_rv_sigma) print(f"mean delta RV (DR2-HARPS) = {mean_delta_rv:-8.4f}") print(f"mean delta RV (DR2-HARPS) / error = {mean_delta_rv_sigma:-8.4f}") fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) ax1 = sns.distplot( delta_rv[~np.isnan(delta_rv)], ax=ax1, color="k", hist_kws={"lw":0}, kde_kws={"lw": 1}, ) ax1.axvline(0, c="k", lw=1) ax1.set_xlabel(r"$\mathrm{RV}_\mathrm{DR2} - \mathrm{RV}_\mathrm{HARPS}$") ax1.set_ylabel("Density") ax1.text( 0.05, 0.95, f"mean={mean_delta_rv:-.3f} km/s", ha="left", va="top", size=12, transform=ax1.transAxes, ) ax1.set_yticks([0, .5, 1, 1.5, 2.]) ax1.set_ylim(0, 2.2) sns.distplot( delta_rv_sigma[~np.isnan(delta_rv_sigma)], ax=ax2, color="k", hist_kws={"lw":0}, kde_kws={"lw": 1}, ) ax2.axvline(0, c="k", lw=1) ax2.set_xlabel( r"$\mathrm{RV}_\mathrm{DR2} - \mathrm{RV}_\mathrm{HARPS}" r"/ \sqrt{\sigma_\mathrm{RV, DR2}^2+\sigma_\mathrm{RV, HARPS}^2}$" ) ax2.set_ylabel("Density") fig.tight_layout() fig.savefig("../plots/compare-gaia-harps-rv.pdf") mean_cartv_icrs = [-6.03, 45.56, 5.57] vx, vy, vz = mean_cartv_icrs fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:,None] cond = (np.abs(dvx)<5) & (np.abs(dvy)<5) & (np.abs(dvz)<5) # ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color) ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color) for cax in ax: cax.set_xlabel("$X_\mathrm{ICRS}$") ax[0].set_ylabel("$Y_\mathrm{ICRS}$") ax[1].set_ylabel("$Z_\mathrm{ICRS}$") fig.tight_layout() # fig.savefig('../plots/hyades-xyz-vector-icrs.pdf') mean_cartv_galactic = [-42.24, -19.00, -1.48] fig, ax = plt.subplots(1, 2, figsize=(8, 4)) for cax in ax: cax.set_aspect("equal") for dset, color in zip([df, gdr2], ["k", None]): cartx, cartv = 
dset.g.galactic.cartesian, dset.g.galactic.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_galactic)[:, None] cond = (np.abs(dvx) < 3) & (np.abs(dvy) < 3) & (np.abs(dvz) < 3) # ax[0].scatter(cartx.x, cartx.y, s=1, c=color) ax[0].quiver(cartx.x[cond], cartx.y[cond], dvx[cond], dvy[cond], color=color) ax[1].quiver(cartx.x[cond], cartx.z[cond], dvx[cond], dvz[cond], color=color) for cax in ax: cax.set_xlabel("$X_\mathrm{Galactic}$") ax[0].set_ylabel("$Y_\mathrm{Galactic}$") ax[1].set_ylabel("$Z_\mathrm{Galactic}$") fig.tight_layout() fig.savefig('../plots/hyades-xyz-vector-galactic.pdf') mean_cartv_galactic = [-42.24, -19.00, -1.48] fig, ax = plt.subplots( 3, 3, figsize=(6.5, 6.5), sharex="col", sharey="all" ) dset = df cartx, cartv = dset.g.galactic.cartesian, dset.g.galactic.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_galactic)[:, None] xyz = cartx.xyz.value dvxyz = [dvx, dvy, dvz] for icol in range(3): for irow in range(3): ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1) ax[0, 0].set_ylim(-5, 5) for cax in ax.ravel(): cax.set_yticks([-4, -2, 0, 2, 4]) cax.tick_params(width=1, length=6) fig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.94) for cax, label in zip(ax[:, 0], ["x", "y", "z"]): cax.set_ylabel( r"$\Delta v_{0}$".format(label) + r" [$\mathrm{km}\,\mathrm{s}^{-1}$]" ) ax[2, 0].set_xlabel("$X$ [pc]") ax[2, 1].set_xlabel("$Y$ [pc]") ax[2, 2].set_xlabel("$Z$ [pc]") fig.suptitle( "Residual velocities vs. position (Galactic) $N$={}/{}".format( (~np.isnan(dvx)).sum(), len(df) ), size=15 ) fig.subplots_adjust(right=0.98, left=0.1, bottom=0.1) # fig.savefig("../plots/residual-velocity-vs-position-galactic.pdf") error_summary = pd.DataFrame( dict( pmra_error_frac=np.abs(df["pmra_error"] / df["pmra"]), pmdec_error_frac=np.abs(df["pmdec_error"] / df["pmdec"]), parallax_error_frac=np.abs(df["parallax_error"] / df["parallax"]), ) ).describe() error_summary pmdelta = np.hypot( *(df_gfr[['pmra', 'pmdec']].values - df[['pmra', 'pmdec']].values).T) plt.scatter(df['phot_g_mean_mag'], pmdelta, s=4); plt.xlabel('$G$ [mag]') plt.ylabel(r'$\Delta \mu$'); deltav = np.hypot((df_gfr.g.vra-df.g.vra).values, (df_gfr.g.vdec-df.g.vdec).values) plt.scatter(df['phot_g_mean_mag'], deltav, s=4); plt.xlabel('$G$ [mag]') plt.ylabel(r'$\Delta v_{\mathrm{tan}}$'); mean_cartv_icrs = [-6.03, 45.56, 5.57] fig, ax = plt.subplots( 3, 3, figsize=(6.5, 6.5), sharex="col", sharey="all" ) dset = df cartx, cartv = dset.g.icrs.cartesian, dset.g.icrs.velocity dvx, dvy, dvz = cartv.d_xyz.value - np.array(mean_cartv_icrs)[:, None] xyz = cartx.xyz.value dvxyz = [dvx, dvy, dvz] for icol in range(3): for irow in range(3): ax[irow, icol].scatter(xyz[icol], dvxyz[irow], s=1) ax[0, 0].set_ylim(-5, 5) for cax in ax.ravel(): cax.set_yticks([-4, -2, 0, 2, 4]) cax.tick_params(width=1, length=6) fig.subplots_adjust(wspace=0.04, hspace=0.04, left=0.15, bottom=0.15, top=0.85) for cax, label in zip(ax[:, 0], ["x", "y", "z"]): cax.set_ylabel(r"$\Delta v_{0}$".format(label)+r" [$\mathrm{km}\,\mathrm{s}^{-1}$]") ax[2,0].set_xlabel("$X$ [pc]") ax[2,1].set_xlabel("$Y$ [pc]") ax[2,2].set_xlabel("$Z$ [pc]") fig.suptitle( "Residual velocities vs. 
position (ICRS) $N$={}/{}".format( (~np.isnan(dvx)).sum(), len(df) ), size=15 ) fig.subplots_adjust(right=0.98, left=0.1, bottom=0.1, top=0.94) # fig.savefig("../plots/residual-velocity-vs-position-icrs.pdf") fig, ax = plt.subplots(1, 1) ax.set_xlabel("$G$ [mag]") n_bright_sources = (df["phot_g_mean_mag"] < 12).sum() print(n_bright_sources) ax.hist( df["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", color="k", label="all (N={})".format(len(df)), ) ax.hist( df.dropna(subset=["radial_velocity"])["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", label="has Gaia RV (N={})".format(df["radial_velocity"].notna().sum()), ) ax.hist( df.dropna(subset=["RV_HARPS_leao"])["phot_g_mean_mag"], bins=np.linspace(0, 20, 21), histtype="step", label="has HARPS RV (N={})".format(df["RV_HARPS_leao"].notna().sum()), ) ax.legend(loc="upper left", fontsize=10, frameon=False); ax.set_ylabel('Count'); df = out_full.loc[out_full["Member_r19"] != "other"] fig, ax = plt.subplots() ax.scatter( df["bp_rp"], df["phot_g_mean_mag"] + df.g.distmod, s=1, c='k' ) ax.invert_yaxis() ax.set_xlabel("BP-RP [mag]") ax.set_ylabel("$M_G$ [mag]"); # get tgas data for velocity uncertainty comparison hy_tgas = pd.read_csv("../data/reino_tgas_full.csv", index_col=0) print(f"number of sources in Reino selection: {len(hy_tgas)} rows") tmp = pd.concat( [ hy_tgas.g.vra_error.rename("v").to_frame().assign(label=r"TGAS $v_\alpha$"), hy_tgas.g.vdec_error.rename("v").to_frame().assign(label=r"TGAS $v_\delta$"), df.g.vra_error.rename("v").to_frame().assign(label=r"DR2 $v_\alpha$"), df.g.vdec_error.rename("v").to_frame().assign(label=r"DR2 $v_\delta$"), # df.g.vra_error.rename('v').to_frame().assign(label='HG vra'), # df.g.vdec_error.rename('v').to_frame().assign(label='HG vdec'), df["radial_velocity_error"].rename("v").to_frame().assign(label="DR2 RV"), df["eRV_HARPS_leao"].rename("v").to_frame().assign(label="HARPS RV"), ] ) tmp["v"] = np.log10(tmp["v"]) tmp.groupby('label').describe() g = sns.FacetGrid(tmp, row="label", aspect=5, height=0.8) g.map(sns.kdeplot, "v", clip_on=False, shade=True, alpha=1, lw=1.5, bw=0.2) g.set_titles("") g.fig.subplots_adjust(hspace=0.1, top=0.95, right=0.95, left=0.05, bottom=0.12) g.set(xticklabels=["0.001", "0.01", "0.1", "1", "10"], xticks=[-3, -2, -1, 0, 1]) g.set(yticks=[]) for cax, label in zip(g.fig.axes, g.row_names): cax.spines["left"].set_visible(False) cax.tick_params(length=5, labelsize=12) cax.text(0.95, 0.95, label, ha='right', va='top', transform=cax.transAxes, bbox=dict(facecolor='w'), size=12) cax.axvline(np.log10(0.3), c='k', lw=1, linestyle=':', zorder=-1); g.fig.axes[-1].set_xlabel(r'$\log \sigma_v\,/\,[\mathrm{km}\,\mathrm{s}^{-1}$]'); g.fig.savefig("../plots/hyades-velocity-uncertainty-distribution.pdf") cl_center_icrs_cart = [] ```
github_jupyter
# Scaling up ML using Cloud AI Platform In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud AI Platform. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates *how* to package up a TensorFlow model to run it within Cloud AI Platform. Later in the course, we will look at ways to make a more effective machine learning model. ## Environment variables for project and bucket Note that: <ol> <li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li> <li> Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket. </li> </ol> <b>Change the cell below</b> to reflect your Project ID and bucket name. ``` !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst import os PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 # For Python Code # Model Info MODEL_NAME = 'taxifare' # Model Version MODEL_VERSION = 'v1' # Training Directory name TRAINING_DIR = 'taxi_trained' # for bash os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION os.environ['MODEL_NAME'] = MODEL_NAME os.environ['MODEL_VERSION'] = MODEL_VERSION os.environ['TRAINING_DIR'] = TRAINING_DIR os.environ['TFVERSION'] = '2.5' # Tensorflow version %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION ``` ## Packaging up the code Take your code and put into a standard Python package structure. <a href="taxifare/trainer/model.py">model.py</a> and <a href="taxifare/trainer/task.py">task.py</a> containing the Tensorflow code from earlier (explore the <a href="taxifare/trainer/">directory structure</a>). ``` %%bash find ${MODEL_NAME} %%bash cat ${MODEL_NAME}/trainer/model.py ``` ## Find absolute paths to your data Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you ``` %%bash echo "Working Directory: ${PWD}" echo "Head of taxi-train.csv" head -1 $PWD/taxi-train.csv echo "Head of taxi-valid.csv" head -1 $PWD/taxi-valid.csv ``` ## Running the Python module from the command-line #### Clean model training dir/output dir ``` %%bash # This is so that the trained model is started fresh each time. However, this needs to be done before rm -rf $PWD/${TRAINING_DIR} %%bash # Setup python so it sees the task module which controls the model.py export PYTHONPATH=${PYTHONPATH}:${PWD}/${MODEL_NAME} # Currently set for python 2. To run with python 3 # 1. Replace 'python' with 'python3' in the following command # 2. 
Edit trainer/task.py to reflect proper module import method python -m trainer.task \ --train_data_paths="${PWD}/taxi-train*" \ --eval_data_paths=${PWD}/taxi-valid.csv \ --output_dir=${PWD}/${TRAINING_DIR} \ --train_steps=1000 --job-dir=./tmp %%bash ls $PWD/${TRAINING_DIR}/export/exporter/ %%writefile ./test.json {"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2} %%bash sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete %%bash # This model dir is the model exported after training and is used for prediction # model_dir=$(ls ${PWD}/${TRAINING_DIR}/export/exporter | tail -1) # predict using the trained model gcloud ai-platform local predict \ --model-dir=${PWD}/${TRAINING_DIR}/export/exporter/${model_dir} \ --json-instances=./test.json ``` #### Clean model training dir/output dir ``` %%bash # This is so that the trained model is started fresh each time. However, this needs to be done before rm -rf $PWD/${TRAINING_DIR} ``` ## Running locally using gcloud ``` %%bash # Use Cloud Machine Learning Engine to train the model in local file system gcloud ai-platform local train \ --module-name=trainer.task \ --package-path=${PWD}/${MODEL_NAME}/trainer \ -- \ --train_data_paths=${PWD}/taxi-train.csv \ --eval_data_paths=${PWD}/taxi-valid.csv \ --train_steps=1000 \ --output_dir=${PWD}/${TRAINING_DIR} %%bash ls $PWD/${TRAINING_DIR} ``` ## Submit training job using gcloud First copy the training data to the cloud. Then, launch a training job. After you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress. <b>Note:</b> Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job. ``` %%bash # Clear Cloud Storage bucket and copy the CSV files to Cloud Storage bucket echo $BUCKET gsutil -m rm -rf gs://${BUCKET}/${MODEL_NAME}/smallinput/ gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/${MODEL_NAME}/smallinput/ %%bash OUTDIR=gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR} JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S) echo $OUTDIR $REGION $JOBNAME # Clear the Cloud Storage Bucket used for the training job gsutil -m rm -rf $OUTDIR gcloud ai-platform jobs submit training $JOBNAME \ --region=$REGION \ --module-name=trainer.task \ --package-path=${PWD}/${MODEL_NAME}/trainer \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=BASIC \ --runtime-version 2.3 \ --python-version 3.5 \ -- \ --train_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-train*" \ --eval_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-valid*" \ --output_dir=$OUTDIR \ --train_steps=10000 ``` Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. <b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b> ``` %%bash gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput ``` ## Train on larger dataset I have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow. 
Go to http://bigquery.cloud.google.com/ and type the query: <pre> SELECT (tolls_amount + fare_amount) AS fare_amount, pickup_longitude AS pickuplon, pickup_latitude AS pickuplat, dropoff_longitude AS dropofflon, dropoff_latitude AS dropofflat, passenger_count*1.0 AS passengers, 'nokeyindata' AS key FROM [nyc-tlc:yellow.trips] WHERE trip_distance > 0 AND fare_amount >= 2.5 AND pickup_longitude > -78 AND pickup_longitude < -70 AND dropoff_longitude > -78 AND dropoff_longitude < -70 AND pickup_latitude > 37 AND pickup_latitude < 45 AND dropoff_latitude > 37 AND dropoff_latitude < 45 AND passenger_count > 0 AND ABS(HASH(pickup_datetime)) % 1000 == 1 </pre> Note that this is now 1,000,000 rows (i.e. 100x the original dataset). Export this to CSV using the following steps (Note that <b>I have already done this and made the resulting GCS data publicly available</b>, so you don't need to do it.): <ol> <li> Click on the "Save As Table" button and note down the name of the dataset and table. <li> On the BigQuery console, find the newly exported table in the left-hand-side menu, and click on the name. <li> Click on "Export Table" <li> Supply your bucket name and give it the name train.csv (for example: gs://cloud-training-demos-ml/taxifare/ch3/train.csv). Note down what this is. Wait for the job to finish (look at the "Job History" on the left-hand-side menu) <li> In the query above, change the final "== 1" to "== 2" and export this to Cloud Storage as valid.csv (e.g. gs://cloud-training-demos-ml/taxifare/ch3/valid.csv) <li> Download the two files, remove the header line and upload it back to GCS. </ol> <p/> <p/> <h2> Run Cloud training on 1-million row dataset </h2> This took 60 minutes and uses as input 1-million rows. The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). At the end of the training the loss was 32, but the RMSE (calculated on the validation dataset) was stubbornly at 9.03. So, simply adding more data doesn't help. ``` %%bash OUTDIR=gs://${BUCKET}/${MODEL_NAME}/${TRAINING_DIR} JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S) CRS_BUCKET=cloud-training-demos # use the already exported data echo $OUTDIR $REGION $JOBNAME gsutil -m rm -rf $OUTDIR gcloud ai-platform jobs submit training $JOBNAME \ --region=$REGION \ --module-name=trainer.task \ --package-path=${PWD}/${MODEL_NAME}/trainer \ --job-dir=$OUTDIR \ --staging-bucket=gs://$BUCKET \ --scale-tier=STANDARD_1 \ --runtime-version 2.3 \ --python-version 3.5 \ -- \ --train_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/train.csv" \ --eval_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/valid.csv" \ --output_dir=$OUTDIR \ --train_steps=100000 ``` ## Challenge Exercise Modify your solution to the challenge exercise in d_trainandevaluate.ipynb appropriately. Make sure that you implement training and deployment. Increase the size of your dataset by 10x since you are running on the cloud. Does your accuracy improve? Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License
github_jupyter
# Riskfolio-Lib Tutorial: <br>__[Financionerioncios](https://financioneroncios.wordpress.com)__ <br>__[Orenji](https://www.orenj-i.net)__ <br>__[Riskfolio-Lib](https://riskfolio-lib.readthedocs.io/en/latest/)__ <br>__[Dany Cajas](https://www.linkedin.com/in/dany-cajas/)__ <a href='https://ko-fi.com/B0B833SXD' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://cdn.ko-fi.com/cdn/kofi1.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a> ## Tutorial 14: Mean [Ulcer Index](https://en.wikipedia.org/wiki/Ulcer_index) Portfolio Optimization ## 1. Downloading the data: ``` import numpy as np import pandas as pd import yfinance as yf import warnings warnings.filterwarnings("ignore") pd.options.display.float_format = '{:.4%}'.format # Date range start = '2016-01-01' end = '2019-12-30' # Tickers of assets assets = ['JCI', 'TGT', 'CMCSA', 'CPB', 'MO', 'APA', 'MMC', 'JPM', 'ZION', 'PSA', 'BAX', 'BMY', 'LUV', 'PCAR', 'TXT', 'TMO', 'DE', 'MSFT', 'HPQ', 'SEE', 'VZ', 'CNP', 'NI', 'T', 'BA'] assets.sort() # Downloading data data = yf.download(assets, start = start, end = end) data = data.loc[:,('Adj Close', slice(None))] data.columns = assets # Calculating returns Y = data[assets].pct_change().dropna() display(Y.head()) ``` ## 2. Estimating Mean Ulcer Index Portfolios ### 2.1 Calculating the portfolio that maximizes Ulcer Performance Index (UPI) ratio. ``` import riskfolio as rp # Building the portfolio object port = rp.Portfolio(returns=Y) # Calculating optimal portfolio # Select method and estimate input parameters: method_mu='hist' # Method to estimate expected returns based on historical data. method_cov='hist' # Method to estimate covariance matrix based on historical data. port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94) # Estimate optimal portfolio: model='Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model) rm = 'UCI' # Risk measure used, this time will be variance obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe hist = True # Use historical scenarios for risk measures that depend on scenarios rf = 0 # Risk free rate l = 0 # Risk aversion factor, only useful when obj is 'Utility' w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist) display(w.T) ``` ### 2.2 Plotting portfolio composition ``` # Plotting the composition of the portfolio ax = rp.plot_pie(w=w, title='Sharpe Mean Ulcer Index', others=0.05, nrow=25, cmap = "tab20", height=6, width=10, ax=None) ``` ### 2.3 Calculate efficient frontier ``` points = 40 # Number of points of the frontier frontier = port.efficient_frontier(model=model, rm=rm, points=points, rf=rf, hist=hist) display(frontier.T.head()) # Plotting the efficient frontier label = 'Max Risk Adjusted Return Portfolio' # Title of point mu = port.mu # Expected returns cov = port.cov # Covariance matrix returns = port.returns # Returns of the assets ax = rp.plot_frontier(w_frontier=frontier, mu=mu, cov=cov, returns=returns, rm=rm, rf=rf, alpha=0.05, cmap='viridis', w=w, label=label, marker='*', s=16, c='r', height=6, width=10, ax=None) # Plotting efficient frontier composition ax = rp.plot_frontier_area(w_frontier=frontier, cmap="tab20", height=6, width=10, ax=None) ``` ## 3. Estimating Risk Parity Portfolios for Ulcer Index ### 3.1 Calculating the risk parity portfolio for Ulcer Index. 
```
b = None # Risk contribution constraints vector

w_rp = port.rp_optimization(model=model, rm=rm, rf=rf, b=b, hist=hist)

display(w_rp.T)
```

### 3.2 Plotting portfolio composition

```
ax = rp.plot_pie(w=w_rp, title='Risk Parity Ulcer Index', others=0.05, nrow=25,
                 cmap = "tab20", height=6, width=10, ax=None)
```

### 3.3 Plotting Risk Composition

```
ax = rp.plot_risk_con(w_rp, cov=port.cov, returns=port.returns, rm=rm, rf=0,
                      alpha=0.01, color="tab:blue", height=6, width=10, ax=None)
```
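For reference, the Ulcer Index used as the risk measure (`rm = 'UCI'`) above is the root-mean-square drawdown of the cumulative return curve, and the Ulcer Performance Index maximized in section 2.1 is the mean excess return divided by it. The sketch below is an independent pandas illustration of those definitions (Riskfolio-Lib's internal implementation may differ in details such as compounding or scaling); `Y` and `w` are the objects created in the cells above.

```
import numpy as np
import pandas as pd

def ulcer_index(returns: pd.Series) -> float:
    """Root-mean-square drawdown of the cumulative return curve."""
    wealth = (1 + returns).cumprod()
    running_max = wealth.cummax()
    drawdown = (wealth - running_max) / running_max   # <= 0 at every date
    return float(np.sqrt(np.mean(drawdown ** 2)))

def ulcer_performance_index(returns: pd.Series, rf: float = 0.0) -> float:
    """Mean excess return per unit of Ulcer Index."""
    return float((returns.mean() - rf) / ulcer_index(returns))

# Example: evaluate the optimal portfolio's historical returns
port_returns = (Y @ w).iloc[:, 0]
print(ulcer_index(port_returns), ulcer_performance_index(port_returns))
```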
github_jupyter
# Multilayer Perceptrons
:label:`sec_mlp`

In :numref:`chap_linear` we introduced softmax regression (:numref:`sec_softmax`), implemented it from scratch (:numref:`sec_softmax_scratch`), then implemented it again using high-level APIs (:numref:`sec_softmax_concise`), and trained classifiers to recognize 10 categories of clothing from low-resolution images. Along the way, we learned how to process data, how to convert outputs into a valid probability distribution, and how to apply an appropriate loss function and minimize it with respect to the model's parameters. Now that we have mastered these mechanics in the context of simple linear models, we can begin our exploration of deep neural networks, the main class of models covered in this book.

## Hidden Layers

In :numref:`subsec_linear_model` we described the affine transformation, a linear transformation with an added bias term. First, recall the softmax regression architecture shown in :numref:`fig_softmaxreg`. That model maps our inputs directly to outputs via a single affine transformation followed by a softmax operation. If our labels really were related to the input data by an affine transformation, this approach would be sufficient. However, *linearity* in affine transformations is a strong assumption.

### Linear Models May Go Wrong

For example, linearity implies the assumption of *monotonicity*: any increase in a feature must either always increase the model's output (if the corresponding weight is positive) or always decrease it (if the corresponding weight is negative). Sometimes that makes sense. For example, if we were trying to predict whether an individual will repay a loan, we might reasonably assume that, all else being equal, an applicant with a higher income is always more likely to repay than one with a lower income. But while repayment probability increases monotonically with income, the relationship is not linear. An increase in income from 0 to 50 thousand likely raises the probability of repayment more than an increase from 1 million to 1.05 million. One way to handle this is to preprocess our data so that linearity becomes more plausible, say, by using the logarithm of income as the feature.

It is easy, however, to come up with examples that violate monotonicity. Say we want to predict mortality risk based on body temperature. For people with a body temperature above 37°C, higher temperatures indicate greater risk; for people with a body temperature below 37°C, higher temperatures indicate lower risk. In this case too we could solve the problem with some clever preprocessing, for instance by using the distance from 37°C as the feature.

But what about classifying images of cats and dogs? Should increasing the intensity of the pixel at location $(13, 17)$ always increase (or always decrease) the likelihood that the image depicts a dog? Relying on a linear model corresponds to the implicit assumption that the only requirement for distinguishing cats from dogs is to assess the intensity of individual pixels. In a world where inverting an image preserves its category, this approach is doomed to fail.

Compared with our earlier examples, linearity here is absurd, and it is hard to fix the problem with simple preprocessing, because the significance of any pixel depends in complex ways on its context (the values of the surrounding pixels). There may well exist a representation of our data that takes the relevant interactions among features into account, on top of which a linear model would be suitable, but we do not know how to compute such a representation by hand. With deep neural networks, we use observational data to jointly learn both a hidden-layer representation and a linear predictor that acts upon that representation.

### Incorporating Hidden Layers

We can overcome the limitations of linear models by incorporating one or more hidden layers into the network, allowing it to handle a more general class of functional relationships. The easiest way to do this is to stack many fully connected layers on top of each other: each layer feeds into the layer above it until we generate the final output. We can think of the first $L-1$ layers as the representation and the final layer as a linear predictor. This architecture is commonly called a *multilayer perceptron*, usually abbreviated as *MLP*. Below, we depict an MLP diagrammatically (:numref:`fig_mlp`).

![An MLP with a single hidden layer of 5 hidden units](../img/mlp.svg)
:label:`fig_mlp`

This MLP has 4 inputs and 3 outputs, and its hidden layer contains 5 hidden units. The input layer involves no computation, so producing outputs with this network requires implementing the computations for only the hidden layer and the output layer; thus, the number of layers in this MLP is 2. Note that both layers are fully connected: every input influences every neuron in the hidden layer, and each of these in turn influences every neuron in the output layer.

However, as noted in :numref:`subsec_parameterization-cost-fc-layers`, the parameterization cost of MLPs with fully connected layers can be prohibitively high, which may motivate a trade-off between parameter saving and model effectiveness even without changing the input or output size :cite:`Zhang.Tay.Zhang.ea.2021`.

### From Linear to Nonlinear

As before, we denote by the matrix $\mathbf{X} \in \mathbb{R}^{n \times d}$ a minibatch of $n$ examples, where each example has $d$ input features. For a one-hidden-layer MLP with $h$ hidden units, we denote by $\mathbf{H} \in \mathbb{R}^{n \times h}$ the outputs of the hidden layer, called the *hidden representations*. In mathematics or code, $\mathbf{H}$ is also known as a *hidden-layer variable* or a *hidden variable*. Since the hidden and output layers are both fully connected, we have hidden-layer weights $\mathbf{W}^{(1)} \in \mathbb{R}^{d \times h}$ and biases $\mathbf{b}^{(1)} \in \mathbb{R}^{1 \times h}$, and output-layer weights $\mathbf{W}^{(2)} \in \mathbb{R}^{h \times q}$ and biases $\mathbf{b}^{(2)} \in \mathbb{R}^{1 \times q}$. Formally, we calculate the outputs $\mathbf{O} \in \mathbb{R}^{n \times q}$ of the one-hidden-layer MLP as follows:

$$
\begin{aligned}
    \mathbf{H} & = \mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)}, \\
    \mathbf{O} & = \mathbf{H}\mathbf{W}^{(2)} + \mathbf{b}^{(2)}.
\end{aligned}
$$

Note that after adding the hidden layer, the model now requires us to track and update additional sets of parameters. So what have we gained? You might be surprised to find out that, for the model defined above, we gain nothing! The reason is simple: the hidden units above are given by an affine function of the inputs, and the outputs (pre-softmax) are just an affine function of the hidden units. An affine function of an affine function is itself an affine function, and our earlier linear model was already capable of representing any affine function.

We can see this equivalence formally: for any values of the weights, we can just collapse the hidden layer, yielding an equivalent single-layer model with parameters $\mathbf{W} = \mathbf{W}^{(1)}\mathbf{W}^{(2)}$ and $\mathbf{b} = \mathbf{b}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)}$:

$$
\mathbf{O} = (\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)})\mathbf{W}^{(2)} + \mathbf{b}^{(2)} = \mathbf{X} \mathbf{W}^{(1)}\mathbf{W}^{(2)} + \mathbf{b}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)} = \mathbf{X} \mathbf{W} + \mathbf{b}.
$$

To realize the potential of multilayer architectures, we need one more key ingredient: a nonlinear *activation function* $\sigma$ applied to each hidden unit following the affine transformation. The outputs of activation functions (e.g., $\sigma(\cdot)$) are called *activations*. In general, once activation functions are in place, it is no longer possible to collapse our MLP into a linear model:

$$
\begin{aligned}
    \mathbf{H} & = \sigma(\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)}), \\
    \mathbf{O} & = \mathbf{H}\mathbf{W}^{(2)} + \mathbf{b}^{(2)}.\\
\end{aligned}
$$

Since each row in $\mathbf{X}$ corresponds to an example in the minibatch, with some abuse of notation we define the nonlinearity $\sigma$ to apply to its input in a row-wise fashion, i.e., one example at a time. We used softmax notation in the same way in :numref:`subsec_softmax_vectorization` to denote a row-wise operation. In this section, however, the activation functions that we apply to hidden layers are usually not merely row-wise but element-wise. That means that after computing the linear portion of each layer, we can compute each activation without looking at the values taken by the other hidden units. This is true for most activation functions.

To build more general MLPs, we can continue stacking such hidden layers, e.g., $\mathbf{H}^{(1)} = \sigma_1(\mathbf{X} \mathbf{W}^{(1)} + \mathbf{b}^{(1)})$ and $\mathbf{H}^{(2)} = \sigma_2(\mathbf{H}^{(1)} \mathbf{W}^{(2)} + \mathbf{b}^{(2)})$, one on top of another, yielding ever more expressive models.
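To tie this algebra to code, here is a minimal sketch (not part of the original chapter) of the single-hidden-layer forward pass with a ReLU activation; the sizes are arbitrary and the weights are random, so this only illustrates the shapes involved:

```
import torch

n, d, h, q = 4, 20, 256, 10          # batch size, inputs, hidden units, outputs
X = torch.randn(n, d)

W1, b1 = torch.randn(d, h) * 0.01, torch.zeros(h)
W2, b2 = torch.randn(h, q) * 0.01, torch.zeros(q)

H = torch.relu(X @ W1 + b1)          # hidden representation, shape (n, h)
O = H @ W2 + b2                      # outputs (pre-softmax), shape (n, q)
print(H.shape, O.shape)
```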
### Universal Approximation Theorem

Via their hidden neurons, which depend on the values of all the inputs, MLPs can capture complex interactions among our inputs. We can easily design hidden nodes to perform arbitrary computation, for instance, basic logic operations on a pair of inputs; MLPs are universal approximators. Even with a network of just one hidden layer, given enough neurons and the right weights, we can model any function, although actually learning that function is the hard part.

You might think of neural networks as being a bit like the C programming language. Like any other modern language, C is capable of expressing any computable program, but actually coming up with a program that meets your specification is the hard part.

Moreover, just because a single-hidden-layer network *can* learn any function does not mean that we should try to solve all of our problems with single-hidden-layer networks. In fact, we can approximate many functions much more easily by using deeper (rather than wider) networks. We will discuss this in more detail in later chapters.

## Activation Functions
:label:`subsec_activation_functions`

*Activation functions* decide whether a neuron should be activated or not by computing the weighted sum and adding a bias to it. They are differentiable operators that transform input signals into outputs, and most of them are nonlinear. Because activation functions are fundamental to deep learning, (**let us briefly survey some common ones**) below.

```
%matplotlib inline
import torch
from d2l import torch as d2l
```

### ReLU Function

The most popular activation function is the *rectified linear unit* (*ReLU*), both because it is simple to implement and because it performs well on a wide variety of prediction tasks. [**ReLU provides a very simple nonlinear transformation**]. Given an element $x$, the function is defined as the maximum of that element and $0$:

(**$$\operatorname{ReLU}(x) = \max(x, 0).$$**)

Informally, the ReLU function retains only the positive elements and discards all negative elements by setting the corresponding activations to 0. To gain some intuition, we can plot the function. As you can see from the figure, the activation function is piecewise linear.

```
x = torch.arange(-8.0, 8.0, 0.1, requires_grad=True)
y = torch.relu(x)
d2l.plot(x.detach(), y.detach(), 'x', 'relu(x)', figsize=(5, 2.5))
```

When the input is negative, the derivative of the ReLU function is 0, and when the input is positive, the derivative is 1. Note that the ReLU function is not differentiable when the input takes a value of exactly 0; in that case we default to the left-hand-side derivative and say the derivative is 0 when the input is 0. We can get away with this because the input may never actually be zero. As the old adage goes, "if subtle boundary conditions matter, we are probably doing mathematics, not engineering," and that sentiment applies exactly here. Below we plot the derivative of the ReLU function.

```
y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of relu', figsize=(5, 2.5))
```

The reason for using ReLU is that its derivatives are particularly well behaved: either they vanish or they just let the argument through. This makes optimization better behaved, and ReLU mitigates the vanishing-gradient problem that plagued earlier neural networks (more on this later).

Note that there are many variants of the ReLU function, including the *parameterized ReLU* (*pReLU*) :cite:`He.Zhang.Ren.ea.2015`. This variant adds a linear term to ReLU, so some information still gets through even when the argument is negative:

$$\operatorname{pReLU}(x) = \max(0, x) + \alpha \min(0, x).$$
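Unlike ReLU above, the original text does not plot pReLU; a quick sketch (not part of the chapter) using the same `x` tensor and plotting helper, with $\alpha = 0.1$ chosen arbitrarily:

```
alpha = 0.1
y = torch.max(x, torch.zeros_like(x)) + alpha * torch.min(x, torch.zeros_like(x))
d2l.plot(x.detach(), y.detach(), 'x', 'prelu(x)', figsize=(5, 2.5))
```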
### Sigmoid Function

[**The *sigmoid function* transforms an input whose domain is $\mathbb{R}$ into an output on the interval (0, 1)**]. For that reason, the sigmoid is often called a *squashing function*: it squashes any input in the range (-inf, inf) to some value in the interval (0, 1):

(**$$\operatorname{sigmoid}(x) = \frac{1}{1 + \exp(-x)}.$$**)

In the earliest neural networks, scientists were interested in modeling biological neurons that either "fire" or "do not fire". Thus the pioneers of the field, going all the way back to McCulloch and Pitts, the inventors of the artificial neuron, focused on thresholding units, which take value 0 when their input is below some threshold and value 1 when the input exceeds it.

When attention shifted to gradient-based learning, the sigmoid function was a natural choice because it is a smooth, differentiable approximation to a thresholding unit. Sigmoids are still widely used as activation functions on the output units when we want to interpret the outputs as probabilities for binary classification problems (you can think of the sigmoid as a special case of the softmax). However, the sigmoid is now rarely used in hidden layers, having mostly been replaced by the simpler and more easily trained ReLU. In later chapters on recurrent neural networks, we will describe architectures that leverage sigmoid units to control the flow of information across time.

Below, we plot the sigmoid function. Note that when the input is close to 0, the sigmoid function approaches a linear transformation.

```
y = torch.sigmoid(x)
d2l.plot(x.detach(), y.detach(), 'x', 'sigmoid(x)', figsize=(5, 2.5))
```

The derivative of the sigmoid function is given by the following equation:

$$\frac{d}{dx} \operatorname{sigmoid}(x) = \frac{\exp(-x)}{(1 + \exp(-x))^2} = \operatorname{sigmoid}(x)\left(1-\operatorname{sigmoid}(x)\right).$$

The derivative of the sigmoid function is plotted below. Note that it reaches a maximum of 0.25 when the input is 0, and decays toward 0 as the input moves away from 0 in either direction.

```
# Clear previous gradients
x.grad.data.zero_()
y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of sigmoid', figsize=(5, 2.5))
```

### Tanh Function

Like the sigmoid function, [**the tanh (hyperbolic tangent) function also squashes its inputs, transforming them onto the interval (-1, 1)**]. The tanh function is given by:

(**$$\operatorname{tanh}(x) = \frac{1 - \exp(-2x)}{1 + \exp(-2x)}.$$**)

Below we plot the tanh function. Note that as the input nears 0, the tanh function approaches a linear transformation. Its shape is similar to that of the sigmoid function, except that the tanh function is point-symmetric about the origin of the coordinate system.

```
y = torch.tanh(x)
d2l.plot(x.detach(), y.detach(), 'x', 'tanh(x)', figsize=(5, 2.5))
```

The derivative of the tanh function is:

$$\frac{d}{dx} \operatorname{tanh}(x) = 1 - \operatorname{tanh}^2(x).$$

The derivative of the tanh function is plotted below. As the input nears 0, the derivative approaches its maximum of 1. And as we saw with the sigmoid function, the derivative decays toward 0 as the input moves away from 0 in either direction.

```
# Clear previous gradients
x.grad.data.zero_()
y.backward(torch.ones_like(x), retain_graph=True)
d2l.plot(x.detach(), x.grad, 'x', 'grad of tanh', figsize=(5, 2.5))
```

In summary, we now know how to combine nonlinearities to build expressive multilayer neural network architectures. As a side note, this knowledge already puts you in command of a toolkit comparable to that of a deep learning practitioner circa 1990. In some ways you even have an advantage over anyone working in the 1990s, because you can leverage powerful open-source deep learning frameworks to build models rapidly with only a few lines of code, whereas training these networks used to require researchers to write thousands of lines of C or Fortran.

## Summary

* An MLP adds one or more fully connected hidden layers between the output and input layers and transforms the outputs of the hidden layers via activation functions.
* Commonly used activation functions include the ReLU function, the sigmoid function, and the tanh function.

## Exercises

1. Compute the derivative of the pReLU activation function.
1. Show that an MLP using only ReLU (or pReLU) constructs a continuous piecewise linear function.
1. Show that $\operatorname{tanh}(x) + 1 = 2 \operatorname{sigmoid}(2x)$.
1. Assume that we have a nonlinearity that applies to one minibatch at a time. What kinds of problems do you expect this to cause?

[Discussions](https://discuss.d2l.ai/t/1796)
github_jupyter
``` import numpy as np from numpy import loadtxt import pylab as pl from IPython import display from RcTorchPrivate import * from matplotlib import pyplot as plt from scipy.integrate import odeint %matplotlib inline #this method will ensure that the notebook can use multiprocessing on jupyterhub or any other linux based system. try: mp.set_start_method("spawn") except: pass torch.set_default_tensor_type(torch.FloatTensor) %matplotlib inline lineW = 3 lineBoxW=2 font = {'family' : 'normal', 'weight' : 'normal',#'bold', 'size' : 22} #plt.rc('font', **font) #plt.rcParams['text.usetex'] = True #helper functions def pltTr(x,y,clr='cyan', mark='o'): plt.plot(x.detach().numpy(), y.detach().numpy(), marker=mark, color=clr, markersize=8, label='truth', alpha = 0.9) def pltPred(x,y,clr='red', linS='-'): plt.plot(x.detach().numpy(), y.detach().numpy(), color=clr, marker='.', linewidth=2, label='RC') from decimal import Decimal def convert2pd(tensor1, tensor2): pd_ = pd.DataFrame(np.hstack((tensor1.detach().cpu().numpy(), tensor2.detach().cpu().numpy()))) pd_.columns = ["t", "y"] return pd_ '%.2E' % Decimal('40800000000.00000000000000') def param(t,N,y0): f = 1 - torch.exp(-t) f_dot = 1 - f #f = t #f_dot=1 return y0 + f*N #define a reparameterization function def reparam(t, y0 = None, N = None, dN_dt = None, t_only = False): f = 1 - torch.exp(-t) f_dot = 1 - f if t_only: return f, f_dot y = y0 + N*f if dN_dt: ydot = dN_dt * f + f_dot * N else: ydot = None return y, ydot def reparam(t, order = 1): exp_t = torch.exp(-t) derivatives_of_g = [] g = 1 - exp_t #0th derivative derivatives_of_g.append(g) g_dot = 1 - g #first derivative #derivatives_of_g.append(g_dot) # for i in range(order): # if i %2 == 0: # #print("even") # derivatives_of_g.append(g_dot) # else: # #print("odd") # derivatives_of_g.append(-g_dot) # return derivatives_of_g return g, g_dot def force(X, A = 0): return torch.zeros_like(X) q = 0.5 def custom_loss(X , y, ydot, out_weights, f = force, reg = True, ode_coefs = None, q = q, init_conds = None, enet_strength = None, enet_alpha = None): #with paramization L = ydot + ode_coefs[0]* y - f(X) + q*y**2 """ if reg: weight_size_sq = torch.mean(torch.square(out_weights)) weight_size_L1 = torch.mean(torch.abs(out_weights)) L_reg = 0.1*(weight_size_sq + weight_size_L1)/2 L = L + L_reg """ L = torch.mean(torch.square(L)) return L def plot_result(esn, xtrain, lams = [1], y0s = [1], plot_gt = True, loglog = False, ode_coefs = None, force_k = 0, fileName=None, q = q,backprop_f = None, time_result = True, solve = None): RC = esn fig = plt.figure(figsize = (8, 6)) t_pow = 0 gts, ys, ydots, ws, bs, Ls = [], [], [], [], [], [] for i, lam in enumerate(lams): y0 = y0s[i] ode_coefs[0] = lam #fit the echo state network train_args = {"burn_in" : int(BURN_IN), "ODE_order" : 1, #"track_in_grad" : self.track_in_grad, "force" : force, "reparam_f" : reparam, #"nl_f" : self.nl_f, "init_conditions" : [float(y0)], "ode_coefs" : ode_coefs, "y" : None, "X" : xtrain.view(-1,1), "q" : q, "nl" : True, } if not i: y, ydot = esn.fit(**train_args, SOLVE = solve) ode_coefs_copy = ode_coefs.copy() states_dict = {"s" : RC.states.clone(), "s1" : RC.states_dot.clone(), "G" : RC.G, "ex" : RC.extended_states.clone(), "sb1": RC.sb1, "sb" : RC.sb } if esn.ODE_order == 2: states_dict["s2"] = RC.states_dot2.clone() states_dict["sb2"] = RC.sb2.clone() t2 = time.perf_counter() else: y, ydot = RC.fit(preloaded_states_dict = states_dict, SOLVE = solve, **train_args) if backprop_f: weight_dict = backprop_f(esn) y, ydot = esn.fit(**train_args, 
out_weights = weight_dict, SOLVE = False) ode_coefs_copy = ode_coefs.copy() if ode_coefs[0] == "t**2": sp = esn.X**2 t_pow = 2 ode_coefs_copy[0] = sp def ODE_numSolver(y,t, q = q): k = 1 # dydt = -k * y *t**t_pow + force_k*np.sin(t) dydt = -k * y -q*y**2 return dydt y_truth = odeint(ODE_numSolver,y0,np.array(esn.X.cpu().view(-1,))) y_truth = torch.tensor(y_truth) # y_exac = y0*torch.exp(-lam*(esn.X)) if y0==1: extraWidth = 2; color = 'k' else: extraWidth=0; color = 'b' #line to ensure that cuda tensors can move to cpu for plotti X = esn.X.cpu().detach() y = y.cpu().detach() y_truth = y_truth.cpu().detach() if not i: plt.plot(X, y,color, linewidth=lineW+extraWidth, label = "pred" ) plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85, label = "gt") else: plt.plot(X, y,color, linewidth=lineW+extraWidth) plt.plot(X, y_truth,'--r', linewidth=lineW, alpha=0.85) ## Formating Figure # Changing spine style ax = plt.gca() for ps in ['top','bottom','left','right']: ax.spines[ps].set_linewidth(lineBoxW) plt.xlabel(r'$t$') plt.ylabel(r'$y(t)$') plt.legend() gts.append(y_truth.cpu()) ys.append(y.cpu()) ydots.append(ydot.cpu()) if backprop_f: Ls.append(weight_dict["loss"]) #Ls.append(esn.backprop_args) bs.append(esn.LinOut.bias.data.cpu()) ws.append(esn.LinOut.weight.data.cpu()) if time_result: return t2, ys, ydots, gts, ws, bs, Ls else: return ys, ydots, gts, ws, bs, Ls # plt.savefig(fileName+"Trajectories",format='png')#, dpi=600,transparent=True) # plt.savefig(fileName+'Trajectories.eps',format='eps') # return residuals def optimize_last_layer(esn, SAVE_AFTER_EPOCHS = 1, epochs = 30000, custom_loss = custom_loss, loss_threshold = 10 ** -8, EPOCHS_TO_TERMINATION = None, f = force, learning_rate = 0.01, plott = True, spikethreshold = 0.25): #define new_x new_X = esn.extended_states.detach() #force detach states_dot esn.states_dot = esn.states_dot.detach().requires_grad_(False) #define criterion criterion = torch.nn.MSELoss() try: assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad except: esn.LinOut.weight.requires_grad_(True) esn.LinOut.bias.requires_grad_(True) #define previous_loss (could be used to do a convergence stop) previous_loss = 0 #define best score so that we can save the best weights best_score = 0 #define the optimizer optimizer = optim.Adam(esn.parameters(), lr = learning_rate) #define the loss history loss_history = [] if plott: #use pl for live plotting fig, ax = pl.subplots(1,3, figsize = (16,4)) t = esn.X#.view(*N.shape).detach() g, g_dot = esn.G y0 = esn.init_conds[0] #optimizer = torch.optim.SGD(model.parameters(), lr=100) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5) lrs = [] floss_last = 0 #begin optimization loop for e in range(epochs): optimizer.zero_grad() N = esn.forward( esn.extended_states ) N_dot = esn.calc_Ndot(esn.states_dot) y = y0 + g *N ydot = g_dot * N + g * N_dot assert N.shape == N_dot.shape, f'{N.shape} != {N_dot.shape}' assert esn.LinOut.weight.requires_grad and esn.LinOut.bias.requires_grad assert False, f'{esn.LinOut.weight}' total_ws = esn.LinOut.weight.shape[0] + 1 weight_size_sq = torch.mean(torch.square(esn.LinOut.weight)) loss = custom_loss(esn.X, y, ydot, esn.LinOut.weight, reg = False, ode_coefs = esn.ode_coefs) loss.backward() optimizer.step() floss = float(loss) loss_history.append(floss) if not e and not best_score: best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() if e > SAVE_AFTER_EPOCHS: if not best_score: if floss <= min(loss_history): best_pred = y.clone() 
best_pred = y.clone() best_ydot = ydot.clone() best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() best_score = float(loss) else: if floss < best_score: best_pred = y.clone() best_ydot = ydot.clone() best_bias, best_weight = esn.LinOut.bias.detach(), esn.LinOut.weight.detach() best_score = float(loss) if not EPOCHS_TO_TERMINATION: if float(loss) < loss_threshold: EPOCHS_TO_TERMINATION = e + 100 else: if e >= EPOCHS_TO_TERMINATION: backprop_args = {"loss_history" : loss_history, "lr" : learning_rate, "epochs" : epochs } return {"weights": best_weight, "bias" : best_bias, "loss" : backprop_args, "ydot" : best_ydot, "y" : best_pred} if e > 1: if float(np.log(floss_last) - np.log(floss)) > spikethreshold: lrs.append(optimizer.param_groups[0]["lr"]) scheduler.step() for param_group in optimizer.param_groups: print('lr', param_group['lr']) floss_last = floss if plott: if e % 1500 == 0: ax[0].clear() logloss_str = 'Log(L) ' + '%.2E' % Decimal((loss).item()) delta_loss = ' delta Log(L) ' + '%.2E' % Decimal((loss-previous_loss).item()) print(logloss_str + ", " + delta_loss) ax[0].plot(N.detach().cpu(), label = "exact") ax[0].set_title(f"Epoch {e}" + ", " + logloss_str) ax[0].set_xlabel("epochs") ax[1].set_title(delta_loss) ax[1].plot(N_dot.detach().cpu()) #ax[0].plot(y_dot.detach(), label = "dy_dx") ax[2].clear() weight_size = str(weight_size_sq.detach().item()) ax[2].set_title("loss history \n and "+ weight_size) ax[2].loglog(loss_history) [ax[i].legend() for i in range(3)] previous_loss = loss.item() #clear the plot outputt and then re-plot display.clear_output(wait=True) display.display(pl.gcf()) backprop_args = {"loss_history" : loss_history, "lr" : learning_rate, "epochs" : epochs } return {"weights": best_weight, "bias" : best_bias, "loss" : backprop_args, "ydot" : best_ydot, "y" : best_pred} #optimized_hyper_params x0,xf, nsteps = 0, 5, 1000 #int(2000 * ratio_up) xtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False) BURN_IN = 500 y0 = 1 ; lam = 1 #the length of xtrain won't matter. Only dt , x0, and xf matter. 
xtrain = torch.linspace(x0, xf, steps = nsteps, requires_grad=False).view(-1,1) xtrain.shape #q = 0.7 hybrid_hps_q07 = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.005200326335063122, 'spectral_radius': 4.063828945159912, 'regularization': 0.16819202592057847, 'leaking_rate': 0.07071314752101898, 'bias': 0.6888809204101562} #q = 0.5 ######################################################################################## hybrid_hps_q05 = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0003179179463749722, 'spectral_radius': 7.975825786590576, 'regularization': 0.3332787303378571, 'leaking_rate': 0.07119506597518921, 'bias': -0.9424528479576111} ######################################################################################## #q = 0.3 ######################################################################################## exact_hps_q03 = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0020952467703604792, 'spectral_radius': 0.37082985043525696, 'regularization': 0.361264334627276, 'leaking_rate': 0.012962563894689083, 'bias': 0.15055322647094727} another_exact_03_run = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.00010646483429429022, 'spectral_radius': 9.755386352539062, 'regularization': 0.001061326151397624, 'leaking_rate': 0.015667859464883804, 'bias': -0.6486743688583374} # 3000 epochs hybrid_03_hps = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.000876183849077606, 'spectral_radius': 7.2928466796875, 'regularization': 0.6050492589156197, 'leaking_rate': 0.014219114556908607, 'bias': 0.18588018417358398} ######################################################################################## #q = 0.1 ######################################################################################## #y0s = array([-1. , -0.25, 0.5 , 1.25]) hybrid_hyper_params = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.0001340433236446365, 'spectral_radius': 7.1109442710876465, 'regularization': 0.0040541553015366605, 'leaking_rate': 0.022500092163681984, 'bias': 0.7761751413345337} exact_hyper_params = {'dt': 0.007943282347242814, 'n_nodes': 500, 'connectivity': 0.00457819326682001, 'spectral_radius': 4.214494228363037, 'regularization': 672.3718753390342, 'leaking_rate': 0.11203678697347641, 'bias': 0.7799162864685059} ######################################################################################## #esn.fit dRay=0.75 np.arange(-1., 1 + dRay, dRay) y0s = np.arange(-1., 1 + dRay, dRay) ``` dt -2.1 n_nodes 500 connectivity -3.8727548122406006 spectral_radius 7.1109442710876465 regularization -2.392099618911743 leaking_rate 0.022500092163681984 bias 0.7761751413345337 ``` log_vars = ['connectivity', 'llambda', 'llambda2', 'noise', 'regularization', 'dt'] hps = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.0008771738385033052, 'spectral_radius': 3.8187756538391113, 'regularization': 2.6243606290132924, 'leaking_rate': 0.05788800120353699, 'bias': -0.4182356595993042} for key, val in hps.items(): if key in log_vars: print(key, np.log10(val)) else: print(key, val) #declare the bounds dict. See above for which variables are optimized in linear vs logarithmic space. bounds_dict = {"connectivity" : (-4, -0.1), "spectral_radius" : (1.5, 8.5), "n_nodes" : 500, "regularization" : (-2, 2), "leaking_rate" : (0, 0.1), #"input_scaling" : (0, 1), #"feedback_scaling" : (0, 1), "dt" : -2, "bias": (-1,1) } #declare the esn_cv optimizer: this class will run bayesian optimization to optimize the bounds dict. 
esn_cv = EchoStateNetworkCV(bounds = bounds_dict, interactive = True, batch_size = 1, cv_samples = 2, initial_samples = 50, #200 subsequence_length = int(xtrain.shape[0] * 0.8), random_seed = 209, success_tolerance = 10, windowsOS =False, validate_fraction = 0.3, ODE_order = 1, length_min = 2 **(-7), esn_burn_in = BURN_IN, log_score = True ) #optimize: opt = False if opt: opt_hps = esn_cv.optimize(y = None, x = xtrain,#.cuda(), reparam_f = reparam, ODE_criterion = custom_loss, init_conditions = [(y0s[0], y0s[-1])], force = force, rounds = 5, ode_coefs = [1, 1], backprop_f = optimize_last_layer, solve = False, q = q, n_outputs = 1, eq_system = False, nonlinear_ode = True) # # new_prop_hps = {'dt': 0.01, 'n_nodes': 500, 'connectivity': 0.001237975145359088, 'spectral_radius': 5.298933029174805, 'regularization': 18.616127927682236, 'leaking_rate': 0.0048981658183038235, 'bias': -0.40049731731414795} #opt_hps #assert False esn = EchoStateNetwork(**hybrid_hps_q05 , random_state = 209, id_ = 10, dtype = torch.float32) sns.heatmap(esn.LinOut.weight[:,:5].detach()); #1. get the linear trajectories #2. do a pure backprop training rez = plot_result(esn, xtrain.cpu(), lams = torch.ones_like(torch.tensor(y0s)),#np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], plot_gt = True, ode_coefs = [1,1], q = q, backprop_f = None,#optimize_last_layer, solve = True) esn.init_conds RC =esn RC.DH1[0:10,:] sns.heatmap(RC.DH2); plt.show(); sns.heatmap(matmul(-esn.DH2, esn.D_A).view(-1,1)) esn.init_conds sns.heatmap(esn.LinOut.weight[0].detach().view(-1,1)) #t2_, ys_, gts_, ws_, bs_, l_trajs = rez #linear_trajs = _, l_trajs, l_trajs_dot, _, _, _, _ = plot_result(esn, xtrain.cpu(), lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], plot_gt = True, ode_coefs = [1,1], q = q, backprop_f = None,#optimize_last_layer, solve = True) esn.ode_coefs esn.DH2.shape sns.heatmap(esn.DH); torch.mean(esn.D_A) sns.heatmap(esn.DH2); plt.show(); torch.mean(esn.D_A) sns.heatmap(esn.LinOut.weight.detach()) sns.heatmap(esn.D_A) assert False t2, ys, ydots, gts, ws, bs, Ls = rez n = 3 plt.loglog(Ls[n]["loss_history"], label = "prop_only") #plt.loglog(h["loss"][n]["loss_history"], label = "hybrid") plt.legend(); assert False import pickle filename = 'bernoulli_q05_hybrid' with open(filename + '_plot_data_.pickle', 'rb') as handle: h = pickle.load(handle) #filename = 'bernoulli_q05_backprop' with open(filename + '_plot_data_.pickle', 'rb') as handle: b = pickle.load(handle) with open(filename + '_plot_data_.pickle', 'rb') as handle: b = pickle.load(handle) n = 3 plt.loglog(b['loss'][n]["loss_history"], color = "blue", label = "backprop_only") plt.loglog(h['loss'][n]["loss_history"], color = "red", label = "hybrid") plt.legend() # for i, key in enumerate(b['loss']): # plt.loglog(key["loss_history"], color = "blue") # for i, key in enumerate(a['loss']): # plt.loglog(key["loss_history"], color = "red") assert False ls import pickle filename = 'bernoulli_q05_linear' #t2, ys, ydots, gts, ws, bs, Ls = rez q05_data = { # "time": esn.X, # "ys" : ys, # "ydots" : ydots, # "gts" : gts, # "q": 0.5, # "loss": Ls, "linear_trajectories" : l_trajs, "linear_trajectories_dot" : l_trajs_dot } #"bprop_only_loss" : Ls_bprop} with open(filename + '_plot_data.pickle', 'wb') as handle: pickle.dump(q05_data, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(filename + '_plot_data.pickle', 'rb') as handle: b = pickle.load(handle) b.keys() for i in b['linear_trajectories']: plt.plot(i) import matplotlib.pyplot as plt with 
open(filename + '_plot_data.pickle', 'rb') as handle: b = pickle.load(handle) b.keys() plt.plot(b["ydots"][0]) import pickle q05 = {"time": esn.X, "hyper_params" : hybrid_hps_q05, "out_weights" : {"weights": ws, "bias": bs}, "burn_in" : BURN_IN, "epochs" : 30000, "learning_rate": 0.0001, "y0s" : y0s, "q" : 0.5} with open(filename + '_reproduce.pickle', 'wb') as handle: pickle.dump(q05, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(filename + '_reproduce.pickle', 'rb') as handle: b = pickle.load(handle) for param in esn.parameters(): #print(param) if param.requires_grad: print(param) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = a) opt_hps plt.plot(esn.states[:,7]) correction = (esn.D_A.T * esn.gH.T @ esn.gH) esn.DH.shape (esn.DH1 + correction).shape %%time esn = EchoStateNetwork(**hybrid_03_hps, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = 0.1) %%time esn = EchoStateNetwork(**hybrid_hyper_params, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) #A * torch.sin(X) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = None)#optimize_last_layer) %%time esn = EchoStateNetwork(**exact_hyper_params, random_state = 109, feedback = False, id_ = 10, backprop = False, dtype = torch.float32) # y0s = np.linspace(-2, 2, 10) dRay=0.75 y0s = np.arange(-1., 1 + dRay, dRay) plot_result(esn, xtrain, lams = np.array([1] * len(y0s)),#[1, 1, 1, 2]), y0s = y0s, #[1, 2, 3, 1], lam_title = 1, y0_title = "[-5, 5]", plot_gt = True, ode_coefs = [1,1], force_k = 0, fileName='population', backprop_f = optimize_last_layer, q = 0.1) ```
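For reference, the ODE targeted by `custom_loss` above, $\dot y = -k y - q y^2$, is a Bernoulli equation with a closed-form solution, so the reservoir-computing and `odeint` trajectories can also be checked analytically: substituting $v = 1/y$ gives a linear equation whose solution yields $y(t) = 1 / \big((1/y_0 + q/k)\,e^{kt} - q/k\big)$ for $y_0 \neq 0$. A small sketch (not part of the original notebook), with $k = 1$ as used throughout:

```
import numpy as np

def bernoulli_exact(t, y0, q, k=1.0):
    """Closed-form solution of dy/dt = -k*y - q*y**2 with y(0) = y0 (y0 != 0)."""
    return 1.0 / ((1.0 / y0 + q / k) * np.exp(k * t) - q / k)

t = np.linspace(0, 5, 1000)
print(bernoulli_exact(t, y0=1.0, q=0.5)[:3])   # compare against the odeint / RC output
```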
github_jupyter
# Using TensorNet (Basic)

This notebook demonstrates some of the core functionalities of TensorNet:
- Creating and setting up a dataset
- Augmenting the dataset
- Creating and configuring a model and viewing its summary
- Defining an optimizer and a criterion
- Setting up callbacks
- Training and validating the model
- Displaying plots for viewing the change in accuracy during training

# Installing Packages

```
!pip install --upgrade --no-cache-dir torch-tensornet
```

# Imports

Importing necessary packages and modules

```
%matplotlib inline

import matplotlib.pyplot as plt

from tensornet.data import CIFAR10
from tensornet.models import mobilenet_v2
from tensornet.models.loss import cross_entropy_loss
from tensornet.models.optimizer import sgd
from tensornet.utils import initialize_cuda, plot_metric
from tensornet.engine.ops import ModelCheckpoint
from tensornet.engine.ops.lr_scheduler import reduce_lr_on_plateau
```

## Set Seed and Get GPU Availability

```
# Initialize CUDA and set random seed
cuda, device = initialize_cuda(1)  # random seed is set to 1
```

## Setup Dataset

Downloading and initializing the `CIFAR-10` dataset and applying the following augmentations:
- Horizontal Flip
- Random Rotation
- Cutout Augmentation

```
dataset = CIFAR10(
    train_batch_size=64,
    val_batch_size=64,
    cuda=cuda,
    num_workers=4,
    horizontal_flip_prob=0.2,
    rotate_degree=20,
    cutout_prob=0.3,
    cutout_dim=(8, 8),
)
```

## Data Visualization

Let's see what our data looks like. This information will help us decide which transformations can be used on the dataset.

```
# Fetch data
classes = dataset.classes
sample_data, sample_targets = dataset.data()

# Set number of images to display
num_images = 4

# Display images with labels
fig, axs = plt.subplots(1, 4, figsize=(8, 8))
fig.tight_layout()
for i in range(num_images):
    axs[i].axis('off')
    axs[i].set_title(f'Label: {classes[sample_targets[i]]}')
    axs[i].imshow(sample_data[i])
```

## Training and Validation Dataloaders

This is the final step in data preparation. It sets the dataloader arguments and then creates the dataloaders.

```
# Create train data loader
train_loader = dataset.loader(train=True)

# Create val data loader
val_loader = dataset.loader(train=False)
```

# Model Architecture and Summary

We'll download a pretrained MobileNetV2 model and train it on our dataset using fine-tuning.

```
model = mobilenet_v2(pretrained=True).to(device)  # Create model
model.summary(dataset.image_size)  # Display model summary
```

# Model Training and Validation

- Loss Function: `Cross Entropy Loss`
- Optimizer: `SGD`
- Callbacks: `Model Checkpoint` and `Reduce LR on Plateau`

```
criterion = cross_entropy_loss()  # Create loss function
optimizer = sgd(model)  # Create optimizer with default learning rate

# Create callbacks
checkpoint_path = 'checkpoints'
callbacks = [
    ModelCheckpoint(checkpoint_path, monitor='val_accuracy'),
    reduce_lr_on_plateau(optimizer, factor=0.2, patience=2, min_lr=1e-6),
]

model.fit(
    train_loader,
    optimizer,
    criterion,
    device=device,
    epochs=10,
    val_loader=val_loader,
    callbacks=callbacks,
    metrics=['accuracy'],
)
```

## Result Analysis

Displaying the change in accuracy of the training and the validation set during training

```
plot_metric({
    'Training': model.learner.train_metrics[0]['accuracy'],
    'Validation': model.learner.val_metrics[0]['accuracy']
}, 'Accuracy')
```
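As a quick sanity check after training, the sketch below runs the fine-tuned model on one validation batch and prints predicted vs. actual labels. It assumes the `model`, `val_loader`, `classes` and `device` objects from the cells above are still in scope and that the TensorNet model behaves like a standard `torch.nn.Module` returning class logits.

```
import torch

# Run the trained model on a single validation batch (assumed objects: model,
# val_loader, classes, device from the cells above).
model.eval()
images, labels = next(iter(val_loader))

with torch.no_grad():
    logits = model(images.to(device))
    preds = logits.argmax(dim=1).cpu()

# Compare predictions with ground truth for the first few images
for i in range(4):
    print(f'predicted: {classes[int(preds[i])]:<12} actual: {classes[int(labels[i])]}')
```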
github_jupyter
# Exploring Datasets with Python

In this short demo we will analyse a dataset from 1978 which contains information about politicians having affairs. To analyse it, we will use a Jupyter Notebook, which is basically a REPL++ for Python. Pressing Shift+Enter executes the cell and prints the result.

```
4 + 4

def sum(a, b): return a + b

sum(40, 2)

import pandas as pd

affairs = pd.read_csv('affairs.csv')
affairs.head()
affairs['sex'].head()
affairs['sex'].value_counts()
affairs['age'].describe()
affairs['age'].max()
affairs.describe()
affairs[affairs['sex'] == 'female'].head()
affairs[affairs['sex'] == 'female'].describe()
affairs['below_30'] = affairs['age'] < 30
affairs['below_30'].value_counts()
affairs.head()
rel_meanings = ['not', 'mildly', 'fairly', 'strongly']
affairs['religious'] = affairs['religious'].apply(lambda x: rel_meanings[min(x, 4)-1])
affairs.head()
```

# Visualize Data

To visualize our data, we will use Seaborn, a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics. Let's import it.

```
import seaborn as sns
%matplotlib inline
sns.set()
sns.set_context('talk')
```

Seaborn together with Pandas makes it easy to create charts to analyze our data. We can pass our DataFrames and Series directly into Seaborn methods. We will see how in the following sections.

# Univariate Plotting

Let's start by visualizing the distribution of the ages of our people. We can achieve this with a simple method called distplot by passing our series of ages as an argument.

```
sns.distplot(affairs['age'])
sns.distplot(affairs['age'], bins=50, rug=True, kde=False)
sns.distplot(affairs['ym'], bins=10, kde=False)
```

The average age of our people is around 32, but most people have been married for more than 14 years!

# Bivariate Plotting

Numbers get even more interesting when we can compare them to other numbers! Let's start by comparing the number of years married to the number of affairs. Seaborn provides a method called jointplot for this use case.

```
sns.jointplot(affairs['ym'], affairs['nbaffairs'])
sns.jointplot(affairs['ym'], affairs['nbaffairs'], kind='reg')
sns.jointplot(affairs['ym'], affairs['age'], kind='kde', shade=True)
sns.pairplot(affairs.drop('below_30', axis=1), hue='sex', kind='reg')
sns.lmplot(x="ym", y="nbaffairs", hue="sex", col="child", row="religious", data=affairs)
sns.boxplot(x="sex", y="ym", hue="child", data=affairs);
sns.violinplot(x="religious", y="nbaffairs", hue="sex", data=affairs, split=True);
affairs.corr()
sns.heatmap(affairs.corr(), cmap='coolwarm')
```
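The heatmap above shows every pairwise correlation at once. A small follow-up sketch, using only pandas and the `affairs` DataFrame already loaded above, sorts the correlations with `nbaffairs` and re-draws the heatmap with annotated coefficients, which makes the individual values easier to read.

```
# Sort the correlations with the number of affairs (on newer pandas versions,
# pass numeric_only=True to .corr() to skip the string columns explicitly).
corr = affairs.corr()
print(corr['nbaffairs'].drop('nbaffairs').sort_values())

# Same matrix as the heatmap above, with the coefficients written in each cell
sns.heatmap(corr, cmap='coolwarm', annot=True, fmt='.2f')
```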
github_jupyter
``` import torch from transformers import MT5ForConditionalGeneration, MT5Config, MT5EncoderModel, MT5Tokenizer, Trainer, TrainingArguments from progeny_tokenizer import TAPETokenizer import numpy as np import math import random import scipy import time import pandas as pd from torch.utils.data import DataLoader, RandomSampler, Dataset, BatchSampler import typing from pathlib import Path import argparse from collections import OrderedDict import pickle import matplotlib.pyplot as plt from tape.metrics import spearmanr before_foldx = False ``` # Analyze 250K gen seqs and prepare for FoldX saved output tsv file to run FoldX inference ``` wt_seq = 'STIEEQAKTFLDKFNHEAEDLFYQSSLASWNYNTNITEENVQNMNNAGDKWSAFLKEQSTLAQMYPLQEIQNLTVKLQLQALQ' constant_region = 'NTNITEEN' wt_cs_ind = wt_seq.index(constant_region) gen250k_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000.tsv' # gen250k_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000.tsv' gen250k_df = pd.read_table(gen250k_tsv_name) gen250k_df ``` filter out sequences without constant region ``` indices_to_drop = [] dropped_seqs = [] for index, row in gen250k_df.iterrows(): seq = row['MT_seq'] if constant_region not in seq: indices_to_drop.append(index) dropped_seqs.append(seq) else: cs_ind = seq.index(constant_region) if cs_ind != wt_cs_ind: indices_to_drop.append(index) dropped_seqs.append(seq) print(len(indices_to_drop)) print(indices_to_drop) print(dropped_seqs) gen250k_df_dropped_nocon = gen250k_df.drop(indices_to_drop) gen250k_df_dropped_nocon ``` filter out sequences with non-AA tokens ``` rejected_tokens = ["<pad>", "<sep>", "<cls>", "<mask>", "<unk>"] indices_to_drop = [] dropped_seqs = [] for index, row in gen250k_df_dropped_nocon.iterrows(): seq = row['MT_seq'] for rejected_token in rejected_tokens: if rejected_token in seq: indices_to_drop.append(index) dropped_seqs.append(seq) break print(len(indices_to_drop)) print(indices_to_drop) print(dropped_seqs) gen250k_df_dropped = gen250k_df_dropped_nocon.drop(indices_to_drop) print(len(gen250k_df_dropped)) gen250k_df_dropped ``` # Filter out sequences that are repeat or in training set ``` input_data_file = 'data/gen_train_data/top_half_ddG/train_ddG.pkl' input_data_df = pd.read_pickle(input_data_file) input_data_df.iloc[0]['MT_seq'] input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq'] input_data_df.iloc[0]['MT_seq'] in input_data_df['MT_seq'].tolist() train_seq_list = input_data_df['MT_seq'].tolist() train_seq_list len(train_seq_list) ``` Filter out those that are repeat ``` gen250k_df_dropped_norepeat = gen250k_df_dropped[gen250k_df_dropped['repeated_gen'] == False] gen250k_df_dropped_norepeat gen250k_df_dropped_norepeat.iloc[0] ``` Filter out those from the training set ``` gen250k_df_filtered = gen250k_df_dropped_norepeat[gen250k_df_dropped_norepeat['in_train_data_gen'] == False] gen250k_df_filtered gen250k_df_filtered.iloc[0] np.sum(gen250k_df_filtered['repeated_gen']) np.sum(gen250k_df_filtered['in_train_data_gen']) topK_saved = 10000 gen250k_df_filtered = gen250k_df_filtered[:250000] gen250k_df_filtered = gen250k_df_filtered.sort_values(by='latent_head_pred', ascending=True) # gen250k_df_filtered = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True) gen250k_df_filtered_topK = 
gen250k_df_filtered.iloc[:topK_saved] filtered_LHscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered.tsv' # filtered_LHscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Klatentheadfiltered.tsv' disc_latenthead_cor = spearmanr(gen250k_df_filtered_topK['disc_pred'], gen250k_df_filtered_topK['latent_head_pred']) print("disc_latenthead_cor: ", disc_latenthead_cor) gen250k_df_filtered_sorted_disc = gen250k_df_filtered.sort_values(by='disc_pred', ascending=True) gen250k_df_filtered_sorted_disc_topK = gen250k_df_filtered_sorted_disc.iloc[:topK_saved] filtered_Dscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered.tsv' # filtered_Dscored_gen250k_top10K_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqs260000_top10Kdiscfiltered.tsv' all250K_disc_latenthead_cor = spearmanr(gen250k_df_filtered['disc_pred'], gen250k_df_filtered['latent_head_pred']) print("all250K_disc_latenthead_cor: ", all250K_disc_latenthead_cor) ``` # Save top 10K seqs for FoldX Evaluation ``` if before_foldx: gen250k_df_filtered_topK.to_csv(filtered_LHscored_gen250k_top10K_tsv_name, sep="\t", index=False) gen250k_df_filtered_sorted_disc_topK.to_csv(filtered_Dscored_gen250k_top10K_tsv_name, sep="\t", index=False) len(gen250k_df_filtered_topK) df_toplot = gen250k_df_filtered ``` # Analyze hamming distance ``` # Compute hamming distance between MT and WT def hamming_dist(str1, str2): i = 0 count = 0 while(i < len(str1)): if(str1[i] != str2[i]): count += 1 i += 1 return count hamming_dist_list = [] wt_seq = df_toplot.iloc[0]['WT_seq'] for index, row in df_toplot.iterrows(): gen_seq = row['MT_seq'] h_dist = hamming_dist(gen_seq, wt_seq) hamming_dist_list.append(h_dist) print("Hamming distance stats") print("max: ", np.max(hamming_dist_list)) print("min: ", np.min(hamming_dist_list)) print("median: ", np.median(hamming_dist_list)) print("mean: ", np.mean(hamming_dist_list)) print("std: ", np.std(hamming_dist_list)) ``` hamming distance for generator training data ``` gen_train_data = 'data/gen_train_data/top_half_ddG/train_ddG.pkl' gen_train_df = pd.read_pickle(gen_train_data) wt_seq = gen_train_df.iloc[0]['WT_seq'] gen_train_hamming_dist_list = [] for index, row in gen_train_df.iterrows(): train_seq = row['MT_seq'] h_dist = hamming_dist(train_seq, wt_seq) gen_train_hamming_dist_list.append(h_dist) plt.figure(figsize=(8,6)) plt.hist(hamming_dist_list, density=True, label='generated', bins=[i for i in range(46)], alpha=0.4) # plt.xlabel("Hamming Distance", size=14) # plt.ylabel("Count", size=14) # plt.title("Hamming Distance from WT seq") plt.hist(gen_train_hamming_dist_list, density=True, label='train_data', bins=[i for i in range(46)], alpha=0.4) plt.xlabel("Hamming Distance", size=14) plt.ylabel("Density", size=14) plt.title("Top 5% Generator") plt.legend(loc='upper left') ``` # Sample for E[min] FoldX Computation ``` gen250k_df_filtered # Get topk seqs num_rounds = 100 # N 
round_pool_size = 10000 topk = 10 # K round_topk = {} cols_to_sort = ['latent_head_pred'] # cols_to_sort = ['disc_pred', 'latent_head_pred'] foldx_df = None in_count = 0 for col_to_sort in cols_to_sort: print("col_to_sort: ", col_to_sort) round_topk[col_to_sort] = {} for round_ind in range(num_rounds): sampled_rows = gen250k_df_filtered.sample(n=round_pool_size) sorted_sampled_rows = sampled_rows.sort_values(by=col_to_sort, ascending=True)[:topk] topk_rows = sorted_sampled_rows[:topk] round_topk[col_to_sort][round_ind] = topk_rows for round_ind in round_topk[col_to_sort]: round_topk_df = round_topk[col_to_sort][round_ind] if foldx_df is None: foldx_df = round_topk_df else: all_mt = foldx_df['MT_seq'].tolist() for row_ind, row in round_topk_df.iterrows(): if row['MT_seq'] not in all_mt: foldx_df = foldx_df.append(row) else: in_count += 1 print("len(foldx_df)+in_count: ", len(foldx_df)+in_count) foldx_df in_count ``` # save E[min] seqs to do FoldX¶ ``` seqsforEmin_dict_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_df.pkl' # seqsforEmin_dict_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_df.pkl' if before_foldx: with open(seqsforEmin_dict_name, 'wb') as f: pickle.dump(round_topk, f) # with open(seqsforEmin_dict_name, 'rb') as f: # b = pickle.load(f) seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_foldx.tsv' # seqsforEmin_tsv_name = 'generated_seqs/congen/clspool_sephead512dim_lre-04_gen_perturb-1/unique250K_clspool_sephead512dim_lre-04_gen_perturb-1-congen_seqsforEmin_foldx.tsv' if before_foldx: foldx_df.to_csv(seqsforEmin_tsv_name, sep="\t", index=False) ``` # <<===== After Foldx Computation =====>> ``` # foldx_results_name = "path_to_foldx_results" # # foldx_results_name = "foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv" # foldx_results_df = pd.read_table(foldx_results_name) foldx_results_names = [ "foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Klatentheadfiltered/results_full.tsv", # "foldx_sim_results/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqs260000_top10Kdiscfiltered/results_full.tsv", ] # foldx_results_name = "foldx_sim_results/tophalf-basegen_top10K-Dscore_250Kgen/results_full.tsv" foldx_results_df = None for foldx_results_name in foldx_results_names: if foldx_results_df is None: foldx_results_df = pd.read_table(foldx_results_name) else: next_foldx_results_df = pd.read_table(foldx_results_name) foldx_results_df = foldx_results_df.append(next_foldx_results_df, ignore_index=True) foldx_results_df # Compute Emin from foldx values rows_to_patch = None Emin_results_dict = {} mean_disc_ddG_cor_results_dict = {} mean_latent_ddG_cor_results_dict = {} for col_to_sort in round_topk: print(col_to_sort) current_score_round_topk = round_topk[col_to_sort] round_min_list = [] round_disc_ddG_cor_list = [] round_latent_ddG_cor_list = [] for round_ind in current_score_round_topk: round_topk_df = current_score_round_topk[round_ind] round_ddG 
= [] round_disc_pred = [] round_latent_head_pred = [] for row_ind, row in round_topk_df.iterrows(): row_seq = row['MT_seq'] matched_row = foldx_results_df.loc[foldx_results_df['MT_seq'] == row_seq] if len(matched_row) != 1 : # print("matched_row: ", matched_row) if len(matched_row) == 0 : if rows_to_patch is None: rows_to_patch = row else: rows_to_patch.append(row) # raise else: # round_ddG.append(matched_row.iloc[0]['ddG'].to_numpy()[0]) # round_disc_pred.append(matched_row.iloc[0]['disc_pred'].to_numpy()[0]) # round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred'].to_numpy()[0]) round_ddG.append(matched_row.iloc[0]['ddG']) round_disc_pred.append(matched_row.iloc[0]['disc_pred']) round_latent_head_pred.append(matched_row.iloc[0]['latent_head_pred']) else: # print("matched_row['ddG'] to_numpy: ", matched_row['ddG'].to_numpy()) # print("matched_row['ddG'] to_numpy 0: ", matched_row['ddG'].to_numpy()[0]) # print("matched_row['ddG']: ", matched_row['ddG']) # print("matched_row['disc_pred']: ", matched_row['disc_pred']) round_ddG.append(matched_row['ddG'].to_numpy()[0]) # ! changed to ddG round_disc_pred.append(matched_row['disc_pred'].to_numpy()[0]) round_latent_head_pred.append(matched_row['latent_head_pred'].to_numpy()[0]) # round_ddG.append(matched_row['ddG']) # ! changed to ddG # round_disc_pred.append(matched_row['disc_pred']) # round_latent_head_pred.append(matched_row['latent_head_pred']) # print("len(round_disc_pred): ", len(round_disc_pred)) # print("len(round_ddG): ", len(round_ddG)) # print("round_disc_pred: ", round_disc_pred) # print("round_ddG: ", round_ddG) # print("round_ddG.to_numpy(): ", round_ddG.to_numpy()) round_disc_ddG_cor = spearmanr(round_disc_pred, round_ddG) round_disc_ddG_cor_list.append(round_disc_ddG_cor) round_latent_ddG_cor = spearmanr(round_latent_head_pred, round_ddG) round_latent_ddG_cor_list.append(round_latent_ddG_cor) round_min = np.min(round_ddG) # print("round_ddG: ", round_ddG) # print("round_min: ", round_min) round_min_list.append(round_min) Emin = np.mean(round_min_list) # print("round_min_list: ", round_min_list) # print("Emin: ", Emin) mean_disc_ddG_cor = np.mean(round_disc_ddG_cor_list) mean_latent_ddG_cor = np.mean(round_latent_ddG_cor_list) Emin_results_dict[col_to_sort] = Emin mean_disc_ddG_cor_results_dict[col_to_sort] = mean_disc_ddG_cor mean_latent_ddG_cor_results_dict[col_to_sort] = mean_latent_ddG_cor print(rows_to_patch) ``` # Save Emin Results ``` Emin_results_name = 'generated_seqs/congen/clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080/unique250K_clspool_waeDeterencStart84kstep1024dim_cyccon1Start84kstep_lre-04_24ep_gen_perturb-080-congen_seqsforEmin_results.txt' # Emin_results_name = 'generated_seqs/baseline_gen/Emin_results/tophalf-basegen_seqsforEmin_results.txt' with open(Emin_results_name, "w") as writer: writer.write("***** E[min] results *****\n") writer.write("seqsforEmin_dict_name: {}\n".format(seqsforEmin_dict_name)) for key in sorted(Emin_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, str(Emin_results_dict[key]))) writer.write("***** mean_disc_ddG_cor results *****\n") for key in sorted(mean_disc_ddG_cor_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, str(mean_disc_ddG_cor_results_dict[key]))) writer.write("***** mean_latent_ddG_cor results *****\n") for key in sorted(mean_latent_ddG_cor_results_dict.keys()): writer.write("sorted by %s = %s\n" % (key, str(mean_latent_ddG_cor_results_dict[key]))) ```
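The round loop above interleaves sampling, sorting, and matching against the FoldX results. As a compact restatement, the sketch below wraps the same E[min] estimate in a helper function; it is a sketch under the assumption that the input DataFrame already carries a FoldX `ddG` value for every sequence, and the function name and arguments are illustrative rather than part of the pipeline above.

```
import numpy as np
import pandas as pd

def expected_min_ddG(df, score_col, ddG_col='ddG',
                     pool_size=10000, topk=10, n_rounds=100, seed=0):
    """Monte-Carlo estimate of E[min ddG]: in each round, draw a random pool,
    keep the topk sequences with the lowest predicted score, and record the
    best (lowest) FoldX ddG among them; return the mean over rounds."""
    rng = np.random.default_rng(seed)
    round_mins = []
    for _ in range(n_rounds):
        pool = df.sample(n=pool_size, random_state=int(rng.integers(1 << 31)))
        top = pool.nsmallest(topk, score_col)  # lower predicted score = better
        round_mins.append(top[ddG_col].min())
    return float(np.mean(round_mins))

# e.g. expected_min_ddG(scored_df, score_col='latent_head_pred')
```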
github_jupyter
# Part 1 - 2D mesh tallies So far we have seen that neutron and photon interactions can be tallied on surfaces or cells, but what if we want to tally neutron behaviour throughout a geometry? (rather than the integrated neutron behaviour over a surface or cell). A mesh tally allows a visual inspection of the neutron behaviour spatially throughout the geometry. The geometry is subdivided into many rectangles and the neutron behaviour is recorded (tallied) by the simulation in each of the small rectangles. This can form a 2D slice of the neutron interactions throughout the model. This notebook allows users to create a simple geometry from a few different materials and plot the results of a 2D regular mesh tally applied to the geometry. ``` from IPython.display import HTML HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/KYIsDjip1nQ" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>') ``` This code block defines the model geometry, materials, neutron source and regular mesh tally. Run the cell to see the model geometry. Observe how a 2D mesh is achieved by creating a 3D mesh with a thickness of one mesh cell in one dimension. ``` import openmc import matplotlib.pyplot as plt # MATERIALS # creates two materials, one is a neutron multiplier (lead) and the other a tritium breeder (lithium) mats = openmc.Materials() breeder_material = openmc.Material(name="breeder") breeder_material.add_element('Li', 1, percent_type='ao') breeder_material.set_density('g/cm3', 2.0) multiplier_material = openmc.Material(name="multiplier") multiplier_material.add_element('Pb', 1, percent_type='ao') multiplier_material.set_density('g/cm3', 11.0) mats = [breeder_material, multiplier_material] # GEOMETRY # surfaces sph1 = openmc.Sphere(r=50) sph2 = openmc.Sphere(r=90, boundary_type='vacuum') plane1 = openmc.XPlane(20) # cells breeder_cell = openmc.Cell(region=+sph1 & -sph2 & -plane1) breeder_cell.fill = breeder_material multiplier_cell = openmc.Cell(region=+sph1 & -sph2 & +plane1) multiplier_cell.fill = multiplier_material inner_vacuum_cell = openmc.Cell(region=-sph1) universe = openmc.Universe(cells=[inner_vacuum_cell, breeder_cell, multiplier_cell]) geom = openmc.Geometry(universe) # SETTINGS # Instantiate a Settings object sett = openmc.Settings() sett.batches = 100 sett.inactive = 0 sett.particles = 50 sett.particle = "neutron" sett.run_mode = 'fixed source' # creates a 14MeV point source source = openmc.Source() source.space = openmc.stats.Point((0, 0, 0)) source.angle = openmc.stats.Isotropic() source.energy = openmc.stats.Discrete([14e6], [1]) sett.source = source # Create mesh which will be used for tally mesh = openmc.RegularMesh() mesh_height = 100 # number of cells in the X and Z dimensions mesh_width = mesh_height mesh.dimension = [mesh_width, 1, mesh_height] # only 1 cell in the Y dimension mesh.lower_left = [-200, -200, -200] # physical limits (corners) of the mesh mesh.upper_right = [200, 200, 200] tallies = openmc.Tallies() # Create mesh filter for tally mesh_filter = openmc.MeshFilter(mesh) mesh_tally = openmc.Tally(name='tallies_on_mesh') mesh_tally.filters = [mesh_filter] mesh_tally.scores = ['flux', 'absorption', '(n,2n)'] # change flux to absorption tallies.append(mesh_tally) # combines the geometry, materials, settings and tallies to create a neutronics model model = openmc.model.Model(geom, mats, sett, tallies) plt.show(universe.plot(width=(180, 180), basis='xz')) ``` The next code 
block performs the simulation, which tallies neutron flux on the mesh, and loads the results for inspection.

```
# deletes old files
!rm summary.h5
!rm statepoint.*.h5

# runs the simulation
output_filename = model.run()

# open the results file
results = openmc.StatePoint(output_filename)
```

This code block filters the results to show the neutron flux recorded by the mesh tally.

```
# access the flux tally
my_tally = results.get_tally(scores=['flux'])
my_slice = my_tally.get_slice(scores=['flux'])
my_slice.mean.shape = (mesh_width, mesh_height)

fig = plt.subplot()
plt.show(fig.imshow(my_slice.mean))

# notice that neutrons are produced and emitted isotropically from a point source.
# There is a slight increase in flux within the neutron multiplier.
```

This code block filters the results to show the neutron absorption recorded by the mesh tally.

```
# access the absorption tally
my_tally = results.get_tally(scores=['absorption'])
my_slice = my_tally.get_slice(scores=['absorption'])
my_slice.mean.shape = (mesh_width, mesh_height)

fig = plt.subplot()
plt.show(fig.imshow(my_slice.mean))

# notice that neutrons are being absorbed on the left hand side of the model
```

This code block filters the results to show the neutron multiplication recorded by the mesh tally.

```
# access the neutron multiplication tally
my_tally = results.get_tally(scores=['(n,2n)'])
my_slice = my_tally.get_slice(scores=['(n,2n)'])
my_slice.mean.shape = (mesh_width, mesh_height)

fig = plt.subplot()
plt.show(fig.imshow(my_slice.mean))

# notice that neutrons are being multiplied on the right hand side of the model

# Bonus information
# The 2D mesh tally is currently recording all interactions in the third dimension (z).
# The diagrams are showing the xy plane and all interactions in the z direction.
# However, one can also change the mesh to take a central slice with a 1 cm thickness in the following way.
# The tally takes a little longer to converge as fewer neutrons interact in the tally region.

# Create mesh which will be used for tally
mesh = openmc.RegularMesh()
mesh_height = 100
mesh_width = mesh_height
mesh.dimension = [mesh_width, 1, mesh_height]  # only one entry in the Y direction
mesh.lower_left = [-200, -0.5, -200]  # Y thickness is now smaller
mesh.upper_right = [200, 0.5, 200]  # Y thickness is now smaller
```

**Learning Outcomes for Part 1:**
- Mesh tallies can be used to visualise neutron interactions spatially throughout the geometry.
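As an addendum to the mesh-tally plots above, it is worth checking the statistical quality of the tally as well as its mean. The sketch below plots the relative error (standard deviation divided by mean) of the flux tally on the same mesh; it assumes the `results`, `mesh_width` and `mesh_height` objects from the cells above are still in scope.

```
import numpy as np

# access the flux tally again and pull out both the mean and its standard deviation
flux_tally = results.get_tally(scores=['flux'])
flux_slice = flux_tally.get_slice(scores=['flux'])

mean = flux_slice.mean.reshape(mesh_width, mesh_height)
std_dev = flux_slice.std_dev.reshape(mesh_width, mesh_height)

# relative error; cells with zero score are left at 0 to avoid dividing by zero
rel_error = np.divide(std_dev, mean, out=np.zeros_like(mean), where=mean > 0)

fig = plt.subplot()
plt.show(fig.imshow(rel_error))

# cells far from the source score few events and therefore have larger relative errors
```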
github_jupyter
``` from fastai.text import * from fastai.tabular import * path = Path('') data = pd.read_csv('good_small_dataset.csv', engine='python') data.head() df = data.dropna() df.to_csv('good_small_dataset_drop_missing.csv') data_lm = TextLMDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', text_cols = 'content', label_cols = 'type') data_lm.save('data_lm_export.pkl') data_clas = TextClasDataBunch.from_csv(path, 'good_small_dataset_drop_missing.csv', vocab=data_lm.train_ds.vocab, text_cols = 'content', label_cols = 'type',bs=16) data_clas.save('data_clas_export.pkl') from fastai.text import * data_lm = load_data('NLP/', 'data_lm_export.pkl') data_clas = load_data('', 'data_clas_export.pkl') learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.5) learn.save('initial') learn.fit_one_cycle(1, 1e-2) learn.save('initial') learn.unfreeze() learn.fit_one_cycle(1, 1e-3) learn.save_encoder('ft_enc') learn.save('ft_encoder_model') learn.predict("The President today spoke on", n_words=10) learn.predict("Kim Kardashian released a new photo depicting her doing", n_words=6) learn.predict("World War Three has begun between", n_words=10) learn = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5); learn.load_encoder('ft_enc') learn.load('good_model_epoc_2'); learn.summary() data_clas.show_batch() learn.fit_one_cycle(1, 1e-2) learn.save('good_model') learn.freeze_to(-2) learn.fit_one_cycle(1, slice(5e-3/2., 5e-3)) learn.save('good_model_epoc_2') learn.unfreeze() learn.fit_one_cycle(1, slice(2e-3/100, 2e-3)) learn.save('good_model_epoc_3') # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. 
Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. 
South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") # BrightBert again learn.predict("The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,” Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.” Trump’s 'tougher direction” statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,” according to Rosemary Jenks, policy director at the Center for Immigration Studies. 
'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,” partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children” are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight” amnesty and cheap labor bill. The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,” one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’” But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,” said a February letter from union President Chris Crane.") # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. 
There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. 
Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 
'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") # BrightBert again learn.predict("The border agencies need tougher leadership, President Donald Trump declared Friday as he dropped plans to appoint a long-time agency staffer to run the Immigration and Customs Enforcement agency (ICE).'Ron [Vitiello is] a good man,' Trump told reporters. 'But we’re going in a tougher direction. We want to go in a tougher direction.' Trump’s 'tougher direction' statement suggests he may pressure Department of Homeland Secretary (DHS) Secretary Kirstjen Nielsen to implement policies that top agency staffers oppose, such as rejecting legal interpretations and bureaucratic practices set by former President Barack Obama. Immigration reformers blame those Obama policies for encouraging the wave of economic migrants from Central America.Breitbart TVDonald Trump Says Everything Jared Kushner Touched ‘Turned To Gold’The shift comes amid the growing wave of Central American economic migrants who are using Obama-era legal loopholes to walk through the border wall and into jobs, neighborhoods, and blue-collar schools throughout the United States. That wave is expected to deliver one million migrants into the United States by October, and it is made possible because Democrats are blocking any reform the border loopholes.Immigration reformers fear that Obama-appointed staffers and former business lobbyists are keeping Trump in the dark about ways to improve operation at the DHS. 'I don’t now know if the President is getting the information he needs about what powers he has,' according to Rosemary Jenks, policy director at the Center for Immigration Studies. 'Secretary Nielsen and some of the attorneys in DHS are blocking the information because they are afraid of implementing some of the things they can do,' partly because they are afraid of lawsuits, she said.For example, many so-called 'Unaccompanied Alien Children' are being smuggled up the border because Trump’s agencies will pass them to their illegal immigrant parents living throughout the United States, under policies set by Obama. But those youths and children should be sent home, said Jenks, because the 2008 law only protects trafficked victims, such as forced prostitutes, not youths and children who have parents in the United States or who are willingly smuggled up to the border. According to the Washington Post, Vitiello’s exit was prompted by Steve Miller, one of Trump’s first aides who earlier played a key role in derailing the 2013 'Gang of Eight' amnesty and cheap labor bill. 
The Post said:Six administration officials said Friday that the decision to jettison Vitiello was a sign of the expanding influence that Miller now wields over immigration matters in the White House, particularly as Trump lashes out at Mexico and Central American nations — as well as Homeland Security officials and aides who express doubts about the legality of his ideas.The New York Times reported:One person familiar with the president’s thinking said that Mr. Trump believed that Mr. Vitiello did not favor closing the border, as the president had proposed before backing off that threat this week.Another person said that Stephen Miller, the president’s chief policy adviser and a supporter of curtailing legal and illegal immigration, did not support Mr. Vitiello’s nomination.Vitiello’s defenders lashed out at Miller. The Washington Post highlighted the complaints:'Ron Vitiello has spent as much time defending our nation’s borders as Stephen Miller has been alive,' one official said of Miller, who is 33.One senior official said: 'This is part of an increasingly desperate effort by Stephen to throw people under the bus when the policies he has advocated are not effective. Once it becomes clear that Stephen’s policies aren’t working, he tells the president, ‘They’re not the right people.’' But Vitiello’s appointment was opposed by the ICE officers’ union, the National ICE Council. Vitiello 'lacks the judgment and professionalism to effectively lead a federal agency,' said a February letter from union President Chris Crane.") # BBC learn.predict("Israeli PM Benjamin Netanyahu has said he will annex Jewish settlements in the occupied West Bank if he is re-elected.Israelis go to the polls on Tuesday and Mr Netanyahu is competing for votes with right-wing parties who support annexing part of the West Bank.The settlements are illegal under international law, though Israel disputes this.Last month the US recognised the occupied Golan Heights, seized from Syria in 1967, as Israeli territory.Can Jewish settlement issue be resolved?What Trump’s Golan Heights move really meansIsrael's Benjamin Netanyahu: Commando turned PMIsrael has settled about 400,000 Jews in West Bank settlements, with another 200,000 living in East Jerusalem. There are about 2.5 million Palestinians living in the West Bank.Palestinians want to establish a state in the occupied West Bank, East Jerusalem and the Gaza Strip.What happens to the settlements is one of the most contentious issues between Israel and the Palestinians - Palestinians say the presence of settlements make a future independent state impossible.Israel says the Palestinians are using the issue of settlements as a pretext to avoid direct peace talks. It says settlements are not a genuine obstacle to peace and are negotiable.What exactly did Netanyahu say?He was asked during an interview on Israeli TV why he had not extended Israeli sovereignty to large settlements in the West Bank.'You are asking whether we are moving on to the next stage - the answer is yes, we will move to the next stage,' he said.Image copyrightREUTERSImage captionMr Netanyahu is seeking re-election'I am going to extend [Israeli] sovereignty and I don't distinguish between settlement blocs and the isolated settlements.'A spokesman for Palestinian leader Mahmoud Abbas told Reuters: 'Any measures and any announcements will not change the facts. 
Settlements are illegal and they will be removed.'Potentially explosive commentsBy Sebastian Usher, BBC Arab affairs editorThese comments by Benjamin Netanyahu are potentially explosive over an issue that has helped stall peace efforts for years.They will resonate with several parties with which he'll try to form a coalition government if he wins the biggest share of votes.But the very idea of annexation will rouse new Palestinian fury, as well as international condemnation.Mr Netanyahu may have been emboldened by the Trump administration, which just last month recognised Israeli sovereignty over the Golan Heights.What is the political background?Mr Netanyahu's right-wing Likud party is in a tight race with the new centre-right Blue and White alliance.However other parties, some of which support annexation, could end up being kingmakers when they try to form a governing coalition.Israel election: Who are the key candidates?The ex-military chief trying to unseat NetanyahuIn Mr Netanyahu's own Likud party, 28 out of the 29 lawmakers running for re-election are on record as supporting this approach. Until now the prime minister was the only exception.What is the situation of peace negotiations?Mr Trump's administration is preparing to unveil a long-awaited Middle East peace plan, which US officials say will be fair.However the Trump administration has carried out a series of actions that have inflamed Palestinian opinion and generally pleased Israel.In 2017 Mr Trump announced that the US recognised Jerusalem as Israel's capital, overturning decades of official US policy.In response Mr Abbas cut off relations with the US, saying the US could no longer be a peace broker.Last year the US stopped contributing to the UN Relief and Works Agency (Unrwa), which has been looking after Palestinian refugees since 1949.Last month President Trump officially recognised Israeli sovereignty over the occupied Golan Heights.Peace negotiations between Israel and the Palestinians have been at a standstill since 2014, when a US-brokered attempt to reach a deal collapsed.") # Fox News: learn.predict("Former President Barack Obama said on Saturday that he is worried that progressives are creating a “circular firing squad” as prospective Democratic presidential candidates race to the left on a number of hot topic issues ahead of the 2020 election.“The way we structure democracy requires you to take into account people who don’t agree with you,” he said at an Obama Foundation town hall event in Berlin, according to The New York Post. “And that by definition means you’re not going to get 100 percent of what you want.”BARACK OBAMA STILL BELIEVES BIDEN WOULD BE 'AN EXCELLENT PRESIDENT' AMID INAPPROPRIATE TOUCHING ALLEGATIONS: REPORT“One of the things I do worry about sometimes among progressives … we start sometimes creating what’s called a ‘circular firing squad’ where you start shooting at your allies because one of them has strayed from purity on the issues,” he said.Obama’s remarks come as freshman House Democrats such as Rep. Alexandria Ocasio-Cortez, D-N.Y., have pushed once-fringe positions on Medicare-for-all, the Green New Deal and reparations for slavery. In turn, 2020 presidential hopefuls have also taken some of those positions.In that climate, candidates have come under criticism for their past stances from activists. 
South Bend Mayor Pete Buttigieg was forced this week to address remarks he made in 2015 when he said that “all lives matter” -- which some activists say is a counterslogan to the “black lives matter” sloganSen. Kamala Harris, D-Calif., meanwhile has been hit by controversy over her past as a prosecutor. A scathing op-ed published in January in The New York Times, written by law professor Lara Bazelon, has kickstarted renewed scrutiny.Obama reportedly warns freshmen House Democrats about pricey policy proposalsVideoBazelon says Harris previously 'fought tooth and nail to uphold wrongful convictions that had been secured through official misconduct that included evidence tampering, false testimony and the suppression of crucial information by prosecutors.'Bazelon further suggested that Harris should 'apologize to the wrongfully convicted people she has fought to keep in prison and to do what she can to make sure they get justice' or otherwise make clear she has 'radically broken from her past.'Former vice president under Obama, Joe Biden, meanwhile has faced criticism for inappropriate past physical contact with women, as well a a 1993 speech on crime in which he warned of “predators on our streets”'They are beyond the pale many of those people, beyond the pale,' Biden continued. 'And it's a sad commentary on society. We have no choice but to take them out of society.'The latter was reminiscent of heat 2016 presidential nominee Hillary Clinton took from activists for her description of some gang members as “superpredators” in 1996.Obama himself may not escape criticism in the election cycle. His signature health care legislation, the Affordable Care Act, is quickly being eclipsed by calls from Democrats for single-payer and Medicare-for-all plans. Meanwhile, a number of Democrats have said they are open to reparations for black Americans for slavery -- something that Obama opposed when he was in office.") #Pseudoscience learn.predict("Have you ever clicked on a link like 'What does your favorite animal say about you?' wondering what your love of hedgehogs reveals about your psyche? Or filled out a personality assessment to gain new understanding into whether you’re an introverted or extroverted 'type'? People love turning to these kinds of personality quizzes and tests on the hunt for deep insights into themselves. People tend to believe they have a 'true' and revealing self hidden somewhere deep within, so it’s natural that assessments claiming to unveil it will be appealing.As psychologists, we noticed something striking about assessments that claim to uncover people’s 'true type.' Many of the questions are poorly constructed – their wording can be ambiguous and they often contain forced choices between options that are not opposites. This can be true of BuzzFeed-type quizzes as well as more seemingly sober assessments.On the other hand, assessments created by trained personality psychologists use questions that are more straightforward to interpret. The most notable example is probably the well-respected Big Five Inventory. Rather than sorting people into 'types,' it scores people on the established psychological dimensions of openness to new experience, conscientiousness, extroversion, agreeableness and neuroticism. This simplicity is by design; psychology researchers know that the more respondents struggle to understand the question, the worse the question is.But the lack of rigor in 'type' assessments turns out to be a feature, not a bug, for the general public. 
What makes tests less valid can ironically make them more interesting. Since most people aren’t trained to think about psychology in a scientifically rigorous way, it stands to reason they also won’t be great at evaluating those assessments. We recently conducted series of studies to investigate how consumers view these tests. When people try to answer these harder questions, do they think to themselves 'This question is poorly written'? Or instead do they focus on its difficulty and think 'This question’s deep'? Our results suggest that a desire for deep insight can lead to deep confusion.Confusing difficult for deepIn our first study, we showed people items from both the Big Five and from the Keirsey Temperament Sorter (KTS), a popular 'type' assessment that contains many questions we suspected people find comparatively difficult. Our participants rated each item in two ways. First, they rated difficulty. That is, how confusing and ambiguous did they find it? Second, what was its perceived 'depth'? In other words, to what extent did they feel the item seemed to be getting at something hidden deep in the unconscious?Sure enough, not only were these perceptions correlated, the KTS was seen as both more difficult and deeper. In follow-up studies, we experimentally manipulated difficulty. In one study, we modified Big Five items to make them harder to answer like the KTS items, and again we found that participants rated the more difficult versions as 'deeper.'We also noticed that some personality assessments seem to derive their intrigue from having seemingly nothing to do with personality at all. Take one BuzzFeed quiz, for example, that asks about which colors people associate with abstract concepts like letters and days of the week and then outputs 'the true age of your soul.' Even if people trust BuzzFeed more for entertainment than psychological truths, perhaps they are actually on board with the idea that these difficult, abstract decisions do reveal some deep insights. In fact, that is the entire idea behind classically problematic measures such as the Rorschach, or 'ink blot,' test.In two studies inspired by that BuzzFeed quiz, we found exactly that. We gave people items from purported 'personality assessment' checklists. In one study, we assigned half the participants to the 'difficult' condition, wherein the assessment items required them to choose which of two colors they associated with abstract concepts, like the letter 'M.' In the 'easier' condition, respondents were still required to rate colors on how much they associated them with those abstract concepts, but they more simply rated one color at a time instead of choosing between two.Again, participants rated the difficult version as deeper. Seemingly, the sillier the assessment, the better people think it can read the hidden self.Intuition may steer you wrongOne of the implications of this research is that people are going to have a hard time leaving behind the bad ideas baked into popular yet unscientific personality assessments. The most notable example is the Myers-Briggs Type Indicator, which infamously remains quite popular while doing a fairly poor job of assessing personality, due to longstanding issues with the assessment itself and the long-discredited Jungian theory behind it. 
Our findings suggest that Myers-Briggs-like assessments that have largely been debunked by experts might persist in part because their formats overlap quite well with people’s intuitions about what will best access the “true self.”People’s intuitions do them no favors here. Intuitions often undermine scientific thinking on topics like physics and biology. Psychology is no different. People arbitrarily divide parts of themselves into “true” and superficial components and seem all too willing to believe in tests that claim to definitively make those distinctions. But the idea of a “true self” doesn’t really work as a scientific concept.Some people might be stuck in a self-reinforcing yet unproductive line of thought: Personality assessments can cause confusion. That confusion in turn overlaps with intuitions of how they think their deep psychology works, and then they tell themselves the confusion is profound. So intuitions about psychology might be especially pernicious. Following them too closely could lead you to know less about yourself, not more.", thresh=0.5) learn.predict("PETALUMA, CA — An incident in which a white man was reportedly beaten in downtown Petaluma by a group of suspects the victim described as four or five black men is being investigated as a hate crime and an assault, the Petaluma Police Department said Tuesday in a news release.Petaluma police Lt. Ed Crosby said officers immediately responded at 9:03 p.m. Saturday, March 9 to the intersection of Mary Street at Petaluma Boulevard North to a woman's report that her domestic partner, a 60-year-old white man, had just been attacked.The lieutenant said when officers arrived they found the victim on the ground suffering from numerous facial injuries.The man was rushed to Santa Rosa Memorial Hospital where according to police, he stayed two days. Injuries to the victim were confirmed as a fractured left eye socket, a broken nose and other abrasions to his face including facial swelling, Crosby said.The couple told police that the night of the incident they had just finished eating dinner at a restaurant on Petaluma Boulevard North and were walking westbound toward their car, which was parked on Mary Street, when they passed a group of several African-American men who looked to be in their 20s, standing around a four-door, emerald green Honda Civic.The couple said they did not interact with the group and were continuing on their way when one of the men by the green Honda 'hurled profanity at the victim and referred to his [the victim's] race,' Crosby said.'The victim turned around and saw one of the males rushing at him, swinging his arms,' Crosby said.'The victim grabbed the advancing male, brought him to the ground, and pinned him,' Crosby said. 'In response, the other males by the green Honda repeatedly kicked the victim in the face before getting into the green Honda and fleeing the scene.'Petaluma police are asking anyone with information about the incident to contact or leave a message for Petaluma Police Department Officer Ron Flores by calling 707-778-4372.The victim and his female companion were not able to give many descriptive details about the suspects, the lieutenant said, and thus far, officers' efforts in canvassing the downtown area for any witnesses or video footage that would help identify the suspects have not been successful.The green Honda was missing a front license plate; the back license plate may possibly include the numbers 611, according to police.", thresh=.5) learn.data.classes ```
Taken from fastai NLP "8-translation-transformer" FastText embeddings: https://fasttext.cc/docs/en/crawl-vectors.html ``` from fastai2.text.all import * from fastai2.callback.all import * from fastai2.basics import * import seaborn as sns from einops import rearrange import gc import csv path = Path('../data/irish/crosslang') path.ls(), path ``` ### Load saved dataset ``` df=pd.read_csv(path/'paracrawl_cleaned_en-ga.csv') print(len(df)) df.head() sns.distplot(df['ga_len'].values) print(f'Median length is: {np.median(df["ga_len"])}') import seaborn as sns sns.distplot(df['en_len'].values) print(f'Median length is: {np.median(df["en_len"])}') ``` ### Pre-processing **Remove long texts to make things easier** ``` # Word count 90th percentile np.percentile([o for o in df.en_len.values], 90), np.percentile([o for o in df.ga_len.values], 90) print(f'Removing {len(df.query("en_len > 60"))} EN samples where len was > 60') print(len(df)) df=df[~df.index.isin(df.query("en_len > 60").index)] print(len(df)) print(f'Removing {len(df.query("ga_len > 60"))} GA samples where len was > 60') print(len(df)) df=df[~df.index.isin(df.query("ga_len > 60").index)] print(len(df)) sns.distplot(df['en_len'].values), np.median(df['en_len']) ``` **Lowercase everything** ``` df['en'] = df['en'].apply(lambda x:x.lower()) df['ga'] = df['ga'].apply(lambda x:x.lower()) ``` Rules used as part of tokenization ``` proc_rules=defaults.text_proc_rules[:-1] + [partial(lowercase, add_eos=True)] proc_rules ``` ### Get Dataloaders Load vocab to speed up data loading ``` splits = ColSplitter()(df) tfms = [[Tokenizer.from_df(text_cols='en' , rules=proc_rules), attrgetter("text"), Numericalize(max_vocab=20000)], [Tokenizer.from_df(text_cols='ga', lang='ga', rules=proc_rules), attrgetter("text"), Numericalize(max_vocab=20000)]] dl = partial(SortedDL, shuffle=True) dsets = Datasets(df, tfms, splits=splits, dl_type=dl) # en_vocab=[] # ga_vocab=[] # with open('paracrawl_vocab_en.csv', newline='') as csvfile: # v_reader = csv.reader(csvfile, delimiter=',') # for row in v_reader: # en_vocab.append(row[0]) # with open('paracrawl_vocab_ga.csv', newline='') as csvfile: # v_reader = csv.reader(csvfile, delimiter=',') # for row in v_reader: # ga_vocab.append(row[0]) #len(en_vocab), len(ga_vocab), en_vocab[:10], ga_vocab[:10] len(dsets), splits, len(dsets[2][0]), len(dsets[2][1]), dsets[2] bs,sl = 48, 108 dls = dsets.dataloaders(bs=bs, seq_len=sl, before_batch=partial(pad_input, pad_fields=[0,1])) dls.show_batch() ``` Save vocab to speed up data loading ``` with open('paracrawl_vocab_en_v0.2_exp1.csv', 'w', newline='') as csvfile: v_writer = csv.writer(csvfile, delimiter=',') for l in dls.vocab[0]: v_writer.writerow([l]) with open('paracrawl_vocab_ga_v0.2_exp1.csv', 'w', newline='') as csvfile: v_writer = csv.writer(csvfile, delimiter=',') for l in dls.vocab[1]: v_writer.writerow([l]) len(dls.train_ds)+len(dls.valid_ds), len(dls.train), len(dls.valid) print(f'Vocab lengths are : {len(dls.vocab[0]), len(dls.vocab[1])}') o=dls.one_batch(); o[0].size(), o[1].size(), o ``` ## Transformer model ``` class PositionalEncoding(nn.Module): "Encode the position with a sinusoid."
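    # For a position `pos` and model dimension `d`, this module computes frequencies
    # 1/10000^(2i/d) for i = 0 .. d/2 - 1 and returns sin(pos * freq) concatenated with
    # cos(pos * freq) along the last axis, giving a d-dimensional encoding per position.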
def __init__(self, d): super().__init__() self.register_buffer('freq', 1 / (10000 ** (torch.arange(0., d, 2.)/d))) def forward(self, pos): inp = torch.ger(pos, self.freq) enc = torch.cat([inp.sin(), inp.cos()], dim=-1) return enc class TransformerEmbedding(nn.Module): "Embedding + positional encoding + dropout" def __init__(self, vocab_sz, emb_sz, inp_p=0.): super().__init__() self.emb_sz = emb_sz self.embed = Embedding(vocab_sz, emb_sz) self.pos_enc = PositionalEncoding(emb_sz) self.drop = nn.Dropout(inp_p) def forward(self, inp): pos = torch.arange(0, inp.size(1), device=inp.device).float() return self.drop(self.embed(inp) * math.sqrt(self.emb_sz) + self.pos_enc(pos)) ``` ## PyTorch Transformer Simple Note: [src/tgt/memory]_mask should be filled with float(‘-inf’) for the masked positions and float(0.0) else. These masks ensure that predictions for position i depend only on the unmasked positions j and are applied identically for each sequence in a batch. [src/tgt/memory]_key_padding_mask should be a ByteTensor where True values are positions that should be masked with float(‘-inf’) and False values will be unchanged. This mask ensures that no information will be taken from position i if it is masked, and has a separate mask for each sequence in a batch. attn mask with -inf key_padding mask with True ### pt_Transformer ``` class pt_Transformer(Module): def __init__(self, src_vcbsz, trg_vcbsz, n_enc_layers=6, n_dec_layers=6, n_heads=8, d_model=256, d_head=32, d_inner=1024, p=0.1, bias=True, scale=True, double_drop=True, pad_idx=1): self.pad_idx = pad_idx self.enc_tfmr_emb = TransformerEmbedding(src_vcbsz, d_model, p) self.dec_tfmr_emb = TransformerEmbedding(trg_vcbsz, d_model, 0.) self.final = nn.Linear(d_model, trg_vcbsz) # !!! #self.final.weight = self.dec_tfmr_emb.embed.weight # !! What does this do? 
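        # The commented-out line above would tie the decoder embedding and the output
        # projection (weight tying): `self.final.weight` and the target embedding are both
        # of shape (trg_vcbsz, d_model), so sharing one matrix saves parameters and is a
        # common choice in translation models. As written here they are learned independently.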
self.transformer_model=torch.nn.Transformer(d_model=d_model, nhead=n_heads, num_encoder_layers=n_enc_layers, num_decoder_layers=n_dec_layers, dim_feedforward=d_inner, dropout=p, activation='relu', custom_encoder=None, custom_decoder=None) def forward(self, src, trg, src_mask=None, tgt_mask=None, memory_mask=None, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None): enc_emb, dec_emb = self.enc_tfmr_emb(src), self.dec_tfmr_emb(trg) src_mask=self.transformer_model.generate_square_subsequent_mask(src.size(1)).cuda() trg_mask=self.transformer_model.generate_square_subsequent_mask(trg.size(1)).cuda() dec_out = self.transformer_model(enc_emb.permute(1,0,2), dec_emb.permute(1,0,2), src_mask=src_mask, tgt_mask=trg_mask, memory_mask=None, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None) out=self.final(dec_out) return out.permute(1,0,2) ``` ### Metric ``` class CorpusBLEUMetric(Metric): def __init__(self, vocab_sz=5000, axis=-1): """BLEU Metric calculated over the validation corpus""" self.pred_len, self.targ_len, self.corrects, self.counts = 0,0,[0]*4,[0]*4 self.axis, self.vocab_sz = axis, vocab_sz def reset(self): self.pred_len,self.targ_len,self.corrects,self.counts = 0,0,[0]*4,[0]*4 class NGram(): def __init__(self, ngram, max_n=5000): self.ngram,self.max_n = ngram,max_n def __eq__(self, other): if len(self.ngram) != len(other.ngram): return False return np.all(np.array(self.ngram) == np.array(other.ngram)) def __hash__(self): return int(sum([o * self.max_n**i for i,o in enumerate(self.ngram)])) def get_grams(self, x, n, max_n=5000): return x if n==1 else [self.NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)] def get_correct_ngrams(self, pred, targ, n, max_n=5000): pred_grams,targ_grams = self.get_grams(pred, n, max_n=max_n),self.get_grams(targ, n, max_n=max_n) pred_cnt,targ_cnt = Counter(pred_grams),Counter(targ_grams) return sum([min(c, targ_cnt[g]) for g,c in pred_cnt.items()]),len(pred_grams) def accumulate(self, learn): last_output = learn.pred.argmax(dim=self.axis) last_target = learn.y for pred,targ in zip(last_output.cpu().numpy(),last_target.cpu().numpy()): self.pred_len += len(pred) self.targ_len += len(targ) for i in range(4): c,t = self.get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz) self.corrects[i] += c self.counts[i] += t @property def value(self): if self.counts == 0: return None else: precs = [c/t for c,t in zip(self.corrects,self.counts)] len_penalty = exp(1 - self.targ_len/self.pred_len) if self.pred_len < self.targ_len else 1 return len_penalty * ((precs[0]*precs[1]*precs[2]*precs[3]) ** 0.25) ``` ### Callbacks #### Present Input and Target in a single tuple ``` class CombineInputOutputCallback(Callback): '''Callback to combine the input and target text into self.xb''' def __init__(self): pass def begin_batch(self): self.learn.xb = (self.xb[0], self.yb[0]) ``` Shifting and masking of y, from [Annotated Transformer](http://nlp.seas.harvard.edu/2018/04/03/attention.html#training): > We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position i can depend only on the known outputs at positions less than i. 
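For reference, here is a minimal, self-contained sketch (plain PyTorch, not part of this notebook's pipeline) of the two kinds of mask described in the note above: the additive causal mask that `nn.Transformer` expects (`-inf` above the diagonal, `0.0` elsewhere) and a boolean key-padding mask that is `True` at positions to be ignored.

```
import torch

# Additive attention mask: 0.0 where attention is allowed, -inf where it is blocked.
# Same semantics as nn.Transformer.generate_square_subsequent_mask(sz).
def causal_mask(sz):
    blocked = torch.triu(torch.ones(sz, sz), diagonal=1).bool()     # True above the diagonal
    return torch.zeros(sz, sz).masked_fill(blocked, float('-inf'))

# Boolean key-padding mask: True marks positions (e.g. <pad> tokens) to ignore.
def key_padding_mask(tokens, pad_idx=1):
    return tokens == pad_idx                                        # shape (batch, seq_len)

print(causal_mask(4))
print(key_padding_mask(torch.tensor([[5, 9, 1, 1]])))
```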
#### Shifting #### Target shift/offset explained **Taken from [@bentrevett's brilliant github repo "pytorch-seq2seq" tutorials](https://github.com/bentrevett/pytorch-seq2seq/blob/master/6%20-%20Attention%20is%20All%20You%20Need.ipynb):** As we want our model to predict the <eos> token but not have it be an input into our model we simply slice the <eos> token off the end of the sequence. Thus: $$\begin{align*}\text{trg} &= [sos, x_1, x_2, x_3, eos]\\\text{trg[:-1]} &= [sos, x_1, x_2, x_3]\end{align*}$$ $x_i$ denotes **actual** target sequence element. We then feed this into the model to get a predicted sequence that should hopefully predict the <eos> token: $$\begin{align*} \text{output} &= [y_1, y_2, y_3, eos] \end{align*}$$ $y_i$ denotes **predicted** target sequence element. We then calculate our loss using the original trg tensor with the <sos> token sliced off the front, leaving the <eos> token: $$\begin{align*} \text{output} &= [y_1, y_2, y_3, eos]\\ \text{trg[1:]} &= [x_1, x_2, x_3, eos] \end{align*}$$ We then calculate our losses and update our parameters as is standard. We don't want to punish the model for not translating the 'sos' token, but we do need it to predict/define the end of the sentence. **RemoveEOSCallback** Cut the *EOS* token from the **output_x** presented to the model, as we are trying to predict the next word and therefore don't want the model to try anything after the *EOS* token. So the last token given to the model will be the token before *EOS*. This callback modifies the second element of our learn.xb (which is the *copied* yb). But this should also ignore padding, as otherwise we'll be just cutting the last padding token and not the EOS. ``` class RemoveEOSCallback(Callback): ''' Shift the target presented to the model during training to remove the "eos" token as we don't want the model to learn to translate EOS when it sees EOS. In practice we actually mask the EOS token as due to batching the last token will often be a <pad> token, not EOS ''' def __init__(self, eos_idx): self.eos_idx=eos_idx def begin_batch(self): eos_mask=(self.learn.xb[1]!=self.eos_idx) sz=torch.tensor(self.learn.xb[1].size()) sz[1]=sz[1]-1 self.learn.xb = (self.learn.xb[0], self.learn.xb[1][eos_mask].view((sz[0],sz[1]))) ``` **LossTargetShiftCallback:** Shift the target shown to the loss to exclude the "bos" token, as translating "bos" is not part of our language translation objective ``` class LossTargetShiftCallback(Callback): ''' Shift the target shown to the loss to exclude the "bos" token as the first token we want predicted should be an actual word, not the "bos" token (as we have already given the model "bos" ) ''' def __init__(self): pass def after_pred(self): self.learn.yb = (self.learn.yb[0][:,1:],) ``` ### Model Transformer size from Annotated Transformer: N=6, d_model=512, d_ff=2048, h=8 ``` pad_idx=1 assert dls.vocab[1][pad_idx] == 'xxpad' n_x_vocab, n_y_vocab = len(dls.vocab[0]), len(dls.vocab[1]) d_model=512 n_heads=8 #12 d_inner=2048 #1024 #model = Transformer(n_x_vocab, n_y_vocab, d_model=d_model, n_heads=n_heads, pad_idx=pad_idx) model=pt_Transformer(src_vcbsz=n_x_vocab, trg_vcbsz=n_y_vocab, d_model=d_model, d_inner=d_inner) model ``` Kaiming_Normal works terribly, at least if you apply it to everything except LayerNorm...
DistilBERT works ok Could try xavier: ``` def initialize_weights(m): if hasattr(m, 'weight') and m.weight.dim() > 1: nn.init.xavier_uniform_(m.weight.data) model.apply(initialize_weights); ``` **DistilBERT initialisation** ``` # DistilERT HF init weights https://github.com/huggingface/transformers/blob/31e67dd19f1b3fe2bc9a13f86d814f3f7bba48e4/src/transformers/modeling_distilbert.py def distil_apply_leaf(m, f): "Apply `f` to children of `m`." c = m.children() if isinstance(m, nn.Module): f(m) for l in c: apply_leaf(l,f) def _distilbert_init_weights(module): """ Initialize the weights. """ if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=0.02) #std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=0.02) #self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() distil_apply_leaf(model, _distilbert_init_weights) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') ``` ### Learner ``` cbs = [CombineInputOutputCallback, RemoveEOSCallback(eos_idx=3), LossTargetShiftCallback] pad_idx=1 assert dls.vocab[1][pad_idx] == 'xxpad' loss_func = CrossEntropyLossFlat(ignore_index=pad_idx) learn = Learner(dls, model, metrics=[accuracy, Perplexity(), CorpusBLEUMetric(vocab_sz=n_y_vocab)], cbs=cbs, loss_func=loss_func) #learn.load('paracrawl_en_ga_5e_5e-4') ``` # Training ``` learn.lr_find() learn.fit_one_cycle(5, 5e-4, div=5) learn.recorder.plot_loss() learn.save('paracrawl_en_ga_5e_5e-4_v0.2_exp1') ``` ## 5e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## v0.1 - 5e Run ``` learn.fit_one_cycle(5, 5e-4, div=5) learn.save('paracrawl_en_ga_5e_5e-4') learn.export(fname='paracrawl_en_ga_5e_5e-4_learner.pkl') ``` ## Generation ``` def generate(model, sentence, vocab): #model = torch.load('output/transformer.pth') # lang_model = spacy.load('en') # with open('data/processed/en/freq_list.pkl', 'rb') as f: # en_freq_list = pickle.load(f) # with open('data/processed/fr/freq_list.pkl', 'rb') as f: # fr_freq_list = pickle.load(f) #sentence = input('Please enter your english sentence: ') #sentence = tokenize(sentence, en_freq_list, lang_model) model=model.eval() sentence=learn.dls.tokenizer[0][1].encodes(sentence) sentence=learn.dls.numericalize[0].encodes(sentence) translated_sentence = [2] # xxbos #translated_sentence = 
[fr_freq_list['[SOS]']] i = 0 while int(translated_sentence[-1]) != 3 and i < 75: # xxeos #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15: #output = forward_model(model, sentence, translated_sentence).to('cuda') output = forward_model(model, sentence, translated_sentence).cuda() values, indices = torch.topk(output, 5) translated_sentence.append(int(indices[-1][0])) i+=1 detok_translated_sentence=detokenize(translated_sentence, vocab) print(' '.join(detok_translated_sentence)) def forward_model(model, src, tgt): src = torch.as_tensor(src).unsqueeze(0).long().cuda() tgt = torch.as_tensor(tgt).unsqueeze(0).cuda() tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda() output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None) #return output.squeeze(0).to('cpu') return output.squeeze(0).detach() # def tokenize(sentence, freq_list, lang_model): # punctuation = ['(', ')', ':', '"', ' '] # sentence = sentence.lower() # sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation] # return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence] def detokenize(sentence, vocab): #freq_list = {v: k for k, v in freq_list.items()} return [vocab[token] for token in sentence] #return [freq_list[token] for token in sentence] # def detokenize(sentence, freq_list): # freq_list = {v: k for k, v in freq_list.items()} # return [freq_list[token] for token in sentence] def gen_nopeek_mask(length): mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h') mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask ``` ## 5e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## 20e Run ``` # 20e, added shuffle to sorteddl, PT Transformer, distilbert init, Adam, distilbert init # CONCLUSION: learn.fit_one_cycle(20, 5e-4, div=5) learn.save('paracrawl_en_ga_20e_5e-4') learn.export(fname='paracrawl_en_ga_20e_5e-4_learner.pkl') ``` ## Generation ``` def generate(model, sentence, vocab): #model = torch.load('output/transformer.pth') # lang_model = spacy.load('en') # with open('data/processed/en/freq_list.pkl', 'rb') as f: # en_freq_list = pickle.load(f) # with open('data/processed/fr/freq_list.pkl', 'rb') as f: # fr_freq_list = pickle.load(f) #sentence = input('Please enter your english sentence: ') #sentence = tokenize(sentence, en_freq_list, lang_model) model=model.eval() sentence=learn.dls.tokenizer[0][1].encodes(sentence) sentence=learn.dls.numericalize[0].encodes(sentence) translated_sentence = [2] # 
xxbos #translated_sentence = [fr_freq_list['[SOS]']] i = 0 while int(translated_sentence[-1]) != 3 and i < 75: # xxeos #while int(translated_sentence[-1]) != fr_freq_list['[EOS]'] and i < 15: #output = forward_model(model, sentence, translated_sentence).to('cuda') output = forward_model(model, sentence, translated_sentence).cuda() values, indices = torch.topk(output, 5) translated_sentence.append(int(indices[-1][0])) i+=1 detok_translated_sentence=detokenize(translated_sentence, vocab) print(' '.join(detok_translated_sentence)) def forward_model(model, src, tgt): src = torch.as_tensor(src).unsqueeze(0).long().cuda() tgt = torch.as_tensor(tgt).unsqueeze(0).cuda() tgt_mask = gen_nopeek_mask(tgt.shape[1]).cuda() output = model.forward(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=None, tgt_key_padding_mask=None, memory_key_padding_mask=None) #return output.squeeze(0).to('cpu') return output.squeeze(0).detach() # def tokenize(sentence, freq_list, lang_model): # punctuation = ['(', ')', ':', '"', ' '] # sentence = sentence.lower() # sentence = [tok.text for tok in lang_model.tokenizer(sentence) if tok.text not in punctuation] # return [freq_list[word] if word in freq_list else freq_list['[OOV]'] for word in sentence] def detokenize(sentence, vocab): #freq_list = {v: k for k, v in freq_list.items()} return [vocab[token] for token in sentence] #return [freq_list[token] for token in sentence] # def detokenize(sentence, freq_list): # freq_list = {v: k for k, v in freq_list.items()} # return [freq_list[token] for token in sentence] def gen_nopeek_mask(length): mask = rearrange(torch.triu(torch.ones(length, length)) == 1, 'h w -> w h') mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) return mask ``` ## 20e results ``` generate(learn.model, "hello, how are you?", dls.vocab[1]) generate(learn.model, "Can you tell we where the bus station is please?", dls.vocab[1]) generate(learn.model, "Yesterday it rained, but tomorrow will be very sunny", dls.vocab[1]) generate(learn.model, "I had a great day, my translator is working", dls.vocab[1]) generate(learn.model, "So this is a story all about how my lift got flip turned \ upside down, so I'd like to take a minute just sit right there, I'll you all about how I became the fresh prince\ of belair", dls.vocab[1]) generate(learn.model, "dog", dls.vocab[1]) generate(learn.model, "cat", dls.vocab[1]) generate(learn.model, "tree", dls.vocab[1]) generate(learn.model, "building", dls.vocab[1]) generate(learn.model, "city", dls.vocab[1]) generate(learn.model, "woman", dls.vocab[1]) generate(learn.model, "man", dls.vocab[1]) generate(learn.model, "chocolate", dls.vocab[1]) generate(learn.model, "spaceship", dls.vocab[1]) ``` ## Alternative generation ``` # https://forums.fast.ai/t/fastai-v2-text/53529/334 from fastai2.text.all import * defaults.device = torch.device('cpu') path = Path('.') learner = load_learner("./export.pkl") f = open("/tmp/test.txt", "r") test_file_contents = f.read() _, _, losses = learner.predict(test_file_contents) cats = [learner.dls.categorize.decode(i) for i in range(len(losses))] predictions = sorted( zip(cats, map(float, losses)), key=lambda p: p[1], reverse=True ) print(predictions) # OR items = pd.read_csv("/tmp/test.txt", sep = '\t') test_dl = learner.dls.test_dl(items.values) learner.get_preds(dl=test_dl, with_decoded=False) ```
# Deep Markov Model ## Introduction We're going to build a deep probabilistic model for sequential data: the deep markov model. The particular dataset we want to model is composed of snippets of polyphonic music. Each time slice in a sequence spans a quarter note and is represented by an 88-dimensional binary vector that encodes the notes at that time step. Since music is (obviously) temporally coherent, we need a model that can represent complex time dependencies in the observed data. It would not, for example, be appropriate to consider a model in which the notes at a particular time step are independent of the notes at previous time steps. One way to do this is to build a latent variable model in which the variability and temporal structure of the observations is controlled by the dynamics of the latent variables. One particular realization of this idea is a markov model, in which we have a chain of latent variables, with each latent variable in the chain conditioned on the previous latent variable. This is a powerful approach, but if we want to represent complex data with complex (and in this case unknown) dynamics, we would like our model to be sufficiently flexible to accommodate dynamics that are potentially highly non-linear. Thus a deep markov model: we allow for the transition probabilities governing the dynamics of the latent variables as well as the emission probabilities that govern how the observations are generated by the latent dynamics to be parameterized by (non-linear) neural networks. The specific model we're going to implement is based on the following reference: [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; Rahul G. Krishnan, Uri Shalit, David Sontag Please note that while we do not assume that the reader of this tutorial has read the reference, it's definitely a good place to look for a more comprehensive discussion of the deep markov model in the context of other time series models. We've described the model, but how do we go about training it? The inference strategy we're going to use is variational inference, which requires specifying a parameterized family of distributions that can be used to approximate the posterior distribution over the latent random variables. Given the non-linearities and complex time-dependencies inherent in our model and data, we expect the exact posterior to be highly non-trivial. So we're going to need a flexible family of variational distributions if we hope to learn a good model. Happily, together PyTorch and Pyro provide all the necessary ingredients. As we will see, assembling them will be straightforward. Let's get to work. ## The Model A convenient way to describe the high-level structure of the model is with a graphical model. Here, we've rolled out the model assuming that the sequence of observations is of length three: $\{{\bf x}_1, {\bf x}_2, {\bf x}_3\}$. Mirroring the sequence of observations we also have a sequence of latent random variables: $\{{\bf z}_1, {\bf z}_2, {\bf z}_3\}$. The figure encodes the structure of the model. The corresponding joint distribution is $$p({\bf x}_{123} , {\bf z}_{123})=p({\bf x}_1|{\bf z}_1)p({\bf x}_2|{\bf z}_2)p({\bf x}_3|{\bf z}_3)p({\bf z}_1)p({\bf z}_2|{\bf z}_1)p({\bf z}_3|{\bf z}_2)$$ Conditioned on ${\bf z}_t$, each observation ${\bf x}_t$ is independent of the other observations.
This can be read off from the fact that each ${\bf x}_t$ only depends on the corresponding latent ${\bf z}_t$, as indicated by the downward pointing arrows. We can also read off the markov property of the model: each latent ${\bf z}_t$, when conditioned on the previous latent ${\bf z}_{t-1}$, is independent of all previous latents $\{ {\bf z}_{t-2}, {\bf z}_{t-3}, ...\}$. This effectively says that everything one needs to know about the state of the system at time $t$ is encapsulated by the latent ${\bf z}_{t}$. We will assume that the observation likelihoods, i.e. the probability distributions $p({{\bf x}_t}|{{\bf z}_t})$ that control the observations, are given by the bernoulli distribution. This is an appropriate choice since our observations are all 0 or 1. For the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ that control the latent dynamics, we choose (conditional) gaussian distributions with diagonal covariances. This is reasonable since we assume that the latent space is continuous. The solid black squares represent non-linear functions parameterized by neural networks. This is what makes this a _deep_ markov model. Note that the black squares appear in two different places: in between pairs of latents and in between latents and observations. The non-linear function that connects the latent variables ('Trans' in Fig. 1) controls the dynamics of the latent variables. Since we allow the conditional probability distribution of ${\bf z}_{t}$ to depend on ${\bf z}_{t-1}$ in a complex way, we will be able to capture complex dynamics in our model. Similarly, the non-linear function that connects the latent variables to the observations ('Emit' in Fig. 1) controls how the observations depend on the latent dynamics. Some additional notes: - we can freely choose the dimension of the latent space to suit the problem at hand: small latent spaces for simple problems and larger latent spaces for problems with complex dynamics - note the parameter ${\bf z}_0$ in Fig. 1. as will become more apparent from the code, this is just a convenient way for us to parameterize the probability distribution $p({\bf z}_1)$ for the first time step, where there are no previous latents to condition on. ### The Gated Transition and the Emitter Without further ado, let's start writing some code. We first define the two PyTorch Modules that correspond to the black squares in Fig. 1. First the emission function: ```python class Emitter(nn.Module): """ Parameterizes the bernoulli observation likelihood p(x_t | z_t) """ def __init__(self, input_dim, z_dim, emission_dim): super().__init__() # initialize the three linear transformations used in the neural network self.lin_z_to_hidden = nn.Linear(z_dim, emission_dim) self.lin_hidden_to_hidden = nn.Linear(emission_dim, emission_dim) self.lin_hidden_to_input = nn.Linear(emission_dim, input_dim) # initialize the two non-linearities used in the neural network self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def forward(self, z_t): """ Given the latent z at a particular time step t we return the vector of probabilities `ps` that parameterizes the bernoulli distribution p(x_t|z_t) """ h1 = self.relu(self.lin_z_to_hidden(z_t)) h2 = self.relu(self.lin_hidden_to_hidden(h1)) ps = self.sigmoid(self.lin_hidden_to_input(h2)) return ps ``` In the constructor we define the linear transformations that will be used in our emission function. Note that `emission_dim` is the number of hidden units in the neural network. We also define the non-linearities that we will be using. 
The forward call defines the computational flow of the function. We take in the latent ${\bf z}_{t}$ as input and do a sequence of transformations until we obtain a vector of length 88 that defines the emission probabilities of our bernoulli likelihood. Because of the sigmoid, each element of `ps` will be between 0 and 1 and will define a valid probability. Taken together the elements of `ps` encode which notes we expect to observe at time $t$ given the state of the system (as encoded in ${\bf z}_{t}$). Now we define the gated transition function: ```python class GatedTransition(nn.Module): """ Parameterizes the gaussian latent transition probability p(z_t | z_{t-1}) See section 5 in the reference for comparison. """ def __init__(self, z_dim, transition_dim): super().__init__() # initialize the six linear transformations used in the neural network self.lin_gate_z_to_hidden = nn.Linear(z_dim, transition_dim) self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim) self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim, transition_dim) self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim) self.lin_sig = nn.Linear(z_dim, z_dim) self.lin_z_to_loc = nn.Linear(z_dim, z_dim) # modify the default initialization of lin_z_to_loc # so that it's starts out as the identity function self.lin_z_to_loc.weight.data = torch.eye(z_dim) self.lin_z_to_loc.bias.data = torch.zeros(z_dim) # initialize the three non-linearities used in the neural network self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() self.softplus = nn.Softplus() def forward(self, z_t_1): """ Given the latent z_{t-1} corresponding to the time step t-1 we return the mean and scale vectors that parameterize the (diagonal) gaussian distribution p(z_t | z_{t-1}) """ # compute the gating function _gate = self.relu(self.lin_gate_z_to_hidden(z_t_1)) gate = self.sigmoid(self.lin_gate_hidden_to_z(_gate)) # compute the 'proposed mean' _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t_1)) proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean) # assemble the actual mean used to sample z_t, which mixes # a linear transformation of z_{t-1} with the proposed mean # modulated by the gating function loc = (1 - gate) * self.lin_z_to_loc(z_t_1) + gate * proposed_mean # compute the scale used to sample z_t, using the proposed # mean from above as input. the softplus ensures that scale is positive scale = self.softplus(self.lin_sig(self.relu(proposed_mean))) # return loc, scale which can be fed into Normal return loc, scale ``` This mirrors the structure of `Emitter` above, with the difference that the computational flow is a bit more complicated. This is for two reasons. First, the output of `GatedTransition` needs to define a valid (diagonal) gaussian distribution. So we need to output two parameters: the mean `loc`, and the (square root) covariance `scale`. These both need to have the same dimension as the latent space. Second, we don't want to _force_ the dynamics to be non-linear. Thus our mean `loc` is a sum of two terms, only one of which depends non-linearily on the input `z_t_1`. This way we can support both linear and non-linear dynamics (or indeed have the dynamics of part of the latent space be linear, while the remainder of the dynamics is non-linear). ### Model - a Pyro Stochastic Function So far everything we've done is pure PyTorch. To finish translating our model into code we need to bring Pyro into the picture. Basically we need to implement the stochastic nodes (i.e. the circles) in Fig. 1. 
To do this we introduce a callable `model()` that contains the Pyro primitive `pyro.sample`. The `sample` statements will be used to specify the joint distribution over the latents ${\bf z}_{1:T}$. Additionally, the `obs` argument can be used with the `sample` statements to specify how the observations ${\bf x}_{1:T}$ depend on the latents. Before we look at the complete code for `model()`, let's look at a stripped down version that contains the main logic: ```python def model(...): z_prev = self.z_0 # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next two lines of code sample z_t ~ p(z_t | z_{t-1}). # first compute the parameters of the diagonal gaussian # distribution p(z_t | z_{t-1}) z_loc, z_scale = self.trans(z_prev) # then sample z_t according to dist.Normal(z_loc, z_scale) z_t = pyro.sample("z_%d" % t, dist.Normal(z_loc, z_scale)) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.sample("obs_x_%d" % t, dist.Bernoulli(emission_probs_t), obs=mini_batch[:, t - 1, :]) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t ``` The first thing we need to do is sample ${\bf z}_1$. Once we've sampled ${\bf z}_1$, we can sample ${\bf z}_2 \sim p({\bf z}_2|{\bf z}_1)$ and so on. This is the logic implemented in the `for` loop. The parameters `z_loc` and `z_scale` that define the probability distributions $p({\bf z}_t|{\bf z}_{t-1})$ are computed using `self.trans`, which is just an instance of the `GatedTransition` module defined above. For the first time step at $t=1$ we condition on `self.z_0`, which is a (trainable) `Parameter`, while for subsequent time steps we condition on the previously drawn latent. Note that each random variable `z_t` is assigned a unique name by the user. Once we've sampled ${\bf z}_t$ at a given time step, we need to observe the datapoint ${\bf x}_t$. So we pass `z_t` through `self.emitter`, an instance of the `Emitter` module defined above to obtain `emission_probs_t`. Together with the argument `dist.Bernoulli()` in the `sample` statement, these probabilities fully specify the observation likelihood. Finally, we also specify the slice of observed data ${\bf x}_t$: `mini_batch[:, t - 1, :]` using the `obs` argument to `sample`. This fully specifies our model and encapsulates it in a callable that can be passed to Pyro. Before we move on let's look at the full version of `model()` and go through some of the details we glossed over in our first pass. ```python def model(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all PyTorch (sub)modules with pyro # this needs to happen in both the model and guide pyro.module("dmm", self) # set z_prev = z_0 to setup the recursive conditioning in p(z_t | z_{t-1}) z_prev = self.z_0.expand(mini_batch.size(0), self.z_0.size(0)) # we enclose all the sample statements in the model in a plate. 
# this marks that each datapoint is conditionally independent of the others with pyro.plate("z_minibatch", len(mini_batch)): # sample the latents z and observed x's one time step at a time for t in range(1, T_max + 1): # the next chunk of code samples z_t ~ p(z_t | z_{t-1}) # note that (both here and elsewhere) we use poutine.scale to take care # of KL annealing. we use the mask() method to deal with raggedness # in the observed data (i.e. different sequences in the mini-batch # have different lengths) # first compute the parameters of the diagonal gaussian # distribution p(z_t | z_{t-1}) z_loc, z_scale = self.trans(z_prev) # then sample z_t according to dist.Normal(z_loc, z_scale). # note that we use the reshape method so that the univariate # Normal distribution is treated as a multivariate Normal # distribution with a diagonal covariance. with poutine.scale(None, annealing_factor): z_t = pyro.sample("z_%d" % t, dist.Normal(z_loc, z_scale) .mask(mini_batch_mask[:, t - 1:t]) .to_event(1)) # compute the probabilities that parameterize the bernoulli likelihood emission_probs_t = self.emitter(z_t) # the next statement instructs pyro to observe x_t according to the # bernoulli distribution p(x_t|z_t) pyro.sample("obs_x_%d" % t, dist.Bernoulli(emission_probs_t) .mask(mini_batch_mask[:, t - 1:t]) .to_event(1), obs=mini_batch[:, t - 1, :]) # the latent sampled at this time step will be conditioned upon # in the next time step so keep track of it z_prev = z_t ``` The first thing to note is that `model()` takes a number of arguments. For now let's just take a look at `mini_batch` and `mini_batch_mask`. `mini_batch` is a three dimensional tensor, with the first dimension being the batch dimension, the second dimension being the temporal dimension, and the final dimension being the features (88-dimensional in our case). To speed up the code, whenever we run `model` we're going to process an entire mini-batch of sequences (i.e. we're going to take advantage of vectorization). This is sensible because our model is implicitly defined over a single observed sequence. The probability of a set of sequences is just given by the products of the individual sequence probabilities. In other words, given the parameters of the model the sequences are conditionally independent. This vectorization introduces some complications because sequences can be of different lengths. This is where `mini_batch_mask` comes in. `mini_batch_mask` is a two dimensional 0/1 mask of dimensions `mini_batch_size` x `T_max`, where `T_max` is the maximum length of any sequence in the mini-batch. This encodes which parts of `mini_batch` are valid observations. So the first thing we do is grab `T_max`: we have to unroll our model for at least this many time steps. Note that this will result in a lot of 'wasted' computation, since some of the sequences will be shorter than `T_max`, but this is a small price to pay for the big speed-ups that come with vectorization. We just need to make sure that none of the 'wasted' computations 'pollute' our model computation. We accomplish this by passing the mask appropriate to time step $t$ to the `mask` method (which acts on the distribution that needs masking). Finally, the line `pyro.module("dmm", self)` is equivalent to a bunch of `pyro.param` statements for each parameter in the model. This lets Pyro know which parameters are part of the model. Just like for the `sample` statement, we give the module a unique name. This name will be incorporated into the name of the `Parameters` in the model. 
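To make the shape contract of `mini_batch_mask` concrete, here is a small illustration (a hypothetical helper written for this walkthrough, not the tutorial's own data-loading code) of how such a 0/1 mask can be built from per-sequence lengths:

```python
import torch

# Build a (mini_batch_size, T_max) mask with 1.0 at valid time steps
# (t < sequence length) and 0.0 at padded ones.
def build_mask(seq_lengths, T_max):
    positions = torch.arange(T_max).unsqueeze(0)            # shape (1, T_max)
    return (positions < seq_lengths.unsqueeze(1)).float()   # shape (mini_batch_size, T_max)

print(build_mask(torch.tensor([3, 5]), T_max=5))
# tensor([[1., 1., 1., 0., 0.],
#         [1., 1., 1., 1., 1.]])
```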
We leave a discussion of the KL annealing factor for later. ## Inference At this point we've fully specified our model. The next step is to set ourselves up for inference. As mentioned in the introduction, our inference strategy is going to be variational inference (see [SVI Part I](svi_part_i.ipynb) for an introduction). So our next task is to build a family of variational distributions appropriate to doing inference in a deep markov model. However, at this point it's worth emphasizing that nothing about the way we've implemented `model()` ties us to variational inference. In principle we could use _any_ inference strategy available in Pyro. For example, in this particular context one could imagine using some variant of Sequential Monte Carlo (although this is not currently supported in Pyro). ### Guide The purpose of the guide (i.e. the variational distribution) is to provide a (parameterized) approximation to the exact posterior $p({\bf z}_{1:T}|{\bf x}_{1:T})$. Actually, there's an implicit assumption here which we should make explicit, so let's take a step back. Suppose our dataset $\mathcal{D}$ consists of $N$ sequences $\{ {\bf x}_{1:T_1}^1, {\bf x}_{1:T_2}^2, ..., {\bf x}_{1:T_N}^N \}$. Then the posterior we're actually interested in is given by $p({\bf z}_{1:T_1}^1, {\bf z}_{1:T_2}^2, ..., {\bf z}_{1:T_N}^N | \mathcal{D})$, i.e. we want to infer the latents for _all_ $N$ sequences. Even for small $N$ this is a very high-dimensional distribution that will require a very large number of parameters to specify. In particular if we were to directly parameterize the posterior in this form, the number of parameters required would grow (at least) linearly with $N$. One way to avoid this nasty growth with the size of the dataset is *amortization* (see the analogous discussion in [SVI Part II](svi_part_ii.ipynb)). #### Aside: Amortization This works as follows. Instead of introducing variational parameters for each sequence in our dataset, we're going to learn a single parametric function $f({\bf x}_{1:T})$ and work with a variational distribution that has the form $\prod_{n=1}^N q({\bf z}_{1:T_n}^n | f({\bf x}_{1:T_n}^n))$. The function $f(\cdot)$&mdash;which basically maps a given observed sequence to a set of variational parameters tailored to that sequence&mdash;will need to be sufficiently rich to capture the posterior accurately, but now we can handle large datasets without having to introduce an obscene number of variational parameters. So our task is to construct the function $f(\cdot)$. Since in our case we need to support variable-length sequences, it's only natural that $f(\cdot)$ have a RNN in the loop. Before we look at the various component parts that make up our $f(\cdot)$ in detail, let's look at a computational graph that encodes the basic structure. At the bottom of the figure we have our sequence of three observations. These observations will be consumed by a RNN that reads the observations from right to left and outputs three hidden states $\{ {\bf h}_1, {\bf h}_2,{\bf h}_3\}$. Note that this computation is done _before_ we sample any latent variables. Next, each of the hidden states will be fed into a `Combiner` module whose job is to output the mean and covariance of the conditional distribution $q({\bf z}_t | {\bf z}_{t-1}, {\bf x}_{t:T})$, which we take to be given by a diagonal gaussian distribution. (Just like in the model, the conditional structure of ${\bf z}_{1:T}$ in the guide is such that we sample ${\bf z}_t$ forward in time.)
In addition to the RNN hidden state, the `Combiner` also takes the latent random variable from the previous time step as input, except for $t=1$, where it instead takes the trainable (variational) parameter ${\bf z}_0^{\rm{q}}$. #### Aside: Guide Structure Why do we setup the RNN to consume the observations from right to left? Why not left to right? With this choice our conditional distribution $q({\bf z}_t |...)$ depends on two things: - the latent ${\bf z}_{t-1}$ from the previous time step; and - the observations ${\bf x}_{t:T}$, i.e. the current observation together with all future observations We are free to make other choices; all that is required is that that the guide is a properly normalized distribution that plays nice with autograd. This particular choice is motivated by the dependency structure of the true posterior: see reference [1] for a detailed discussion. In brief, while we could, for example, condition on the entire sequence of observations, because of the markov structure of the model everything that we need to know about the previous observations ${\bf x}_{1:t-1}$ is encapsulated by ${\bf z}_{t-1}$. We could condition on more things, but there's no need; and doing so will probably tend to dilute the learning signal. So running the RNN from right to left is the most natural choice for this particular model. Let's look at the component parts in detail. First, the `Combiner` module: ```python class Combiner(nn.Module): """ Parameterizes q(z_t | z_{t-1}, x_{t:T}), which is the basic building block of the guide (i.e. the variational distribution). The dependence on x_{t:T} is through the hidden state of the RNN (see the pytorch module `rnn` below) """ def __init__(self, z_dim, rnn_dim): super().__init__() # initialize the three linear transformations used in the neural network self.lin_z_to_hidden = nn.Linear(z_dim, rnn_dim) self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim) self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim) # initialize the two non-linearities used in the neural network self.tanh = nn.Tanh() self.softplus = nn.Softplus() def forward(self, z_t_1, h_rnn): """ Given the latent z at at a particular time step t-1 as well as the hidden state of the RNN h(x_{t:T}) we return the mean and scale vectors that parameterize the (diagonal) gaussian distribution q(z_t | z_{t-1}, x_{t:T}) """ # combine the rnn hidden state with a transformed version of z_t_1 h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(z_t_1)) + h_rnn) # use the combined hidden state to compute the mean used to sample z_t loc = self.lin_hidden_to_loc(h_combined) # use the combined hidden state to compute the scale used to sample z_t scale = self.softplus(self.lin_hidden_to_scale(h_combined)) # return loc, scale which can be fed into Normal return loc, scale ``` This module has the same general structure as `Emitter` and `GatedTransition` in the model. The only thing of note is that because the `Combiner` needs to consume two inputs at each time step, it transforms the inputs into a single combined hidden state `h_combined` before it computes the outputs. Apart from the RNN, we now have all the ingredients we need to construct our guide distribution. Happily, PyTorch has great built-in RNN modules, so we don't have much work to do here. We'll see where we instantiate the RNN later. Let's instead jump right into the definition of the stochastic function `guide()`. 
```python def guide(self, mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor=1.0): # this is the number of time steps we need to process in the mini-batch T_max = mini_batch.size(1) # register all PyTorch (sub)modules with pyro pyro.module("dmm", self) # if on gpu we need the fully broadcast view of the rnn initial state # to be in contiguous gpu memory h_0_contig = self.h_0.expand(1, mini_batch.size(0), self.rnn.hidden_size).contiguous() # push the observed x's through the rnn; # rnn_output contains the hidden state at each time step rnn_output, _ = self.rnn(mini_batch_reversed, h_0_contig) # reverse the time-ordering in the hidden state and un-pack it rnn_output = poly.pad_and_reverse(rnn_output, mini_batch_seq_lengths) # set z_prev = z_q_0 to setup the recursive conditioning in q(z_t |...) z_prev = self.z_q_0.expand(mini_batch.size(0), self.z_q_0.size(0)) # we enclose all the sample statements in the guide in a plate. # this marks that each datapoint is conditionally independent of the others. with pyro.plate("z_minibatch", len(mini_batch)): # sample the latents z one time step at a time for t in range(1, T_max + 1): # the next two lines assemble the distribution q(z_t | z_{t-1}, x_{t:T}) z_loc, z_scale = self.combiner(z_prev, rnn_output[:, t - 1, :]) z_dist = dist.Normal(z_loc, z_scale) # sample z_t from the distribution z_dist with pyro.poutine.scale(None, annealing_factor): z_t = pyro.sample("z_%d" % t, z_dist.mask(mini_batch_mask[:, t - 1:t]) .to_event(1)) # the latent sampled at this time step will be conditioned # upon in the next time step so keep track of it z_prev = z_t ``` The high-level structure of `guide()` is very similar to `model()`. First note that the model and guide take the same arguments: this is a general requirement for model/guide pairs in Pyro. As in the model, there's a call to `pyro.module` that registers all the parameters with Pyro. Also, the `for` loop has the same structure as the one in `model()`, with the difference that the guide only needs to sample latents (there are no `sample` statements with the `obs` keyword). Finally, note that the names of the latent variables in the guide exactly match those in the model. This is how Pyro knows to correctly align random variables. The RNN logic should be familar to PyTorch users, but let's go through it quickly. First we prepare the initial state of the RNN, `h_0`. Then we invoke the RNN via its forward call; the resulting tensor `rnn_output` contains the hidden states for the entire mini-batch. Note that because we want the RNN to consume the observations from right to left, the input to the RNN is `mini_batch_reversed`, which is a copy of `mini_batch` with all the sequences running in _reverse_ temporal order. Furthermore, `mini_batch_reversed` has been wrapped in a PyTorch `rnn.pack_padded_sequence` so that the RNN can deal with variable-length sequences. Since we do our sampling in latent space in normal temporal order, we use the helper function `pad_and_reverse` to reverse the hidden state sequences in `rnn_output`, so that we can feed the `Combiner` RNN hidden states that are correctly aligned and ordered. This helper function also unpacks the `rnn_output` so that it is no longer in the form of a PyTorch `rnn.pack_padded_sequence`. ## Packaging the Model and Guide as a PyTorch Module At this juncture, we're ready to proceed to inference. But before we do so let's quickly go over how we packaged the model and guide as a single PyTorch Module. 
This is generally good practice, especially for larger models. ```python class DMM(nn.Module): """ This PyTorch Module encapsulates the model as well as the variational distribution (the guide) for the Deep Markov Model """ def __init__(self, input_dim=88, z_dim=100, emission_dim=100, transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0, num_iafs=0, iaf_dim=50, use_cuda=False): super().__init__() # instantiate pytorch modules used in the model and guide below self.emitter = Emitter(input_dim, z_dim, emission_dim) self.trans = GatedTransition(z_dim, transition_dim) self.combiner = Combiner(z_dim, rnn_dim) self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu', batch_first=True, bidirectional=False, num_layers=1, dropout=rnn_dropout_rate) # define a (trainable) parameters z_0 and z_q_0 that help define # the probability distributions p(z_1) and q(z_1) # (since for t = 1 there are no previous latents to condition on) self.z_0 = nn.Parameter(torch.zeros(z_dim)) self.z_q_0 = nn.Parameter(torch.zeros(z_dim)) # define a (trainable) parameter for the initial hidden state of the rnn self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim)) self.use_cuda = use_cuda # if on gpu cuda-ize all pytorch (sub)modules if use_cuda: self.cuda() # the model p(x_{1:T} | z_{1:T}) p(z_{1:T}) def model(...): # ... as above ... # the guide q(z_{1:T} | x_{1:T}) (i.e. the variational distribution) def guide(...): # ... as above ... ``` Since we've already gone over `model` and `guide`, our focus here is on the constructor. First we instantiate the four PyTorch modules that we use in our model and guide. On the model-side: `Emitter` and `GatedTransition`. On the guide-side: `Combiner` and the RNN. Next we define PyTorch `Parameter`s for the initial state of the RNN as well as `z_0` and `z_q_0`, which are fed into `self.trans` and `self.combiner`, respectively, in lieu of the non-existent random variable $\bf z_0$. The important point to make here is that all of these `Module`s and `Parameter`s are attributes of `DMM` (which itself inherits from `nn.Module`). This has the consequence they are all automatically registered as belonging to the module. So, for example, when we call `parameters()` on an instance of `DMM`, PyTorch will know to return all the relevant parameters. It also means that when we invoke `pyro.module("dmm", self)` in `model()` and `guide()`, all the parameters of both the model and guide will be registered with Pyro. Finally, it means that if we're running on a GPU, the call to `cuda()` will move all the parameters into GPU memory. ## Stochastic Variational Inference With our model and guide at hand, we're finally ready to do inference. Before we look at the full logic that is involved in a complete experimental script, let's first see how to take a single gradient step. First we instantiate an instance of `DMM` and setup an optimizer. ```python # instantiate the dmm dmm = DMM(input_dim, z_dim, emission_dim, transition_dim, rnn_dim, args.rnn_dropout_rate, args.num_iafs, args.iaf_dim, args.cuda) # setup optimizer adam_params = {"lr": args.learning_rate, "betas": (args.beta1, args.beta2), "clip_norm": args.clip_norm, "lrd": args.lr_decay, "weight_decay": args.weight_decay} optimizer = ClippedAdam(adam_params) ``` Here we're using an implementation of the Adam optimizer that includes gradient clipping. This mitigates some of the problems that can occur when training recurrent neural networks (e.g. vanishing/exploding gradients). Next we setup the inference algorithm. 
```python # setup inference algorithm svi = SVI(dmm.model, dmm.guide, optimizer, Trace_ELBO()) ``` The inference algorithm `SVI` uses a stochastic gradient estimator to take gradient steps on an objective function, which in this case is given by the ELBO (the evidence lower bound). As the name indicates, the ELBO is a lower bound to the log evidence: $\log p(\mathcal{D})$. As we take gradient steps that maximize the ELBO, we move our guide $q(\cdot)$ closer to the exact posterior. The argument `Trace_ELBO()` constructs a version of the gradient estimator that doesn't need access to the dependency structure of the model and guide. Since all the latent variables in our model are reparameterizable, this is the appropriate gradient estimator for our use case. (It's also the default option.) Assuming we've prepared the various arguments of `dmm.model` and `dmm.guide`, taking a gradient step is accomplished by calling ```python svi.step(mini_batch, ...) ``` That's all there is to it! Well, not quite. This will be the main step in our inference algorithm, but we still need to implement a complete training loop with preparation of mini-batches, evaluation, and so on. This sort of logic will be familiar to any deep learner but let's see how it looks in PyTorch/Pyro. ## The Black Magic of Optimization Actually, before we get to the guts of training, let's take a moment and think a bit about the optimization problem we've setup. We've traded Bayesian inference in a non-linear model with a high-dimensional latent space&mdash;a hard problem&mdash;for a particular optimization problem. Let's not kid ourselves, this optimization problem is pretty hard too. Why? Let's go through some of the reasons: - the space of parameters we're optimizing over is very high-dimensional (it includes all the weights in all the neural networks we've defined). - our objective function (the ELBO) cannot be computed analytically. so our parameter updates will be following noisy Monte Carlo gradient estimates - data-subsampling serves as an additional source of stochasticity: even if we wanted to, we couldn't in general take gradient steps on the ELBO defined over the whole dataset (actually in our particular case the dataset isn't so large, but let's ignore that). - given all the neural networks and non-linearities we have in the loop, our (stochastic) loss surface is highly non-trivial The upshot is that if we're going to find reasonable (local) optima of the ELBO, we better take some care in deciding how to do optimization. This isn't the time or place to discuss all the different strategies that one might adopt, but it's important to emphasize how decisive a good or bad choice in learning hyperparameters (the learning rate, the mini-batch size, etc.) can be. Before we move on, let's discuss one particular optimization strategy that we're making use of in greater detail: KL annealing. In our case the ELBO is the sum of two terms: an expected log likelihood term (which measures model fit) and a sum of KL divergence terms (which serve to regularize the approximate posterior): $\rm{ELBO} = \mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ This latter term can be a quite strong regularizer, and in early stages of training it has a tendency to favor regions of the loss surface that contain lots of bad local optima. 
One strategy to avoid these bad local optima, which was also adopted in reference [1], is to anneal the KL divergence terms by multiplying them by a scalar `annealing_factor` that ranges between zero and one: $\mathbb{E}_{q({\bf z}_{1:T})}[\log p({\bf x}_{1:T}|{\bf z}_{1:T})] - \rm{annealing\_factor} \times \mathbb{E}_{q({\bf z}_{1:T})}[ \log q({\bf z}_{1:T}) - \log p({\bf z}_{1:T})]$ The idea is that during the course of training the `annealing_factor` rises slowly from its initial value at/near zero to its final value at 1.0. The annealing schedule is arbitrary; below we will use a simple linear schedule. In terms of code, to scale the log likelihoods by the appropriate annealing factor we enclose each of the latent sample statements in the model and guide with a `pyro.poutine.scale` context. Finally, we should mention that the main difference between the DMM implementation described here and the one used in reference [1] is that they take advantage of the analytic formula for the KL divergence between two gaussian distributions (whereas we rely on Monte Carlo estimates). This leads to lower variance gradient estimates of the ELBO, which makes training a bit easier. We can still train the model without making this analytic substitution, but training probably takes somewhat longer because of the higher variance. Support for analytic KL divergences in Pyro is something we plan to add in the future. ## Data Loading, Training, and Evaluation First we load the data. There are 229 sequences in the training dataset, each with an average length of ~60 time steps. ```python jsb_file_loc = "./data/jsb_processed.pkl" data = pickle.load(open(jsb_file_loc, "rb")) training_seq_lengths = data['train']['sequence_lengths'] training_data_sequences = data['train']['sequences'] test_seq_lengths = data['test']['sequence_lengths'] test_data_sequences = data['test']['sequences'] val_seq_lengths = data['valid']['sequence_lengths'] val_data_sequences = data['valid']['sequences'] N_train_data = len(training_seq_lengths) N_train_time_slices = np.sum(training_seq_lengths) N_mini_batches = int(N_train_data / args.mini_batch_size + int(N_train_data % args.mini_batch_size > 0)) ``` For this dataset we will typically use a `mini_batch_size` of 20, so that there will be 12 mini-batches per epoch. 
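As a quick sanity check on that last claim, the mini-batch count follows directly from the formula in the code above; this tiny worked example just plugs in the numbers quoted in this section (229 training sequences, mini-batch size 20).

```python
# 229 training sequences with a mini-batch size of 20 gives
# 11 full mini-batches plus one partial mini-batch of 9 sequences
N_train_data = 229
mini_batch_size = 20
N_mini_batches = int(N_train_data / mini_batch_size +
                     int(N_train_data % mini_batch_size > 0))
print(N_mini_batches)  # 12
```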
Next we define the function `process_minibatch` which prepares a mini-batch for training and takes a gradient step: ```python def process_minibatch(epoch, which_mini_batch, shuffled_indices): if args.annealing_epochs > 0 and epoch < args.annealing_epochs: # compute the KL annealing factor appropriate # for the current mini-batch in the current epoch min_af = args.minimum_annealing_factor annealing_factor = min_af + (1.0 - min_af) * \ (float(which_mini_batch + epoch * N_mini_batches + 1) / float(args.annealing_epochs * N_mini_batches)) else: # by default the KL annealing factor is unity annealing_factor = 1.0 # compute which sequences in the training set we should grab mini_batch_start = (which_mini_batch * args.mini_batch_size) mini_batch_end = np.min([(which_mini_batch + 1) * args.mini_batch_size, N_train_data]) mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end] # grab the fully prepped mini-batch using the helper function in the data loader mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths \ = poly.get_mini_batch(mini_batch_indices, training_data_sequences, training_seq_lengths, cuda=args.cuda) # do an actual gradient step loss = svi.step(mini_batch, mini_batch_reversed, mini_batch_mask, mini_batch_seq_lengths, annealing_factor) # keep track of the training loss return loss ``` We first compute the KL annealing factor appropriate to the mini-batch (according to a linear schedule as described earlier). We then compute the mini-batch indices, which we pass to the helper function `get_mini_batch`. This helper function takes care of a number of different things: - it sorts each mini-batch by sequence length - it calls another helper function to get a copy of the mini-batch in reversed temporal order - it packs each reversed mini-batch in a `rnn.pack_padded_sequence`, which is then ready to be ingested by the RNN - it cuda-izes all tensors if we're on a GPU - it calls another helper function to get an appropriate 0/1 mask for the mini-batch We then pipe all the return values of `get_mini_batch()` into `elbo.step(...)`. Recall that these arguments will be further piped to `model(...)` and `guide(...)` during construction of the gradient estimator in `elbo`. Finally, we return a float which is a noisy estimate of the loss for that mini-batch. We now have all the ingredients required for the main bit of our training loop: ```python times = [time.time()] for epoch in range(args.num_epochs): # accumulator for our estimate of the negative log likelihood # (or rather -elbo) for this epoch epoch_nll = 0.0 # prepare mini-batch subsampling indices for this epoch shuffled_indices = np.arange(N_train_data) np.random.shuffle(shuffled_indices) # process each mini-batch; this is where we take gradient steps for which_mini_batch in range(N_mini_batches): epoch_nll += process_minibatch(epoch, which_mini_batch, shuffled_indices) # report training diagnostics times.append(time.time()) epoch_time = times[-1] - times[-2] log("[training epoch %04d] %.4f \t\t\t\t(dt = %.3f sec)" % (epoch, epoch_nll / N_train_time_slices, epoch_time)) ``` At the beginning of each epoch we shuffle the indices pointing to the training data. We then process each mini-batch until we've gone through the entire training set, accumulating the training loss as we go. Finally we report some diagnostic info. Note that we normalize the loss by the total number of time slices in the training set (this allows us to compare to reference [1]). 
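To make the role of the 0/1 mask and the reversed, packed mini-batch more concrete, here is a small self-contained illustration of the kind of preprocessing `get_mini_batch` performs. This is a sketch for intuition only (Pyro's actual helper also sorts the batch by length and moves tensors to the GPU when requested); the toy batch, lengths, and tensor shapes are made up for the example.

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

seq_lengths = torch.tensor([4, 2])    # two sequences, already sorted by decreasing length
mini_batch = torch.randn(2, 4, 3)     # (batch, T_max, input_dim), zero-padded at the end

# 0/1 mask: entry (b, t) is 1 exactly when time step t is a real observation of sequence b
mini_batch_mask = (torch.arange(4).unsqueeze(0) < seq_lengths.unsqueeze(1)).float()

# reverse each sequence in time, leaving the padding in place at the end
mini_batch_reversed = torch.zeros_like(mini_batch)
for b, T in enumerate(seq_lengths.tolist()):
    mini_batch_reversed[b, :T] = mini_batch[b, :T].flip(0)

# pack the reversed batch so the RNN skips the padded time steps
packed = pack_padded_sequence(mini_batch_reversed, seq_lengths, batch_first=True)
print(mini_batch_mask)     # [[1., 1., 1., 1.], [1., 1., 0., 0.]]
print(packed.batch_sizes)  # tensor([2, 2, 1, 1])
```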
## Evaluation This training loop is still missing any kind of evaluation diagnostics. Let's fix that. First we need to prepare the validation and test data for evaluation. Since the validation and test datasets are small enough that we can easily fit them into memory, we're going to process each dataset batchwise (i.e. we will not be breaking up the dataset into mini-batches). [_Aside: at this point the reader may ask why we don't do the same thing for the training set. The reason is that additional stochasticity due to data-subsampling is often advantageous during optimization: in particular it can help us avoid local optima._] And, in fact, in order to get a lessy noisy estimate of the ELBO, we're going to compute a multi-sample estimate. The simplest way to do this would be as follows: ```python val_loss = svi.evaluate_loss(val_batch, ..., num_particles=5) ``` This, however, would involve an explicit `for` loop with five iterations. For our particular model, we can do better and vectorize the whole computation. The only way to do this currently in Pyro is to explicitly replicate the data `n_eval_samples` many times. This is the strategy we follow: ```python # package repeated copies of val/test data for faster evaluation # (i.e. set us up for vectorization) def rep(x): return np.repeat(x, n_eval_samples, axis=0) # get the validation/test data ready for the dmm: pack into sequences, etc. val_seq_lengths = rep(val_seq_lengths) test_seq_lengths = rep(test_seq_lengths) val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths = poly.get_mini_batch( np.arange(n_eval_samples * val_data_sequences.shape[0]), rep(val_data_sequences), val_seq_lengths, cuda=args.cuda) test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths = \ poly.get_mini_batch(np.arange(n_eval_samples * test_data_sequences.shape[0]), rep(test_data_sequences), test_seq_lengths, cuda=args.cuda) ``` With the test and validation data now fully prepped, we define the helper function that does the evaluation: ```python def do_evaluation(): # put the RNN into evaluation mode (i.e. turn off drop-out if applicable) dmm.rnn.eval() # compute the validation and test loss val_nll = svi.evaluate_loss(val_batch, val_batch_reversed, val_batch_mask, val_seq_lengths) / np.sum(val_seq_lengths) test_nll = svi.evaluate_loss(test_batch, test_batch_reversed, test_batch_mask, test_seq_lengths) / np.sum(test_seq_lengths) # put the RNN back into training mode (i.e. turn on drop-out if applicable) dmm.rnn.train() return val_nll, test_nll ``` We simply call the `evaluate_loss` method of `elbo`, which takes the same arguments as `step()`, namely the arguments that are passed to the model and guide. Note that we have to put the RNN into and out of evaluation mode to account for dropout. We can now stick `do_evaluation()` into the training loop; see [the source code](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py) for details. ## Results Let's make sure that our implementation gives reasonable results. We can use the numbers reported in reference [1] as a sanity check. For the same dataset and a similar model/guide setup (dimension of the latent space, number of hidden units in the RNN, etc.) they report a normalized negative log likelihood (NLL) of `6.93` on the testset (lower is better$)^{\S}$. This is to be compared to our result of `6.87`. These numbers are very much in the same ball park, which is reassuring. 
It seems that, at least for this dataset, not using analytic expressions for the KL divergences doesn't degrade the quality of the learned model (although, as discussed above, the training probably takes somewhat longer). In the figure we show how the test NLL progresses during training for a single sample run (one with a rather conservative learning rate). Most of the progress is during the first 3000 epochs or so, with some marginal gains if we let training go on for longer. On a GeForce GTX 1080, 5000 epochs takes about 20 hours. | `num_iafs` | test NLL | |---|---| | `0` | `6.87` | | `1` | `6.82` | | `2` | `6.80` | Finally, we also report results for guides with normalizing flows in the mix (details to be found in the next section). ${ \S\;}$ Actually, they seem to report two numbers—6.93 and 7.03—for the same model/guide and it's not entirely clear how the two reported numbers are different. ## Bells, whistles, and other improvements ### Inverse Autoregressive Flows One of the great things about a probabilistic programming language is that it encourages modularity. Let's showcase an example in the context of the DMM. We're going to make our variational distribution richer by adding normalizing flows to the mix (see reference [2] for a discussion). **This will only cost us four additional lines of code!** First, in the `DMM` constructor we add ```python iafs = [AffineAutoregressive(AutoRegressiveNN(z_dim, [iaf_dim])) for _ in range(num_iafs)] self.iafs = nn.ModuleList(iafs) ``` This instantiates `num_iafs` many bijective transforms of the `AffineAutoregressive` type (see references [3,4]); each normalizing flow will have `iaf_dim` many hidden units. We then bundle the normalizing flows in a `nn.ModuleList`; this is just the PyTorchy way to package a list of `nn.Module`s. Next, in the guide we add the lines ```python if self.iafs.__len__() > 0: z_dist = TransformedDistribution(z_dist, self.iafs) ``` Here we're taking the base distribution `z_dist`, which in our case is a conditional gaussian distribution, and using the `TransformedDistribution` construct we transform it into a non-gaussian distribution that is, by construction, richer than the base distribution. Voila! ### Checkpointing If we want to recover from a catastrophic failure in our training loop, there are two kinds of state we need to keep track of. The first is the various parameters of the model and guide. The second is the state of the optimizers (e.g. in Adam this will include the running average of recent gradient estimates for each parameter). In Pyro, the parameters can all be found in the `ParamStore`. However, PyTorch also keeps track of them for us via the `parameters()` method of `nn.Module`. So one simple way we can save the parameters of the model and guide is to make use of the `state_dict()` method of `dmm` in conjunction with `torch.save()`; see below. In the case that we have `AffineAutoregressive`'s in the loop, this is in fact the only option at our disposal. This is because the `AffineAutoregressive` module contains what are called 'persistent buffers' in PyTorch parlance. These are things that carry state but are not `Parameter`s. The `state_dict()` and `load_state_dict()` methods of `nn.Module` know how to deal with buffers correctly. To save the state of the optimizers, we have to use functionality inside of `pyro.optim.PyroOptim`. 
Recall that the typical user never interacts directly with PyTorch `Optimizers` when using Pyro; since parameters can be created dynamically in an arbitrary probabilistic program, Pyro needs to manage `Optimizers` for us. In our case saving the optimizer state will be as easy as calling `optimizer.save()`. The loading logic is entirely analogous. So our entire logic for saving and loading checkpoints only takes a few lines: ```python # saves the model and optimizer states to disk def save_checkpoint(): log("saving model to %s..." % args.save_model) torch.save(dmm.state_dict(), args.save_model) log("saving optimizer states to %s..." % args.save_opt) optimizer.save(args.save_opt) log("done saving model and optimizer checkpoints to disk.") # loads the model and optimizer states from disk def load_checkpoint(): assert exists(args.load_opt) and exists(args.load_model), \ "--load-model and/or --load-opt misspecified" log("loading model from %s..." % args.load_model) dmm.load_state_dict(torch.load(args.load_model)) log("loading optimizer states from %s..." % args.load_opt) optimizer.load(args.load_opt) log("done loading model and optimizer states.") ``` ## Some final comments A deep Markov model is a relatively complex model. Now that we've taken the effort to implement a version of the deep Markov model tailored to the polyphonic music dataset, we should ask ourselves what else we can do. What if we're handed a different sequential dataset? Do we have to start all over? Not at all! The beauty of probabilistic programming is that it enables&mdash;and encourages&mdash;modular approaches to modeling and inference. Adapting our polyphonic music model to a dataset with continuous observations is as simple as changing the observation likelihood. The vast majority of the code could be taken over unchanged. This means that with a little bit of extra work, the code in this tutorial could be repurposed to enable a huge variety of different models. See the complete code on [Github](https://github.com/pyro-ppl/pyro/blob/dev/examples/dmm/dmm.py). ## References [1] `Structured Inference Networks for Nonlinear State Space Models`,<br />&nbsp;&nbsp;&nbsp;&nbsp; Rahul G. Krishnan, Uri Shalit, David Sontag [2] `Variational Inference with Normalizing Flows`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Danilo Jimenez Rezende, Shakir Mohamed [3] `Improving Variational Inference with Inverse Autoregressive Flow`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling [4] `MADE: Masked Autoencoder for Distribution Estimation`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Mathieu Germain, Karol Gregor, Iain Murray, Hugo Larochelle [5] `Modeling Temporal Dependencies in High-Dimensional Sequences:` <br />&nbsp;&nbsp;&nbsp;&nbsp; `Application to Polyphonic Music Generation and Transcription`, <br />&nbsp;&nbsp;&nbsp;&nbsp; Boulanger-Lewandowski, N., Bengio, Y. and Vincent, P.
github_jupyter
``` !pip install --upgrade language-check import numpy as np import seaborn as sns import pandas as pd import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer,_preprocess,TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel,cosine_similarity from nltk.stem.snowball import SnowballStemmer from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import * from sklearn.model_selection import train_test_split data = pd.read_csv(r'data/medical_data.csv',low_memory=False) data = data.drop_duplicates().reset_index().drop('index',axis = 1) data punctuation='["\'?,\.]' # I will replace all these punctuation with '' abbr_dict={ "what's":"what is", "what're":"what are", "where's":"where is", "where're":"where are", "i'm":"i am", "we're":"we are", "it's":"it is", "that's":"that is", "there's":"there is", "there're":"there are", "i've":"i have", "who've":"who have", "would've":"would have", "not've":"not have", "i'll":"i will", "it'll":"it will", "isn't":"is not", "wasn't":"was not", "aren't":"are not", "weren't":"were not", "can't":"can not", "couldn't":"could not", "don't":"do not", "didn't":"did not", "shouldn't":"should not", "wouldn't":"would not", "doesn't":"does not", "haven't":"have not", "hasn't":"has not", "hadn't":"had not", "won't":"will not", punctuation:'', '\s+':' ', # replace multi space with one single space } def process_data(data): # Convert to lower case data.Phrase=data.Phrase.str.lower() data.Prompt=data.Prompt.str.lower() # convert to string data.Phrase=data.Phrase.astype(str) data.Prompt=data.Prompt.astype(str) # replace abbreviations data.replace(abbr_dict,regex=True,inplace=True) #apply stemming stemmer = SnowballStemmer("english") data['stemmed_phrase'] = data['Phrase'].apply(lambda x : ' '.join([stemmer.stem(y) for y in x.split()])) display(data.head(10)) return data data = process_data(data) d2 = data[['stemmed_phrase','Prompt']] d2.to_csv('data/trial_data.csv') ailments = data['Prompt'].unique() dict_ail = {} # for a in ailments: # dict_ail[a] = 0 for k in data.index: name = data['Prompt'][k] dict_ail[name] = dict_ail.get(name,0) + 1 ailment_dict = {} for i,k in enumerate(dict_ail.keys()): ailment_dict[i] = k plt.figure(figsize = (18,8)) plt.title("Ailment Frequencies",fontsize=35) plt.barh(color = 'Red',y=[i for i in range(len(list(ailments)))], width = list(dict_ail.values()),tick_label = list(dict_ail.keys())) plt.tight_layout() Cv = CountVectorizer(stop_words='english',ngram_range = (1,3), max_df=0.7) transformed_count = Cv.fit_transform(data['stemmed_phrase']) TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7) transformed_idf = TfIdf.fit_transform(data['stemmed_phrase']) input_text = ['I am experiencing pain in the leg from the past two days'] trial = TfIdf.transform(input_text) trial ``` ## Flow - Get the text input from the patient - This text input is processed first by the vectorizer and made into a list of frequeny counts using the learned vocabulary from the data provided - Now this list is passed into a model which generates the probabilities of which ailment does that sentence phrase correspond to - The final returned phrases are evaluated and the phrases having the least levenshtein distance are used for predictions - The two of the highest probability ailments are returned to the doctor with a wrapper sentence ## Output Tensor - We have a 25 element output vector which is the result from the model ``` ailment_dict ``` ## Input Tensor ``` # the query is first 
processed and made into lower case query = "From past few weeks feeling sad" def process_query(query): # Change to lower query = query.lower() # Removed abbreviations res = '' # print(query.split()) for k in query.split(): if k in abbr_dict: print(abbr_dict[k]) res+=' ' + abbr_dict[k] else: res+=' ' + k stemmer = SnowballStemmer('english') res = ' '.join([stemmer.stem(y) for y in res.split()]) return res print("Example query: ") print("Final query:",process_query(query)) processed = process_query(query) query =[processed] res = TfIdf.transform(query) sim = cosine_similarity(res,transformed_idf) res = list(np.argsort(sim))[0] res = res[::-1][:3] for k in res: print(data.loc[k]['Prompt']) def get_prediction(query): print("Query is :",query) processed = process_query(query) query = [processed] print("Processed :",query) res = TfIdf.transform(query) sim = cosine_similarity(res,transformed_idf) res = list(np.argsort(sim))[0] res = res[::-1][:20] print(sim[0][res[0]],sim[0][res[1]]) ailment =[] # let's find most similar sentences and then see # use levenshtein distance after you have got the result for k in res[:1]: ailment.append(data.loc[k]['Prompt']) print("Results :") return ailment ``` ## To - Do - Use document distance after you find the sentences to evaluate the best possible match for your query ``` for q in data['stemmed_phrase'][500:]: print(get_prediction(q)) ``` ## Use random forest ``` model = RandomForestClassifier(n_estimators=100,min_samples_leaf=2,bootstrap=True) ``` ## Generate Data first - First make a transformed matrix and associate each of the sentences with a numeric row and each prompt with a numeric dictionary value ``` data[:3] TfIdf = TfidfVectorizer(stop_words = 'english', ngram_range= (1,3),max_df= 0.7) X = TfIdf.fit_transform(data['stemmed_phrase']).toarray() ``` ## Generate the Y - Generate the class data ``` ailment_dict # ailment_dict ailment = {} for i,j in ailment_dict.items(): ailment[j] = i print(ailment) Y = data['Prompt'].map(ailment) Y ``` ## Got X and Y - Split in training and validation sets ``` X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size = 0.8, random_state = 43, shuffle = True) model.fit(X_train,Y_train) y_preds = model.predict(X_test) correct,incorrect =0,0 for k,i in zip(y_preds,Y_test): if(k==i): correct+=1 else: incorrect+=1 correct incorrect score =[] for est in range(10,50): model = RandomForestClassifier(n_estimators=est,min_samples_leaf=2) model.fit(X_train,Y_train) s = model.score(X_test,Y_test) score.append(s) plt.figure(figsize= (15,7)) plt.title("Accuracy of classification",fontsize=17) plt.xlabel("Number of estimators",fontsize = 14) plt.ylabel("Percentage",fontsize = 14) plt.plot([i for i in range(10,50)],score,color= 'red') ``` ## Now - Whenever you get a query, you need to transform it according to your vocabulary and then predict the class and then return the predicted class from model ``` def process_query(query): # Change to lower query = query.lower() # Removed abbreviations res = '' # print(query.split()) for k in query.split(): if k in abbr_dict: print(abbr_dict[k]) res+=' ' + abbr_dict[k] else: res+=' ' + k stemmer = SnowballStemmer('english') res = ' '.join([stemmer.stem(y) for y in res.split()]) return res # suppose I have the best model model = RandomForestClassifier(n_estimators=33,min_samples_leaf=2,bootstrap=True,max_features=300) model.fit(X_train,Y_train) for i,j in zip(X_test,Y_test): query = data.iloc['Phrase'][i] print("Query :",query) print("Original :",ailment_dict[j]) query = 
process_query(query) query = [query] #now transform the document according to the vectorizer query = TfIdf.transform(query) # now predict it pred = model.predict_proba(query) res = list(np.argsort(pred))[0] res = res[::-1][:3] for k in res: print(ailment_dict[k],end=',') print() ``` ## KNN ``` from sklearn.neighbors import KNeighborsClassifier score =[] for est in range(3,40): model = KNeighborsClassifier(n_neighbors=est,metric='minkowski') model.fit(X_train,Y_train) s = model.score(X_test,Y_test) score.append(s) plt.figure(figsize= (15,7)) plt.title("Accuracy of classification",fontsize=17) plt.xlabel("Number of neighbors",fontsize = 14) plt.ylabel("Percentage",fontsize = 14) plt.plot([i for i in range(3,40)],score,color= 'red') p = pd.DataFrame([[1,2],[2,3]],columns=['a','b']) p p = p.append([{'a':1,'b':23}],ignore_index=True) p.append([{'a':1,'b':2223}],ignore_index=True) ```
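The flow described earlier promises the two highest-probability ailments for a raw patient query, but the notebook stops just short of wiring those pieces together. Below is a minimal sketch of that final step; it assumes the fitted `TfIdf` vectorizer, the `process_query` helper, the `ailment_dict` mapping, and a fitted probabilistic classifier (for example the random forest trained above, passed in here as `clf`) are still in scope.

```
import numpy as np

def top_two_ailments(raw_query, clf):
    """Return the two most likely ailments for a raw patient query."""
    # clean, stem and vectorize the query exactly as the training phrases were
    vec = TfIdf.transform([process_query(raw_query)])
    probs = clf.predict_proba(vec)[0]
    # predict_proba columns follow clf.classes_, so map back through it
    best = np.argsort(probs)[::-1][:2]
    return [(ailment_dict[int(clf.classes_[i])], float(probs[i])) for i in best]

print(top_two_ailments("I am experiencing pain in the leg from the past two days", model))
```

The returned (ailment, probability) pairs can then be wrapped in a sentence for the doctor, as outlined in the flow section above.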
github_jupyter
##### Copyright 2020 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Basic training loops <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/basic_training_loops"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/basic_training_loops.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In the previous guides, you have learned about [tensors](./tensor.ipynb), [variables](./variable.ipynb), [gradient tape](autodiff.ipynb), and [modules](./intro_to_modules.ipynb). In this guide, you will fit these all together to train models. TensorFlow also includes the [tf.Keras API](https://www.tensorflow.org/guide/keras/overview), a high-level neural network API that provides useful abstractions to reduce boilerplate. However, in this guide, you will use basic classes. ## Setup ``` import tensorflow as tf ``` ## Solving machine learning problems Solving a machine learning problem usually consists of the following steps: - Obtain training data. - Define the model. - Define a loss function. - Run through the training data, calculating loss from the ideal value - Calculate gradients for that loss and use an *optimizer* to adjust the variables to fit the data. - Evaluate your results. For illustration purposes, in this guide you'll develop a simple linear model, $f(x) = x * W + b$, which has two variables: $W$ (weights) and $b$ (bias). This is the most basic of machine learning problems: Given $x$ and $y$, try to find the slope and offset of a line via [simple linear regression](https://en.wikipedia.org/wiki/Linear_regression#Simple_and_multiple_linear_regression). ## Data Supervised learning uses *inputs* (usually denoted as *x*) and *outputs* (denoted *y*, often called *labels*). The goal is to learn from paired inputs and outputs so that you can predict the value of an output from an input. Each input of your data, in TensorFlow, is almost always represented by a tensor, and is often a vector. In supervised training, the output (or value you'd like to predict) is also a tensor. Here is some data synthesized by adding Gaussian (Normal) noise to points along a line. 
``` # The actual line TRUE_W = 3.0 TRUE_B = 2.0 NUM_EXAMPLES = 1000 # A vector of random x values x = tf.random.normal(shape=[NUM_EXAMPLES]) # Generate some noise noise = tf.random.normal(shape=[NUM_EXAMPLES]) # Calculate y y = x * TRUE_W + TRUE_B + noise # Plot all the data import matplotlib.pyplot as plt plt.scatter(x, y, c="b") plt.show() ``` Tensors are usually gathered together in *batches*, or groups of inputs and outputs stacked together. Batching can confer some training benefits and works well with accelerators and vectorized computation. Given how small this dataset is, you can treat the entire dataset as a single batch. ## Define the model Use `tf.Variable` to represent all weights in a model. A `tf.Variable` stores a value and provides this in tensor form as needed. See the [variable guide](./variable.ipynb) for more details. Use `tf.Module` to encapsulate the variables and the computation. You could use any Python object, but this way it can be easily saved. Here, you define both *w* and *b* as variables. ``` class MyModel(tf.Module): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.w * x + self.b model = MyModel() # List the variables tf.modules's built-in variable aggregation. print("Variables:", model.variables) # Verify the model works assert model(3.0).numpy() == 15.0 ``` The initial variables are set here in a fixed way, but Keras comes with any of a number of [initalizers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) you could use, with or without the rest of Keras. ### Define a loss function A loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. Define the standard L2 loss, also known as the "mean squared" error: ``` # This computes a single loss value for an entire batch def loss(target_y, predicted_y): return tf.reduce_mean(tf.square(target_y - predicted_y)) ``` Before training the model, you can visualize the loss value by plotting the model's predictions in red and the training data in blue: ``` plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(y, model(x)).numpy()) ``` ### Define a training loop The training loop consists of repeatedly doing three tasks in order: * Sending a batch of inputs through the model to generate outputs * Calculating the loss by comparing the outputs to the output (or label) * Using gradient tape to find the gradients * Optimizing the variables with those gradients For this example, you can train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.keras.optimizers`. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`): ``` # Given a callable model, inputs, outputs, and a learning rate... 
def train(model, x, y, learning_rate): with tf.GradientTape() as t: # Trainable variables are automatically tracked by GradientTape current_loss = loss(y, model(x)) # Use GradientTape to calculate the gradients with respect to W and b dw, db = t.gradient(current_loss, [model.w, model.b]) # Subtract the gradient scaled by the learning rate model.w.assign_sub(learning_rate * dw) model.b.assign_sub(learning_rate * db) ``` For a look at training, you can send the same batch of *x* and *y* through the training loop, and see how `W` and `b` evolve. ``` model = MyModel() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) # Define a training loop def training_loop(model, x, y): for epoch in epochs: # Update the model with the single giant batch train(model, x, y, learning_rate=0.1) # Track this before I update Ws.append(model.w.numpy()) bs.append(model.b.numpy()) current_loss = loss(y, model(x)) print("Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f" % (epoch, Ws[-1], bs[-1], current_loss)) print("Starting: W=%1.2f b=%1.2f, loss=%2.5f" % (model.w, model.b, loss(y, model(x)))) # Do the training training_loop(model, x, y) # Plot it plt.plot(epochs, Ws, "r", epochs, bs, "b") plt.plot([TRUE_W] * len(epochs), "r--", [TRUE_B] * len(epochs), "b--") plt.legend(["W", "b", "True W", "True b"]) plt.show() # Visualize how the trained model performs plt.scatter(x, y, c="b") plt.scatter(x, model(x), c="r") plt.show() print("Current loss: %1.6f" % loss(model(x), y).numpy()) ``` ## The same solution, but with Keras It's useful to contrast the code above with the equivalent in Keras. Defining the model looks exactly the same if you subclass `tf.keras.Model`. Remember that Keras models inherit ultimately from module. ``` class MyModelKeras(tf.keras.Model): def __init__(self, **kwargs): super().__init__(**kwargs) # Initialize the weights to `5.0` and the bias to `0.0` # In practice, these should be randomly initialized self.w = tf.Variable(5.0) self.b = tf.Variable(0.0) def call(self, x): return self.w * x + self.b keras_model = MyModelKeras() # Reuse the training loop with a Keras model training_loop(keras_model, x, y) # You can also save a checkpoint using Keras's built-in support keras_model.save_weights("my_checkpoint") ``` Rather than write new training loops each time you create a model, you can use the built-in features of Keras as a shortcut. This can be useful when you do not want to write or debug Python training loops. If you do, you will need to use `model.compile()` to set the parameters, and `model.fit()` to train. It can be less code to use Keras implementations of L2 loss and gradient descent, again as a shortcut. Keras losses and optimizers can be used outside of these convenience functions, too, and the previous example could have used them. ``` keras_model = MyModelKeras() # compile sets the training parameters keras_model.compile( # By default, fit() uses tf.function(). You can # turn that off for debugging, but it is on now. run_eagerly=False, # Using a built-in optimizer, configuring as an object optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), # Keras comes with built-in MSE error # However, you could use the loss function # defined above loss=tf.keras.losses.mean_squared_error, ) ``` Keras `fit` expects batched data or a complete dataset as a NumPy array. NumPy arrays are chopped into batches and default to a batch size of 32. In this case, to match the behavior of the hand-written loop, you should pass `x` in as a single batch of size 1000. 
``` print(x.shape[0]) keras_model.fit(x, y, epochs=10, batch_size=1000) ``` Note that Keras prints out the loss after training, not before, so the first loss appears lower, but otherwise this shows essentially the same training performance. ## Next steps In this guide, you have seen how to use the core classes of tensors, variables, modules, and gradient tape to build and train a model, and further how those ideas map to Keras. This is, however, an extremely simple problem. For a more practical introduction, see [Custom training walkthrough](../tutorials/customization/custom_training_walkthrough.ipynb). For more on using built-in Keras training loops, see [this guide](https://www.tensorflow.org/guide/keras/train_and_evaluate). For more on training loops and Keras, see [this guide](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch). For writing custom distributed training loops, see [this guide](distributed_training.ipynb#using_tfdistributestrategy_with_basic_training_loops_loops).
github_jupyter
``` import os import numpy as np import pandas as pd import json import pickle from scipy import sparse import scipy.io dataset_name = 'dblp' data_path = os.path.join('../dataset/raw/{}'.format(dataset_name)) citations = [] incomming = {} for i in range(4): fn = os.path.join(data_path, 'dblp-ref-{}.json'.format(i)) with open(fn) as in_fn: for line in in_fn: paper = json.loads(line.strip()) citations.append(paper) if 'references' in paper: for ref_id in paper['references']: if ref_id in incomming: incomming[ref_id].append(paper['id']) else: incomming[ref_id] = [paper['id']] df = pd.DataFrame(citations) is_first_line = True conferences = {} with open('../dataset/clean/dblp/venue_info.tsv') as in_csv: for line in in_csv: tokens = line.strip().split('\t') if is_first_line: #print(tokens) is_first_line = False else: conf_name = tokens[0] labels = [int(num_str) for num_str in tokens[2].split(',')] labels = [n-2 for n in labels if n > 1] # remove the first label (signal processing has too many documents) conferences[conf_name] = {'name': conf_name, 'label': labels} #conferences[conf_name] = {'name': conf_name, } max_labels = np.max([np.max(val['label']) for key, val in conferences.items()]) min_labels = np.min([np.min(val['label']) for key, val in conferences.items()]) num_labels = max_labels - min_labels + 1 print('label min:{} max:{} total:{}'.format(min_labels, max_labels, num_labels)) # remove any row that is not present in the selected venues def is_selected_venue(row): return (row in conferences) print("num paper (before): {}".format(len(df))) df = df[df.venue.apply(is_selected_venue)] print("num paper (after): {}".format(len(df))) cut_off_years = 2016 df_train = df[df.year < cut_off_years] df_test = df[df.year >= cut_off_years] num_trains = len(df_train) num_tests = len(df_test) print("num trains: {} num tests: {} ratio: {:.4f}".format(num_trains, num_tests, num_tests / num_trains)) #venue_count = df_train.groupby('venue').count().sort_values(['abstract'], ascending=False).abstract def assign_labels(venue): label_list = conferences[venue]['label'] return np.sum(np.eye(num_labels)[label_list], axis=0).astype(np.int) df_train = df_train.copy() df_train['label'] = df_train.venue.apply(assign_labels) df_train.set_index('id', inplace=True) # set paper as the row index df_test = df_test.copy() df_test['label'] = df_test.venue.apply(assign_labels) df_test.set_index('id', inplace=True) # set paper as the row index num_train_doc_per_labels = np.sum(np.array(list(df_train.label)), axis=0) num_test_doc_per_labels = np.sum(np.array(list(df_test.label)), axis=0) print(num_train_doc_per_labels) print(num_test_doc_per_labels) # remove any row that does not have abstract, title, paperId, or venue print("num paper = {}".format(len(df_train))) df_train.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True) print("num paper = {}".format(len(df_train))) # This method adds incoming edges to each node as well as removing any edge that points outside the train set def createEdges(row): if row.references is not np.nan: outgoing_edges = [r for r in row.references if r in df_train.index] else: outgoing_edges = [] if row.name in incomming: incomming_edges = [r for r in incomming[row.name] if r in df_train.index] else: incomming_edges = [] return outgoing_edges + incomming_edges df_train['links'] = df_train.apply(createEdges, axis=1) # Remove any row that has no link print("num paper = {}".format(len(df_train))) df_train = df_train[df_train.links.apply(len) > 0] print("num paper = 
{}".format(len(df_train))) # There must be no train nodes that references to non-train nodes def count_invalid_edges(refs): return len([r for r in refs if r not in df_train.index]) assert(len(df_train[df_train.links.apply(count_invalid_edges) > 0]) == 0) global_id_2_train_id = {node_id: idx for idx, node_id in enumerate(df_train.index)} def convert_2_train_id(ref): return [global_id_2_train_id[r] for r in ref] train_edges = df_train.links.apply(convert_2_train_id) train_graph = {} for node_id, value in train_edges.iteritems(): train_graph[global_id_2_train_id[node_id]] = value print('num train: {}'.format(len(train_graph))) ``` # Process Test Data ``` # remove any row that does not have abstract, title, paperId, or venue print("num paper = {}".format(len(df_test))) df_test.dropna(axis=0, subset=['abstract', 'venue', 'year', 'label'], inplace=True) print("num paper = {}".format(len(df_test))) # This method adds incoming edges to each node as well as removing any edge that points outside the train set def createEdges(row): if row.references is not np.nan: outgoing_edges = [r for r in row.references if r in df_train.index] else: outgoing_edges = [] if row.name in incomming: incomming_edges = [r for r in incomming[row.name] if r in df_train.index] else: incomming_edges = [] return outgoing_edges + incomming_edges df_test['links'] = df_test.apply(createEdges, axis=1) # Remove any row that has no link print("num paper = {}".format(len(df_test))) df_test = df_test[df_test.links.apply(len) > 0] print("num paper = {}".format(len(df_test))) # There must be no train nodes that references to non-train nodes def count_invalid_edges(refs): return len([r for r in refs if r not in df_train.index]) assert(len(df_test[df_test.links.apply(count_invalid_edges) > 0]) == 0) global_id_2_test_id = {node_id: idx for idx, node_id in enumerate(df_test.index)} # each link MUST point to the train nodes test_edges = df_test.links.apply(convert_2_train_id) test_graph = {} for node_id, value in test_edges.iteritems(): test_graph[global_id_2_test_id[node_id]] = value print('num test: {}'.format(len(test_graph))) ``` # Save Graph Data ``` data_path = '../dataset/clean/dblp' save_fn = os.path.join(data_path, 'ind.{}.train.graph.pk'.format(dataset_name)) pickle.dump(train_graph, open(save_fn, 'wb')) print('save graph data to {}'.format(save_fn)) save_fn = os.path.join(data_path, 'ind.{}.test.graph.pk'.format(dataset_name)) pickle.dump(test_graph, open(save_fn, 'wb')) print('save graph data to {}'.format(save_fn)) ``` # Process contents ``` from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(stop_words='english', max_df=0.8, min_df=5, sublinear_tf=True, max_features=10000) train_feas = vectorizer.fit_transform(list(df_train.abstract)) print(np.nonzero(np.sum(train_feas, axis=1))[0].shape) test_feas = vectorizer.transform(list(df_test.abstract)) print(np.nonzero(np.sum(test_feas, axis=1))[0].shape) gnd_train = sparse.csr_matrix(np.array(list(df_train.label))) gnd_test = sparse.csr_matrix(np.array(list(df_test.label))) assert(train_feas.shape[1] == test_feas.shape[1]) assert(gnd_train.shape[1] == gnd_test.shape[1]) assert(train_feas.shape[0] == gnd_train.shape[0]) assert(test_feas.shape[0] == gnd_test.shape[0]) data_path = '../dataset/clean/dblp' save_fn = os.path.join(data_path, 'ind.{}.mat'.format(dataset_name)) scipy.io.savemat(save_fn, mdict={'train': train_feas, 'test': test_feas, 'cv': test_feas, 'gnd_train': gnd_train, 'gnd_test': gnd_test, 'gnd_cv': gnd_test}) print('save data to 
{}'.format(save_fn)) ``` # Convert to dataframe with the format as doc_id, bow, label, and neighbors ``` # create a connection matrix n_train = train_feas.shape[0] row = [] col = [] for doc_id in train_graph: row += [doc_id] * len(train_graph[doc_id]) col += train_graph[doc_id] data = [1] * len(row) train_connections = sparse.csr_matrix((data, (row, col)), shape=(n_train, n_train)) n_test = test_feas.shape[0] row = [] col = [] for doc_id in test_graph: row += [doc_id] * len(test_graph[doc_id]) col += test_graph[doc_id] data = [1] * len(row) test_connections = sparse.csr_matrix((data, (row, col)), shape=(n_test, n_train)) # test graph points to train graph from tqdm import tqdm save_dir = os.path.join('../dataset/clean', dataset_name) ########################################################################################## train = [] for doc_id in tqdm(train_graph): doc = {'doc_id': doc_id, 'bow': train_feas[doc_id], 'label': gnd_train[doc_id], 'neighbors': train_connections[doc_id]} train.append(doc) train_df = pd.DataFrame.from_dict(train) train_df.set_index('doc_id', inplace=True) fn = os.path.join(save_dir, '{}.train.pkl'.format(dataset_name)) train_df.to_pickle(fn) ########################################################################################## test = [] for doc_id in tqdm(test_graph): doc = {'doc_id': doc_id, 'bow': test_feas[doc_id], 'label': gnd_test[doc_id], 'neighbors': test_connections[doc_id]} test.append(doc) test_df = pd.DataFrame.from_dict(test) test_df.set_index('doc_id', inplace=True) fn = os.path.join(save_dir, '{}.test.pkl'.format(dataset_name)) test_df.to_pickle(fn) ```
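As a quick, purely illustrative sanity check (not part of the original pipeline), the pickled training dataframe can be reloaded to confirm that every row carries the expected sparse entries; this assumes `pd`, `os`, `save_dir`, and `dataset_name` from the cells above are still in scope.

```
# reload the pickled training dataframe and inspect the first document
reloaded = pd.read_pickle(os.path.join(save_dir, '{}.train.pkl'.format(dataset_name)))
print('{} training documents'.format(len(reloaded)))

first = reloaded.iloc[0]
print('bow:      ', first['bow'].shape)        # (1, vocabulary size), sparse row
print('label:    ', first['label'].shape)      # (1, number of labels), sparse row
print('neighbors:', first['neighbors'].shape)  # (1, number of training docs), sparse row
```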
github_jupyter
# Introduction to PyCaret - An open source low-code ML library ## This notebook consists 2 parts - Classification part using Titanic DataSet - Regression part using House Price Regression DataSet ![](https://pycaret.org/wp-content/uploads/2020/03/Divi93_43.png) You can reach pycaret website and documentation from https://pycaret.org PyCaret is an open source, low-code machine learning library in Python that allows you to go from preparing your data to deploying your model within seconds in your choice of notebook environment. PyCaret being a low-code library makes you more productive. With less time spent coding, you and your team can now focus on business problems. PyCaret is simple and easy to use machine learning library that will help you to perform end-to-end ML experiments with less lines of code. PyCaret is a business ready solution. It allows you to do prototyping quickly and efficiently from your choice of notebook environment. # let's install pycaret ! ``` !pip install pycaret ``` # Part 1 Classification ![](https://www.sciencealert.com/images/articles/processed/titanic-1_1024.jpg) # We start by loading the libraries ``` import numpy as np import pandas as pd ``` # Read our files ``` train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv') sub = pd.read_csv('../input/titanic/gender_submission.csv') ``` # Import whole classification ``` from pycaret.classification import * ``` # let's see what we're dealing with ``` train.head() train.info() ``` # Set up our dataset (preprocessing) ``` clf1 = setup(data = train, target = 'Survived', numeric_imputation = 'mean', categorical_features = ['Sex','Embarked'], ignore_features = ['Name','Ticket','Cabin'], silent = True) #quite intuitive isn't it ? ``` # Compare the models ``` compare_models() ``` # let's create a Light GBM Model ``` lgbm = create_model('lightgbm') ``` # Let's tune it! ``` tuned_lightgbm = tune_model('lightgbm') ``` # Learning Curve ``` plot_model(estimator = tuned_lightgbm, plot = 'learning') ``` # AUC Curve ``` plot_model(estimator = tuned_lightgbm, plot = 'auc') ``` # Confusion Matrix ``` plot_model(estimator = tuned_lightgbm, plot = 'confusion_matrix') ``` # Feature Importance ``` plot_model(estimator = tuned_lightgbm, plot = 'feature') ``` # whole thing! ``` evaluate_model(tuned_lightgbm) ``` # Interpretation ``` interpret_model(tuned_lightgbm) ``` # Predictions ``` predict_model(tuned_lightgbm, data=test) predictions = predict_model(tuned_lightgbm, data=test) predictions.head() sub['Survived'] = round(predictions['Score']).astype(int) sub.to_csv('submission.csv',index=False) sub.head() ``` # Extra: Blending made easy! 
``` logr = create_model('lr'); xgb = create_model('xgboost'); #blending 3 models blend = blend_models(estimator_list=[tuned_lightgbm,logr,xgb]) ``` # Part2 - Regression ![](https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcSYeyNpaoAW-3rFX9-ORmiJ-uLAAswYBRhszs2QzllV7MCfFPvk&usqp=CAU) # Import Whole Regression ``` from pycaret.regression import * ``` # let's see the data ``` train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') sample= pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv') train.head() train.info() ``` # Set up our dataset (preprocessing) ``` reg = setup(data = train, target = 'SalePrice', numeric_imputation = 'mean', categorical_features = ['MSZoning','Exterior1st','Exterior2nd','KitchenQual','Functional','SaleType', 'Street','LotShape','LandContour','LotConfig','LandSlope','Neighborhood', 'Condition1','Condition2','BldgType','HouseStyle','RoofStyle','RoofMatl', 'MasVnrType','ExterQual','ExterCond','Foundation','BsmtQual','BsmtCond', 'BsmtExposure','BsmtFinType1','BsmtFinType2','Heating','HeatingQC','CentralAir', 'Electrical','GarageType','GarageFinish','GarageQual','GarageCond','PavedDrive', 'SaleCondition'] , ignore_features = ['Alley','PoolQC','MiscFeature','Fence','FireplaceQu','Utilities'], normalize = True, silent = True) ``` # let's compare different regression models! ``` compare_models() ``` # let's do CatBoost ``` cb = create_model('catboost') ``` # gotta tune it ``` tuned_cb = tune_model('catboost') ``` # SHAP Values (impact on model output) ``` interpret_model(tuned_cb) predictions = predict_model(tuned_cb, data = test) sample['SalePrice'] = predictions['Label'] sample.to_csv('submission_house_price.csv',index=False) sample.head() ``` # thank you very much for checking my notebook!
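The notebook ends with the submission file, but if the tuned CatBoost pipeline is meant to be reused later, PyCaret also ships persistence helpers. The sketch below is an optional extra step and assumes `finalize_model`, `save_model`, and `load_model` from `pycaret.regression` behave as in released versions of the library.

```
# retrain the tuned pipeline on the complete dataset (including the hold-out split)
final_cb = finalize_model(tuned_cb)

# persist the preprocessing pipeline together with the model ...
save_model(final_cb, 'final_catboost_house_prices')

# ... and load it back later to score new data
loaded_cb = load_model('final_catboost_house_prices')
new_predictions = predict_model(loaded_cb, data=test)
```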
github_jupyter
## Boxplot plots _______ tg: @misha_grol and [email protected] Boxplots for features based on DEM and NDVI ``` # Uncomment for Google colab # !pip install maxvolpy # !pip install clhs # !git clone https://github.com/EDSEL-skoltech/maxvol_sampling # %cd maxvol_sampling/ import csv import seaborn as sns import argparse import numpy as np import osgeo.gdal as gdal import os import pandas as pd import matplotlib.cm as cm import matplotlib.pyplot as plt from numpy import genfromtxt import gdal import xarray as xr import clhs as cl from scipy.spatial import ConvexHull, convex_hull_plot_2d from scipy.spatial import voronoi_plot_2d, Voronoi from scipy.spatial import distance from scipy.stats import entropy from scipy.special import kl_div from scipy.stats import ks_2samp from scipy.stats import wasserstein_distance %matplotlib inline from src.util import MaxVolSampling # Uncoment "Times New Roman" and "science" stule plt if you have it # plt.rcParams["font.family"] = "Times New Roman" plt.rcParams.update({'font.size': 16}) #use science style for plots # plt.style.use(['science', 'grid']) plt.rcParams['xtick.labelsize'] = 15 plt.rcParams['ytick.labelsize'] = 20 ``` ## Interpolation plots ``` import matplotlib.pyplot as plt from matplotlib import gridspec from tqdm.notebook import tqdm from scipy.stats import ks_2samp dict_for_dict_wasserstein = {} csv_file_to_process = './src/data_v0.csv' df_name = list(pd.read_csv(csv_file_to_process, sep=',').columns) soil_parameters = df_name path_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/' np.random.seed(42) units = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] interpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/')) path = './experiments/cLHS_million_steps' for index, file in enumerate(interpolation_files): list_to_test_zeros = [] print('Parameter:', file) df_for_plots = pd.DataFrame(columns=['Sampling', 'Points', 'Value']) dict_for_parameter = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_wasserstein = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_plots = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} number_of_points = [10,15,20,25,30] from itertools import compress list_of_cLHS_million_runs = sorted(os.listdir('./experiments/cLHS_million_steps')) selection = ['NDVI' in name for name in list_of_cLHS_million_runs] cLHS_points_files = list(compress(list_of_cLHS_million_runs, selection)) for num_points, csv_file in zip(number_of_points, cLHS_points_files): dict_for_parameter['cLHS'][num_points] = np.genfromtxt(os.path.join(path, csv_file),delimiter=',', dtype=int) SAR = MaxVolSampling() SAR.soil_feature = soil_parameters[index] SAR.num_of_points = num_points SAR.soil_data = pd.read_csv('./src/data_v0.csv', sep=',') SAR.path_to_file_with_indices = None SAR.wd = './DEM_files/' SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, file) _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None) SAR.original_soil_data(SAR.soil_feature) #data from interpolation interpolation_map = SAR.interpolation_array #Points selection by MAXVOL MAXVOL = interpolation_map[SAR.i_am_maxvol_function()] print for value in MAXVOL: df_for_plots.loc[len(df_for_plots)]=['MAXVOL', num_points, value] cLHS = interpolation_map[dict_for_parameter['cLHS'][num_points]] for value in 
cLHS: df_for_plots.loc[len(df_for_plots)]=['cLHS', num_points, value] RANDOM = interpolation_map[SAR.i_am_random()] for value in RANDOM: df_for_plots.loc[len(df_for_plots)]=['Random', num_points, value] #original distribution df_original = pd.DataFrame(data={'Points':[51]*len(SAR.original_data), 'Value':SAR.original_data}) fig = plt.figure(figsize=(18,18)) gs = gridspec.GridSpec(4, 5, wspace=.25) ax_1 = fig.add_subplot(gs[:,:4]) ax_2 = fig.add_subplot(gs[:,4]) sns.boxplot(ax = ax_1, x="Points", y="Value", hue="Sampling", palette=["#1F77B4", "#2CA02C", "#FF7F0E"], data=df_for_plots, width=0.8) sns.boxplot(ax = ax_2, x='Points', y="Value", palette=["#CCCCCC"], data=df_original, width=0.25) fig.set_figwidth(16) fig.set_figheight(7) ax_2.set_xticklabels([]) ax_2.set_ylabel('') ax_2.set_xlabel('') ax_2.grid(True) ax_1.set_xticklabels([]) ax_1.set_xlabel('') ax_1.set_ylabel(units[index], fontsize = 17) ax_1.axhline(np.quantile(SAR.original_data, 0.25), color='grey', linestyle='--',zorder=0) ax_1.axhline(np.quantile(SAR.original_data, 0.50), color='grey', linestyle='--',zorder=0) ax_1.axhline(np.quantile(SAR.original_data, 0.75), color='grey', linestyle='--',zorder=0) ax_1.get_shared_y_axes().join(ax_1, ax_2) ax_1.get_legend().remove() ax_1.grid(True) ax_2.set_yticklabels([]) # plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.svg') # plt.savefig('../plots/agricultural_systems_plots/boxplots_interpolation/'+str(soil_parameters[index])+'boxplot.png', dpi=300) plt.show() # break ``` ## Plots of Wasserstein distance evolution ``` fig, ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)) = plt.subplots(nrows=5, ncols=2, sharex=True,figsize=(18, 25)) names_for_plots = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %', 'Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa', 'Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C', 'Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] path = './experiments/cLHS_10_000/exp_fem_poins/npy_files/' files_with_points = os.listdir(path) range_files_allocation=[] for file in files_with_points: range_files_allocation.append(np.load(os.path.join(path,file), allow_pickle=True)[None]) res = np.load(os.path.join(path,file), allow_pickle=True) dict_for_indices = {'MAXVOL':[], 'cLHS':[], 'Random':[]} from collections import ChainMap for sampling in [*range_files_allocation[0][0].keys()]: loc_list = [dict(loc_dict[0][sampling]) for loc_dict in range_files_allocation] dict_for_indices[sampling] = dict(ChainMap(*loc_list)) n = 0 number_of_points = range(7,31) csv_file_to_process = './src/data_v0.csv' for row in ((ax0, ax1), (ax2, ax3), (ax4, ax5), (ax6, ax7),(ax8, ax9)): for col in row: # COMPUTE WASSERSTEIN DISTANCE df_name = list(pd.read_csv(csv_file_to_process, sep=',').columns) soil_parameters = df_name path_to_inter_npy_files = './experiments/cLHS_10_000/Interpolation_data/' np.random.seed(42) units = ['Soil moisture 10 cm, %','Soil moisture 30 cm, %','Soil moisture 80 cm, %','Mean crop yield, c/ha', 'Penetration resistance 10 cm, kPa','Penetration resistance 30 cm, kPa','Penetration resistance 80 cm, kPa', 'Soil Temperature 10 cm, °C','Soil Temperature 30 cm, °C','Soil Temperature 80 cm, °C'] interpolation_files = sorted(os.listdir('./experiments/cLHS_10_000/Interpolation_data/')) print('Parameter:', interpolation_files[n]) dict_for_plots = {'MAXVOL':{}, 'cLHS':{}, 'Random':{}} dict_for_new_maxvol = {'MAXVOL_NEW': {}} 
for points in number_of_points: SAR = MaxVolSampling() SAR.soil_feature = soil_parameters[n] SAR.num_of_points = points SAR.soil_data = pd.read_csv(csv_file_to_process, sep=',') SAR.path_to_file_with_indices = None SAR.wd = './DEM_files//' SAR.path_to_interpolation_file = os.path.join(path_to_inter_npy_files, interpolation_files[n]) _ =SAR.data_preparation(SAR.wd, data_m=3, dem_dir = None) SAR.original_soil_data(SAR.soil_feature) interpolation_map = SAR.interpolation_array[::-1] MAXVOL_ = interpolation_map[SAR.i_am_maxvol_function()] # List to iterate over 100 realization of cLHS and Random cLHS_ = [interpolation_map[dict_for_indices['cLHS'][points][i]] for i in range(100)] Random_ = [interpolation_map[dict_for_indices['Random'][points][i]] for i in range(100)] dict_for_plots['MAXVOL'][points] = wasserstein_distance(SAR.original_data, MAXVOL_) dict_for_plots['cLHS'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in cLHS_] dict_for_plots['Random'][points] = [wasserstein_distance(SAR.original_data, mdt) for mdt in Random_] quantile_lower_random = np.array([np.quantile(dict_for_plots['Random'][i], .10) for i in number_of_points]) quantile_upper_random = np.array([np.quantile(dict_for_plots['Random'][i], .90) for i in number_of_points]) median_random = np.array([np.median(dict_for_plots['Random'][i]) for i in number_of_points]) quantile_lower_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .10) for i in number_of_points]) quantile_upper_cLHS = np.array([np.quantile(dict_for_plots['cLHS'][i], .90) for i in number_of_points]) median_cLHS = np.array([np.median(dict_for_plots['cLHS'][i]) for i in number_of_points]) col.plot(number_of_points, [*dict_for_plots['MAXVOL'].values()], '-.',label='Maxvol',linewidth=4,markersize=10 ) col.plot(number_of_points, median_random, label='Random median',linewidth=3,markersize=10 ) col.plot(number_of_points, median_cLHS,'--',label='cLHS median',linewidth=3,markersize=14) col.fill_between(number_of_points, quantile_lower_random, quantile_upper_random , alpha=0.1, color='orange', label='CI Random') col.fill_between(number_of_points, quantile_lower_cLHS, quantile_upper_cLHS , alpha=0.1, color='green', label='CI cLHS') col.set_xlim(min(number_of_points), max(number_of_points)) # col.set_xticks(number_of_points) col.set_title(names_for_plots[n]) col.grid(True) col.set(ylabel="Wasserstein distance") if n==8 or n==9: col.set(xlabel="Number of points for sampling", ylabel="Wasserstein distance") # plt.show() n+=1 # plt.legend() # plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/wasserstein_disctance_IQR.png', dpi=300) # plt.savefig('../plots/agricultural_systems_plots/plots_with_evolution_of_wassersterin/nwasserstein_disctance_IQR.svg') ```
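As a quick aside, the quantity driving these plots is the one-dimensional Wasserstein (earth mover's) distance between the values at the sampled points and the full interpolated field; the import lives outside this excerpt, but it is presumably `scipy.stats.wasserstein_distance`. The minimal sketch below uses synthetic normal data and arbitrary subsample sizes (not the soil maps above) just to show the mechanics: larger, better-placed subsamples push the distance toward zero, which is what the MAXVOL/cLHS/Random curves compare.

```
# Toy illustration of the distance used above (synthetic data, not the soil fields).
import numpy as np
from scipy.stats import wasserstein_distance

rng = np.random.default_rng(42)
full_field = rng.normal(loc=20.0, scale=4.0, size=10_000)  # stand-in for an interpolated map

for n_points in (7, 15, 30):
    subsample = rng.choice(full_field, size=n_points, replace=False)
    print(n_points, wasserstein_distance(full_field, subsample))
```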
``` from tensorflow.python.keras import backend as K from tensorflow.python.keras.applications.resnet50 import ResNet50, preprocess_input from tensorflow.python.keras.preprocessing import image from tensorflow.python.keras.layers import Conv2D, GlobalAveragePooling2D, Input, Dropout, Dense from tensorflow.python.keras.utils import to_categorical from tensorflow.python.keras.models import Model from tensorflow.python.keras.datasets import cifar10 from tensorflow.python.keras.callbacks import Callback, TensorBoard from tensorflow.python.keras.backend import set_session from tensorflow.python.keras.models import load_model import tensorflow as tf from sklearn.model_selection import train_test_split from tqdm import tqdm from collections import defaultdict from matplotlib.pyplot import imshow from PIL import Image import datetime import numpy as np import os, glob, io import base64 %matplotlib inline sess = tf.Session() graph = tf.get_default_graph() set_session(sess) batch_size = 32 test_train_split = 0.2 max_epoch = 1 dropout_prob = 0.3 shape = (224, 224) train_size_per_label = 500 test_size_per_label = 100 test_train_split=0.2 image_path = "/Users/adammenges/Development/notebooks/basicClassifier/houses_120px_classes" def resize(arr, shape): return np.array(Image.fromarray(arr).resize(shape)) def decode_img(msg): # msg = msg[msg.find(b"<plain_txt_msg:img>")+len(b"<plain_txt_msg:img>"): # msg.find(b"<!plain_txt_msg>")] msg = base64.b64decode(msg) buf = io.BytesIO(msg) img = Image.open(buf) return img def preprocess(arr, shape=(224, 224)): arr = np.array([resize(arr[i], shape) for i in range(0, len(arr))]).astype('float32') arr = preprocess_input(arr) return arr def get_local_images(): classes = os.listdir(image_path) input_arr = [] target_labels = [] for class_idx in range(len(classes)): paths = glob.glob(os.path.join(image_path, classes[class_idx]) + "/*.png") for img_path in tqdm(paths, desc=f'Processing label {classes[class_idx]}: '): img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) # x = np.expand_dims(x, axis=0) x = preprocess_input(x) target_labels.append(class_idx) input_arr.append(x) X_train, X_test, y_train, y_test = train_test_split(input_arr, target_labels, test_size=test_train_split) X_train = np.array(X_train) X_test = np.array(X_test) y_train = np.array(y_train) y_test = np.array(y_test) return X_train, X_test, y_train, y_test, classes def get_cifar10(): (input_train, out_train), (input_test, out_test) = cifar10.load_data() return input_train, input_test, out_train, out_test, range(10) def get_resnet50(shape=(224, 224, 3)): return ResNet50(weights='imagenet', include_top=False, input_shape=shape) def restrain_data(input_train, out_train, input_test, out_test, num_class, num_train, num_test, shape=(224, 224)): train_dict = defaultdict(list) test_dict = defaultdict(list) [train_dict[out_train[idx][0]].append(input_train[idx]) for idx in range(input_train.shape[0])] [test_dict[out_test[idx][0]].append(input_test[idx]) for idx in range(input_test.shape[0])] restrain_class = range(num_class) restrain_train = [[train_dict[i][idx], i] for idx in range(num_train) for i in restrain_class] restrain_test = [[test_dict[i][idx], i] for idx in range(num_test) for i in restrain_class] rand_train_idx = np.random.choice(num_train * num_class, num_train * num_class) rand_test_idx = np.random.choice(num_test * num_class, num_test * num_class) i_train = np.array([restrain_train[idx][0] for idx in rand_train_idx]) o_train = np.array([[restrain_train[idx][1]] for idx 
in rand_train_idx]) i_test = np.array([restrain_test[idx][0] for idx in rand_test_idx]) o_test = np.array([[restrain_test[idx][1]] for idx in rand_test_idx]) i_train = preprocess(i_train, shape=shape) i_test = preprocess(i_test, shape=shape) return i_train, i_test, o_train, o_test, restrain_class input_train, input_test, out_train, out_test, classes = get_local_images() input_test.shape x = get_cifar10() x[0].shape # input_train, input_test, out_train, out_test, classes = restrain_data( # input_train, # out_train, # input_test, # out_test, # len(classes), # train_size_per_label, # test_size_per_label) # input_train = preprocess(input_train, shape=shape) # input_test = preprocess(input_test, shape=shape) total_train_steps = len(input_train) // batch_size out_train = to_categorical(out_train, len(classes)) out_test = to_categorical(out_test, len(classes)) def batch_generator(x, y, batch_size=32): while True: for step in range(len(x) // batch_size): yield x[step*batch_size:(step+1)*batch_size, ...], y[step*batch_size:(step+1)*batch_size, ...] class RecordAccuracy(Callback): def on_epoch_begin(self, epoch, logs=None): print(f'Running epoch {epoch}. Total {total_train_steps} batches') def on_batch_end(self, batch, logs=None): loss = logs['loss'] if not batch % 10: print(f'Running batch {batch}: train loss - {loss}') def on_epoch_end(self, epoch, logs=None): loss = logs["loss"] val_acc = logs["val_acc"] print(f'Epoch {epoch}: train loss - {loss}. test accuracy - {val_acc}') def freeze_layers(model, layer_num): for layer in model.layers[:layer_num]: layer.trainable = False def train_layers(model, layer_num): for layer in model.layers[layer_num:]: layer.trainable = True resnet50 = get_resnet50(shape=shape + (3,)) bottleneck_train_features = resnet50.predict(input_train) bottleneck_test_features = resnet50.predict(input_test) in_layer = Input(shape=(bottleneck_train_features.shape[1:])) x = Conv2D(filters=100, kernel_size=2)(in_layer) x = Dropout(0.4)(x) x = GlobalAveragePooling2D()(x) x = Dropout(0.3)(x) predictions = Dense(len(classes), activation='softmax')(x) model = Model(inputs=in_layer, outputs=predictions) model.summary() ``` ## Train the model! And now it's time to train the model! ``` model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc']) model.fit_generator(batch_generator(bottleneck_train_features, out_train), steps_per_epoch=len(bottleneck_train_features) // batch_size, validation_data=(bottleneck_test_features, out_test), verbose=2, epochs=max_epoch, callbacks=[RecordAccuracy(), TensorBoard()]) ``` # Server Okay now let's host a server for grasshopper ``` print(model.predict(resnet50.predict(np.array([input_test[0]])))) print(classes) print('----') print(input_test[0].shape) print(list(zip(model.predict(resnet50.predict(np.array([input_test[0]])))[0], classes))) out_test[0] from flask import Flask from flask import request app = Flask(__name__) @app.route('/predict', methods=['POST']) #GET requests will be blocked def hello_world(): req_data = request.get_json() img = req_data['image'] img = decode_img(img).resize((224,224)).convert('RGB') img = image.img_to_array(img) x = preprocess_input(img) print('----') print(x.shape) print('----') global sess global graph with graph.as_default(): set_session(sess) pred = model.predict(resnet50.predict(np.array([x])))[0] pred = [str(f) for f in pred] prediction = list(zip(pred, classes)) print('prediction') print(prediction) return { 'prediction': prediction } app.run(debug=True, use_reloader=False) ```
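To exercise the endpoint from outside the notebook, a client only needs to POST a JSON body containing a base64-encoded image, which is what `decode_img` expects on the server side. The sketch below is an added example with assumptions (a hypothetical image file name, Flask's default port 5000, and the third-party `requests` package); it is not part of the original notebook.

```
# Hypothetical client for the /predict endpoint above.
import base64
import requests  # pip install requests

with open("some_house.png", "rb") as f:  # hypothetical test image
    img_b64 = base64.b64encode(f.read()).decode("ascii")

resp = requests.post("http://127.0.0.1:5000/predict", json={"image": img_b64})
resp.raise_for_status()
print(resp.json()["prediction"])  # list of (probability, class name) pairs
```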
```
import csv
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split

RANDOM_SEED = 42
```

# Specify each path

```
dataset = 'model/point_history_classifier/point_history_allkeypoints.csv'
model_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.hdf5'
```

# Set the number of classes

```
NUM_CLASSES = 3  # number of classes in point_history_classifier_label_allkeypoints
```

# Input length

```
TIME_STEPS = 16                  # time-series length
NUM_KEYPOINTS = 21               # total number of keypoints
DIMENSION = 2 * NUM_KEYPOINTS    # 2 elements [x, y] * number of keypoints
```

# Load the training data

```
# dataset of "coordinates"
X_dataset = np.loadtxt(dataset, delimiter=',', dtype='float32', usecols=list(range(1, (TIME_STEPS * DIMENSION) + 1)))
# usecols=: which (vertical) columns to read; here we skip the index column (column 0) and read the full input length

# dataset of "index numbers"; usecols=(0) selects the first column
y_dataset = np.loadtxt(dataset, delimiter=',', dtype='int32', usecols=(0))

X_train, X_test, y_train, y_test = train_test_split(
    X_dataset, y_dataset, train_size=0.75, random_state=RANDOM_SEED)
# X is the actual data, y is the index number
# Three quarters of the csv data go into the training set as X (data) and y (index),
# and the remaining quarter goes into the test set the same way
# -> each training row holds 16 * 2 * 21 elements
```

# Build the model

```
use_lstm = False
model = None

if use_lstm:
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Reshape((TIME_STEPS, DIMENSION), input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.LSTM(16, input_shape=[TIME_STEPS, DIMENSION]),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ])
else:
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(TIME_STEPS * DIMENSION, )),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='relu'),
        tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
    ])

model.summary()
# tf.keras.utils.plot_model(model, show_shapes=True)

# Model checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    model_save_path, verbose=1, save_weights_only=False)
# Early stopping callback
es_callback = tf.keras.callbacks.EarlyStopping(patience=20, verbose=1)
# ModelCheckpoint saves the model at the end of each epoch, and
# EarlyStopping stops training once no further improvement is seen.

# Compile the model
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)
```

# Train the model

```
model.fit(
    X_train,
    y_train,
    epochs=1000,
    batch_size=128,
    validation_data=(X_test, y_test),
    callbacks=[cp_callback, es_callback]
)

# Load the saved model
model = tf.keras.models.load_model(model_save_path)

# Inference test
predict_result = model.predict(np.array([X_test[0]]))
print(np.squeeze(predict_result))
print(np.argmax(np.squeeze(predict_result)))
```

# Confusion matrix

```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report

def print_confusion_matrix(y_true, y_pred, report=True):
    labels = sorted(list(set(y_true)))
    cmx_data = confusion_matrix(y_true, y_pred, labels=labels)
    df_cmx = pd.DataFrame(cmx_data, index=labels, columns=labels)

    fig, ax = plt.subplots(figsize=(7, 6))
    sns.heatmap(df_cmx, annot=True, fmt='g', square=False)
    ax.set_ylim(len(set(y_true)), 0)
    plt.show()

    if report:
        print('Classification Report')
        print(classification_report(y_test, y_pred))

Y_pred = model.predict(X_test)
y_pred = np.argmax(Y_pred, axis=1)
print_confusion_matrix(y_test, y_pred)
```

### Converting to an ONNX model (additional cell)

##### The second argument of save_model becomes the model file name

```
import keras2onnx

# convert model to ONNX
onnx_model = keras2onnx.convert_keras(model,                     # keras model
                                      name="example",            # the converted ONNX model internal name
                                      target_opset=9,            # the ONNX version to export the model to
                                      channel_first_inputs=None  # which inputs to transpose from NHWC to NCHW
                                      )
keras2onnx.save_model(onnx_model, "example_h1.onnx")  # save as "example_h1.onnx"
```

# Convert the model for TensorFlow Lite

```
# Save as an inference-only model
model.save(model_save_path, include_optimizer=False)
model = tf.keras.models.load_model(model_save_path)

tflite_save_path = 'model/point_history_classifier/point_history_classifier_allkeypoints.tflite'

# Convert the model (with quantization)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
# converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_path)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_quantized_model = converter.convert()

open(tflite_save_path, 'wb').write(tflite_quantized_model)
```

# Inference test

```
interpreter = tf.lite.Interpreter(model_path=tflite_save_path)
interpreter.allocate_tensors()

# Get the input and output tensors
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

print(input_details)

interpreter.set_tensor(input_details[0]['index'], np.array([X_test[0]]))

%%time
# Run inference
interpreter.invoke()
tflite_results = interpreter.get_tensor(output_details[0]['index'])

print(np.squeeze(tflite_results))
print(np.argmax(np.squeeze(tflite_results)))
```
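As an optional cross-check (an addition, not part of the original notebook), the ONNX file exported a few cells above can be run with `onnxruntime` and compared against the Keras model on a single test sample. The package and install step are assumptions; predicted classes should normally agree, with only small numeric differences.

```
# Optional: verify the exported ONNX model against the Keras model.
# pip install onnxruntime
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("example_h1.onnx")
input_name = sess.get_inputs()[0].name

sample = np.array([X_test[0]], dtype=np.float32)  # shape (1, TIME_STEPS * DIMENSION)
onnx_out = sess.run(None, {input_name: sample})[0]
keras_out = model.predict(sample)

print(np.argmax(onnx_out), np.argmax(keras_out))  # predicted classes should match
print(np.max(np.abs(onnx_out - keras_out)))       # numeric difference should be small
```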
# Shor's algorithm, fully classical implementation ``` %matplotlib inline import random import math import itertools def period_finding_classical(a,N): # This is an inefficient classical algorithm to find the period of f(x)=a^x (mod N) # f(0) = a**0 (mod N) = 1, so we find the first x greater than 0 for which f(x) is also 1 for r in itertools.count(start=1): if (a**r) % N == 1: return r def shors_algorithm_classical(N): assert(N>0) assert(int(N)==N) while True: a=random.randint(0,N-1) g=math.gcd(a,N) if g!=1 or N==1: first_factor=g second_factor=int(N/g) return first_factor,second_factor else: r=period_finding_classical(a,N) if r % 2 != 0: continue elif a**(int(r/2)) % N == -1 % N: continue else: first_factor=math.gcd(a**int(r/2)+1,N) second_factor=math.gcd(a**int(r/2)-1,N) if first_factor==N or second_factor==N: continue return first_factor,second_factor # Testing it out. Note because of the probabilistic nature of the algorithm, different factors and different ordering is possible shors_algorithm_classical(15) shors_algorithm_classical(91) ``` # Shor's algorithm, working on a quantum implementation ## The following code will help give intuition for how to design a quantum circuit to do modular multiplication ``` def U_a_modN(a,N,binary=False): """ a and N are decimal This algorithm returns U_a where: U_a is a modular multiplication operator map from |x> to |ax mod N> If binary is set to True, the mapping is given in binary instead of in decimal notation. """ res={} l=[] for i in range(1,N): l+=[a*i%N] res=set() for i in range(1,N): mp=[i] end=i nxt=i-1 while l[nxt]!=end: mp+=[l[nxt]] nxt=l[nxt]-1 res.add(tuple(mp)) final_res=[] for item in res: dup=False for final_item in final_res: if set(item) == set(final_item): dup=True if not dup: final_res+=[item] if not binary: return final_res else: final_res_bin=[] for mapping in final_res: final_res_bin+=[tuple(['{0:06b}'.format(decimal) for decimal in mapping])] return final_res_bin print(U_a_modN(8,35)) print(U_a_modN(8,35,binary=True)) ``` # This code implements modular multiplication by 2 mod 15 ``` import qiskit import matplotlib from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, QISKitError from qiskit.tools.visualization import circuit_drawer from qiskit.extensions.standard import cx, cswap from qiskit import Aer from qiskit import IBMQ # Authenticate an account and add for use during this session. Replace string # argument with your private token. IBMQ.enable_account("INSERT_YOUR_API_TOKEN_HERE") def mult_2mod15_quantum(qr,qc): # Swap 0th qubit and 3rd qubit qc.cx(qr[0],qr[3]) qc.cx(qr[3],qr[0]) qc.cx(qr[0],qr[3]) # Swap 0th qubit and 1st qubit qc.cx(qr[1],qr[0]) qc.cx(qr[0],qr[1]) qc.cx(qr[1],qr[0]) # Swap 1st qubit and 2nd qubit qc.cx(qr[1],qr[2]) qc.cx(qr[2],qr[1]) qc.cx(qr[1],qr[2]) def mult_2mod15_quantum_test(x): qr = QuantumRegister(4) cr = ClassicalRegister(4) qc = QuantumCircuit(qr,cr) # input x_bin='{0:04b}'.format(x) for i,b in enumerate(x_bin): if int(b): qc.x(qr[i]) # run circuit mult_2mod15_quantum(qr,qc) # measure results for i in range(4): qc.measure(qr[i],cr[i]) import time from qiskit.tools.visualization import plot_histogram backend=Aer.get_backend('qasm_simulator') shots=50 job_exp = qiskit.execute(qc, backend=backend) result = job_exp.result() final=result.get_counts(qc) result_in_order=list(final.keys())[0] dec=0 for i,b in enumerate(result_in_order): if int(b): dec+=2**i return (x,dec) def mult_2mod15_classical_test(x): return (x,2*x%15) # testing! 
for i in range(1,15): quantum=mult_2mod15_quantum_test(i) classical=mult_2mod15_classical_test(i) if quantum!=classical: print(quantum,classical) ``` ## This code makes the previous an operation controlled by a control qubit ``` def controlled_mult_2mod15_quantum(qr,qc,control_qubit): """ Controlled quantum circuit for multiplication by 2 mod 15. Note: control qubit should an index greater than 3, and qubits 0,1,2,3 are reserved for circuit operations """ # Swap 0th qubit and 3rd qubit qc.cswap(control_qubit,qr[0],qr[3]) # Swap 0th qubit and 1st qubit qc.cswap(control_qubit,qr[1],qr[0]) # Swap 1st qubit and 2nd qubit qc.cswap(control_qubit,qr[1],qr[2]) ``` # This code performas the entire Shor's algorithm subroutine for multiplication by 2 mod 15 ``` import math def shors_subroutine_period_2mod15(qr,qc,cr): qc.x(qr[0]) qc.h(qr[4]) qc.h(qr[4]) qc.measure(qr[4],cr[0]) qc.h(qr[5]) qc.cx(qr[5],qr[0]) qc.cx(qr[5],qr[2]) if cr[0] == 1: qc.u1(math.pi/2,qr[4]) #pi/2 is 90 degrees in radians qc.h(qr[5]) qc.measure(qr[5],cr[1]) qc.h(qr[6]) controlled_mult_2mod15_quantum(qr,qc,qr[6]) if cr[1] == 1: qc.u1(math.pi/2,qr[6]) # pi/2 is 90 degrees in radians if cr[0] == 1: qc.u1(math.pi/4,qr[6]) #pi/4 is 45 degrees in radians qc.h(qr[6]) qc.measure(qr[6],cr[2]) ``` # This code will help us read out the results from our quantum Shor's subroutine. First, implementing the code to compute the period from the output of the quantum computation: ``` # see https://arxiv.org/pdf/quant-ph/0010034.pdf for more details (convergence relations on page 11) import math def continued_fraction(xi,max_steps=100): # stop_after is cutoff for algorithm, for debugging """ This function computes the continued fraction expansion of input xi per the recurrance relations on page 11 of https://arxiv.org/pdf/quant-ph/0010034.pdf """ #a and xi initial all_as=[] all_xis=[] a_0=math.floor(xi) xi_0=xi-a_0 all_as+=[a_0] all_xis+=[xi_0] # p and q initial all_ps=[] all_qs=[] p_0=all_as[0] q_0=1 all_ps+=[p_0] all_qs+=[q_0] xi_n=xi_0 while not numpy.isclose(xi_n,0,atol=1e-7): if len(all_as)>=max_steps: print("Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps"%max_steps) break # computing a and xi a_nplus1=math.floor(1/xi_n) xi_nplus1=1/xi_n-a_nplus1 all_as+=[a_nplus1] all_xis+=[xi_nplus1] xi_n=xi_nplus1 # computing p and q n=len(all_as)-1 if n==1: p_1=all_as[1]*all_as[0]+1 q_1=all_as[1] all_ps+=[p_1] all_qs+=[q_1] else: p_n=all_as[n]*all_ps[n-1]+all_ps[n-2] q_n=all_as[n]*all_qs[n-1]+all_qs[n-2] all_ps+=[p_n] all_qs+=[q_n] return all_ps,all_qs,all_as,all_xis import numpy def test_continued_fraction(): """ Testing the continued fraction see https://arxiv.org/pdf/quant-ph/0010034.pdf, step 2.5 chart page 20 NOTE: I believe there is a mistake in this chart at the last row, and that n should range as in my code below their chart is missing one line. Please contact me if you find differently! 
""" xi=13453/16384 all_ps,all_qs,all_as,all_xis=continued_fraction(xi) ## step 2.5 chart in https://arxiv.org/pdf/quant-ph/0010034.pdf page 20 #n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14] #a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,3] #p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,13453] #q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,16384] ## what I find instead: n_13453_16384=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] a_n_13453_16384=[0,1,4,1,1,2,3,1,1,3,1,1,1,1,2,1] p_n_13453_16384=[0,1,4,5,9,23,78,101,179,638,817,1455,2272,3727,9726,13453] q_n_13453_16384=[1,1,5,6,11,28,95,123,218,777,995,1772,2767,4539,11845,16384] for tup in [("ns",range(len(all_ps)),range(len(n_13453_16384))), ("as",all_as,a_n_13453_16384), ("ps",all_ps,p_n_13453_16384), ("qs",all_qs,q_n_13453_16384), ]: if not numpy.array_equal(tup[1],tup[2]): print(tup[0]) print("act:",tup[1]) print("exp:",tup[2]) print() from IPython.display import display, Math def pretty_print_continued_fraction(results,raw_latex=False): all_ps,all_qs,all_as,all_xis=results for i,vals in enumerate(zip(all_ps,all_qs,all_as,all_xis)): p,q,a,xi=vals if raw_latex: print(r'\frac{p_%d}{q_%d}=\frac{%d}{%d}'%(i,i,p,q)) else: display(Math(r'$\frac{p_%d}{q_%d}=\frac{%d}{%d}$'%(i,i,p,q))) test_continued_fraction() #pretty_print_continued_fraction(continued_fraction(5/8),raw_latex=True) #pretty_print_continued_fraction(continued_fraction(0/8)) pretty_print_continued_fraction(continued_fraction(6/8)) ``` # Next we will integrate the check for whether we have found the period into the continued fraction code, so that we can stop computing the continued fraction as soon as we've found the period ``` import math def period_from_quantum_measurement(quantum_measurement, number_qubits, a_shor, N_shor, max_steps=100): # stop_after is cutoff for algorithm, for debugging """ This function computes the continued fraction expansion of input xi per the recurrance relations on page 11 of https://arxiv.org/pdf/quant-ph/0010034.pdf a_shor is the random number chosen as part of Shor's algorithm N_shor is the number Shor's algorithm is trying to factor """ xi=quantum_measurement/2**number_qubits #a and xi initial all_as=[] all_xis=[] a_0=math.floor(xi) xi_0=xi-a_0 all_as+=[a_0] all_xis+=[xi_0] # p and q initial all_ps=[] all_qs=[] p_0=all_as[0] q_0=1 all_ps+=[p_0] all_qs+=[q_0] xi_n=xi_0 while not numpy.isclose(xi_n,0,atol=1e-7): if len(all_as)>=max_steps: print("Warning: algorithm did not converge within max_steps %d steps, try increasing max_steps"%max_steps) break # computing a and xi a_nplus1=math.floor(1/xi_n) xi_nplus1=1/xi_n-a_nplus1 all_as+=[a_nplus1] all_xis+=[xi_nplus1] xi_n=xi_nplus1 # computing p and q n=len(all_as)-1 if n==1: p_1=all_as[1]*all_as[0]+1 q_1=all_as[1] all_ps+=[p_1] all_qs+=[q_1] else: p_n=all_as[n]*all_ps[n-1]+all_ps[n-2] q_n=all_as[n]*all_qs[n-1]+all_qs[n-2] all_ps+=[p_n] all_qs+=[q_n] # check the q to see if it is our answer (note with this we skip the first q, as a trivial case) if a_shor**all_qs[-1]%N_shor == 1 % N_shor: return all_qs[-1] period_from_quantum_measurement(13453,14,3,91) #should return, for example 6 per page 20 of https://arxiv.org/pdf/quant-ph/0010034.pdf # Testing this: import qiskit from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister def binary_string_to_decimal(s): dec=0 for i in s[::-1]: if int(i): dec+=2**int(i) return dec def run_shors_subroutine_period2_mod15(): qr = QuantumRegister(7) cr = ClassicalRegister(3) qc = QuantumCircuit(qr,cr) # initialize x to be a 
superposition of all possible r quibit values #for i in range(4): # qc.h(qr[i]) # run circuit (which includes measurement steps) shors_subroutine_period_2mod15(qr,qc,cr) import time from qiskit.tools.visualization import plot_histogram backend=Aer.get_backend('qasm_simulator') job_exp = qiskit.execute(qc, backend=backend,shots=1) result = job_exp.result() final=result.get_counts(qc) # convert final result to decimal measurement=binary_string_to_decimal(list(final.keys())[0]) period_r=period_from_quantum_measurement(measurement,3,2,15) return period_r print(run_shors_subroutine_period2_mod15()) ``` # The last thing to do will be to implement the full Shor's algorithm and check if the r is correct by plugging it in, getting factors and checking results. If not, rerun the algorithm. ``` def period_finding_quantum(a,N): # for the sake of example we will not implement this algorithm in full generality # rather, we will create an example with one specific a and one specific N # extension work could be done to impl if a==2 and N==15: return run_shors_subroutine_period2_mod15() else: raise Exception("Not implemented for N=%d, a=%d" % (N,a)) def shors_algorithm_quantum(N,fixed_a=None): assert(N>0) assert(int(N)==N) while True: if not fixed_a: a=random.randint(0,N-1) else: a=fixed_a g=math.gcd(a,N) if g!=1 or N==1: first_factor=g second_factor=int(N/g) return first_factor,second_factor else: r=period_finding_quantum(a,N) if not r: continue if r % 2 != 0: continue elif a**(int(r/2)) % N == -1 % N: continue else: first_factor=math.gcd(a**int(r/2)+1,N) second_factor=math.gcd(a**int(r/2)-1,N) if first_factor==N or second_factor==N: continue if first_factor*second_factor!=N: # checking our work continue return first_factor,second_factor # Here's our final result shors_algorithm_quantum(15,fixed_a=2) # Now trying it out to see how the algorithm would function if we let it choose a given random a: for a in range(15): # Here's the result for a given a: try: print("randomly chosen a=%d would result in %s"%(a,shors_algorithm_quantum(15,fixed_a=a))) except: print("FINISH IMPLEMENTING algorithm doesn't work with a randomly chosen a=%d at this stage"%a) ```
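As a final, purely classical sanity check (an added sketch, not part of the original notebook), the post-processing for the worked example N = 15, a = 2 can be spelled out by hand: brute-force period finding gives r = 4, and the two gcd computations recover the factors 5 and 3, matching what `shors_algorithm_quantum(15, fixed_a=2)` should return.

```
# Classical cross-check of the worked example: N = 15, a = 2.
import math

N, a = 15, 2
r = next(r for r in range(1, N) if pow(a, r, N) == 1)  # brute-force period finding
assert r % 2 == 0
print(r, math.gcd(a**(r // 2) + 1, N), math.gcd(a**(r // 2) - 1, N))  # expected: 4 5 3
```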
``` %reset ``` # Simulate particles translating through OAM beam Liz Strong 4/17/2020 ``` import sys sys.path.append('../slvel') import pandas as pd import numpy as np import matplotlib.pyplot as plt from calc_intensity import calculate_e_field_intensity from scattering_particle import Particle import scattering_sim as scatsim import random ``` ### make pretty plots ``` %matplotlib notebook ``` ### calculate intensity ``` xval = 986 # grid x size [pixels] yval = 616 # grid y size [pixels] l = 4 # OAM azimuthal mode number w0 = 93.8458 # beam waist [pixels] intensity, intensity_shape = calculate_e_field_intensity(l=l, p=0, w0=w0, x=xval, y=yval, petaledbeam=True) plt.figure() plt.imshow(intensity) plt.colorbar() plt.title('Intensity field') ``` ### make particle to sample the intensity field ``` r = 20 # particle radius [pixels] p1 = Particle(intensity_shape, particle_radius=r, orbit_radius=75, orbit_offset_x=75, orbit_offset_y=0, v=5000, sample_rate=100000, direction=-1) ``` ### presum intensities particle will experience This is slow, so calculate the file once and then save it to load the file later. ``` #presummed_intensity = p1.calculate_sampled_intensities_throughout(Intensity_normalized) #np.save('psi_11_24_19_w0_93.8458.npy', presummed_intensity) psi = np.load('psi_11_24_19_w0_93.8458.npy') ``` ### calculate intensities on particle's orbit ### set parameters for simulation Total number of simulations: num_theta * num_radii * num_d * num_angvels ``` offset_x = [355, 425] # x coordinate of D [pixels] offset_y = [0,0] # y coordinate of D [pixels] orbit_radius = {} # orbit radius [pixels] D = [np.sqrt(x**2+y**2) for x,y in zip(offset_x, offset_y)] # D [pixels] radius = [490, 325] # orbit radius [pixels] for dist, rad in zip(D, radius): orbit_radius[dist] = [rad] orbit_radius = [490, 325] angle = np.arctan2(offset_y, offset_x) # phi [rad] angular_velocities = [250, 250] # Omega [rad/s] ``` ### Make time series. 
``` data = scatsim.simulate_time_series(p1, psi, intensity_shape, offset_x, offset_y, D, angle, angular_velocities, orbit_radius, xval, yval) time_keep = data[0] intensities_keep = data[1] positions_keep = data[2] angular_velocities_keep = data[3] R_keep = data[4] d_keep = data[5] offset_x_keep = data[6] offset_y_keep = data[7] theta_keep = data[8] ``` ### plot results ``` plt.figure() plt.imshow(intensity) for selxn in range(2): plt.plot(positions_keep[selxn].T[0],positions_keep[selxn].T[1],'g') plt.plot(positions_keep[selxn].T[0][0],positions_keep[selxn].T[1][0],'ms') plt.colorbar() plt.figure() for selxn in range(2): plt.plot(intensities_keep[selxn],'.',label='R='+str(orbit_radius[selxn])) plt.legend() ``` ### add noise & concatenate into a long time series ``` timeseries_time, timeseries_intensity, intensities_extended = scatsim.concat_timeseries(intensities_keep, time_keep, ext_length=420) plt.figure() plt.plot(timeseries_time, timeseries_intensity) plt.xlabel('Time, t [sec]') plt.ylabel('Signal, y [summed intensity]') ``` ### Save angular velocities with times to correlate them later for ML training purposes ``` # save angular velocities corresponding to each seg vs = scatsim.save_series_info(angular_velocities_keep, intensities_keep, intensities_extended) # save orbit radii corresponding to each seg Rs = scatsim.save_series_info(R_keep, intensities_keep, intensities_extended) # save x offset radii corresponding to each seg delta_xs = scatsim.save_series_info(offset_x_keep, intensities_keep, intensities_extended) # save y offset radii corresponding to each seg delta_ys = scatsim.save_series_info(offset_y_keep, intensities_keep, intensities_extended) ``` ### save data ``` data_to_save = np.array([timeseries_time,timeseries_intensity,angular_velocities_keep,vs,Rs,delta_xs,delta_ys]) np.save('example_simulated_signal.npy',data_to_save) ```
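To reuse the saved signal later (for example as the ML training data mentioned above), the file can be read back as follows. This is a small added sketch: because the stacked rows generally have different lengths, NumPy stores them as an object array, so `allow_pickle=True` is needed when loading.

```
# Load the simulated signal back from disk.
import numpy as np

loaded = np.load('example_simulated_signal.npy', allow_pickle=True)
(timeseries_time, timeseries_intensity,
 angular_velocities_keep, vs, Rs, delta_xs, delta_ys) = loaded

print(len(timeseries_time), len(timeseries_intensity))
```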
# 02. Building a Custom Dataset
- Dataset Generation!
- If your images are grouped into folders, let's convert that dataset into the format we want!

```
import numpy as np
import os
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
%matplotlib inline
print ("Package loaded")
cwd = os.getcwd()
print ("Current folder is %s" % (cwd) )

# Set the paths of the folders to train on
paths = {"../../img_dataset/celebs/Arnold_Schwarzenegger"
         , "../../img_dataset/celebs/Junichiro_Koizumi"
         , "../../img_dataset/celebs/Vladimir_Putin"
         , "../../img_dataset/celebs/George_W_Bush"}
categories = ["Arnold","Koizumi","Putin","Bush"]
# The reshape size
imgsize = [64, 64]
# Grayscale
use_gray = 1
# Save name
data_name = "custom_data"

print ("Your images should be at")
for i, path in enumerate(paths):
    print (" [%d/%d] %s/%s" % (i, len(paths), cwd, path))

print ("Data will be saved to %s" % (cwd + '/data/' + data_name + '.npz'))
```

# RGB 2 GRAY

```
def rgb2gray(rgb):
    if len(rgb.shape) == 3:
        return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
    else:
        # print ("Current Image if GRAY!")
        return rgb
```

# LOAD Image

```
nclass = len(paths)
valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"]
imgcnt = 0
for i, relpath in zip(range(nclass), paths):
    path = cwd + "/" + relpath
    flist = os.listdir(path)
    for f in flist:
        if os.path.splitext(f)[1].lower() not in valid_exts:
            continue
        fullpath = os.path.join(path, f)
        currimg = imread(fullpath)
        # Convert to grayscale
        if use_gray:
            grayimg = rgb2gray(currimg)
        else:
            grayimg = currimg
        # Reshape
        graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.
        grayvec = np.reshape(graysmall, (1, -1))
        # Save
        curr_label = np.eye(nclass, nclass)[i:i+1, :]  # np.eye : builds an identity matrix -> used to make a one-hot vector
        if imgcnt == 0:
            totalimg = grayvec
            totallabel = curr_label
        else:
            totalimg = np.concatenate((totalimg, grayvec), axis=0)
            totallabel = np.concatenate((totallabel, curr_label), axis=0)
        imgcnt = imgcnt + 1
print ("Total %d images loaded." % (imgcnt))

def print_shape(string, x):
    print ("Shape of '%s' is %s" % (string, x.shape,))

randidx = np.random.randint(imgcnt, size=imgcnt)
trainidx = randidx[0:int(3*imgcnt/5)]
testidx = randidx[int(3*imgcnt/5):imgcnt]
trainimg = totalimg[trainidx, :]
trainlabel = totallabel[trainidx, :]
testimg = totalimg[testidx, :]
testlabel = totallabel[testidx, :]
print_shape("trainimg", trainimg)
print_shape("trainlabel", trainlabel)
print_shape("testimg", testimg)
print_shape("testlabel", testlabel)
```

# Save

```
savepath = cwd + "/data/" + data_name + ".npz"
np.savez(savepath, trainimg=trainimg, trainlabel=trainlabel
         , testimg=testimg, testlabel=testlabel, imgsize=imgsize, use_gray=use_gray)
print ("Saved to %s" % (savepath))
```

# Load to Check

```
# Load them!
cwd = os.getcwd()
loadpath = cwd + "/data/" + data_name + ".npz"
l = np.load(loadpath)

# See what's in here
l.files

# Parse data
trainimg_loaded = l['trainimg']
trainlabel_loaded = l['trainlabel']
testimg_loaded = l['testimg']
testlabel_loaded = l['testlabel']

print ("%d train images loaded" % (trainimg_loaded.shape[0]))
print ("%d test images loaded" % (testimg_loaded.shape[0]))
print ("Loaded from to %s" % (savepath))
```

# Plot randomly train images

```
# Load them!
cwd = os.getcwd() loadpath = cwd + "/data/" + data_name + ".npz" l = np.load(loadpath) # See what's in here l.files # Parse data trainimg_loaded = l['trainimg'] trainlabel_loaded = l['trainlabel'] testimg_loaded = l['testimg'] testlabel_loaded = l['testlabel'] print ("%d train images loaded" % (trainimg_loaded.shape[0])) print ("%d test images loaded" % (testimg_loaded.shape[0])) print ("Loaded from to %s" % (savepath)) ``` # Plot randomly test images ``` # Do batch stuff using loaded data ntest_loaded = testimg_loaded.shape[0] batch_size = 3; randidx = np.random.randint(ntest_loaded, size=batch_size) for i in randidx: currimg = np.reshape(testimg_loaded[i, :], (imgsize[0], -1)) currlabel_onehot = testlabel_loaded[i, :] currlabel = np.argmax(currlabel_onehot) if use_gray: currimg = np.reshape(testimg[i, :], (imgsize[0], -1)) plt.matshow(currimg, cmap=plt.get_cmap('gray')) plt.colorbar() else: currimg = np.reshape(testimg[i, :], (imgsize[0], imgsize[1], 3)) plt.imshow(currimg) title_string = "[%d] %d-class" % (i, currlabel) plt.title(title_string) plt.show() ```
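Since the point of the saved `.npz` file is to feed a training loop later, here is a small added sketch (the batch size is an arbitrary example value) of drawing a random mini-batch of flattened images and one-hot labels from the arrays loaded above.

```
# Draw a random mini-batch from the loaded training arrays.
ntrain_loaded = trainimg_loaded.shape[0]
batch_size = 16

batch_idx = np.random.randint(ntrain_loaded, size=batch_size)
batch_xs = trainimg_loaded[batch_idx, :]    # (batch_size, imgsize[0]*imgsize[1]) flattened images
batch_ys = trainlabel_loaded[batch_idx, :]  # (batch_size, nclass) one-hot labels

print_shape("batch_xs", batch_xs)
print_shape("batch_ys", batch_ys)
```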
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Migrate your TensorFlow 1 code to TensorFlow 2 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/guide/migrate"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/migrate.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/migrate.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This doc for users of low level TensorFlow APIs. If you are using the high level APIs (`tf.keras`) there may be little or no action you need to take to make your code fully TensorFlow 2.0 compatible: * Check your [optimizer's default learning rate](#keras_optimizer_lr). * Note that the "name" that metrics are logged to [may have changed](#keras_metric_names). It is still possible to run 1.X code, unmodified ([except for contrib](https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md)), in TensorFlow 2.0: ``` import tensorflow.compat.v1 as tf tf.disable_v2_behavior() ``` However, this does not let you take advantage of many of the improvements made in TensorFlow 2.0. This guide will help you upgrade your code, making it simpler, more performant, and easier to maintain. ## Automatic conversion script The first step, before attempting to implement the changes described in this doc, is to try running the [upgrade script](./upgrade.md). This will do an initial pass at upgrading your code to TensorFlow 2.0. But it can't make your code idiomatic to 2.0. Your code may still make use of `tf.compat.v1` endpoints to access placeholders, sessions, collections, and other 1.x-style functionality. ## Top-level behavioral changes If your code works in TensorFlow 2.0 using `tf.compat.v1.disable_v2_behavior()`, there are still global behavioral changes you may need to address. The major changes are: * *Eager execution, `v1.enable_eager_execution()`* : Any code that implicitly uses a `tf.Graph` will fail. Be sure to wrap this code in a `with tf.Graph().as_default()` context. * *Resource variables, `v1.enable_resource_variables()`*: Some code may depends on non-deterministic behaviors enabled by TF reference variables. Resource variables are locked while being written to, and so provide more intuitive consistency guarantees. * This may change behavior in edge cases. * This may create extra copies and can have higher memory usage. 
* This can be disabled by passing `use_resource=False` to the `tf.Variable` constructor. * *Tensor shapes, `v1.enable_v2_tensorshape()`*: TF 2.0 simplifies the behavior of tensor shapes. Instead of `t.shape[0].value` you can say `t.shape[0]`. These changes should be small, and it makes sense to fix them right away. See [TensorShape](#tensorshape) for examples. * *Control flow, `v1.enable_control_flow_v2()`*: The TF 2.0 control flow implementation has been simplified, and so produces different graph representations. Please [file bugs](https://github.com/tensorflow/tensorflow/issues) for any issues. ## Make the code 2.0-native This guide will walk through several examples of converting TensorFlow 1.x code to TensorFlow 2.0. These changes will let your code take advantage of performance optimizations and simplified API calls. In each case, the pattern is: ### 1. Replace `v1.Session.run` calls Every `v1.Session.run` call should be replaced by a Python function. * The `feed_dict` and `v1.placeholder`s become function arguments. * The `fetches` become the function's return value. * During conversion eager execution allows easy debugging with standard Python tools like `pdb`. After that add a `tf.function` decorator to make it run efficiently in graph. See the [Autograph Guide](function.ipynb) for more on how this works. Note that: * Unlike `v1.Session.run` a `tf.function` has a fixed return signature, and always returns all outputs. If this causes performance problems, create two separate functions. * There is no need for a `tf.control_dependencies` or similar operations: A `tf.function` behaves as if it were run in the order written. `tf.Variable` assignments and `tf.assert`s, for example, are executed automatically. ### 2. Use Python objects to track variables and losses All name-based variable tracking is strongly discouraged in TF 2.0. Use Python objects to to track variables. Use `tf.Variable` instead of `v1.get_variable`. Every `v1.variable_scope` should be converted to a Python object. Typically this will be one of: * `tf.keras.layers.Layer` * `tf.keras.Model` * `tf.Module` If you need to aggregate lists of variables (like `tf.Graph.get_collection(tf.GraphKeys.VARIABLES)`), use the `.variables` and `.trainable_variables` attributes of the `Layer` and `Model` objects. These `Layer` and `Model` classes implement several other properties that remove the need for global collections. Their `.losses` property can be a replacement for using the `tf.GraphKeys.LOSSES` collection. See the [keras guides](keras.ipynb) for details. Warning: Many `tf.compat.v1` symbols use the global collections implicitly. ### 3. Upgrade your training loops Use the highest level API that works for your use case. Prefer `tf.keras.Model.fit` over building your own training loops. These high level functions manage a lot of the low-level details that might be easy to miss if you write your own training loop. For example, they automatically collect the regularization losses, and set the `training=True` argument when calling the model. ### 4. Upgrade your data input pipelines Use `tf.data` datasets for data input. These objects are efficient, expressive, and integrate well with tensorflow. They can be passed directly to the `tf.keras.Model.fit` method. ``` model.fit(dataset, epochs=5) ``` They can be iterated over directly standard Python: ``` for example_batch, label_batch in dataset: break ``` #### 5. 
Migrate off `compat.v1` symbols The `tf.compat.v1` module contains the complete TensorFlow 1.x API, with its original semantics. The [TF2 upgrade script](upgrade.ipynb) will convert symbols to their 2.0 equivalents if such a conversion is safe, i.e., if it can determine that the behavior of the 2.0 version is exactly equivalent (for instance, it will rename `v1.arg_max` to `tf.argmax`, since those are the same function). After the upgrade script is done with a piece of code, it is likely there are many mentions of `compat.v1`. It is worth going through the code and converting these manually to the 2.0 equivalent (it should be mentioned in the log if there is one). ## Converting models ### Setup ``` import tensorflow as tf import tensorflow_datasets as tfds ``` ### Low-level variables & operator execution Examples of low-level API use include: * using variable scopes to control reuse * creating variables with `v1.get_variable`. * accessing collections explicitly * accessing collections implicitly with methods like : * `v1.global_variables` * `v1.losses.get_regularization_loss` * using `v1.placeholder` to set up graph inputs * executing graphs with `Session.run` * initializing variables manually #### Before converting Here is what these patterns may look like in code using TensorFlow 1.x. ```python in_a = tf.placeholder(dtype=tf.float32, shape=(2)) in_b = tf.placeholder(dtype=tf.float32, shape=(2)) def forward(x): with tf.variable_scope("matmul", reuse=tf.AUTO_REUSE): W = tf.get_variable("W", initializer=tf.ones(shape=(2,2)), regularizer=tf.contrib.layers.l2_regularizer(0.04)) b = tf.get_variable("b", initializer=tf.zeros(shape=(2))) return W * x + b out_a = forward(in_a) out_b = forward(in_b) reg_loss=tf.losses.get_regularization_loss(scope="matmul") with tf.Session() as sess: sess.run(tf.global_variables_initializer()) outs = sess.run([out_a, out_b, reg_loss], feed_dict={in_a: [1, 0], in_b: [0, 1]}) ``` #### After converting In the converted code: * The variables are local Python objects. * The `forward` function still defines the calculation. * The `Session.run` call is replaced with a call to `forward` * The optional `tf.function` decorator can be added for performance. * The regularizations are calculated manually, without referring to any global collection. * **No sessions or placeholders.** ``` W = tf.Variable(tf.ones(shape=(2,2)), name="W") b = tf.Variable(tf.zeros(shape=(2)), name="b") @tf.function def forward(x): return W * x + b out_a = forward([1,0]) print(out_a) out_b = forward([0,1]) regularizer = tf.keras.regularizers.l2(0.04) reg_loss=regularizer(W) ``` ### Models based on `tf.layers` The `v1.layers` module is used to contain layer-functions that relied on `v1.variable_scope` to define and reuse variables. #### Before converting ```python def model(x, training, scope='model'): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): x = tf.layers.conv2d(x, 32, 3, activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.04)) x = tf.layers.max_pooling2d(x, (2, 2), 1) x = tf.layers.flatten(x) x = tf.layers.dropout(x, 0.1, training=training) x = tf.layers.dense(x, 64, activation=tf.nn.relu) x = tf.layers.batch_normalization(x, training=training) x = tf.layers.dense(x, 10) return x train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` #### After converting * The simple stack of layers fits neatly into `tf.keras.Sequential`. 
(For more complex models see [custom layers and models](keras/custom_layers_and_models.ipynb), and [the functional API](keras/functional.ipynb).) * The model tracks the variables, and regularization losses. * The conversion was one-to-one because there is a direct mapping from `v1.layers` to `tf.keras.layers`. Most arguments stayed the same. But notice the differences: * The `training` argument is passed to each layer by the model when it runs. * The first argument to the original `model` function (the input `x`) is gone. This is because object layers separate building the model from calling the model. Also note that: * If you were using regularizers of initializers from `tf.contrib`, these have more argument changes than others. * The code no longer writes to collections, so functions like `v1.losses.get_regularization_loss` will no longer return these values, potentially breaking your training loops. ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.04), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) train_data = tf.ones(shape=(1, 28, 28, 1)) test_data = tf.ones(shape=(1, 28, 28, 1)) train_out = model(train_data, training=True) print(train_out) test_out = model(test_data, training=False) print(test_out) # Here are all the trainable variables. len(model.trainable_variables) # Here is the regularization loss. model.losses ``` ### Mixed variables & `v1.layers` Existing code often mixes lower-level TF 1.x variables and operations with higher-level `v1.layers`. #### Before converting ```python def model(x, training, scope='model'): with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): W = tf.get_variable( "W", dtype=tf.float32, initializer=tf.ones(shape=x.shape), regularizer=tf.contrib.layers.l2_regularizer(0.04), trainable=True) if training: x = x + W else: x = x + W * 0.5 x = tf.layers.conv2d(x, 32, 3, activation=tf.nn.relu) x = tf.layers.max_pooling2d(x, (2, 2), 1) x = tf.layers.flatten(x) return x train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` #### After converting To convert this code, follow the pattern of mapping layers to layers as in the previous example. A `v1.variable_scope` is effectively a layer of its own. So rewrite it as a `tf.keras.layers.Layer`. See [the guide](keras/custom_layers_and_models.ipynb) for details. The general pattern is: * Collect layer parameters in `__init__`. * Build the variables in `build`. * Execute the calculations in `call`, and return the result. The `v1.variable_scope` is essentially a layer of its own. So rewrite it as a `tf.keras.layers.Layer`. See [the guide](keras/custom_layers_and_models.ipynb) for details. 
``` # Create a custom layer for part of the model class CustomLayer(tf.keras.layers.Layer): def __init__(self, *args, **kwargs): super(CustomLayer, self).__init__(*args, **kwargs) def build(self, input_shape): self.w = self.add_weight( shape=input_shape[1:], dtype=tf.float32, initializer=tf.keras.initializers.ones(), regularizer=tf.keras.regularizers.l2(0.02), trainable=True) # Call method will sometimes get used in graph mode, # training will get turned into a tensor @tf.function def call(self, inputs, training=None): if training: return inputs + self.w else: return inputs + self.w * 0.5 custom_layer = CustomLayer() print(custom_layer([1]).numpy()) print(custom_layer([1], training=True).numpy()) train_data = tf.ones(shape=(1, 28, 28, 1)) test_data = tf.ones(shape=(1, 28, 28, 1)) # Build the model including the custom layer model = tf.keras.Sequential([ CustomLayer(input_shape=(28, 28, 1)), tf.keras.layers.Conv2D(32, 3, activation='relu'), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), ]) train_out = model(train_data, training=True) test_out = model(test_data, training=False) ``` Some things to note: * Subclassed Keras models & layers need to run in both v1 graphs (no automatic control dependencies) and in eager mode * Wrap the `call()` in a `tf.function()` to get autograph and automatic control dependencies * Don't forget to accept a `training` argument to `call`. * Sometimes it is a `tf.Tensor` * Sometimes it is a Python boolean. * Create model variables in constructor or `Model.build` using `self.add_weight()`. * In `Model.build` you have access to the input shape, so can create weights with matching shape. * Using `tf.keras.layers.Layer.add_weight` allows Keras to track variables and regularization losses. * Don't keep `tf.Tensors` in your objects. * They might get created either in a `tf.function` or in the eager context, and these tensors behave differently. * Use `tf.Variable`s for state, they are always usable from both contexts * `tf.Tensors` are only for intermediate values. ### A note on Slim & contrib.layers A large amount of older TensorFlow 1.x code uses the [Slim](https://ai.googleblog.com/2016/08/tf-slim-high-level-library-to-define.html) library, which was packaged with TensorFlow 1.x as `tf.contrib.layers`. As a `contrib` module, this is no longer available in TensorFlow 2.0, even in `tf.compat.v1`. Converting code using Slim to TF 2.0 is more involved than converting repositories that use `v1.layers`. In fact, it may make sense to convert your Slim code to `v1.layers` first, then convert to Keras. * Remove `arg_scopes`, all args need to be explicit * If you use them, split `normalizer_fn` and `activation_fn` into their own layers * Separable conv layers map to one or more different Keras layers (depthwise, pointwise, and separable Keras layers) * Slim and `v1.layers` have different arg names & default values * Some args have different scales * If you use Slim pre-trained models, try out Keras's pre-traimed models from `tf.keras.applications` or [TF Hub](https://tfhub.dev/s?q=slim%20tf2)'s TF2 SavedModels exported from the original Slim code. Some `tf.contrib` layers might not have been moved to core TensorFlow but have instead been moved to the [TF add-ons package](https://github.com/tensorflow/addons). ## Training There are many ways to feed data to a `tf.keras` model. They will accept Python generators and Numpy arrays as input. 
The recommended way to feed data to a model is to use the `tf.data` package, which contains a collection of high performance classes for manipulating data. If you are still using `tf.queue`, these are now only supported as data-structures, not as input pipelines. ### Using Datasets The [TensorFlow Datasets](https://tensorflow.org/datasets) package (`tfds`) contains utilities for loading predefined datasets as `tf.data.Dataset` objects. For this example, load the MNISTdataset, using `tfds`: ``` datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] ``` Then prepare the data for training: * Re-scale each image. * Shuffle the order of the examples. * Collect batches of images and labels. ``` BUFFER_SIZE = 10 # Use a much larger value for real code. BATCH_SIZE = 64 NUM_EPOCHS = 5 def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label ``` To keep the example short, trim the dataset to only return 5 batches: ``` train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) test_data = mnist_test.map(scale).batch(BATCH_SIZE) STEPS_PER_EPOCH = 5 train_data = train_data.take(STEPS_PER_EPOCH) test_data = test_data.take(STEPS_PER_EPOCH) image_batch, label_batch = next(iter(train_data)) ``` ### Use Keras training loops If you don't need low level control of your training process, using Keras's built-in `fit`, `evaluate`, and `predict` methods is recommended. These methods provide a uniform interface to train the model regardless of the implementation (sequential, functional, or sub-classed). The advantages of these methods include: * They accept Numpy arrays, Python generators and, `tf.data.Datasets` * They apply regularization, and activation losses automatically. * They support `tf.distribute` [for multi-device training](distributed_training.ipynb). * They support arbitrary callables as losses and metrics. * They support callbacks like `tf.keras.callbacks.TensorBoard`, and custom callbacks. * They are performant, automatically using TensorFlow graphs. Here is an example of training a model using a `Dataset`. (For details on how this works see [tutorials](../tutorials).) ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) # Model is the full model w/o custom layers model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit(train_data, epochs=NUM_EPOCHS) loss, acc = model.evaluate(test_data) print("Loss {}, Accuracy {}".format(loss, acc)) ``` ### Write your own loop If the Keras model's training step works for you, but you need more control outside that step, consider using the `tf.keras.Model.train_on_batch` method, in your own data-iteration loop. Remember: Many things can be implemented as a `tf.keras.callbacks.Callback`. This method has many of the advantages of the methods mentioned in the previous section, but gives the user control of the outer loop. You can also use `tf.keras.Model.test_on_batch` or `tf.keras.Model.evaluate` to check performance during training. Note: `train_on_batch` and `test_on_batch`, by default return the loss and metrics for the single batch. 
If you pass `reset_metrics=False` they return accumulated metrics and you must remember to appropriately reset the metric accumulators. Also remember that some metrics like `AUC` require `reset_metrics=False` to be calculated correctly. To continue training the above model: ``` # Model is the full model w/o custom layers model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) for epoch in range(NUM_EPOCHS): #Reset the metric accumulators model.reset_metrics() for image_batch, label_batch in train_data: result = model.train_on_batch(image_batch, label_batch) metrics_names = model.metrics_names print("train: ", "{}: {:.3f}".format(metrics_names[0], result[0]), "{}: {:.3f}".format(metrics_names[1], result[1])) for image_batch, label_batch in test_data: result = model.test_on_batch(image_batch, label_batch, # return accumulated metrics reset_metrics=False) metrics_names = model.metrics_names print("\neval: ", "{}: {:.3f}".format(metrics_names[0], result[0]), "{}: {:.3f}".format(metrics_names[1], result[1])) ``` <a name="custom_loop"></a> ### Customize the training step If you need more flexibility and control, you can have it by implementing your own training loop. There are three steps: 1. Iterate over a Python generator or `tf.data.Dataset` to get batches of examples. 2. Use `tf.GradientTape` to collect gradients. 3. Use one of the `tf.keras.optimizers` to apply weight updates to the model's variables. Remember: * Always include a `training` argument on the `call` method of subclassed layers and models. * Make sure to call the model with the `training` argument set correctly. * Depending on usage, model variables may not exist until the model is run on a batch of data. * You need to manually handle things like regularization losses for the model. Note the simplifications relative to v1: * There is no need to run variable initializers. Variables are initialized on creation. * There is no need to add manual control dependencies. Even in `tf.function` operations act as in eager mode. ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) optimizer = tf.keras.optimizers.Adam(0.001) loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) @tf.function def train_step(inputs, labels): with tf.GradientTape() as tape: predictions = model(inputs, training=True) regularization_loss=tf.math.add_n(model.losses) pred_loss=loss_fn(labels, predictions) total_loss=pred_loss + regularization_loss gradients = tape.gradient(total_loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) for epoch in range(NUM_EPOCHS): for inputs, labels in train_data: train_step(inputs, labels) print("Finished epoch", epoch) ``` ### New-style metrics and losses In TensorFlow 2.0, metrics and losses are objects. These work both eagerly and in `tf.function`s. 
A loss object is callable, and expects (y_true, y_pred) as arguments:

```
cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
cce([[1, 0]], [[-1.0,3.0]]).numpy()
```

A metric object has the following methods:

* `Metric.update_state()` — add new observations
* `Metric.result()` — get the current result of the metric, given the observed values
* `Metric.reset_states()` — clear all observations.

The object itself is callable. Calling updates the state with new observations, as with `update_state`, and returns the new result of the metric.

You don't have to manually initialize a metric's variables, and because TensorFlow 2.0 has automatic control dependencies, you don't need to worry about those either.

The code below uses a metric to keep track of the mean loss observed within a custom training loop.

```
# Create the metrics
loss_metric = tf.keras.metrics.Mean(name='train_loss')
accuracy_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

@tf.function
def train_step(inputs, labels):
  with tf.GradientTape() as tape:
    predictions = model(inputs, training=True)
    regularization_loss=tf.math.add_n(model.losses)
    pred_loss=loss_fn(labels, predictions)
    total_loss=pred_loss + regularization_loss

  gradients = tape.gradient(total_loss, model.trainable_variables)
  optimizer.apply_gradients(zip(gradients, model.trainable_variables))

  # Update the metrics
  loss_metric.update_state(total_loss)
  accuracy_metric.update_state(labels, predictions)


for epoch in range(NUM_EPOCHS):
  # Reset the metrics
  loss_metric.reset_states()
  accuracy_metric.reset_states()

  for inputs, labels in train_data:
    train_step(inputs, labels)

  # Get the metric results
  mean_loss = loss_metric.result()
  mean_accuracy = accuracy_metric.result()

  print('Epoch: ', epoch)
  print('  loss:     {:.3f}'.format(mean_loss))
  print('  accuracy: {:.3f}'.format(mean_accuracy))
```

<a id="keras_metric_names"></a>

### Keras metric names

In TensorFlow 2.0, Keras models are more consistent about handling metric names. Now when you pass a string in the list of metrics, that _exact_ string is used as the metric's `name`. These names are visible in the history object returned by `model.fit` and in the logs passed to `keras.callbacks`, and each is set to the string you passed in the metric list.

```
model.compile(
    optimizer = tf.keras.optimizers.Adam(0.001),
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics = ['acc', 'accuracy', tf.keras.metrics.SparseCategoricalAccuracy(name="my_accuracy")])
history = model.fit(train_data)

history.history.keys()
```

This differs from previous versions, where passing `metrics=["accuracy"]` would result in `dict_keys(['loss', 'acc'])`.

### Keras optimizers

The optimizers in `v1.train`, like `v1.train.AdamOptimizer` and `v1.train.GradientDescentOptimizer`, have equivalents in `tf.keras.optimizers`.

#### Convert `v1.train` to `keras.optimizers`

Here are things to keep in mind when converting your optimizers:

* Upgrading your optimizers [may make old checkpoints incompatible](#checkpoints).
* All epsilons now default to `1e-7` instead of `1e-8` (which is negligible in most use cases).
* `v1.train.GradientDescentOptimizer` can be directly replaced by `tf.keras.optimizers.SGD`.
* `v1.train.MomentumOptimizer` can be directly replaced by the `SGD` optimizer using the momentum argument: `tf.keras.optimizers.SGD(..., momentum=...)`.
* `v1.train.AdamOptimizer` can be converted to use `tf.keras.optimizers.Adam`. The `beta1` and `beta2` arguments have been renamed to `beta_1` and `beta_2`.
* `v1.train.RMSPropOptimizer` can be converted to `tf.keras.optimizers.RMSprop`. The `decay` argument has been renamed to `rho`.
* `v1.train.AdadeltaOptimizer` can be converted directly to `tf.keras.optimizers.Adadelta`.
* `tf.train.AdagradOptimizer` can be converted directly to `tf.keras.optimizers.Adagrad`.
* `tf.train.FtrlOptimizer` can be converted directly to `tf.keras.optimizers.Ftrl`. The `accum_name` and `linear_name` arguments have been removed.
* `tf.contrib.AdamaxOptimizer` and `tf.contrib.NadamOptimizer` can be converted directly to `tf.keras.optimizers.Adamax` and `tf.keras.optimizers.Nadam`. The `beta1` and `beta2` arguments have been renamed to `beta_1` and `beta_2`.

#### New defaults for some `tf.keras.optimizers`

<a id="keras_optimizer_lr"></a>

Warning: If you see a change in convergence behavior for your models, check the default learning rates.

There are no changes for `optimizers.SGD`, `optimizers.Adam`, or `optimizers.RMSprop`. The following default learning rates have changed:

* `optimizers.Adagrad` from 0.01 to 0.001
* `optimizers.Adadelta` from 1.0 to 0.001
* `optimizers.Adamax` from 0.002 to 0.001
* `optimizers.Nadam` from 0.002 to 0.001

### TensorBoard

TensorFlow 2 includes significant changes to the `tf.summary` API used to write summary data for visualization in TensorBoard. For a general introduction to the new `tf.summary`, there are [several tutorials available](https://www.tensorflow.org/tensorboard/get_started) that use the TF 2 API. This includes a [TensorBoard TF 2 Migration Guide](https://www.tensorflow.org/tensorboard/migrate).

## Saving & Loading

<a id="checkpoints"></a>

### Checkpoint compatibility

TensorFlow 2.0 uses [object-based checkpoints](checkpoint.ipynb). Old-style name-based checkpoints can still be loaded, if you're careful. The code conversion process may result in variable name changes, but there are workarounds.

The simplest approach is to line up the names of the new model with the names in the checkpoint:

* Variables still all have a `name` argument you can set.
* Keras models also take a `name` argument, which they set as the prefix for their variables.
* The `v1.name_scope` function can be used to set variable name prefixes. This is very different from `tf.variable_scope`. It only affects names, and doesn't track variables & reuse.

If that does not work for your use-case, try the `v1.train.init_from_checkpoint` function. It takes an `assignment_map` argument, which specifies the mapping from old names to new names.

Note: Unlike object-based checkpoints, which can [defer loading](checkpoint.ipynb#loading_mechanics), name-based checkpoints require that all variables be built when the function is called. Some models defer building variables until you call `build` or run the model on a batch of data.

The [TensorFlow Estimator repository](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py) includes a [conversion tool](#checkpoint_converter) to upgrade the checkpoints for premade estimators from TensorFlow 1.X to 2.0. It may serve as an example of how to build a tool for a similar use-case.

### Saved models compatibility

There are no significant compatibility concerns for saved models.

* TensorFlow 1.x saved_models work in TensorFlow 2.x.
* TensorFlow 2.x saved_models work in TensorFlow 1.x, if all the ops are supported.

### A Graph.pb or Graph.pbtxt

There is no straightforward way to upgrade a raw `Graph.pb` file to TensorFlow 2.0.
Your best bet is to upgrade the code that generated the file. But, if you have a "Frozen graph" (a `tf.Graph` where the variables have been turned into constants), then it is possible to convert this to a [`concrete_function`](https://tensorflow.org/guide/concrete_function) using `v1.wrap_function`: ``` def wrap_frozen_graph(graph_def, inputs, outputs): def _imports_graph_def(): tf.compat.v1.import_graph_def(graph_def, name="") wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, []) import_graph = wrapped_import.graph return wrapped_import.prune( tf.nest.map_structure(import_graph.as_graph_element, inputs), tf.nest.map_structure(import_graph.as_graph_element, outputs)) ``` For example, here is a frozed graph for Inception v1, from 2016: ``` path = tf.keras.utils.get_file( 'inception_v1_2016_08_28_frozen.pb', 'http://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz', untar=True) ``` Load the `tf.GraphDef`: ``` graph_def = tf.compat.v1.GraphDef() loaded = graph_def.ParseFromString(open(path,'rb').read()) ``` Wrap it into a `concrete_function`: ``` inception_func = wrap_frozen_graph( graph_def, inputs='input:0', outputs='InceptionV1/InceptionV1/Mixed_3b/Branch_1/Conv2d_0a_1x1/Relu:0') ``` Pass it a tensor as input: ``` input_img = tf.ones([1,224,224,3], dtype=tf.float32) inception_func(input_img).shape ``` ## Estimators ### Training with Estimators Estimators are supported in TensorFlow 2.0. When you use estimators, you can use `input_fn()`, `tf.estimator.TrainSpec`, and `tf.estimator.EvalSpec` from TensorFlow 1.x. Here is an example using `input_fn` with train and evaluate specs. #### Creating the input_fn and train/eval specs ``` # Define the estimator's input_fn def input_fn(): datasets, info = tfds.load(name='mnist', with_info=True, as_supervised=True) mnist_train, mnist_test = datasets['train'], datasets['test'] BUFFER_SIZE = 10000 BATCH_SIZE = 64 def scale(image, label): image = tf.cast(image, tf.float32) image /= 255 return image, label[..., tf.newaxis] train_data = mnist_train.map(scale).shuffle(BUFFER_SIZE).batch(BATCH_SIZE) return train_data.repeat() # Define train & eval specs train_spec = tf.estimator.TrainSpec(input_fn=input_fn, max_steps=STEPS_PER_EPOCH * NUM_EPOCHS) eval_spec = tf.estimator.EvalSpec(input_fn=input_fn, steps=STEPS_PER_EPOCH) ``` ### Using a Keras model definition There are some differences in how to construct your estimators in TensorFlow 2.0. We recommend that you define your model using Keras, then use the `tf.keras.estimator.model_to_estimator` utility to turn your model into an estimator. The code below shows how to use this utility when creating and training an estimator. 
``` def make_model(): return tf.keras.Sequential([ tf.keras.layers.Conv2D(32, 3, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02), input_shape=(28, 28, 1)), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Flatten(), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10) ]) model = make_model() model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) estimator = tf.keras.estimator.model_to_estimator( keras_model = model ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` Note: We do not support creating weighted metrics in Keras and converting them to weighted metrics in the Estimator API using `model_to_estimator` You will have to create these metrics directly on the estimator spec using the `add_metrics` function. ### Using a custom `model_fn` If you have an existing custom estimator `model_fn` that you need to maintain, you can convert your `model_fn` to use a Keras model. However, for compatibility reasons, a custom `model_fn` will still run in 1.x-style graph mode. This means there is no eager execution and no automatic control dependencies. <a name="minimal_changes"></a> #### Custom model_fn with minimal changes To make your custom `model_fn` work in TF 2.0, if you prefer minimal changes to the existing code, `tf.compat.v1` symbols such as `optimizers` and `metrics` can be used. Using a Keras models in a custom `model_fn` is similar to using it in a custom training loop: * Set the `training` phase appropriately, based on the `mode` argument. * Explicitly pass the model's `trainable_variables` to the optimizer. But there are important differences, relative to a [custom loop](#custom_loop): * Instead of using `Model.losses`, extract the losses using `Model.get_losses_for`. * Extract the model's updates using `Model.get_updates_for`. Note: "Updates" are changes that need to be applied to a model after each batch. For example, the moving averages of the mean and variance in a `layers.BatchNormalization` layer. The following code creates an estimator from a custom `model_fn`, illustrating all of these concerns. 
``` def my_model_fn(features, labels, mode): model = make_model() optimizer = tf.compat.v1.train.AdamOptimizer() loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) training = (mode == tf.estimator.ModeKeys.TRAIN) predictions = model(features, training=training) if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) reg_losses = model.get_losses_for(None) + model.get_losses_for(features) total_loss=loss_fn(labels, predictions) + tf.math.add_n(reg_losses) accuracy = tf.compat.v1.metrics.accuracy(labels=labels, predictions=tf.math.argmax(predictions, axis=1), name='acc_op') update_ops = model.get_updates_for(None) + model.get_updates_for(features) minimize_op = optimizer.minimize( total_loss, var_list=model.trainable_variables, global_step=tf.compat.v1.train.get_or_create_global_step()) train_op = tf.group(minimize_op, update_ops) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=total_loss, train_op=train_op, eval_metric_ops={'accuracy': accuracy}) # Create the Estimator & Train estimator = tf.estimator.Estimator(model_fn=my_model_fn) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` #### Custom `model_fn` with TF 2.0 symbols If you want to get rid of all TF 1.x symbols and upgrade your custom `model_fn` to native TF 2.0, you need to update the optimizer and metrics to `tf.keras.optimizers` and `tf.keras.metrics`. In the custom `model_fn`, besides the above [changes](#minimal_changes), more upgrades need to be made: * Use [`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers) instead of `v1.train.Optimizer`. * Explicitly pass the model's `trainable_variables` to the `tf.keras.optimizers`. * To compute the `train_op/minimize_op`, * Use `Optimizer.get_updates()` if the loss is scalar loss `Tensor`(not a callable). The first element in the returned list is the desired `train_op/minimize_op`. * If the loss is a callable (such as a function), use `Optimizer.minimize()` to get the `train_op/minimize_op`. * Use [`tf.keras.metrics`](https://www.tensorflow.org/api_docs/python/tf/keras/metrics) instead of `tf.compat.v1.metrics` for evaluation. For the above example of `my_model_fn`, the migrated code with 2.0 symbols is shown as: ``` def my_model_fn(features, labels, mode): model = make_model() training = (mode == tf.estimator.ModeKeys.TRAIN) loss_obj = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) predictions = model(features, training=training) # Get both the unconditional losses (the None part) # and the input-conditional losses (the features part). reg_losses = model.get_losses_for(None) + model.get_losses_for(features) total_loss=loss_obj(labels, predictions) + tf.math.add_n(reg_losses) # Upgrade to tf.keras.metrics. accuracy_obj = tf.keras.metrics.Accuracy(name='acc_obj') accuracy = accuracy_obj.update_state( y_true=labels, y_pred=tf.math.argmax(predictions, axis=1)) train_op = None if training: # Upgrade to tf.keras.optimizers. optimizer = tf.keras.optimizers.Adam() # Manually assign tf.compat.v1.global_step variable to optimizer.iterations # to make tf.compat.v1.train.global_step increased correctly. # This assignment is a must for any `tf.train.SessionRunHook` specified in # estimator, as SessionRunHooks rely on global step. optimizer.iterations = tf.compat.v1.train.get_or_create_global_step() # Get both the unconditional updates (the None part) # and the input-conditional updates (the features part). 
update_ops = model.get_updates_for(None) + model.get_updates_for(features) # Compute the minimize_op. minimize_op = optimizer.get_updates( total_loss, model.trainable_variables)[0] train_op = tf.group(minimize_op, *update_ops) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=total_loss, train_op=train_op, eval_metric_ops={'Accuracy': accuracy_obj}) # Create the Estimator & Train. estimator = tf.estimator.Estimator(model_fn=my_model_fn) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) ``` ### Premade Estimators [Premade Estimators](https://www.tensorflow.org/guide/premade_estimators) in the family of `tf.estimator.DNN*`, `tf.estimator.Linear*` and `tf.estimator.DNNLinearCombined*` are still supported in the TensorFlow 2.0 API, however, some arguments have changed: 1. `input_layer_partitioner`: Removed in 2.0. 2. `loss_reduction`: Updated to `tf.keras.losses.Reduction` instead of `tf.compat.v1.losses.Reduction`. Its default value is also changed to `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` from `tf.compat.v1.losses.Reduction.SUM`. 3. `optimizer`, `dnn_optimizer` and `linear_optimizer`: this arg has been updated to `tf.keras.optimizers` instead of the `tf.compat.v1.train.Optimizer`. To migrate the above changes: 1. No migration is needed for `input_layer_partitioner` since [`Distribution Strategy`](https://www.tensorflow.org/guide/distributed_training) will handle it automatically in TF 2.0. 2. For `loss_reduction`, check [`tf.keras.losses.Reduction`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/losses/Reduction) for the supported options. 3. For `optimizer` args, if you do not pass in an `optimizer`, `dnn_optimizer` or `linear_optimizer` arg, or if you specify the `optimizer` arg as a `string` in your code, you don't need to change anything. `tf.keras.optimizers` is used by default. Otherwise, you need to update it from `tf.compat.v1.train.Optimizer` to its corresponding [`tf.keras.optimizers`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/optimizers) #### Checkpoint Converter <a id="checkpoint_converter"></a> The migration to `keras.optimizers` will break checkpoints saved using TF 1.x, as `tf.keras.optimizers` generates a different set of variables to be saved in checkpoints. To make old checkpoint reusable after your migration to TF 2.0, try the [checkpoint converter tool](https://github.com/tensorflow/estimator/blob/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py). ``` ! curl -O https://raw.githubusercontent.com/tensorflow/estimator/master/tensorflow_estimator/python/estimator/tools/checkpoint_converter.py ``` The tool has builtin help: ``` ! python checkpoint_converter.py -h ``` <a id="tensorshape"></a> ## TensorShape This class was simplified to hold `int`s, instead of `tf.compat.v1.Dimension` objects. So there is no need to call `.value()` to get an `int`. Individual `tf.compat.v1.Dimension` objects are still accessible from `tf.TensorShape.dims`. The following demonstrate the differences between TensorFlow 1.x and TensorFlow 2.0. 
``` # Create a shape and choose an index i = 0 shape = tf.TensorShape([16, None, 256]) shape ``` If you had this in TF 1.x: ```python value = shape[i].value ``` Then do this in TF 2.0: ``` value = shape[i] value ``` If you had this in TF 1.x: ```python for dim in shape: value = dim.value print(value) ``` Then do this in TF 2.0: ``` for value in shape: print(value) ``` If you had this in TF 1.x (Or used any other dimension method): ```python dim = shape[i] dim.assert_is_compatible_with(other_dim) ``` Then do this in TF 2.0: ``` other_dim = 16 Dimension = tf.compat.v1.Dimension if shape.rank is None: dim = Dimension(None) else: dim = shape.dims[i] dim.is_compatible_with(other_dim) # or any other dimension method shape = tf.TensorShape(None) if shape: dim = shape.dims[i] dim.is_compatible_with(other_dim) # or any other dimension method ``` The boolean value of a `tf.TensorShape` is `True` if the rank is known, `False` otherwise. ``` print(bool(tf.TensorShape([]))) # Scalar print(bool(tf.TensorShape([0]))) # 0-length vector print(bool(tf.TensorShape([1]))) # 1-length vector print(bool(tf.TensorShape([None]))) # Unknown-length vector print(bool(tf.TensorShape([1, 10, 100]))) # 3D tensor print(bool(tf.TensorShape([None, None, None]))) # 3D tensor with no known dimensions print() print(bool(tf.TensorShape(None))) # A tensor with unknown rank. ``` ## Other Changes * Remove `tf.colocate_with`: TensorFlow's device placement algorithms have improved significantly. This should no longer be necessary. If removing it causes a performance degredation [please file a bug](https://github.com/tensorflow/tensorflow/issues). * Replace `v1.ConfigProto` usage with the equivalent functions from `tf.config`. ## Conclusions The overall process is: 1. Run the upgrade script. 2. Remove contrib symbols. 3. Switch your models to an object oriented style (Keras). 4. Use `tf.keras` or `tf.estimator` training and evaluation loops where you can. 5. Otherwise, use custom loops, but be sure to avoid sessions & collections. It takes a little work to convert code to idiomatic TensorFlow 2.0, but every change results in: * Fewer lines of code. * Increased clarity and simplicity. * Easier debugging.
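As a footnote to the `v1.ConfigProto` item under "Other Changes" above, the sketch below shows one way the most common session options map onto `tf.config`. It is a minimal, illustrative sketch: the memory-growth setting and thread counts are assumptions to adapt to your own environment, not part of the original guide.

```
import tensorflow as tf

# TF 1.x style, shown for reference only:
#   config = tf.compat.v1.ConfigProto()
#   config.gpu_options.allow_growth = True
#   config.intra_op_parallelism_threads = 4
#   sess = tf.compat.v1.Session(config=config)

# TF 2.x equivalent, applied once at program startup:
for gpu in tf.config.list_physical_devices('GPU'):
    # Allocate GPU memory as needed instead of reserving it all up front.
    tf.config.experimental.set_memory_growth(gpu, True)

# Thread-level parallelism settings that used to live on ConfigProto:
tf.config.threading.set_intra_op_parallelism_threads(4)
tf.config.threading.set_inter_op_parallelism_threads(2)
```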
**Principal Component Analysis (PCA)** is widely used in Machine Learning pipelines as a means to compress data or help visualization. This notebook aims to walk through the basic idea of PCA and build the algorithm from scratch in Python.

Before diving directly into PCA, let's first talk about several important concepts - the **"eigenvectors & eigenvalues"** and **"Singular Value Decomposition (SVD)"**.

An **eigenvector** of a square matrix is a nonzero column vector that satisfies:
$$Av=\lambda v$$
Where $A$ is an $[n\times n]$ square matrix, $v$ is an $[n\times 1]$ **eigenvector**, and $\lambda$ is a scalar value which is also known as the **eigenvalue**.

If $A$ is both a square and symmetric matrix (like a typical variance-covariance matrix), then we can write $A$ as:
$$A=U\Sigma U^T$$
Here the columns of matrix $U$ are eigenvectors of matrix $A$, and $\Sigma$ is a diagonal matrix containing the corresponding eigenvalues. This is also a special case of the well-known theorem **"Singular Value Decomposition" (SVD)**, where a rectangular matrix $M$ can be expressed as:
$$M=U\Sigma V^T$$

#### With SVD, we can calculate the eigenvectors and eigenvalues of a square & symmetric matrix. This will be the key to solving PCA.

The goal of PCA is to find a lower-dimensional surface that maximizes the total variance of the projection, or equivalently, minimizes the projection error. The entire algorithm can be summarized as follows:

1) Given a data matrix **$X$** with **$m$** rows (number of records) and **$n$** columns (number of dimensions), we should first subtract the column mean for each dimension.

2) Then we can calculate the variance-covariance matrix using the equation ($X$ here already has zero mean for each column from step 1):
$$cov=\frac{1}{m}X^TX$$

3) We can then use SVD to compute the eigenvectors and corresponding eigenvalues of the above covariance matrix "$cov$":
$$cov=U\Sigma U^T$$

4) If our target dimension is $p$ ($p<n$), then we will select the first $p$ columns of the $U$ matrix and get matrix $U_{reduce}$.

5) To get the compressed data set, we can do the transformation as below:
$$X_{reduce}=XU_{reduce}$$

6) To approximate the original data set given the compressed data, we can use:
$$X\approx X_{reduce}U_{reduce}^T$$
Note this works because the columns of $U$ are orthonormal unit vectors, so $U_{reduce}^TU_{reduce}=I$.

#### In practice, it is also important to choose the proper number of principal components.

For data compression, we want to retain as much variation in the original data as possible while reducing the dimension. Luckily, with SVD, we can get an estimate of the retained variation by:
$$\%\ \text{of variance retained} = \frac{\sum_{i=1}^{p}S_{ii}}{\sum_{i=1}^{n}S_{ii}}$$
Where $S_{ii}$ is the $i$th diagonal element of the $\Sigma$ matrix, $p$ is the number of reduced dimensions, and $n$ is the dimension of the original data.

#### For data visualization purposes, we usually choose 2 or 3 dimensions to plot the compressed data.

#### The following class PCA() implements the idea of principal component analysis.
``` import numpy as np class PCA(): def __init__(self, num_components): self.num_components = num_components self.U = None self.S = None def fit(self, X): # perform pca m = X.shape[0] X_mean = np.mean(X, axis=0) X -= X_mean cov = X.T.dot(X) * 1.0 / m self.U, self.S, _ = np.linalg.svd(cov) return self def project(self, X): # project data based on reduced dimension U_reduce = self.U[:, :self.num_components] X_reduce = X.dot(U_reduce) return X_reduce def inverse(self, X_reduce): # recover the original data based on the reduced form U_reduce = self.U[:, :self.num_components] X = X_reduce.dot(U_reduce.T) return X def explained_variance(self): # print the ratio of explained variance with the pca explained = np.sum(self.S[:self.num_components]) total = np.sum(self.S) return explained * 1.0 / total ``` ####Now we can use a demo data set to show dimensionality reduction and data visualization. We will use the Iris Data set as always. ``` from sklearn.datasets import load_iris iris = load_iris() X = iris['data'] y = iris['target'] print X.shape ``` We can find that the dimension of the original $X$ matrix is 4. We can then compress it to 2 using PCA technique with the **PCA()** class that we defined above. ``` pca = PCA(num_components=2) pca.fit(X) X_reduce = pca.project(X) print X_reduce.shape ``` Now that the data has been compressed, we can check the ratianed variance. ``` print "{:.2%}".format(pca.explained_variance()) ``` We have 97.76% of variance retained. This is okay for data visulization purposes. But if we used PCA in supervised learning pipelines, we might want to add more dimension to keep more than 99% of the variation from the original data. Finally, with the compressed dimension, we can plot to see the distribution of iris dataset. ``` %pylab inline pylab.rcParams['figure.figsize'] = (10, 6) from matplotlib import pyplot as plt for c, marker, class_num in zip(['green', 'r', 'cyan'], ['o', '^', 's'], np.unique(y)): plt.scatter(x=X_reduce[:, 0][y == class_num], y=X_reduce[:, 1][y == class_num], c=c, marker=marker, label="Class {}".format(class_num), alpha=0.7, s=30) plt.xlabel("Component 1") plt.ylabel("Component 2") plt.legend() plt.show() ``` From the above example, we can see that PCA can help us visualize data with more than 3 feature dimensions. The general use of PCA is for dimensionality reductions in Machine Learning Pipelines. It can speed up the learning process and save memory when running supervised and unsupervised algorithms on large dataset. However, it also throws away some information when reducing the feature dimension. Thus it is always beneficial to test whether using PCA on top of something else since it's pretty easy to set up.
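As a small, hypothetical extension of the retained-variance formula above, the helper below picks the number of components automatically using the `PCA()` class from this notebook. Note that `fit` centers its input in place, hence the `copy()`.

```
import numpy as np

def choose_num_components(X, variance_to_retain=0.99):
    # Fit a "full" PCA once, then take the smallest p whose cumulative
    # eigenvalue sum reaches the requested fraction of total variance.
    full = PCA(num_components=X.shape[1]).fit(X.copy())
    ratios = np.cumsum(full.S) / np.sum(full.S)
    # searchsorted finds the first index where the cumulative ratio reaches
    # the target; +1 converts that index into a component count.
    return int(np.searchsorted(ratios, variance_to_retain) + 1)

# For the iris data above, 2 components retain ~97.8% of the variance,
# so retaining 99% should require one more component:
# choose_num_components(X, 0.99)
```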
``` # Visualization of the KO+ChIP Gold Standard from: # Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells" # TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load # NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full # network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold" # You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by") # Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels # Change "SVG" to "canvas" to speed up layout operations # More info about jp_gene_viz and user interface instructions are available on Github: # https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb # directory containing gene expression data and network folder directory = "." # folder containing networks netPath = 'Networks' # network file name networkFile = 'ChIP_A17_KOall_ATh_bias25_TFmRNA_sp.tsv' # title for network figure netTitle = 'ChIP/ATAC(Th17)+KO+ATAC(Th), bias = 25_TFmRNA, TFA = TF mRNA' # name of gene expression file expressionFile = 'Th0_Th17_48hTh.txt' # column of gene expression file to color network nodes rnaSampleOfInt = 'Th17(48h)' # edge cutoff -- for Inferelator TRNs, corresponds to signed quantile (rank of edges in 15 TFs / gene models), # increase from 0 --> 1 to get more significant edges (e.g., .33 would correspond to edges only in 10 TFs / gene # models) edgeCutoff = .93 import sys if ".." not in sys.path: sys.path.append("..") from jp_gene_viz import dNetwork dNetwork.load_javascript_support() # from jp_gene_viz import multiple_network from jp_gene_viz import LExpression LExpression.load_javascript_support() # Load network linked to gene expression data L = LExpression.LinkedExpressionNetwork() L.show() # Load Network and Heatmap L.load_network(directory + '/' + netPath + '/' + networkFile) L.load_heatmap(directory + '/' + expressionFile) N = L.network N.set_title(netTitle) N.threshhold_slider.value = edgeCutoff N.apply_click(None) N.draw() # Add labels to nodes N.labels_button.value=True # Limit to TFs only, remove unconnected TFs, choose and set network layout N.restore_click() N.tf_only_click() N.connected_only_click() N.layout_dropdown.value = 'fruchterman_reingold' N.layout_click() # Interact with Heatmap # Limit genes in heatmap to network genes L.gene_click(None) # Z-score heatmap values L.expression.transform_dropdown.value = 'Z score' L.expression.apply_transform() # Choose a column in the heatmap (e.g., 48h Th17) to color nodes L.expression.col = rnaSampleOfInt L.condition_click(None) # Switch SVG layout to get line colors, then switch back to faster canvas mode N.force_svg(None) ```
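If you want to sanity-check the network file outside the widget, the pandas sketch below counts how many edges survive a given cutoff. The column layout of the `_sp.tsv` file is an assumption here (inspect `df.columns` for the real header); the snippet only illustrates what the `edgeCutoff` threshold corresponds to.

```
import pandas as pd

# Load the network edge list directly (tab-separated).
df = pd.read_csv(directory + '/' + netPath + '/' + networkFile, sep='\t')
print(df.columns.tolist())
print("Total edges: {}".format(len(df)))

# Assumption: the last column carries the signed rank/weight used for thresholding.
weight_col = df.columns[-1]
kept = df[df[weight_col].abs() >= edgeCutoff]
print("Edges with |{}| >= {}: {}".format(weight_col, edgeCutoff, len(kept)))
```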
[this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/polyglot) # Visualizing the Johns Hopkins COVID-19 time series data **This is a work in progress.** It doesn't work yet in [Binder](https://mybinder.org/v2/gh/dotnet/interactive/master?urlpath=lab) because it relies on HTTP communication between the kernel and the Jupyter frontend. Also, due to travel restrictions, you should run this at home on isolated compute. *And don't forget to wash your hands.* Since Johns Hopkins has put COVID-19 time series data on [GitHub](https://github.com/CSSEGISandData/COVID-19), let's take a look at it. We can download it using PowerShell: ``` #!pwsh Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" -OutFile "./Confirmed.csv" Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv" -OutFile "./Deaths.csv" Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv" -OutFile "./Recovered.csv" ``` It needs a little cleaning up: ``` using System.IO; using System.Text.RegularExpressions; Clean("Confirmed.csv"); Clean("Deaths.csv"); Clean("Recovered.csv"); void Clean(string filePath) { var raw = File.ReadAllText(filePath); var regex = new Regex("\\\"(.*?)\\\""); var cleaned = regex.Replace(raw, m => m.Value.Replace(",", " in ")); File.WriteAllText(filePath, cleaned); } "All cleaned up!" ``` Next, let's load it into a data frame. ``` #r "nuget:Microsoft.Data.Analysis,0.2.0" using Microsoft.Data.Analysis; var deaths = DataFrame.LoadCsv("./Deaths.csv"); var confirmed = DataFrame.LoadCsv("./Confirmed.csv"); var recovered = DataFrame.LoadCsv("./Recovered.csv"); var displayedValue = display("Processing data"); var offset = 4; var series = new List<object>(); for(var i = offset; i < deaths.Columns.Count; i++){ await Task.Delay(100); var date = deaths.Columns[i].Name; var deathFiltered = deaths[deaths.Columns[i].ElementwiseNotEquals(0)]; var confirmedFiltered = confirmed[confirmed.Columns[i].ElementwiseNotEquals(0)]; var recoveredFiltered = recovered[recovered.Columns[i].ElementwiseNotEquals(0)]; displayedValue.Update($"processing {date}"); series.Add(new { date = date, deathsSeries = new { latitude = deathFiltered["Lat"], longitude = deathFiltered["Long"], data = deathFiltered.Columns[i] }, confirmedSeries = new { latitude = confirmedFiltered["Lat"], longitude = confirmedFiltered["Long"], data = confirmedFiltered.Columns[i] }, recoveredSeries = new { latitude = recoveredFiltered["Lat"], longitude = recoveredFiltered["Long"], data = recoveredFiltered.Columns[i] } }); } displayedValue.Update("Ready."); ``` Because we've stored our data in top-level variables (`deathsSeries`, `confirmedSeries`, `recoveredSeries`, etc.) in the C# kernel, they're accessible from JavaScript by calling `interactive.csharp.getVariable`. The data will be returned as JSON and we can plot it using the library of our choice, pulled in using [RequireJS](https://requirejs.org/). We'll use [Plotly](https://plot.ly/). 
``` #!js notebookScope.plot = function (plotTarget) { let loadPlotly = getJsLoader({ context: "COVID", paths: { plotly: "https://cdn.plot.ly/plotly-latest.min" } }); loadPlotly(["plotly"], (Plotly) => { if (typeof (notebookScope.updateInterval) !== 'undefined') { clearInterval(notebookScope.updateInterval); } let index = 0; if (typeof (document.getElementById(plotTarget)) !== 'undefined') { interactive.csharp.getVariable("series") .then(series => { var { deathsSeries, confirmedSeries, recoveredSeries, date } = series[index]; var recovered = { name: "Recovered", type: "scattergeo", mode: "markers", geo: "geo", lat: recoveredSeries.latitude, lon: recoveredSeries.longitude, text: recoveredSeries.data, marker: { symbol: "square", color: "Green" } }; var deaths = { name: "Fatal", type: "scattergeo", geo: "geo2", mode: "markers", lat: deathsSeries.latitude, lon: deathsSeries.longitude, text: deathsSeries.data, marker: { symbol: "circle", color: "Black" } }; var confirmed = { name: "Total confirmed", type: "scattergeo", geo: "geo3", mode: "markers", lat: confirmedSeries.latitude, lon: confirmedSeries.longitude, text: confirmedSeries.data, marker: { symbol: "diamond", color: "#DC7633" } }; var traces = [recovered, deaths, confirmed]; var layout = { title: "COVID-19 cases (" + date + ")", grid: { columns: 3, rows: 1 }, geo: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 0 } }, geo2: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 1 } }, geo3: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 2 } } }; if (typeof (document.getElementById(plotTarget)) !== 'undefined') { Plotly.newPlot(plotTarget, traces, layout); } let updateCovidPlot = () => { if (typeof (document.getElementById(plotTarget)) !== 'undefined') { index++; if (index === series.length) { clearInterval(notebookScope.updateInterval); return; } var { deathsSeries, confirmedSeries, recoveredSeries, currentSeries, date } = series[index]; Plotly.animate("plotlyChartCovid", { data: [ { lat: recoveredSeries.latitude, lon: recoveredSeries.longitude, text: recoveredSeries.data }, { lat: deathsSeries.latitude, lon: deathsSeries.longitude, text: deathsSeries.data }, { lat: confirmedSeries.latitude, lon: confirmedSeries.longitude, text: confirmedSeries.data }], layout: { title: "COVID-19 " + date } }); } } notebookScope.updateInterval = setInterval(() => updateCovidPlot(), 250); }); } }); }; ``` Notice the `setInterval` call near the end of the previous cell. This rechecks the data in the kernel and updates the plot. Back on the kernel, we can now update the data so that the kernel can see it. Yes, this is a contrived example, and we're planning to support true streaming data, but it's a start. ``` #!html <div id="plotlyChartCovid"></div> #!js notebookScope.plot("plotlyChartCovid"); #!about ```
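As an aside for readers working outside .NET Interactive, the snippet below is a rough Python equivalent of the `Clean()` cell above. It assumes the three CSV files have already been downloaded to the working directory, and mirrors the same idea of replacing commas inside quoted fields so the files parse as plain CSV.

```
import re

def clean(file_path):
    # Mirror the C# regex: rewrite every quoted field, swapping "," for " in ".
    with open(file_path) as f:
        raw = f.read()
    cleaned = re.sub(r'"(.*?)"', lambda m: m.group(0).replace(",", " in "), raw)
    with open(file_path, "w") as f:
        f.write(cleaned)

for name in ["Confirmed.csv", "Deaths.csv", "Recovered.csv"]:
    clean("./" + name)
print("All cleaned up!")
```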
<a href="https://colab.research.google.com/github/ai-fast-track/icevision-gradio/blob/master/IceApp_pets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # IceVision Deployment App: PETS Dataset This example uses Faster RCNN trained weights using the [PETS dataset](https://airctic.github.io/icedata/pets/) About IceVision: - an Object-Detection Framework that connects to different libraries/frameworks such as Fastai, Pytorch Lightning, and Pytorch with more to come. - Features a Unified Data API with out-of-the-box support for common annotation formats (COCO, VOC, etc.) - Provides flexible model implementations with pluggable backbones ## Installing packages ``` !pip install icevision[inference] !pip install icedata !pip install gradio ``` ## Imports ``` from icevision.all import * import icedata import PIL, requests import torch from torchvision import transforms import gradio as gr ``` ## Loading trained model ``` class_map = icedata.pets.class_map() model = icedata.pets.trained_models.faster_rcnn_resnet50_fpn() ``` ## Defininig the predict() method ``` def predict( model, image, detection_threshold: float = 0.5, mask_threshold: float = 0.5 ): tfms_ = tfms.A.Adapter([tfms.A.Normalize()]) # Whenever you have images in memory (numpy arrays) you can use `Dataset.from_images` infer_ds = Dataset.from_images([image], tfms_) batch, samples = faster_rcnn.build_infer_batch(infer_ds) preds = faster_rcnn.predict( model=model, batch=batch, detection_threshold=detection_threshold ) return samples[0]["img"], preds[0] ``` ## Defining the `show_preds` method: called by `gr.Interface(fn=show_preds, ...)` ``` def show_preds(input_image, display_list, detection_threshold): display_label = ("Label" in display_list) display_bbox = ("BBox" in display_list) if detection_threshold==0: detection_threshold=0.5 img, pred = predict(model=model, image=input_image, detection_threshold=detection_threshold) # print(pred) img = draw_pred(img=img, pred=pred, class_map=class_map, denormalize_fn=denormalize_imagenet, display_label=display_label, display_bbox=display_bbox) img = PIL.Image.fromarray(img) # print("Output Image: ", img.size, type(img)) return img ``` ## Gradio User Interface ``` display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display") detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold") outputs = gr.outputs.Image(type="pil") gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox, detection_threshold_slider], outputs=outputs, title='IceApp - PETS') gr_interface.launch(inline=False, share=True, debug=True) ``` ## Enjoy! If you have any questions, please feel free to [join us](https://discord.gg/JDBeZYK)
<div class="alert alert-info" role="alert"> This tutorial contains a lot of bokeh plots, which may take a little while to load and render. </div> ``Element``s are the basic building blocks for any HoloViews visualization. These are the objects that can be composed together using the various [Container](Containers.ipynb) types. Here in this overview, we show an example of how to build each of these ``Element``s directly out of Python or Numpy data structures. An even more powerful way to use them is by collecting similar ``Element``s into a HoloMap, as described in [Exploring Data](Exploring_Data.ipynb), so that you can explore, select, slice, and animate them flexibly, but here we focus on having small, self-contained examples. Complete reference material for each type can be accessed using our [documentation system](Introduction.ipynb#ParamDoc). This tutorial uses the default matplotlib plotting backend; see the [Bokeh Elements](Bokeh_Elements.ipynb) tutorial for the corresponding bokeh plots. ## Element types This class hierarchy shows each of the ``Element`` types. Each type is named for the default or expected way that the underlying data can be visualized. E.g., if your data is wrapped into a ``Surface`` object, it will display as a 3D surface by default, whereas the same data embedded in an ``Image`` object will display as a 2D raster image. But please note that the specification and implementation for each ``Element`` type does not actually include *any* such visualization -- the name merely serves as a semantic indication that you ordinarily think of the data as being laid out visually in that way. The actual plotting is done by a separate plotting subsystem, while the objects themselves focus on storing your data and the metadata needed to describe and use it. This separation of data and visualization is described in detail in the [Options tutorial](Options.ipynb), which describes all about how to find out the options available for each ``Element`` type and change them if necessary, from either Python or IPython Notebook. When using this tutorial interactively in an IPython/Jupyter notebook session, we suggest adding ``%output info=True`` after the call to ``notebook_extension`` below, which will pop up a detailed list and explanation of the available options for visualizing each ``Element`` type, after that notebook cell is executed. Then, to find out all the options for any of these ``Element`` types, just press ``<Shift-Enter>`` on the corresponding cell in the live notebook. The types available: <dl class="dl-horizontal"> <dt><a href="#Element"><code>Element</code></a></dt><dd>The base class of all <code>Elements</code>.</dd> </dl> ### <a id='ChartIndex'></a> <a href="#Chart Elements"><code>Charts:</code></a> <dl class="dl-horizontal"> <dt><a href="#Curve"><code>Curve</code></a></dt><dd>A continuous relation between a dependent and an independent variable. <font color='green'>&#x2713;</font></dd> <dt><a href="#ErrorBars"><code>ErrorBars</code></a></dt><dd>A collection of x-/y-coordinates with associated error magnitudes. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spread"><code>Spread</code></a></dt><dd>Continuous version of ErrorBars. <font color='green'>&#x2713;</font></dd> <dt><a href="#Area"><code>Area</code></a></dt><dd>Area under the curve or between curves. <font color='green'>&#x2713;</font></dd> <dt><a href="#Bars"><code>Bars</code></a></dt><dd>Data collected and binned into categories. 
<font color='green'>&#x2713;</font></dd> <dt><a href="#Histogram"><code>Histogram</code></a></dt><dd>Data collected and binned in a continuous space using specified bin edges. <font color='green'>&#x2713;</font></dd> <dt><a href="#BoxWhisker"><code>BoxWhisker</code></a></dt><dd>Distributions of data varying by 0-N key dimensions.<font color='green'>&#x2713;</font></dd> <dt><a href="#Scatter"><code>Scatter</code></a></dt><dd>Discontinuous collection of points indexed over a single dimension. <font color='green'>&#x2713;</font></dd> <dt><a href="#Points"><code>Points</code></a></dt><dd>Discontinuous collection of points indexed over two dimensions. <font color='green'>&#x2713;</font></dd> <dt><a href="#VectorField"><code>VectorField</code></a></dt><dd>Cyclic variable (and optional auxiliary data) distributed over two-dimensional space. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spikes"><code>Spikes</code></a></dt><dd>A collection of horizontal or vertical lines at various locations with fixed height (1D) or variable height (2D). <font color='green'>&#x2713;</font></dd> <dt><a href="#SideHistogram"><code>SideHistogram</code></a></dt><dd>Histogram binning data contained by some other <code>Element</code>. <font color='green'>&#x2713;</font></dd> </dl> ### <a id='Chart3DIndex'></a> <a href="#Chart3D Elements"><code>Chart3D Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#Surface"><code>Surface</code></a></dt><dd>Continuous collection of points in a three-dimensional space. <font color='red'>&#x2717;</font></dd> <dt><a href="#Scatter3D"><code>Scatter3D</code></a></dt><dd>Discontinuous collection of points in a three-dimensional space. <font color='red'>&#x2717;</font></dd> <dt><a href="#TriSurface"><code>TriSurface</code></a></dt><dd>Continuous but irregular collection of points interpolated into a Surface using Delaunay triangulation. <font color='red'>&#x2717;</font></dd> </dl> ### <a id='RasterIndex'></a> <a href="#Raster Elements"><code>Raster Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#Raster"><code>Raster</code></a></dt><dd>The base class of all rasters containing two-dimensional arrays. <font color='green'>&#x2713;</font></dd> <dt><a href="#QuadMesh"><code>QuadMesh</code></a></dt><dd>Raster type specifying 2D bins with two-dimensional array of values. <font color='green'>&#x2713;</font></dd> <dt><a href="#HeatMap"><code>HeatMap</code></a></dt><dd>Raster displaying sparse, discontinuous data collected in a two-dimensional space. <font color='green'>&#x2713;</font></dd> <dt><a href="#Image"><code>Image</code></a></dt><dd>Raster containing a two-dimensional array covering a continuous space (sliceable). <font color='green'>&#x2713;</font></dd> <dt><a href="#RGB"><code>RGB</code></a></dt><dd>Image with 3 (R,G,B) or 4 (R,G,B,Alpha) color channels. <font color='green'>&#x2713;</font></dd> <dt><a href="#HSV"><code>HSV</code></a></dt><dd>Image with 3 (Hue, Saturation, Value) or 4 channels. <font color='green'>&#x2713;</font></dd> </dl> ### <a id='TabularIndex'></a> <a href="#Tabular Elements"><code>Tabular Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#ItemTable"><code>ItemTable</code></a></dt><dd>Ordered collection of key-value pairs (ordered dictionary). <font color='green'>&#x2713;</font></dd> <dt><a href="#Table"><code>Table</code></a></dt><dd>Collection of arbitrary data with arbitrary key and value dimensions. 
<font color='green'>&#x2713;</font></dd> </dl> ### <a id='AnnotationIndex'></a> <a href="#Annotation Elements"><code>Annotations:</code></a> <dl class="dl-horizontal"> <dt><a href="#VLine"><code>VLine</code></a></dt><dd>Vertical line annotation. <font color='green'>&#x2713;</font></dd> <dt><a href="#HLine"><code>HLine</code></a></dt><dd>Horizontal line annotation. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spline"><code>Spline</code></a></dt><dd>Bezier spline (arbitrary curves). <font color='green'>&#x2713;</font></dd> <dt><a href="#Text"><code>Text</code></a></dt><dd>Text annotation on an <code>Element</code>. <font color='green'>&#x2713;</font></dd> <dt><a href="#Arrow"><code>Arrow</code></a></dt><dd>Arrow on an <code>Element</code> with optional text label. <font color='red'>&#x2717;</font></dd> </dl> ### <a id='PathIndex'></a> <a href="#Path Elements"><code>Paths:</code></a> <dl class="dl-horizontal"> <dt><a href="#Path"><code>Path</code></a></dt><dd>Collection of paths. <font color='green'>&#x2713;</font></dd> <dt><a href="#Contours"><code>Contours</code></a></dt><dd>Collection of paths, each with an associated value. <font color='green'>&#x2713;</font></dd> <dt><a href="#Polygons"><code>Polygons</code></a></dt><dd>Collection of filled, closed paths with an associated value. <font color='green'>&#x2713;</font></dd> <dt><a href="#Bounds"><code>Bounds</code></a></dt><dd>Box specified by corner positions. <font color='green'>&#x2713;</font></dd> <dt><a href="#Box"><code>Box</code></a></dt><dd>Box specified by center position, radius, and aspect ratio. <font color='green'>&#x2713;</font></dd> <dt><a href="#Ellipse"><code>Ellipse</code></a></dt><dd>Ellipse specified by center position, radius, and aspect ratio. <font color='green'>&#x2713;</font></dd> </dl> ## ``Element`` <a id='Element'></a> **The basic or fundamental types of data that can be visualized.** ``Element`` is the base class for all the other HoloViews objects shown in this section. All ``Element`` objects accept ``data`` as the first argument to define the contents of that element. In addition to its implicit type, each element object has a ``group`` string defining its category, and a ``label`` naming this particular item, as described in the [Introduction](Introduction.ipynb#value). When rich display is off, or if no visualization has been defined for that type of ``Element``, the ``Element`` is presented with a default textual representation: ``` import holoviews as hv hv.notebook_extension(bokeh=True) hv.Element(None, group='Value', label='Label') ``` In addition, ``Element`` has key dimensions (``kdims``), value dimensions (``vdims``), and constant dimensions (``cdims``) to describe the semantics of indexing within the ``Element``, the semantics of the underlying data contained by the ``Element``, and any constant parameters associated with the object, respectively. Dimensions are described in the [Introduction](Introduction.ipynb). The remaining ``Element`` types each have a rich, graphical display as shown below. ## ``Chart`` Elements <a id='Chart Elements'></a> **Visualization of a dependent variable against an independent variable** The first large class of ``Elements`` is the ``Chart`` elements. These objects have at least one fully indexable, sliceable key dimension (typically the *x* axis in a plot), and usually have one or more value dimension(s) (often the *y* axis) that may or may not be indexable depending on the implementation. 
The key dimensions are normally the parameter settings for which things are measured, and the value dimensions are the data points recorded at those settings. As described in the [Columnar Data tutorial](Columnar_Data.ipynb), the data can be stored in several different internal formats, such as a NumPy array of shape (N, D), where N is the number of samples and D the number of dimensions. A somewhat larger list of formats can be accepted, including any of the supported internal formats, or 1. As a list of length N containing tuples of length D. 2. As a tuple of length D containing iterables of length N. ### ``Curve`` <a id='Curve'></a> ``` import numpy as np points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] hv.Curve(points) ``` A ``Curve`` is a set of values provided for some set of keys from a [continuously indexable 1D coordinate system](Continuous_Coordinates.ipynb), where the plotted values will be connected up because they are assumed to be samples from a continuous relation. ### ``ErrorBars`` <a id='ErrorBars'></a> ``` np.random.seed(7) points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2) for i in np.linspace(0, 100, 11)] hv.Curve(points) * hv.ErrorBars(errors) ``` ``ErrorBars`` is a set of x-/y-coordinates with associated error values. Error values may be either symmetric or asymmetric, and thus can be supplied as an Nx3 or Nx4 array (or any of the alternative constructors Chart Elements allow). ``` %%opts ErrorBars points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2, np.random.rand()/4) for i in np.linspace(0, 100, 11)] hv.Curve(points) * hv.ErrorBars(errors, vdims=['y', 'yerrneg', 'yerrpos']) ``` ### ``Area`` <a id='Area'></a> ** *Area under the curve* ** By default the Area Element draws just the area under the curve, i.e. the region between the curve and the origin. ``` xs = np.linspace(0, np.pi*4, 40) hv.Area((xs, np.sin(xs))) ``` ** * Area between curves * ** When supplied a second value dimension the area is defined as the area between two curves. ``` X = np.linspace(0,3,200) Y = X**2 + 3 Y2 = np.exp(X) + 2 Y3 = np.cos(X) hv.Area((X, Y, Y2), vdims=['y', 'y2']) * hv.Area((X, Y, Y3), vdims=['y', 'y3']) ``` #### Stacked areas Areas are also useful to visualize multiple variables changing over time, but in order to be able to compare them the areas need to be stacked. Therefore the ``operation`` module provides the ``stack_area`` operation which makes it trivial to stack multiple Area in an (Nd)Overlay. In this example we will generate a set of 5 arrays representing percentages and create an Overlay of them. Then we simply call the ``stack_area`` operation on the Overlay to get a stacked area chart. ``` values = np.random.rand(5, 20) percentages = (values/values.sum(axis=0)).T*100 overlay = hv.Overlay([hv.Area(percentages[:, i], vdims=[hv.Dimension('value', unit='%')]) for i in range(5)]) overlay + hv.Area.stack(overlay) ``` ### ``Spread`` <a id='Spread'></a> ``Spread`` elements have the same data format as the ``ErrorBars`` element, namely x- and y-values with associated symmetric or asymmetric errors, but are interpreted as samples from a continuous distribution (just as ``Curve`` is the continuous version of ``Scatter``). These are often paired with an overlaid ``Curve`` to show both the mean (as a curve) and the spread of values; see the [Columnar Data tutorial](Columnar_Data.ipynb) for examples. 
##### Symmetric ``` np.random.seed(42) xs = np.linspace(0, np.pi*2, 20) err = 0.2+np.random.rand(len(xs)) hv.Spread((xs, np.sin(xs), err)) ``` ##### Asymmetric ``` %%opts Spread (fill_color='indianred' fill_alpha=1) xs = np.linspace(0, np.pi*2, 20) hv.Spread((xs, np.sin(xs), 0.1+np.random.rand(len(xs)), 0.1+np.random.rand(len(xs))), vdims=['y', 'yerrneg', 'yerrpos']) ``` ### ``Bars`` <a id='Bars'></a> ``` data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)] bars = hv.Bars(data, kdims=[hv.Dimension('Car occupants', values='initial')], vdims=['Count']) bars + bars[['one', 'two', 'three']] ``` ``Bars`` is an ``NdElement`` type, so by default it is sorted. To preserve the initial ordering specify the ``Dimension`` with values set to 'initial', or you can supply an explicit list of valid dimension keys. ``Bars`` support up to two key dimensions which can be laid by ``'group'`` and ``'stack'`` dimensions. By default the key dimensions are mapped onto the first, second ``Dimension`` of the ``Bars`` object, but this behavior can be overridden via the ``group_index`` and ``stack_index`` options. ``` %%opts Bars [group_index=0 stack_index=1] from itertools import product np.random.seed(3) groups, stacks = ['A', 'B'], ['a', 'b'] keys = product(groups, stacks) hv.Bars([k+(np.random.rand()*100.,) for k in keys], kdims=['Group', 'Stack'], vdims=['Count']) ``` ### ``BoxWhisker`` <a id='BoxWhisker'></a> The ``BoxWhisker`` Element allows representing distributions of data varying by 0-N key dimensions. To represent the distribution of a single variable, we can create a BoxWhisker Element with no key dimensions and a single value dimension: ``` hv.BoxWhisker(np.random.randn(200), kdims=[], vdims=['Value']) ``` BoxWhisker Elements support any number of dimensions and may also be rotated. To style the boxes and whiskers, supply ``boxprops``, ``whiskerprops``, and ``flierprops``. ``` %%opts BoxWhisker [invert_axes=True width=600] groups = [chr(65+g) for g in np.random.randint(0, 3, 200)] hv.BoxWhisker((groups, np.random.randint(0, 5, 200), np.random.randn(200)), kdims=['Group', 'Category'], vdims=['Value']).sort() ``` ### ``Histogram`` <a id='Histogram'></a> ``` np.random.seed(1) data = [np.random.normal() for i in range(10000)] frequencies, edges = np.histogram(data, 20) hv.Histogram(frequencies, edges) ``` ``Histogram``s partition the `x` axis into discrete (but not necessarily regular) bins, showing counts in each as a bar. Almost all Element types, including ``Histogram``, may be projected onto a polar axis by supplying ``projection='polar'`` as a plot option. ``` %%opts Histogram [projection='polar' show_grid=True] data = [np.random.rand()*np.pi*2 for i in range(100)] frequencies, edges = np.histogram(data, 20) hv.Histogram(frequencies, edges, kdims=['Angle']) ``` ### ``Scatter`` <a id='Scatter'></a> ``` %%opts Scatter (color='k', marker='s', s=10) np.random.seed(42) points = [(i, np.random.random()) for i in range(20)] hv.Scatter(points) + hv.Scatter(points)[12:20] ``` Scatter is the discrete equivalent of Curve, showing *y* values for discrete *x* values selected. See [``Points``](#Points) for more information. The marker shape specified above can be any supported by [matplotlib](http://matplotlib.org/api/markers_api.html), e.g. ``s``, ``d``, or ``o``; the other options select the color and size of the marker. For convenience with the [bokeh backend](Bokeh_Backend), the matplotlib marker options are supported using a compatibility function in HoloViews. 
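Before moving on to ``Points``, here is a quick sketch verifying the alternative constructor formats described at the start of this section: a list of (x, y) tuples and a tuple of column iterables yield the same ``Curve``.

```
import numpy as np
import holoviews as hv

xs = np.linspace(0, np.pi*2, 20)
ys = np.sin(xs)

# Same data, two of the accepted constructor formats:
curve_from_tuples = hv.Curve(list(zip(xs, ys)))   # list of length-2 tuples
curve_from_columns = hv.Curve((xs, ys))           # tuple of column iterables

# Both objects hold identical samples:
print(np.allclose(curve_from_tuples.dimension_values('y'),
                  curve_from_columns.dimension_values('y')))
```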
### ``Points`` <a id='Points'></a> ``` np.random.seed(12) points = np.random.rand(50,2) hv.Points(points) + hv.Points(points)[0.6:0.8,0.2:0.5] ``` As you can see, ``Points`` is very similar to ``Scatter``, and can produce some plots that look identical. However, the two ``Element``s are very different semantically. For ``Scatter``, the dots each show a dependent variable *y* for some *x*, such as in the ``Scatter`` example above where we selected regularly spaced values of *x* and then created a random number as the corresponding *y*. I.e., for ``Scatter``, the *y* values are the data; the *x*s are just where the data values are located. For ``Points``, both *x* and *y* are independent variables, known as ``key_dimensions`` in HoloViews: ``` for o in [hv.Points(points,name="Points "), hv.Scatter(points,name="Scatter")]: for d in ['key','value']: print("%s %s_dimensions: %s " % (o.name, d, o.dimensions(d,label=True))) ``` The ``Scatter`` object expresses a dependent relationship between *x* and *y*, making it useful for combining with other similar ``Chart`` types, while the ``Points`` object expresses the relationship of two independent keys *x* and *y* with optional ``vdims`` (zero in this case), which makes ``Points`` objects meaningful to combine with the ``Raster`` types below. Of course, the ``vdims`` need not be empty for ``Points``; here is an example with two additional quantities for each point, as ``value_dimension``s *z* and &alpha; visualized as the color and size of the dots, respectively: ``` %%opts Points [color_index=2 size_index=3 scaling_factor=50] np.random.seed(10) data = np.random.rand(100,4) points = hv.Points(data, vdims=['z', 'alpha']) points + points[0.3:0.7, 0.3:0.7].hist() ``` Such a plot wouldn't be meaningful for ``Scatter``, but is a valid use for ``Points``, where the *x* and *y* locations are independent variables representing coordinates, and the "data" is conveyed by the size and color of the dots. ### ``Spikes`` <a id='Spikes'></a> Spikes represent any number of horizontal or vertical line segments with fixed or variable heights. There are a number of disparate uses for this type. First of all, they may be used as a rugplot to give an overview of a one-dimensional distribution. They may also be useful in more domain-specific cases, such as visualizing spike trains for neurophysiology or spectrograms in physics and chemistry applications. In the simplest case, a Spikes object represents coordinates in a 1D distribution: ``` %%opts Spikes (line_alpha=0.4) [spike_length=0.1] xs = np.random.rand(50) ys = np.random.rand(50) hv.Points((xs, ys)) * hv.Spikes(xs) ``` When supplying two dimensions to the Spikes object, the second dimension will be mapped onto the line height. Optionally, you may also supply a cmap and color_index to map color onto one of the dimensions. This way we can, for example, plot a mass spectrogram: ``` %%opts Spikes (cmap='Reds') hv.Spikes(np.random.rand(20, 2), kdims=['Mass'], vdims=['Intensity']) ``` Another possibility is to draw a number of spike trains as you would encounter in neuroscience. Here we generate 10 separate random spike trains and distribute them evenly across the space by setting their ``position``. 
By also declaring some ``yticks``, each spike train can be labeled individually: ``` %%opts Spikes [spike_length=0.1] NdOverlay [show_legend=False] hv.NdOverlay({i: hv.Spikes(np.random.randint(0, 100, 10), kdims=['Time']).opts(plot=dict(position=0.1*i)) for i in range(10)}).opts(plot=dict(yticks=[((i+1)*0.1-0.05, i) for i in range(10)])) ``` Finally, we may use ``Spikes`` to visualize marginal distributions as adjoined plots using the ``<<`` adjoin operator: ``` %%opts Spikes (line_alpha=0.2) points = hv.Points(np.random.randn(500, 2)) points << hv.Spikes(points['y']) << hv.Spikes(points['x']) ``` ### ``VectorField`` <a id='VectorField'></a> ``` %%opts VectorField [size_index=3] x,y = np.mgrid[-10:10,-10:10] * 0.25 sine_rings = np.sin(x**2+y**2)*np.pi+np.pi exp_falloff = 1/np.exp((x**2+y**2)/8) vector_data = (x,y,sine_rings, exp_falloff) hv.VectorField(vector_data) ``` As you can see above, here the *x* and *y* positions are chosen to make a regular grid. The arrow angles follow a sinsoidal ring pattern, and the arrow lengths fall off exponentially from the center, so this plot has four dimensions of data (direction and length for each *x,y* position). Using the IPython ``%%opts`` cell-magic (described in the [Options tutorial](Options), along with the Python equivalent), we can also use color as a redundant indicator to the direction or magnitude: ``` %%opts VectorField [size_index=3] VectorField.A [color_index=2] VectorField.M [color_index=3] hv.VectorField(vector_data, group='A') + hv.VectorField(vector_data, group='M') ``` ### ``SideHistogram`` <a id='SideHistogram'></a> The ``.hist`` method conveniently adjoins a histogram to the side of any ``Chart``, ``Surface``, or ``Raster`` component, as well as many of the container types (though it would be reporting data from one of these underlying ``Element`` types). For a ``Raster`` using color or grayscale to show values (see ``Raster`` section below), the side histogram doubles as a color bar or key. ``` import numpy as np np.random.seed(42) points = [(i, np.random.normal()) for i in range(800)] hv.Scatter(points).hist() ``` ## ``Chart3D`` Elements <a id='Chart3D Elements'></a> ### ``Surface`` <a id='Surface'></a> ``` %%opts Surface (cmap='jet' rstride=20, cstride=2) hv.Surface(np.sin(np.linspace(0,100*np.pi*2,10000)).reshape(100,100)) ``` Surface is used for a set of gridded points whose associated value dimension represents samples from a continuous surface; it is the equivalent of a ``Curve`` but with two key dimensions instead of just one. ### ``Scatter3D`` <a id='Scatter3D'></a> ``` %%opts Scatter3D [azimuth=40 elevation=20] x,y = np.mgrid[-5:5, -5:5] * 0.1 heights = np.sin(x**2+y**2) hv.Scatter3D(zip(x.flat,y.flat,heights.flat)) ``` ``Scatter3D`` is the equivalent of ``Scatter`` but for two key dimensions, rather than just one. ### ``TriSurface`` <a id='TriSurface'></a> The ``TriSurface`` Element renders any collection of 3D points as a Surface by applying Delaunay triangulation. It thus supports arbitrary, non-gridded data, but it does not support indexing to find data values, since finding the closest ones would require a search. ``` %%opts TriSurface [fig_size=200] (cmap='hot_r') hv.TriSurface((x.flat,y.flat,heights.flat)) ``` ## ``Raster`` Elements <a id='Raster Elements'></a> **A collection of raster image types** The second large class of ``Elements`` is the raster elements. Like ``Points`` and unlike the other ``Chart`` elements, ``Raster Elements`` live in a 2D key-dimensions space. 
For the ``Image``, ``RGB``, and ``HSV`` elements, the coordinates of this two-dimensional key space are defined in a [continuously indexable coordinate system](Continuous_Coordinates.ipynb). ### ``Raster`` <a id='Raster'></a> A ``Raster`` is the base class for image-like ``Elements``, but may be used directly to visualize 2D arrays using a color map. The coordinate system of a ``Raster`` is the raw indexes of the underlying array, with integer values always starting from (0,0) in the top left, with default extents corresponding to the shape of the array. The ``Image`` subclass visualizes similarly, but using a continuous Cartesian coordinate system suitable for an array that represents some underlying continuous region. ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 hv.Raster(np.sin(x**2+y**2)) ``` ### ``QuadMesh`` <a id='QuadMesh'></a> The basic ``QuadMesh`` is a 2D grid of bins specified as x-/y-values specifying a regular sampling or edges, with arbitrary sampling and an associated 2D array containing the bin values. The coordinate system of a ``QuadMesh`` is defined by the bin edges, therefore any index falling into a binned region will return the appropriate value. Unlike ``Image`` objects, slices must be inclusive of the bin edges. ``` n = 21 xs = np.logspace(1, 3, n) ys = np.linspace(1, 10, n) hv.QuadMesh((xs, ys, np.random.rand(n-1, n-1))) ``` QuadMesh may also be used to represent an arbitrary mesh of quadrilaterals by supplying three separate 2D arrays representing the coordinates of each quadrilateral in a 2D space. Note that when using ``QuadMesh`` in this mode, slicing and indexing semantics and most operations will currently not work. ``` coords = np.linspace(-1.5,1.5,n) X,Y = np.meshgrid(coords, coords); Qx = np.cos(Y) - np.cos(X) Qz = np.sin(Y) + np.sin(X) Z = np.sqrt(X**2 + Y**2) hv.QuadMesh((Qx, Qz, Z)) ``` ### ``HeatMap`` <a id='HeatMap'></a> A ``HeatMap`` displays like a typical raster image, but the input is a dictionary indexed with two-dimensional keys, not a Numpy array or Pandas dataframe. As many rows and columns as required will be created to display the values in an appropriate grid format. Values unspecified are left blank, and the keys can be any Python datatype (not necessarily numeric). One typical usage is to show values from a set of experiments, such as a parameter space exploration, and many other such visualizations are shown in the [Containers](Containers.ipynb) and [Exploring Data](Exploring_Data.ipynb) tutorials. Each value in a ``HeatMap`` is labeled explicitly by default, and so this component is not meant for very large numbers of samples. With the default color map, high values (in the upper half of the range present) are colored orange and red, while low values (in the lower half of the range present) are colored shades of blue. ``` data = {(chr(65+i),chr(97+j)): i*j for i in range(5) for j in range(5) if i!=j} hv.HeatMap(data).sort() ``` ### ``Image`` <a id='Image'></a> Like ``Raster``, a HoloViews ``Image`` allows you to view 2D arrays using an arbitrary color map. Unlike ``Raster``, an ``Image`` is associated with a [2D coordinate system in continuous space](Continuous_Coordinates.ipynb), which is appropriate for values sampled from some underlying continuous distribution (as in a photograph or other measurements from locations in real space). Slicing, sampling, etc. on an ``Image`` all use this continuous space, whereas the corresponding operations on a ``Raster`` work on the raw array coordinates. 
``` x,y = np.mgrid[-50:51, -50:51] * 0.1 bounds=(-1,-1,1,1) # Coordinate system: (left, bottom, top, right) (hv.Image(np.sin(x**2+y**2), bounds=bounds) + hv.Image(np.sin(x**2+y**2), bounds=bounds)[-0.5:0.5, -0.5:0.5]) ``` Notice how, because our declared coordinate system is continuous, we can slice with any floating-point value we choose. The appropriate range of the samples in the input numpy array will always be displayed, whether or not there are samples at those specific floating-point values. It is also worth noting that the name ``Image`` can clash with other common libraries, which is one reason to avoid unqualified imports like ``from holoviews import *``. For instance, the Python Imaging Libray provides an ``Image`` module, and IPython itself supplies an ``Image`` class in ``IPython.display``. Python namespaces allow you to avoid such problems, e.g. using ``from PIL import Image as PILImage`` or using ``import holoviews as hv`` and then ``hv.Image()``, as we do in these tutorials. ### ``RGB`` <a id='RGB'></a> The ``RGB`` element is an ``Image`` that supports red, green, blue channels: ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 r = 0.5*np.sin(np.pi +3*x**2+y**2)+0.5 g = 0.5*np.sin(x**2+2*y**2)+0.5 b = 0.5*np.sin(np.pi/2+x**2+y**2)+0.5 hv.RGB(np.dstack([r,g,b])) ``` You can see how the RGB object is created from the original channels: ``` %%opts Image (cmap='gray') hv.Image(r,label="R") + hv.Image(g,label="G") + hv.Image(b,label="B") ``` ``RGB`` also supports an optional alpha channel, which will be used as a mask revealing or hiding any ``Element``s it is overlaid on top of: ``` %%opts Image (cmap='gray') mask = 0.5*np.sin(0.2*(x**2+y**2))+0.5 rgba = hv.RGB(np.dstack([r,g,b,mask])) bg = hv.Image(0.5*np.cos(x*3)+0.5, label="Background") * hv.VLine(x=0,label="Background") overlay = bg*rgba overlay.label="RGBA Overlay" bg + hv.Image(mask,label="Mask") + overlay ``` ### ``HSV`` <a id='HSV'></a> HoloViews makes it trivial to work in any color space that can be converted to ``RGB`` by making a simple subclass of ``RGB`` as appropriate. For instance, we also provide the HSV (hue, saturation, value) color space, which is useful for plotting cyclic data (as the Hue) along with two additional dimensions (controlling the saturation and value of the color, respectively): ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 h = 0.5 + np.sin(0.2*(x**2+y**2)) / 2.0 s = 0.5*np.cos(y*3)+0.5 v = 0.5*np.cos(x*3)+0.5 hsv = hv.HSV(np.dstack([h, s, v])) hsv ``` You can see how this is created from the original channels: ``` %%opts Image (cmap='gray') hv.Image(h, label="H") + hv.Image(s, label="S") + hv.Image(v, label="V") ``` # ``Tabular`` Elements <a id='Tabular Elements'></a> **General data structures for holding arbitrary information** ## ``ItemTable`` <a id='ItemTable'></a> An ``ItemTable`` is an ordered collection of key, value pairs. It can be used to directly visualize items in a tabular format where the items may be supplied as an ``OrderedDict`` or a list of (key,value) pairs. A standard Python dictionary can be easily visualized using a call to the ``.items()`` method, though the entries in such a dictionary are not kept in any particular order, and so you may wish to sort them before display. One typical usage for an ``ItemTable`` is to list parameter values or measurements associated with an adjacent ``Element``. 
``` hv.ItemTable([('Age', 10), ('Weight',15), ('Height','0.8 meters')]) ``` ## ``Table`` <a id='Table'></a> A table is more general than an ``ItemTable``, as it allows multi-dimensional keys and multidimensional values. ``` keys = [('M',10), ('M',16), ('F',12)] values = [(15, 0.8), (18, 0.6), (10, 0.8)] table = hv.Table(zip(keys,values), kdims = ['Gender', 'Age'], vdims=['Weight', 'Height']) table ``` Note that you can use select using tables, and once you select using a full, multidimensional key, you get an ``ItemTable`` (shown on the right): ``` table.select(Gender='M') + table.select(Gender='M', Age=10) ``` The ``Table`` is used as a common data structure that may be converted to any other HoloViews data structure using the ``TableConversion`` class. The functionality of the ``TableConversion`` class may be conveniently accessed using the ``.to`` property. For more extended usage of table conversion see the [Columnar Data](Columnnar_Data.ipynb) and [Pandas Conversion](Pandas_Conversion.ipynb) Tutorials. ``` table.select(Gender='M').to.curve(kdims=["Age"], vdims=["Weight"]) ``` # ``Annotation`` Elements <a id='Annotation Elements'></a> **Useful information that can be overlaid onto other components** Annotations are components designed to be overlaid on top of other ``Element`` objects. To demonstrate annotation and paths, we will be drawing many of our elements on top of an RGB Image: ``` scene = hv.RGB.load_image('../assets/penguins.png') ``` ### ``VLine`` and ``HLine`` <a id='VLine'></a><a id='HLine'></a> ``` scene * hv.VLine(-0.05) + scene * hv.HLine(-0.05) ``` ### ``Spline`` <a id='Spline'></a> The ``Spline`` annotation is used to draw Bezier splines using the same semantics as [matplotlib splines](http://matplotlib.org/api/path_api.html). In the overlay below, the spline is in dark blue and the control points are in light blue. ``` points = [(-0.3, -0.3), (0,0), (0.25, -0.25), (0.3, 0.3)] codes = [1,4,4,4] scene * hv.Spline((points,codes)) * hv.Curve(points) ``` ### Text and Arrow <a id='Text'></a><a id='Arrow'></a> ``` scene * hv.Text(0, 0.2, 'Adult\npenguins') + scene * hv.Arrow(0,-0.1, 'Baby penguin', 'v') ``` # Paths <a id='Path Elements'></a> **Line-based components that can be overlaid onto other components** Paths are a subclass of annotations that involve drawing line-based components on top of other elements. Internally, Path Element types hold a list of Nx2 arrays, specifying the x/y-coordinates along each path. The data may be supplied in a number of ways, including: 1. A list of Nx2 numpy arrays. 2. A list of lists containing x/y coordinate tuples. 3. A tuple containing an array of length N with the x-values and a second array of shape NxP, where P is the number of paths. 4. A list of tuples each containing separate x and y values. ## ``Path`` <a id='Path'></a> A ``Path`` object is actually a collection of paths which can be arbitrarily specified. Although there may be multiple unconnected paths in a single ``Path`` object, they will all share the same style. Only by overlaying multiple ``Path`` objects do you iterate through the defined color cycle (or any other style options that have been defined). 
``` angle = np.linspace(0, 2*np.pi, 100) baby = list(zip(0.15*np.sin(angle), 0.2*np.cos(angle)-0.2)) adultR = [(0.25, 0.45), (0.35,0.35), (0.25, 0.25), (0.15, 0.35), (0.25, 0.45)] adultL = [(-0.3, 0.4), (-0.3, 0.3), (-0.2, 0.3), (-0.2, 0.4),(-0.3, 0.4)] scene * hv.Path([adultL, adultR, baby]) * hv.Path([baby]) ``` ## ``Contours`` <a id='Contours'></a> A ``Contours`` object is similar to ``Path`` object except each of the path elements is associated with a numeric value, called the ``level``. Sadly, our penguins are too complicated to give a simple example so instead we will simply mark the first couple of rings of our earlier ring pattern: ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 def circle(radius, x=0, y=0): angles = np.linspace(0, 2*np.pi, 100) return np.array( list(zip(x+radius*np.sin(angles), y+radius*np.cos(angles)))) hv.Image(np.sin(x**2+y**2)) * hv.Contours([circle(0.22)], level=0) * hv.Contours([circle(0.33)], level=1) ``` ## ``Polygons`` <a id='Polygons'></a> A ``Polygons`` object is similar to a ``Contours`` object except that each supplied path is closed and filled. Just like ``Contours``, optionally a ``level`` may be supplied; the Polygons will then be colored according to the supplied ``cmap``. Non-finite values such as ``np.NaN`` or ``np.inf`` will default to the supplied ``facecolor``. Polygons with values can be used to build heatmaps with arbitrary shapes. ``` %%opts Polygons (cmap='hot' line_color='black' line_width=2) np.random.seed(35) hv.Polygons([np.random.rand(4,2)], level=0.5) *\ hv.Polygons([np.random.rand(4,2)], level=1.0) *\ hv.Polygons([np.random.rand(4,2)], level=1.5) *\ hv.Polygons([np.random.rand(4,2)], level=2.0) ``` Polygons without a value are useful as annotation, but also allow us to draw arbitrary shapes. ``` def rectangle(x=0, y=0, width=1, height=1): return np.array([(x,y), (x+width, y), (x+width, y+height), (x, y+height)]) (hv.Polygons([rectangle(width=2), rectangle(x=6, width=2)]).opts(style={'fill_color': '#a50d0d'}) * hv.Polygons([rectangle(x=2, height=2), rectangle(x=5, height=2)]).opts(style={'fill_color': '#ffcc00'}) * hv.Polygons([rectangle(x=3, height=2, width=2)]).opts(style={'fill_color': 'cyan'})) ``` ## ``Bounds`` <a id='Bounds'></a> A bounds is a rectangular area specified as a tuple in ``(left, bottom, right, top)`` format. It is useful for denoting a region of interest defined by some bounds, whereas ``Box`` (below) is useful for drawing a box at a specific location. ``` scene * hv.Bounds(0.2) * hv.Bounds((0.2, 0.2, 0.45, 0.45,)) ``` ## ``Box`` <a id='Box'></a> and ``Ellipse`` <a id='Ellipse'></a> A ``Box`` is similar to a ``Bounds`` except you specify the box position, width, and aspect ratio instead of the coordinates of the box corners. An ``Ellipse`` is specified just as for ``Box``, but has a rounded shape. ``` scene * hv.Box( -0.25, 0.3, 0.3, aspect=0.5) * hv.Box( 0, -0.2, 0.1) + \ scene * hv.Ellipse(-0.25, 0.3, 0.3, aspect=0.5) * hv.Ellipse(0, -0.2, 0.1) ```
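As a closing illustration (not part of the original gallery), the same continuous coordinates used for slicing an ``Image`` can also be passed to ``Bounds``, so an outlined region of interest and the corresponding cropped view stay in sync:

```
roi = (-0.5, -0.5, 0.5, 0.5)   # (left, bottom, right, top)
img = hv.Image(np.sin(x**2 + y**2), bounds=(-1, -1, 1, 1))
img * hv.Bounds(roi) + img[roi[0]:roi[2], roi[1]:roi[3]]
```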
# Testing Click-Through-Rates for Banner Ads (A/B Testing) * Lets say we are a new apparel store; after thorough market research, we decide to open up an <b> Online Apparel Store.</b> We hire Developers, Digital Media Strategists and Data Scientists, who help develop the store, place products and conduct controlled experiments on the website. * Traditionally, companies ran controlled experiments, either A/B Tests or Multivariate tests, based on requirements. <b>Multiple versions of Banner Ads, Text Ads and Video Ads are created, tested and placed on the website. Website layouts, Ad positions, transitions and many other attributes can be tested.</b> * Our version-A (Still in red colored background after the Holiday season), was on our website for 2 months or so, and we think its time for a change. Assuming everything else kept constant, we develop <b>version-B with subtle, earthy colored banner with the same text.</b> ### How do we decide if we should go for the switch (replace version-a with version-b) ? ### Controlled A/B Test * Content, color, text style, text size, banner location and placement and many other things need to be taken into account when trying to conduct a controlled experiment. If we plan to replace version-A with version-B, we need <b>strong evidence that click-through-rate (clicks/ impression) for version-B is significantly higher than version-A.</b> * Every visitor who visits our homepage, is <b>randomly (with equal probability) going to see either version-A (Older version) or version-B (New creative) on our homepage.</b> We observe, that the older version has a CTR (Click-through-rate) of <b>9 % (9 clicks every 100 impressions).</b> Let us say we have an <b>average of 200 visitors every day (new + returning users).</b> * We assume and test for the hypothesis that our new banner Ad (version-B), can provide some boost to the CTR. 25 % boost would mean an average-CTR of 11.25 % (11.25 clicks every 100 impressions). ``` # importing necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # CTR previous version ctr_version_a = 0.09 # CTR new version with 25 % expected boost ctr_version_b = 0.09 + (0.25)*(0.09) ctr_version_a, ctr_version_b ``` * Our null hypothesis is that there is no difference in CTR for version a and b, with alternate hypothesis that CTR of version-B sees a boost in CTR. We conduct a Two-Sample Proportion Test to validate our hypotheses. $$H_0: \mu_b > \mu_a$$ $$H_a: \mu_b <= \mu_a $$ We know, t-stat is calculated by the following $$t = \frac{(\mu_b - \mu_a) - 0}{SE}$$ $$t = \frac{(\mu_b - \mu_a) - 0}{\sqrt{\frac{CTR_b(1-CTR_b)}{N_b} + \frac{CTR_a(1-CTR_a)}{N_a}}} $$ * Let us choose a type-I error rate of 5 % (alpha = 0.05). Now, we simluate the test by sending customers to either of the pages randomly with equal chance. Let us say we start pushing these two version randomly on day 1. On Average, we expect around 200 customers to open the website, of which approximately 100 of them are exposed to version-A, and 100 are exposed to version-B. ``` # function to flip between version-a and b. def flipVersion(version_a): if version_a: return False else: return True ``` ### End of Day 1 * After end of day 1, we observe that there were 202 customers who visited the webiste, and 101 customers were shown version-a and another 101 were shown version-b. ``` # total customer incoming per day are normally distributed with mean 200 an deviation 10. 
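# note: flipVersion is called after every visitor, so exposure alternates A, B, A, B, ...
# and each version ends up shown to roughly half of that day's visitors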
np.random.seed(25) num_cust = int(np.random.normal(200, 10)) # total number of impressions and clicks at start of experiment are zero num_imps_version_a = 0 num_imps_version_b = 0 num_clicks_version_a = 0 num_clicks_version_b = 0 # start by showing version-A version_a = True # send each customer to a or b for customer_number in range(num_cust): # if version-a is exposed if version_a is True: # increase impression count num_imps_version_a += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_a += np.random.binomial(1, ctr_version_a) # if version-b is exposed else: # increase impression count num_imps_version_b += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_b += np.random.binomial(1, ctr_version_b) # flip version after each customer version_a = flipVersion(version_a) num_cust, num_imps_version_a, num_imps_version_b num_clicks_version_a, num_clicks_version_b ``` * We observe that 6 customers clicked on version-a, and 12 clicked on version-b. Plugging it into the above t-stat formula, we obtain the following. The Day-1 CTRs after running the experiment are as follows: ``` ctr_day_one_version_a = num_clicks_version_a/num_imps_version_a ctr_day_one_version_b = num_clicks_version_b/num_imps_version_b ctr_day_one_version_a, ctr_day_one_version_b p = (num_clicks_version_a + num_clicks_version_b)/(num_imps_version_a + num_imps_version_b) SE = np.sqrt(p*(1-p)*( (1/num_imps_version_a) + (1/num_imps_version_b) )) p, SE t = (ctr_day_one_version_b - ctr_day_one_version_a)/(SE) t ``` * After Day-1, we observe the t-stat is 1.48 (We did not find a significant set of observations to conclude that verion-b is better than version-a). ### How long do we run the test for ? When do we know exactly that Version-B is better than Version-A ? * In few cases, sample size is pre-defined to control Type-II error along with Type-I error, and once enough samples are collected, choice is made. In few cases, analysis is done over how t-stat improves as samples are collected. * In our case, we can observe how t-stat changes (increases or decreases with time and sample size), and then decide when to stop or continue the experiment. Note that it is always better to estimate the Power and decide on sample size to allocate budgets before the experiment. ``` def conductExperiment(n_days): list_num_cust = [] list_t_stat = [] list_ctr_version_a, list_ctr_version_b = [], [] list_imp_version_a, list_imp_version_b = [], [] for i in range(0,n_days): # total customer incoming per day are normally distributed with mean 200 an deviation 10. 
num_cust = int(np.random.normal(200,10)) list_num_cust.append(num_cust) # total number of impressions and clicks at start of experiment are zero num_imps_version_a = 0 num_imps_version_b = 0 num_clicks_version_a = 0 num_clicks_version_b = 0 # start by showing version-A version_a = True # send each customer to a or b for customer_number in range(num_cust): # if version-a is exposed if version_a is True: # increase impression count num_imps_version_a += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_a += np.random.binomial(1, ctr_version_a) # if version-b is exposed else: # increase impression count num_imps_version_b += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_b += np.random.binomial(1, ctr_version_b) # flip version after each customer version_a = flipVersion(version_a) ctr_day_one_version_a = num_clicks_version_a/num_imps_version_a ctr_day_one_version_b = num_clicks_version_b/num_imps_version_b list_ctr_version_a.append(ctr_day_one_version_a) list_ctr_version_b.append(ctr_day_one_version_b) list_imp_version_a.append(num_imps_version_a) list_imp_version_b.append(num_imps_version_b) df_abtest = pd.DataFrame() df_abtest['num_cust'] = list_num_cust df_abtest['IMP_version_a'] = list_imp_version_a df_abtest['IMP_version_b'] = list_imp_version_b df_abtest['CTR_version_a'] = list_ctr_version_a df_abtest['CTR_version_b'] = list_ctr_version_b df_abtest['Clicks_version_b'] = df_abtest['IMP_version_b']*df_abtest['CTR_version_b'] df_abtest['Clicks_version_a'] = df_abtest['IMP_version_a']*df_abtest['CTR_version_a'] return df_abtest ``` ## Simulating experiment results for 3 Days * Now, let us simulate the results for first 3-days, we have the impressions and CTRs for both versions. We can calculate a rolling t-statistic, which can help decide if the CTR of version-b outperforms CTR of version-a. * As days pass by and we collect more data, the sample size (N) increases, decreasing the Standard Error term over time (Daily standard error are probably be very close). Conducting t-test on daily level does not make sense, on Day-2, we need to include the numbers from Day-1 and Day-2 as well, and calculate the t-statistics cumulatively. ``` df_abtest = conductExperiment(3) df_abtest ``` * Below, we re-write the previous function to get cumulative t-stat and Standard Error terms. ``` def tStatAfterNDays(n_days): # total customer incoming per day are normally distributed with mean 200 an deviation 10. 
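    # unlike conductExperiment above, this cumulative version fixes the random seed and
    # assumes exactly 200 customers per day, so calls with n_days = 1, 2, 3, ... replay
    # the same underlying random stream and yield nested (cumulative) samples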
np.random.seed(25) num_cust = 200*n_days # total number of impressions and clicks at start of experiment are zero num_imps_version_a = 0 num_imps_version_b = 0 num_clicks_version_a = 0 num_clicks_version_b = 0 # start by showing version-A version_a = True # send each customer to a or b for customer_number in range(num_cust): # if version-a is exposed if version_a is True: # increase impression count num_imps_version_a += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_a += np.random.binomial(1, ctr_version_a) # if version-b is exposed else: # increase impression count num_imps_version_b += 1 # binomial sample (1 if successfully clicked, else 0) num_clicks_version_b += np.random.binomial(1, ctr_version_b) # flip version after each customer version_a = flipVersion(version_a) ctr_day_one_version_a = num_clicks_version_a/num_imps_version_a ctr_day_one_version_b = num_clicks_version_b/num_imps_version_b p = (num_clicks_version_a + num_clicks_version_b)/num_cust SE = np.sqrt(p*(1-p)*( (1/num_imps_version_a) + (1/num_imps_version_b) )) t = (ctr_day_one_version_b - ctr_day_one_version_a)/(SE) return t, SE ``` * Let us simulate the results for 3 consecutive days to obtain cumulative T-stats and Standard Errors. We observe in figure-1, that the cumulative t-stat has an increase gradually and is approximately 1.645 + after day and a half. On the right, we observe that the Standard Errors reduce cumulatively due to increase in sample size. ``` n_consecutive_days = 3 ndays = [i for i in range(1, n_consecutive_days + 1)] tStatsCumulative = [tStatAfterNDays(i)[0] for i in range(1,n_consecutive_days + 1)] SEStatsCumulative = [tStatAfterNDays(i)[1] for i in range(1,n_consecutive_days + 1)] fig = plt.figure(figsize=(18,6)) plt.subplot(1, 2, 1) plt.plot(ndays, tStatsCumulative) plt.grid() plt.title('Cumulative T-Stat') plt.xlabel('Number of Days') plt.ylabel('Cumulative T-Stat') plt.subplot(1, 2, 2) plt.plot(ndays, SEStatsCumulative) plt.grid() plt.title('Cumulative SE') plt.xlabel('Number of Days') plt.ylabel('Cumulative SE') ``` # Observartions: * We observe that after day and a half of both versions up and running, <b>there is a statistically significant difference between CTRs of version-a and version-b, with version-b outperforming version-a.</b> * Could we have <b>stopped the experiment after one and a half days ? Do we know if this effect is consistent on weekends ? Can we attribute these spike in CTR due to these changes only ? These are all design choices and can be decided only with additional Business context. Ideally, we would want to know the effects of weekdays vs weekends. Collecting more samples by experimentation provides deeper understanding of customer behaviour.</b> * Now, let us take a look at how to calculate the sample size required to control for a required Beta (Type-II Error). Note that deciding alpha and beta (Type-I and Type-II Error) rates are design choices as well, and deciding sample size before conducting the experiment is not only a best practice, but also helps decide the approximate Time and Budget it takes to provide confident and conclusive results. ### Controlling Power by varying Sample Size * Just like choosing significance level alpha (0.05), we need to choose power (1 - beta), generally chosen around 95 % power (beta = 0.05). First, let us look at the distribution of version-A, cumulatively for 3 days. Our sample size (Number of impressions for version-a) is 600 (3 days x 200 impressions per day). The average CTR for 3 days is 0.09. 
* Given we know sample size and proportion, we can now calculate the critical cut-off value (cut off proportion). $$p_{crit+} = p_0 + 1.645(SE)$$ ### Version-a * We observe that 95 % of data lies within 0 and 0.1171 with mean Click-Through-Rate of 0.09. ``` n_a = 100*3 ctr_a = 0.09 SE = np.sqrt(ctr_a*(1-ctr_a)/n_a) p_crit_a = ctr_a + 1.645*(SE) p_crit_a ``` ### Version-b * Let us assume that version-b, has an average CTR at the critical cutoff value of version-a (0.117). <img src="power1.png" width="400"></img> ### Type-I and Type-II Errors: * Type-I error corresponds to the green shaded region (alpha=0.05), where we are allowing upto 5 % of sampling data to be misclassified (Assume they come from version-b and not version-a). * Type-II error corresponds to data sampled from version-b, but falls within region of version-a, and hence misclassified (Shaded in red). We observe that exactly half of the version-b falls within rejection region, making 50 % Type-II errors, which is high. * <b>Increasing sampling size can help reduce Type-II error at the same version-b mean, as higher sampling size reduces the standard error, and shrinks the tails of both distributions.</b> ### Ideal sample size for alpha (0.05) and beta (0.1) * We can calculate the ideal sample size for constrained alpha and beta parameters. Essentially, the version-b needs to have a mean such that 10 % of data falls out of rejection region, and that needs to be there line where version-a has critical value. <img src="power2.png" width="400"></img> * Given we want to control for Type-II error (0.1), with Power of 90 % (1-beta), the Z-stat for 10 % Error rate is 1.29. Hence, for the given sample size, The mean of version-b needs to be atleast 1.29 Z's away from the cutoff value of version-a. $$z_{critical} = \frac{0.117 - \mu_b}{SE}$$ $$-1.29 = \frac{0.117 - \mu_b}{SE}$$ ``` p=0.143 z = (0.117 - p)/(np.sqrt(( p*(1-p))/(300))) z ``` <b>Therefore, for sample size of 3 days (600 samples, 300 each version), for alpha 0.05 and beta 0.1 (power 0.9), we reach statistical significance if the average click-through-rate of version-b is 14.3 % </b> <hr> ## Deciding on sample size before conducting the experiment * Let us construct/ pose the question in an experimental setting. <b>First, lets setup our initial hypothesis for testing. Let us conduct an experiment to test if version-b can provide 50 % boost, when compared to previous version-a.</b> * We are strict with both Type-I and Type-II errors this time, and choose alpha 0.05 and beta 0.05 (0.95 Power). Below is the stated null and alternate hypotheses. We conduct a Two-Sample, One-Tailed Proportion Test to validate our hypotheses. $$H_0: \mu_b - \mu_a <= 0.5(\mu_a)$$ $$H_a: \mu_b - \mu_a > 0.5(\mu_a) $$ We know, Z-stat is calculated by the following $$z = \frac{(\mu_b - \mu_a) - 0.5(\mu_a)}{SE}$$ To solve for n, we can check it by plugging in Z-values (for null and alternate hypothesis), mean of null and alternate hypothesis. $$ \mu_0 + z_{0-critical}(\sqrt{\frac{p_0(1-p_0)}{n}}) = \mu_a - z_{a-critical}(\sqrt{\frac{p_a(1-p_a)}{n}})$$ <b> Knowing sample size in advance can help decide budget, and also provide a good estimate of how long we might have to run the test, to understand which version works better. </b>
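To make the last step concrete, here is a minimal sketch of solving that equality for n numerically. It assumes SciPy is available and plugs in the scenario above (baseline CTR 0.09, a 50 % boost as the alternative, z = 1.645 for both alpha = 0.05 and power = 0.95); `gap` is just an illustrative helper name:

```
import numpy as np
from scipy.optimize import brentq

p0, pa = 0.09, 0.09 * 1.5      # null (version-a) CTR and alternative CTR with a 50 % boost
z0, za = 1.645, 1.645          # one-tailed alpha = 0.05 and beta = 0.05 (power 0.95)

def gap(n):
    # left-hand side minus right-hand side of the equality above; it crosses zero at the required n
    return (p0 + z0 * np.sqrt(p0 * (1 - p0) / n)) - (pa - za * np.sqrt(pa * (1 - pa) / n))

n_per_version = brentq(gap, 10, 1e6)
print(int(np.ceil(n_per_version)))   # impressions required per version
```

At roughly 100 impressions per version per day, the resulting n also translates directly into an estimate of how many days the test would need to run.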
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/5_update_hyperparams/1_model_params/5)%20Switch%20deep%20learning%20model%20from%20default%20mode.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Goals ### Learn how to switch models post default mode # Table of Contents ## [Install](#0) ## [Load experiment with resnet defaults](#1) ## [Change model to densenet ](#2) ## [Train](#3) <a id='0'></a> # Install Monk ## Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` ## Install Monk Manually (Not recommended) ### Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git ### Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r 
requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` ## Dataset - Weather Classification - https://data.mendeley.com/datasets/4drtyfjtfy/1 ``` ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ" -O weather.zip && rm -rf /tmp/cookies.txt ! unzip -qq weather.zip ``` # Imports ``` #Using gluon backend # When installed using pip from monk.gluon_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.gluon_prototype import prototype ``` <a id='1'></a> # Load experiment with resnet defaults ``` gtf = prototype(verbose=1); gtf.Prototype("Project", "experiment-switch-models"); gtf.Default(dataset_path="weather/train", model_name="resnet18_v1", freeze_base_network=True, # If True, then freeze base num_epochs=5); #Read the summary generated once you run this cell. ``` ## As per the summary above Model Loaded on device Model name: resnet18_v1 Num of potentially trainable layers: 41 Num of actual trainable layers: 1 <a id='2'></a> # Switch now to densenet ``` gtf.update_model_name("densenet121"); # Very impotant to reload network gtf.Reload(); ``` <a id='3'></a> # Train ``` #Start Training gtf.Train(); #Read the training summary generated once you run the cell and training is completed ``` # Goals Completed ### Learn how to switch models post default mode
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import gpplot as gpp from poola import core as pool import anchors import core_functions as fns gpp.set_aesthetics(palette='Set2') def run_guide_residuals(lfc_df, paired_lfc_cols=[]): ''' Calls get_guide_residuals function from anchors package to calculate guide-level residual z-scores Inputs: 1. lfc_df: data frame with log-fold changes (relative to pDNA) 2. paired_lfc_cols: grouped list of initial populations and corresponding resistant populations ''' lfc_df = lfc_df.drop_duplicates() if not paired_lfc_cols: paired_lfc_cols = fns.pair_cols(lfc_df)[1] #get lfc pairs modified = [] unperturbed = [] #reference_df: column1 = modifier condition, column2 = unperturbed column ref_df = pd.DataFrame(columns=['modified', 'unperturbed']) row = 0 #row index for reference df for pair in paired_lfc_cols: #number of resistant pops in pair = len(pair)-1 res_idx = 1 #if multiple resistant populations, iterate while res_idx < len(pair): ref_df.loc[row, 'modified'] = pair[res_idx] ref_df.loc[row, 'unperturbed'] = pair[0] res_idx +=1 row +=1 print(ref_df) #input lfc_df, reference_df #guide-level residuals_lfcs, all_model_info, model_fit_plots = anchors.get_guide_residuals(lfc_df, ref_df) return residuals_lfcs, all_model_info, model_fit_plots ``` ## Data summary ``` reads = pd.read_excel('../../Data/Reads/Wilen/supplementary_reads_v1.xlsx', sheet_name= 'VeroE6 SARS-2 genomewide reads') reads # Gene Annotations chip = pd.read_csv('../../Data/Interim/Goujon/VeroE6/CP0070_Chlorocebus_sabeus_remapped.chip', sep ='\t') chip = chip.rename(columns={'Barcode Sequence':'Construct Barcode'}) chip_reads = pd.merge(chip[['Construct Barcode', 'Gene']], reads, on = ['Construct Barcode'], how = 'right') chip_reads = chip_reads.rename(columns={'Gene':'Gene Symbol'}) #Calculate lognorm cols = chip_reads.columns[2:].to_list() #reads columns = start at 3rd column lognorms = fns.get_lognorm(chip_reads.dropna(), cols = cols) lognorms # lognorms = lognorms.rename(columns={'count_lognorm':'pDNA_lognorm'}) ``` ## Quality Control ### Population Distributions ``` #Calculate log-fold change relative to pDNA target_cols = list(lognorms.columns[3:]) pDNA_lfc = fns.calculate_lfc(lognorms,target_cols) pDNA_lfc # Average across Cas9-v2 columns Cas9v2_data_cols = [col for col in pDNA_lfc.columns if 'Cas9-v2' in col] Cas9v2_cols = ['Construct Barcode', 'Gene Symbol']+ Cas9v2_data_cols Cas9v2_df = pDNA_lfc.copy()[Cas9v2_cols] Cas9v2_df # Replace spaces with '_' for following functions new_col_list=['Construct Barcode', 'Gene Symbol'] for col in Cas9v2_data_cols: new_col = col.replace(' ','_') new_col_list.append(new_col) Cas9v2_df.columns = new_col_list Cas9v2_df fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(5,6)) i,j = 0,0 cols=[] mock_col = [col for col in Cas9v2_df.columns if 'Mock' in col] hi_MOI_cols = mock_col+ [col for col in Cas9v2_df.columns if 'Hi-MOI' in col] cols.append(hi_MOI_cols) lo_MOI_cols = mock_col+ [col for col in Cas9v2_df.columns if 'Lo-MOI' in col] cols.append(lo_MOI_cols) for k,c in enumerate(cols): # k = sub-list index, c = list of columns in sub-list for l, c1 in enumerate(c): if 'Mock' in c1: label1 = c1 + ' (initial)'#'Initial population' else: label1 = c1 #'Resistant population-'+str(l) Cas9v2_df[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=label1, ax=ax[i], legend=True) ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
sns.despine() t = ax[i].set_xlabel('Log-fold changes') title = ','.join(c[0].split('_')[:2]) t = ax[i].set_title(title) i+=1 fig.savefig('../../Figures/Wilen_Vero_population_distributions.png', bbox_inches="tight") ``` ### Distributions of control sets ``` # NO_SITE controls -> default controls controls = fns.get_controls(Cas9v2_df, control_name=['NO_SITE']) fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(5,6)) i,j = 0,0 for k,c in enumerate(cols): # k = sub-list index, c = list of columns in sub-list for l, c1 in enumerate(c): if l==0: label1 = c1 + ', NO_SITE'#'Initial population, NO_SITE' else: label1 = c1 + ', NO_SITE' #'Resistant population-'+str(l) + ', NO_SITE' controls[c1].plot(kind='kde',color=sns.color_palette('Set2')[l],label=label1, ax=ax[i], legend=True) ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) sns.despine() t = ax[i].set_xlabel('Log-fold changes') title = ','.join(c[0].split('_')[:2]) t = ax[i].set_title(title) i+=1 fig.savefig('../../Figures/Wilen_Vero_control_distributions.png', bbox_inches="tight") ``` ### ROC_AUC ``` ess_genes, non_ess_genes = fns.get_gene_sets() tp_genes = ess_genes.loc[:, 'Gene Symbol'].to_list() fp_genes = non_ess_genes.loc[:, 'Gene Symbol'].to_list() roc_auc, roc_df = pool.get_roc_aucs(Cas9v2_df, tp_genes, fp_genes, gene_col = 'Gene Symbol', score_col=mock_col) fig,ax=plt.subplots(figsize=(6,6)) ax=sns.lineplot(data=roc_df, x='fpr',y='tpr', ci=None, label = 'Mock,' + str(round(roc_auc,2))) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('ROC-AUC') plt.xlabel('False Positive Rate (non-essential)') plt.ylabel('True Positive Rate (essential)') ``` ## Gene level analysis ### Residual z-scores ``` lfc_df = Cas9v2_df.drop('Gene Symbol', axis = 1) lfc_df # run_guide_residuals(lfc_df.drop_duplicates(), cols) residuals_lfcs, all_model_info, model_fit_plots = run_guide_residuals(lfc_df, cols) residuals_lfcs guide_mapping = pool.group_pseudogenes(chip[['Construct Barcode', 'Gene']], pseudogene_size=4, gene_col='Gene', control_regex=['NO_SITE']) guide_mapping = guide_mapping.rename(columns={'Gene':'Gene Symbol'}) gene_residuals = anchors.get_gene_residuals(residuals_lfcs.drop_duplicates(), guide_mapping) gene_residuals gene_residual_sheet = fns.format_gene_residuals(gene_residuals, guide_min = 3, guide_max = 5) guide_residual_sheet = pd.merge(guide_mapping, residuals_lfcs.drop_duplicates(), on = 'Construct Barcode', how = 'inner') guide_residual_sheet with pd.ExcelWriter('../../Data/Processed/GEO_submission_v2/VeroE6_Wilen_v5.xlsx') as writer: gene_residual_sheet.to_excel(writer, sheet_name='VeroE6_avg_zscore', index =False) reads.to_excel(writer, sheet_name='VeroE6_genomewide_reads', index =False) guide_mapping.to_excel(writer, sheet_name='VeroE6_guide_mapping', index =False) with pd.ExcelWriter('../../Data/Processed/Individual_screens_v2/VeroE6_Wilen_indiv_v5.xlsx') as writer: gene_residuals.to_excel(writer, sheet_name='condition_genomewide_zscore', index =False) guide_residual_sheet.to_excel(writer, sheet_name='guide-level_zscore', index =False) ```
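As an optional cross-check of the ROC-AUC section above, the same guide-level AUC can be approximated directly with scikit-learn. This is only a sketch: it assumes essential genes deplete in the screen, so more negative log-fold changes should rank higher for the essential (true-positive) class:

```
from sklearn.metrics import roc_auc_score

# restrict to guides targeting the essential / non-essential control gene sets
subset = Cas9v2_df[Cas9v2_df['Gene Symbol'].isin(tp_genes + fp_genes)]
y_true = subset['Gene Symbol'].isin(tp_genes).astype(int)
y_score = -subset[mock_col[0]]   # flip sign: stronger depletion => higher essentiality score
print(roc_auc_score(y_true, y_score))
```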
``` from path import Path from PIL import Image import cv2 import random import pandas as pd import pickle def arg_parse(): parser = argparse.ArgumentParser() parser = argparse.ArgumentParser( prog="annotation.py", usage="annotation.py -n <<num_of_evaluation>>", description="", add_help = True ) parser.add_argument("-n", "--num", help = "num of evaluation", type = int, default = None) args = parser.parse_args() return args def get_filepath_list(dir_path): imgs = Path(dir_path).files('*.png') imgs += Path(dir_path).files('*.jpg') imgs += Path(dir_path).files('*.jpeg') return imgs def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC): h_min = min(im.shape[0] for im in im_list) im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation) for im in im_list] return cv2.hconcat(im_list_resize) def evaluate_images(path_list, rand=False, n_shows=None, username=None): df = pd.DataFrame(columns=['filename', 'score', 'user']) filename_list = [] score_list = [] rep_list = [ord(str(i)) for i in range(1, 6)] key_q = ord('q') if rand: path_list = random.sample(path_list, len(path_list)) if n_shows is None: n_shows = len(path_list) for path in path_list[:n_shows]: img = cv2.imread(path) cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL) cv2.resizeWindow('image', 800, 600) cv2.imshow('image', img) key = 0 while ((key not in rep_list) and key is not key_q): key = cv2.waitKey(0) cv2.destroyWindow('image') if key is key_q: break filename_list.append(path.rsplit('/')[-1]) score_list.append(rep_list.index(key)+1) df = pd.DataFrame() df['filename'] = filename_list df['score'] = score_list df['user'] = username return df def evaluate_images_relative(path_list, combination_list, username=None): df = pd.DataFrame(columns=['filename', 'score', 'user']) filename_list = [path.rsplit('/')[-1] for path in path_list] score_list = [0 for i in range(len(path_list))] num_evals = [0 for i in range(len(path_list))] key_f, key_j, key_q = ord('f'), ord('j'), ord('q') rep_list = [key_f, key_j, key_q] end_flag = False for i, c_list in enumerate(combination_list): img1 = cv2.imread(path_list[i]) for c in c_list: img2 = cv2.imread(path_list[c]) merged = hconcat_resize_min([img1, img2]) cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL) cv2.resizeWindow('image', 1200, 450) cv2.moveWindow('image', 100, 200) cv2.imshow('image', merged) key = 0 while key not in rep_list: key = cv2.waitKey(0) cv2.destroyWindow('image') if key is key_f: score_list[i] = score_list[i] + 1 num_evals[i] = num_evals[i] + 1 num_evals[c] = num_evals[c] + 1 elif key is key_j: score_list[c] = score_list[c] + 1 num_evals[i] = num_evals[i] + 1 num_evals[c] = num_evals[c] + 1 else: end_flag = True break combination_list[c].remove(i) if end_flag: break df = pd.DataFrame() df['filename'] = filename_list df['score'] = score_list df['num_of_evaluations'] = num_evals df['user'] = username return df def evaluate_images_relative_random(path_list, combination_list, start_pos, num=None, username=None): def get_random_combination_list(combination_list): combination_set = set() for i, clist in enumerate(combination_list): for c in clist: tmp_tuple = tuple(sorted([i, c])) combination_set.add(tmp_tuple) return random.sample(list(combination_set), len(combination_set)) df = pd.DataFrame(columns=['filename', 'score', 'user']) filename_list = [path.rsplit('/')[-1] for path in path_list] score_list = [0 for i in range(len(path_list))] num_evals = [0 for i in range(len(path_list))] key_f, 
key_j, key_q = ord('f'), ord('j'), ord('q') rep_list = [key_f, key_j, key_q] end_flag = False font = cv2.FONT_HERSHEY_SIMPLEX if num is None: num = len(combination_list) random_combination_list = get_random_combination_list(combination_list[start_pos:num]) for count, (i, j) in enumerate(random_combination_list): s1, s2 = random.sample([i, j], 2) img1 = cv2.imread(path_list[s1]) img2 = cv2.imread(path_list[s2]) merged = hconcat_resize_min([img1, img2]) cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL) cv2.resizeWindow('image', 1200, 450) cv2.moveWindow('image', 100, 200) text_pos = (merged.shape[1] - 250, merged.shape[0] - 50) cv2.putText(merged, "{}/{}".format(count+1, len(random_combination_list)), text_pos, font, 1.5, (0, 0, 0), 2, cv2.LINE_AA) cv2.imshow('image', merged) key = 0 while key not in rep_list: key = cv2.waitKey(0) cv2.destroyWindow('image') if key is key_f: score_list[s1] = score_list[s1] + 1 num_evals[s1] = num_evals[s1] + 1 num_evals[s2] = num_evals[s2] + 1 elif key is key_j: score_list[s2] = score_list[s2] + 1 num_evals[s1] = num_evals[s1] + 1 num_evals[s2] = num_evals[s2] + 1 else: end_flag = True break if end_flag: break df = pd.DataFrame() df['filename'] = filename_list df['score'] = score_list df['num_of_evaluations'] = num_evals df['user'] = username return df def save_evaluation_csv(df, username, save_path=None): if save_path is None: save_path = './output/' + username + '.csv' df.to_csv(save_path) def main(): print('Please write your name : ', end='') username = input() filepath_list = get_filepath_list('./images') df_result = evaluate_images(filepath_list, rand=True, username=username) save_evaluation_csv(df_result, username) print('Thank you!') def main_relative(): print('Please write your name : ', end='') username = input() filepath_list = get_filepath_list('./images/omelette_rice/')[:50] with open('./pickle/combination_list.pickle', 'rb') as f: combination_list = pickle.load(f) df_result = evaluate_images_relative(filepath_list, combination_list, username=username) save_evaluation_csv(df_result, username) print('Thank you!') def main_relative_random(): print('Please enter your name : ', end='') username = input() print('Please enter the number of ratings : ', end='') num = int(input()) filepath_list = get_filepath_list('../images/omelette_rice_500/images/') try: with open('..pickle/start_position.pickle', 'rb') as f: start_pos = pickle.load(f) except: start_pos = 0 with open('../pickle/combination500_list.pickle', 'rb') as f: combination_list = pickle.load(f) df_result = evaluate_images_relative_random(filepath_list, combination_list, start_pos, num, username=username) save_evaluation_csv(df_result, username) with open('..pickle/start_position.pickle', 'wb') as f: start_pos = start_pos + num print('Thank you!') if __name__=='__main__': main_relative_random() with open('../pickle/combination500_list.pickle', 'rb') as f: combination_list = pickle.load(f) # 集合に登録 def get_random_combination_set(combination_list): combination_set = set() for i, clist in enumerate(combination_list): for c in clist: tmp_tuple = tuple(sorted([i, c])) combination_set.add(tmp_tuple) return random.sample(list(combination_set), len(combination_set)) [i for i in range(10)][5:10] ```
# Extra Trees Classifier with MinMax Scaler ### Required Packages ``` import numpy as np import pandas as pd import seaborn as se import warnings import matplotlib.pyplot as plt from sklearn.ensemble import ExtraTreesClassifier from sklearn.preprocessing import LabelEncoder, MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,plot_confusion_matrix warnings.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= "" ``` List of features which are required for model training . ``` #x_values features=[] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` X = df[features] Y = df[target] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. ``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) def EncodeY(df): if len(df.unique())<=2: return df else: un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort') df=LabelEncoder().fit_transform(df) EncodedT=[xi for xi in range(len(un_EncodedT))] print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT)) return df x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=EncodeY(NullClearner(Y)) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` #### Distribution Of Target Variable ``` plt.figure(figsize = (10,6)) se.countplot(Y) ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123) ``` ### Data Rescaling This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one. 
The transformation is given by: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. ``` minmax_scaler = MinMaxScaler() X_train = minmax_scaler.fit_transform(X_train) X_test = minmax_scaler.transform(X_test) ``` ### Model ExtraTreesClassifier is an ensemble learning method fundamentally based on decision trees. ExtraTreesClassifier, like RandomForest, randomizes certain decisions and subsets of data to minimize over-learning from the data and overfitting. #### Model Tuning Parameters 1.n_estimators:int, default=100 >The number of trees in the forest. 2.criterion:{“gini”, “entropy”}, default="gini" >The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain. 3.max_depth:int, default=None >The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. 4.max_features:{“auto”, “sqrt”, “log2”}, int or float, default=”auto” >The number of features to consider when looking for the best split: ``` model=ExtraTreesClassifier(n_jobs = -1,random_state = 123) model.fit(X_train,y_train) ``` #### Model Accuracy score() method return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted ``` print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100)) ``` #### Confusion Matrix A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known. ``` plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues) ``` #### Classification Report A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False. * **where**: - Precision:- Accuracy of positive predictions. - Recall:- Fraction of positives that were correctly identified. - f1-score:- percent of positive predictions were correct - support:- Support is the number of actual occurrences of the class in the specified dataset. ``` print(classification_report(y_test,model.predict(X_test))) ``` #### Feature Importances. The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction. ``` plt.figure(figsize=(8,6)) n_features = len(X.columns) plt.barh(range(n_features), model.feature_importances_, align='center') plt.yticks(np.arange(n_features), X.columns) plt.xlabel("Feature importance") plt.ylabel("Feature") plt.ylim(-1, n_features) ``` #### Creator: Ayush Gupta , Github: [Profile](https://github.com/guptayush179)
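Referring back to the Model Tuning Parameters section above, here is a minimal sketch of fitting a non-default configuration; the specific values are illustrative choices, not tuned recommendations:

```
tuned_model = ExtraTreesClassifier(
    n_estimators=200,      # more trees than the default 100
    criterion='entropy',   # information-gain split criterion instead of 'gini'
    max_depth=10,          # cap tree depth to limit overfitting
    max_features='sqrt',   # number of features considered at each split
    n_jobs=-1,
    random_state=123,
)
tuned_model.fit(X_train, y_train)
print("Accuracy score {:.2f} %".format(tuned_model.score(X_test, y_test) * 100))
```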
``` % matplotlib inline import matplotlib.pyplot as plt from matplotlib import colors, cm import numpy as np from numpy import matmul from scipy.spatial.distance import pdist, squareform from sklearn.datasets import load_diabetes import pandas as pd from scipy.linalg import cholesky from scipy.linalg import solve from scipy.optimize import minimize import time # Developer notes # 1) Cholesky decomposition produces NaNs (probably because K+I*s**2 is not pos semidef) causing solve to complain # 2) Including gradient for likelihood made optimization much faster class mintGP(): """ The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. Takes 2D np-arrays """ def __init__(self): pass def fit(self, X, Y): self.yscale = np.std(Y) self.Y = Y/self.yscale self.X = X self.n = np.shape(X)[0] # initialize with heuristics self.lengthscale = np.mean(pdist(X, metric='euclidean')) self.likelihood_variance = 1 ############################################################### # Gradient descent on marginal likelihood with scipy L-BFGS-B # ############################################################### theta0 = np.array([self.lengthscale, self.likelihood_variance]) bnds = ((1e-20, None), (1e-10, None)) sol = minimize(self.neg_log_marg_like, theta0, args=(), method='L-BFGS-B', bounds=bnds, jac=True) self.lengthscale, self.likelihood_variance = sol.x self.marginal_likelihood = np.exp(-sol.fun) # for prediction: K,_ = self.K(X,X,self.lengthscale) self.L = cholesky( K + self.likelihood_variance*np.eye(self.n), lower=True) print(sol.x, theta0) ########################## # Likelihood computation # ########################## def neg_log_marg_like(self, theta): """ Compute negative log marginal likelihood for hyperparameter optimization """ jitter=0 K, D = self.K(self.X ,self.X, theta[0]) L = cholesky( K + (theta[1]+jitter)*np.eye(self.n), lower=True) self.L = L alpha = solve(L.T, solve(L,self.Y, lower=True) ) logmarglike = \ - 0.5*matmul(self.Y.T, alpha)[0,0] \ - np.sum( np.log( np.diag( L ) ) ) \ - 0.5*self.n*np.log(2*np.pi) # compute gradients prefactor = matmul(alpha, alpha.T) - solve(L.T, solve(L, np.eye(self.n) ) ) Kd_lengthscale = np.multiply( D/theta[0]**3, K) Kd_likelihood_variance = np.eye(self.n) logmarglike_grad = 0.5*np.array( [ np.trace( matmul(prefactor, Kd_lengthscale) ), np.trace( matmul(prefactor, Kd_likelihood_variance) )] ) return -logmarglike, -logmarglike_grad def nlml_grad(self): """ Return gradient of negative log marginal likelihood """ return self.logmarglike_grad ###################### # Kernel computation # ###################### def K(self, X, Z, lengthscale): n1 = np.shape(X)[0] n2 = np.shape(Z)[0] n1sq = np.sum(np.square(X), 1) n2sq = np.sum(np.square(Z), 1) D = (np.ones([n2, 1])*n1sq).T + np.ones([n1, 1])*n2sq -2*matmul(X,Z.T) return np.exp(-D/(2*lengthscale**2)), D def scalarK(self, x, z, lengthscale): return( np.exp( np.linalg.norm(x - z)**2/(2*lengthscale**2) ) ) ########################### # Predictive distribution # ########################### def predict(self, Xnew, predvar=False): alpha = solve(self.L.T, solve(self.L,self.Y*self.yscale ) ) if predvar: m = np.shape(Xnew)[0] Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) Knew_new = np.array( [self.scalarK(Xnew[i], Xnew[i], self.lengthscale) for i in range(m)] ).reshape([m,1]) v = solve(self.L, Knew_N.T) return matmul(Knew_N, alpha), np.diag( Knew_new + self.likelihood_variance - matmul(v.T, v) ).reshape(m,1) else: Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) 
return matmul(Knew_N, alpha) ############################### # Gradient of predictive mean # ############################### def predictive_grad(self, Xnew): alpha = solve(self.L.T, solve(self.L, self.Y*self.yscale ) ) Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) return (-1/self.lengthscale**2)*matmul( np.tile(Xnew.T, self.n) - self.X.T, np.multiply(Knew_N.T, alpha) ) ``` ## 1D Toy example with missing data, gradient computation, likelihood surface plot ``` N = 30 Nt = 400 X = np.linspace(-4,5,N).reshape(N,1); # We can pick out some values to illustrate how the uncertainty estimate behaves # it's interesting to see what happens to likelihood below ind = np.bool8([1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1]); X = X[ ind ]; N=N-sum(~ind) Xt = np.linspace(-4,5,Nt).reshape(Nt,1) Y = np.sin(X)*np.exp(0.2*X) + np.random.randn(N,1)*0.3 t0 = time.time() m = mintGP() m.fit(X,Y) print( m.lengthscale, m.likelihood_variance ) pred, var = m.predict(Xt,predvar=True) t1 = time.time() print('time to compute ',t1-t0) fig, ax = plt.subplots() ax.plot(Xt, pred, label="GP mean") twostd = 2*np.sqrt(var) ax.fill_between(Xt.ravel(), (pred-twostd).ravel(), (pred+twostd).ravel(), alpha=0.5) ax.scatter(X,Y,label='data') ax.legend(loc='best') ``` ### Gradient ``` grad = [ m.predictive_grad(x.reshape(1,1)) for x in Xt ] grad = np.array(grad) fig, ax = plt.subplots() ax.plot(Xt, grad.ravel(), label="GP deriv") ax.plot([-4,5], [0,0], label="GP deriv") ``` ### Likelihood Surface ``` #### Plot LML landscape plt.figure(1) M = 30 theta0 = np.logspace(-0.3, 0.4,M)#np.logspace(-1, 1, M) theta1 = np.logspace(-1.5, 0, M)#np.logspace(-2.5, 0, M) Theta0, Theta1 = np.meshgrid(theta0, theta1) LML = [[m.neg_log_marg_like([Theta0[i, j], Theta1[i, j]])[0] for i in range(M)] for j in range(M)] LML = np.array(LML).T vmin, vmax = (LML).min(), (LML).max() vmax = 50 level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), 50), decimals=1) plt.contour(Theta0, Theta1, LML, levels=level, norm=colors.LogNorm(vmin=vmin, vmax=vmax)) plt.colorbar() plt.xscale("log") plt.yscale("log") plt.xlabel("Length-scale") plt.ylabel("Noise-level") plt.title("neg-log-marginal-likelihood") plt.tight_layout() ``` ### likehood surface gradient ``` import sympy # Plot LML landscape plt.figure(1) LML_grad = [[ m.neg_log_marg_like([Theta0[i, j], Theta1[i, j]])[1] for i in range(M)] for j in range(M)] LML_grad = -np.array(LML_grad).T plt.figure() plt.quiver(Theta0,Theta1,LML_grad[0],LML_grad[1]) plt.xscale("log") plt.yscale("log") plt.show() ``` ## 2D toy example ``` N = 100 # training data Nt = 400 X1 = np.random.uniform(-5,5,size = (N,1)) #np.linspace(-4,5,N).reshape(N,1) X2 = np.random.uniform(-5,5,size = (N,1)) X = np.concatenate([X1,X2],1) # test data Xt = np.concatenate([np.linspace(-4,5,Nt).reshape(Nt,1), np.linspace(-4,5,Nt).reshape(Nt,1)], 1) Y = X1**2 + X2**2 + np.random.randn(N,1)*0.3 t0 = time.time() m = mintGP() m.fit(X,Y) print( m.lengthscale, m.likelihood_variance ) #pred, var = m.predict(Xt,predvar=True) t1 = time.time() print('time to compute ',t1-t0) M = 50 grid = np.linspace(-5,5,M).reshape(M,1) XX1,XX2 = np.meshgrid(grid,grid) Z = [[m.predict( np.array([XX1[i,j], XX2[i,j] ]).reshape(1,2) )[0,0] for i in range(M)] for j in range(M)] Z = np.array(Z) # plot points and fitted surface fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(XX1, XX2, Z, rstride=1, cstride=1, alpha=0.2) ax.scatter(X1, X2, Y, c='r', s=30) plt.xlabel('X') plt.ylabel('Y') ax.set_zlabel('Z') ax.axis('equal') #ax.axis('tight') ``` ### 
Gradient as colorcode like in Emans sw33t pløts ``` # Color function by norm of gradient fig = plt.figure(figsize=[8,5]) ax = fig.gca(projection='3d') my_col = cm.jet( gradnorm / np.amax(gradnorm) ) cbar = cm.ScalarMappable(cmap=cm.jet) cbar.set_array(my_col) surf = ax.plot_surface(XX1, XX2, Z, rstride=2, cstride=2, alpha=0.5, facecolors = my_col, linewidth = 0 ) fig.colorbar(cbar, shrink=0.8, aspect=8) ``` # Tensordot experiments for gradient function to be able to take NxD arrays ``` a = np.arange(60.).reshape(3,4,5) b = np.arange(24.).reshape(4,3,2) c = np.tensordot(a,b, axes=([1,0],[0,1])) c.shape c # A slower but equivalent way of computing the same d = np.zeros((5,2)) for i in range(5): for j in range(2): for k in range(3): for n in range(4): d[i,j] += a[k,n,i] * b[n,k,j] c == d xnew = np.array([[4,9],[1,8]], ndmin=2) ```
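Below is a minimal sketch of where the tensordot/broadcasting experiments above are heading: a batched version of `predictive_grad` that takes an (M, D) array of query points in one call. It assumes the fitted `mintGP` instance `m` and the test inputs `Xt` from the cells above; it reuses the same `alpha`, kernel, and lengthscale terms as the one-point version.

```
import numpy as np
from scipy.linalg import solve

def predictive_grad_batch(gp, Xnew):
    """Gradient of the GP predictive mean at each row of Xnew; returns shape (M, D)."""
    alpha = solve(gp.L.T, solve(gp.L, gp.Y * gp.yscale))   # (n, 1), as in predictive_grad
    Knew_N, _ = gp.K(Xnew, gp.X, gp.lengthscale)           # (M, n)
    diff = Xnew[:, None, :] - gp.X[None, :, :]             # (M, n, D)
    w = Knew_N * alpha.ravel()[None, :]                    # (M, n)
    return (-1.0 / gp.lengthscale ** 2) * np.einsum('mn,mnd->md', w, diff)

# Consistency check against the one-point-at-a-time version used earlier.
batch = predictive_grad_batch(m, Xt[:5])
loop = np.array([m.predictive_grad(x.reshape(1, -1)) for x in Xt[:5]]).reshape(batch.shape)
print(np.allclose(batch, loop))
```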
# Implementing doND using the dataset ``` from functools import partial import numpy as np from qcodes.dataset.database import initialise_database from qcodes.dataset.experiment_container import new_experiment from qcodes.tests.instrument_mocks import DummyInstrument from qcodes.dataset.measurements import Measurement from qcodes.dataset.plotting import plot_by_id initialise_database() # just in case no database file exists new_experiment("doNd-tutorial", sample_name="no sample") ``` First we borrow the dummy instruments from the contextmanager notebook to have something to measure. ``` # preparatory mocking of physical setup dac = DummyInstrument('dac', gates=['ch1', 'ch2']) dmm = DummyInstrument('dmm', gates=['v1', 'v2']) # and we'll make a 2D gaussian to sample from/measure def gauss_model(x0: float, y0: float, sigma: float, noise: float=0.0005): """ Returns a generator sampling a gaussian. The gaussian is normalised such that its maximal value is simply 1 """ while True: (x, y) = yield model = np.exp(-((x0-x)**2+(y0-y)**2)/2/sigma**2)*np.exp(2*sigma**2) noise = np.random.randn()*noise yield model + noise # and finally wire up the dmm v1 to "measure" the gaussian gauss = gauss_model(0.1, 0.2, 0.25) next(gauss) def measure_gauss(dac): val = gauss.send((dac.ch1.get(), dac.ch2.get())) next(gauss) return val dmm.v1.get = partial(measure_gauss, dac) ``` Now lets reimplement the qdev-wrapper do1d function that can measure one one more parameters as a function of another parameter. This is more or less as simple as you would expect. ``` def do1d(param_set, start, stop, num_points, delay, *param_meas): meas = Measurement() meas.register_parameter(param_set) # register the first independent parameter output = [] param_set.post_delay = delay # do1D enforces a simple relationship between measured parameters # and set parameters. For anything more complicated this should be reimplemented from scratch for parameter in param_meas: meas.register_parameter(parameter, setpoints=(param_set,)) output.append([parameter, None]) with meas.run() as datasaver: for set_point in np.linspace(start, stop, num_points): param_set.set(set_point) for i, parameter in enumerate(param_meas): output[i][1] = parameter.get() datasaver.add_result((param_set, set_point), *output) dataid = datasaver.run_id # convenient to have for plotting return dataid dataid = do1d(dac.ch1, 0, 1, 10, 0.01, dmm.v1, dmm.v2) axes, cbaxes = plot_by_id(dataid) def do2d(param_set1, start1, stop1, num_points1, delay1, param_set2, start2, stop2, num_points2, delay2, *param_meas): # And then run an experiment meas = Measurement() meas.register_parameter(param_set1) param_set1.post_delay = delay1 meas.register_parameter(param_set2) param_set1.post_delay = delay2 output = [] for parameter in param_meas: meas.register_parameter(parameter, setpoints=(param_set1,param_set2)) output.append([parameter, None]) with meas.run() as datasaver: for set_point1 in np.linspace(start1, stop1, num_points1): param_set1.set(set_point1) for set_point2 in np.linspace(start2, stop2, num_points2): param_set2.set(set_point2) for i, parameter in enumerate(param_meas): output[i][1] = parameter.get() datasaver.add_result((param_set1, set_point1), (param_set2, set_point2), *output) dataid = datasaver.run_id # convenient to have for plotting return dataid dataid = do2d(dac.ch1, -1, 1, 100, 0.01, dac.ch2, -1, 1, 100, 0.01, dmm.v1, dmm.v2) axes, cbaxes = plot_by_id(dataid) ```
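The same pattern extends to an arbitrary number of swept parameters. The sketch below is one possible generalisation (not taken from the qdev-wrapper) that reuses only the `Measurement`/`datasaver` calls already shown in `do1d` and `do2d`; each sweep is specified as a `(param, start, stop, num_points, delay)` tuple.

```
import itertools
import numpy as np

def doNd(sweeps, *param_meas):
    """sweeps: list of (param_set, start, stop, num_points, delay) tuples."""
    meas = Measurement()
    set_params = []
    for param_set, start, stop, num_points, delay in sweeps:
        meas.register_parameter(param_set)
        param_set.post_delay = delay
        set_params.append((param_set, np.linspace(start, stop, num_points)))

    output = []
    for parameter in param_meas:
        meas.register_parameter(parameter,
                                setpoints=tuple(p for p, _ in set_params))
        output.append([parameter, None])

    with meas.run() as datasaver:
        # walk the full grid of set points in row-major order
        for set_points in itertools.product(*[pts for _, pts in set_params]):
            results = []
            for (param_set, _), value in zip(set_params, set_points):
                param_set.set(value)
                results.append((param_set, value))
            for i, parameter in enumerate(param_meas):
                output[i][1] = parameter.get()
            datasaver.add_result(*results, *output)
        dataid = datasaver.run_id  # convenient to have for plotting
    return dataid

dataid = doNd([(dac.ch1, -1, 1, 20, 0.01), (dac.ch2, -1, 1, 20, 0.01)], dmm.v1)
axes, cbaxes = plot_by_id(dataid)
```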
``` !pip install seaborn !pip install newspaper3k import nltk nltk.download('stopwords') ``` The next two lines are required to load files from your Google drive. ``` from google.colab import drive drive.mount('/content/drive') ``` # SCRAPER ``` from newspaper import Article from newspaper import ArticleException import newspaper # from progress.bar import IncrementalBar import time import string def scrape_news_links(url): ''' Scrapes links : not only google but any online vendor. set url while calling the function ''' print('Scraping links') paper = newspaper.build(url, memoize_articles=False) links = [] # bar = IncrementalBar('Scraping Links', max=len(paper.articles), suffix='%(percent)d%%') for article in paper.articles: links.append(article.url) # bar.next() time.sleep(0.1) # bar.finish() # print(links) return links def clean_text(text): ''' To clean text ''' print('cleaning_text') # text = text.strip() # text = text.lower() # for punct in string.punctuation: # text = text.replace(punct, '') text = text.lower() strin = text.split('\n') text = " ".join(strin) # text.replace('\\', '') exclude = set(string.punctuation) text = ''.join(ch for ch in text if ch not in exclude) return text def get_content(links): ''' get headlines and news content ''' print('getting content') content = [] # next_bar = IncrementalBar('Getting Content', max=) # bar = IncrementalBar('Getting content & Cleaning text', max=len(links), suffix='%(percent)d%%' ) for url in links: try: article = Article(url, language='en') article.download() article.parse() title = clean_text(article.title) news = clean_text(article.text) if title != None: if news != None: if news != ' ': if news != '': # for sites which news content cannot be scraped content.append([title, news]) # bar.next() except ArticleException as ae: # if 'Article \'download()\' failed' in ae: continue # bar.finish() return content def scraper(link='https://timesofindia.indiatimes.com/'): ''' aggregator function ''' # print('scraper_main')5 return get_content(scrape_news_links(link)) # if __name__ == "__main__": # links = scrape_google_links() # print(get_content(links[:15])) ``` # DF AND CSV ``` import csv import pandas as pd LINKS = ['https://timesofindia.indiatimes.com/', 'https://www.thehindu.com/', 'https://www.bbc.com/news', 'https://www.theguardian.co.uk/', 'https://www.hindustantimes.com/', 'https://indianexpress.com/', 'https://www.dailypioneer.com/' 'https://www.deccanherald.com/', 'https://www.telegraphindia.com/', 'https://www.dnaindia.com/', 'https://www.deccanchronicle.com/', 'https://www.asianage.com/', 'https://economictimes.indiatimes.com/', 'https://www.tribuneindia.com/'] def create_df(content_list): ''' To write the data to csv file takes a list of list where the inner list contains ['headline', 'news'] ''' title = [] news = [] print('creating_dataFrame') for content in content_list: title.append(content[0]) news.append(content[1]) # keywords.append(content[2]) data = {'Title' : title, 'News' : news} df = pd.DataFrame(data, columns=['Title', 'News']) return df def df_to_csv(df, filename='NewsCluster.csv'): ''' writes dataframe to csv ''' print('writing_to_csv') df.to_csv('/content/drive/My Drive/data/' + filename) def create_csv(): ''' aggregator function of this module ''' print('create_csv_main') content_list = [] for link in LINKS: content_list.append(scraper(link)) content_lst = [] for content in content_list: for cont in content: content_lst.append(cont) # content_lst = scraper() # print(content_lst) try: num = int(input('Enter the 
number of articles to be stored : ')) if num < 15: raise ValueError('Provide a larger number for dataset') df_to_csv(create_df(content_lst[:num])) except ValueError as ve: df_to_csv(create_df(content_lst)) ``` # CONVERT TO DB ``` import sqlite3 from sqlite3 import IntegrityError import csv def insert_to_db(tup): with sqlite3.connect('/content/drive/My Drive/data/NEWS.DB') as con: cur = con.cursor() cur.execute("INSERT INTO content (headlines, news) VALUES(?, ?);", tup) con.commit() def to_database(): ''' converts csv to db ''' with sqlite3.connect('/content/drive/My Drive/data/NEWS.DB') as con: cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS content(headlines TEXT, news TEXT PRIMARY KEY);') with open('/content/drive/My Drive/data/NewsCluster.csv', encoding='utf-8') as fin: dr = csv.DictReader(fin) for i in dr: try: tup = (i['Title'], i['News']) insert_to_db(tup) except IntegrityError as ie: # if 'unique constraint' in ie: continue # to_db = [(i['Title'], i['News']) for i in dr] # cur.executemany("INSERT INTO content (headlines, news) VALUES(?, ?);", to_db) con.commit() con.close() def print_db(): ''' prints database used for reference and verification ''' with sqlite3.connect("/content/drive/My Drive/data/NEWS.DB") as con: cur = con.cursor() cur.execute('SELECT * FROM content') return cur.fetchall() # if __name__ == "__main__": ''' execute either of the functions to update database or displahy the content ''' # to_database() # print(print_db()[0]) ``` # CALL SCRAPER, CREATE CSV and DB ``` create_csv() to_database() ``` # CHECK CSV ``` import csv def print_csv(filename): with open('/content/drive/My Drive/data/'+filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') for row in csv_reader: print(row) if __name__ == '__main__': print_csv("NewsCluster.csv") ``` # CLUSTERING ``` """ Wrapper for offline clustering methods that do not take into account temporal aspects of data and online clustering methods that update and/or predict new data as it comes in. Framework supports custom text representations (e.g. Continuous Bag of Words) but will default to tfidf if none are provided. """ import numpy as np import seaborn as sns from sklearn.manifold import MDS from scipy.cluster.hierarchy import ward, dendrogram import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # from hdbscan import HDBSCAN from sklearn.metrics.pairwise import cosine_similarity from nltk.corpus import stopwords from scipy.sparse import issparse, vstack from sklearn.cluster import * from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer nltk_stopwords = stopwords.words('english') class Cluster: """ Clustering methods for text. Be cautious of datasize; in cases of large data, KMeans may be the only efficient choice. 
Accepts custom matrices Full analysis of methods can be found at: http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html Usage: >> with open('../data/cleaned_text.txt', 'r', encoding='utf8') as f: text = f.readlines() >> clustering = Cluster(text) >> results = clustering('hdbscan', matrix=None, reduce_dim=None, visualize=True, top_terms=False, min_cluster_size=10) >> print(results) """ def __init__(self, text): """ Args: text: strings to be clustered (list of strings) """ self.text = list(set(text)) def __call__(self, method, vectorizer=None, reduce_dim=None, viz=False, *args, **kwargs): """ Args: method: algorithm to use to cluster data (str) vectorizer: initialized method to convert text to np array; assumes __call__ vectorizes the text (Class, optional) reduce_dim: reduce dim of representation matrix (int, optional) visualize: visualize clusters in 3D (bool, optional) *args, **kwargs: see specified method function """ # Make sure method is valid assert method in ['hdbscan', 'dbscan', 'spectral', 'kmeans', 'minikmeans', 'affinity_prop', 'agglomerative', 'mean_shift', 'birch'], 'Invalid method chosen.' if not hasattr(self, 'vectorizer'): if vectorizer is None: self._init_tfidf() else: self.vectorizer = vectorizer self.matrix = self.vectorizer(self.text) # Reduce dimensionality using latent semantic analysis (makes faster) if reduce_dim is not None: self.matrix = self._pca(reduce_dim, self.matrix) # Cache current method method = eval('self.' + method) self.algorithm = method(*args, **kwargs) self.results = self._organize(self.algorithm.labels_) # For plotting self.viz_matrix = self.matrix # Visualize clustering outputs if applicable if viz: # _ = self.viz2D() _ = self.viz3D() _ = self.top_terms() return self.results # def hdbscan(self, min_cluster_size=10, prediction_data=False): # """ DBSCAN but allows for varying density clusters and no longer # requires epsilon parameter, which is difficult to tune. # http://hdbscan.readthedocs.io/en/latest/how_hdbscan_works.html # Scales slightly worse than DBSCAN, but with a more intuitive parameter. # """ # hdbscan = HDBSCAN(min_cluster_size=min_cluster_size, # prediction_data=prediction_data) # if prediction_data: # return hdbscan.fit(self._safe_dense(self.matrix)) # else: # return hdbscan.fit(self.matrix) def dbscan(self, eps=0.50): """ Density-based algorithm that clusters points in dense areas and distances points in sparse areas. Stable, semi-fast, non-global. Scales very well with n_samples, decently with n_clusters (not tunable) """ dbscan = DBSCAN(eps=eps, min_samples=3) return dbscan.fit(self.matrix) def kmeans(self, n_clusters=10, n_init=5): km = KMeans(n_clusters=n_clusters, init='k-means++', max_iter=300, n_init=n_init, verbose=0, random_state=3425) return km.fit(self.matrix) def minikmeans(self, n_clusters=10, n_init=5, batch_size=5000): """ Partition dataset into n_cluster global chunks by minimizing intra-partition distances. Expect quick results, but with noise. Scales exceptionally well with n_samples, decently with n_clusters. """ kmeans = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=n_init, batch_size=batch_size) return kmeans.fit(self.matrix) def birch(self, n_clusters=10): """ Partitions dataset into n_cluster global chunks by repeatedly merging subclusters of a CF tree. Birch does not scale very well to high dimensional data. If many subclusters are desired, set n_clusters=None. Scales well with n_samples, well with n_clusters. 
""" birch = Birch(n_clusters=n_clusters) return birch.fit(self.matrix) def agglomerative(self, n_clusters=10, linkage='ward'): """ Iteratively clusters dataset semi-globally by starting with each point in its own cluster and then using some criterion to choose another cluster to merge that cluster with another cluster. Scales well with n_samples, decently with n_clusters. """ agglomerative = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage) return agglomerative.fit(self._safe_dense(self.matrix)) def spectral(self, n_clusters=5): """ Partitions dataset semi-globally by inducing a graph based on the distances between points and trying to learn a manifold, and then running a standard clustering algorithm (e.g. KMeans) on this manifold. Scales decently with n_samples, poorly with n_clusters. """ spectral = SpectralClustering(n_clusters=n_clusters) return spectral.fit(self.matrix) def affinity_prop(self, damping=0.50): """ Partitions dataset globally using a graph based approach to let points ‘vote’ on their preferred ‘exemplar’. Does not scale well with n_samples. Not recommended to use with text. """ affinity_prop = AffinityPropagation(damping=damping) return affinity_prop.fit(self._safe_dense(self.matrix)) def mean_shift(self, cluster_all=False): """ Centroid-based, global method that assumes there exists some probability density function from which the data is drawn, and tries to place centroids of clusters at the maxima of that density function. Unstable, but conservative. Does not scale well with n_samples. Not recommended to use with text. """ mean_shift = MeanShift(cluster_all=False) return mean_shift.fit(self._safe_dense(self.matrix)) def _init_tfidf(self, max_features=30000, analyzer='word', stopwords=nltk_stopwords, token_pattern=r"(?u)\b\w+\b"): """ Default representation for data is sparse tfidf vectors Args: max_features: top N vocabulary to consider (int) analyzer: 'word' or 'char', level at which to segment text (str) stopwords: words to remove from consideration, default nltk (list) """ # Initialize and fit tfidf vectors self.vectorizer = TfidfVectorizer(max_features=max_features, stop_words=stopwords, analyzer=analyzer, token_pattern=token_pattern) self.matrix = self.vectorizer.fit_transform(self.text) # Get top max_features vocabulary self.terms = self.vectorizer.get_feature_names() # For letting user know if tfidf has been initialized self.using_tfidf = True def viz2D(self, matrix=None, plot_kwds={'alpha':0.30, 's':40, 'linewidths':0}): """ Visualize clusters in 2D """ # Run PCA over the data so we can plot # matrix2D = self._pca(n=2, matrix=self.viz_matrix) # # Get labels # labels = np.unique(self.results['labels']) # # Assign a color to each label # palette = sns.color_palette('deep', max(labels)+1) # colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # # Plot the data # plt.close() # fig = plt.figure(figsize=(10,6)) # plt.scatter(matrix2D.T[0], # matrix2D.T[1], # c=colors, # **plot_kwds # ) # frame = plt.gca() # # Turn off axes, since they are arbitrary # frame.axes.get_xaxis().set_visible(False) # frame.axes.get_yaxis().set_visible(False) # # Add a title # alg_name = str(self.algorithm.__class__.__name__) # plt.title('{0} clusters found by {1}'.format(len(labels), # alg_name), # fontsize=20) # plt.tight_layout() # plt.show() # return fig # Run PCA over the data matrix3D = self._pca(n=2, matrix=self.viz_matrix) # Extract labels from results labels = self.results['labels'] # Assign colors palette = sns.color_palette('deep', 
int(max(labels)+1)) colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # Plot the data plt.close() fig = plt.figure(figsize=(10,6)) # ax = plt.axes(projection='3d') plt.scatter(matrix3D.T[0], matrix3D.T[1], # matrix3D.T[2], c=colors) # Add a title alg_name = str(self.algorithm.__class__.__name__) plt.title('{0} Clusters | {1} Items | {2}'.format(len(set(labels)), matrix3D.shape[0], alg_name), fontsize=20) # Turn off arbitrary axis tick labels # plt.tick_params(axis='both', left=False, top=False, right=False, # bottom=False, labelleft=False, labeltop=False, # labelright=False, labelbottom=False) plt.tight_layout() plt.show() return fig def viz3D(self, matrix=None): """ Visualize clusters in 3D """ # Run PCA over the data matrix3D = self._pca(n=3, matrix=self.viz_matrix) # Extract labels from results labels = self.results['labels'] # Assign colors palette = sns.color_palette('deep', int(max(labels)+1)) colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # Plot the data plt.close() fig = plt.figure(figsize=(10,6)) ax = plt.axes(projection='3d') ax.scatter(matrix3D.T[0], matrix3D.T[1], matrix3D.T[2], c=colors) # Add a title alg_name = str(self.algorithm.__class__.__name__) plt.title('{0} Clusters | {1} Items | {2}'.format(len(set(labels)), matrix3D.shape[0], alg_name), fontsize=20) # Turn off arbitrary axis tick labels plt.tick_params(axis='both', left=False, top=False, right=False, bottom=False, labelleft=False, labeltop=False, labelright=False, labelbottom=False) plt.tight_layout() plt.show() return fig def top_terms(self, topx=10): """ Print out top terms per cluster. """ if self.using_tfidf != True: print('For use with non-tfidf vectorizers,try sklearn NearestNeighbors\ (although NN performs poorly with high dimensional inputs.') return None # Get labels, sort text IDs by cluster labels = self.results['labels'] cluster_idx = {clust_id: np.where(labels == clust_id)[0] for clust_id in set(labels)} # Get centers, stack into array centroids = np.vstack([self.viz_matrix[indexes].mean(axis=0) for key, indexes in cluster_idx.items()]) # Compute closeness of each term representation to each centroid order_centroids = np.array(centroids).argsort()[:, ::-1] # Organize terms into a dictionary cluster_terms = {clust_id: [self.terms[ind] for ind in order_centroids[idx, :topx]] for idx, clust_id in enumerate(cluster_idx.keys())} # Print results print("Top terms per cluster:") for clust_id, terms in cluster_terms.items(): words = ' | '.join(terms) print("Cluster {0} ({1} items): {2}".format(clust_id, len(cluster_idx[clust_id]), words)) return cluster_terms def item_counts(self): """ Print number of counts in each cluster """ for key, vals in self.results.items(): if key == 'labels': continue print('Cluster {0}: {1} items'.format(key, len(vals))) def _organize(self, labels): """ Organize text from clusters into a dictionary """ # Organize text into respective clusters cluster_idx = {clust_id: np.where(labels == clust_id)[0] for clust_id in set(labels)} # Put results in a dictionary; key is cluster idx values are text results = {clust_id: [self.text[idx] for idx in cluster_idx[clust_id]] for clust_id in cluster_idx.keys()} results['labels'] = list(labels) return results def _pca(self, n, matrix): """ Perform PCA on the data """ return TruncatedSVD(n_components=n).fit_transform(matrix) def _safe_dense(self, matrix): """ Some algorithms don't accept sparse input; for these, make sure the input matrix is dense. 
""" if issparse(matrix): return matrix.todense() else: return matrix class OnlineCluster(Cluster): """ Online (stream) clustering of textual data. Check each method to determine if the model is updating or ad-hoc predicting. These are not 'true' online methods as they preserve all seen data, as opposed to letting data points and clusters fade, merge, etc. over time. Usage: To initialize: >> with open('../data/cleaned_text.txt', 'r', encoding='utf8') as f: text = f.readlines() >> online = OnlineCluster(method='kmeans', text, visualize=True) To predict and update parameters if applicable: >> new_text = text[-10:] >> online.predict(new_text) """ def __init__(self, text, method, *args, **kwargs): """ Args: text: strings to be clustered (list of strings) method: algorithm to use to cluster (string) *args, **kwargs (optional): vectorizer: text representation. Defaults tfidf (array, optional) reduce_dim: reduce dim of representation matrix (int, optional) visualize: visualize clusters in 3D (bool, optional) """ # Only accept valid arguments assert method in ['kmeans', 'birch', 'hdbscan', 'dbscan', 'mean_shift'], \ 'Method incompatible with online clustering.' # Initialize inherited class super().__init__(text) # Get initial results self.results = self.__call__(method=method, *args,**kwargs) # Save args, set method self.__dict__.update(locals()) self.method = eval('self._' + method) def predict(self, new_text): """ 'Predict' a new example based on cluster centroids and update params if applicable (kmeans, birch). If a custom (non-tfidf) text representation is being used, class assumes new_text is already in vectorized form. Args: new_text: list of strings to predict """ # Predict assert type(new_text) == list, 'Input should be list of strings.' self.text = list(set(self.text + new_text)) new_matrix = self._transform(new_text) output_labels = self.method(new_matrix) # Update attribute for results, plotting self._update_results(output_labels) self.viz_matrix = vstack([self.viz_matrix, new_matrix]) return output_labels def _kmeans(self, new_matrix): """ Updates parameters and predicts """ self.algorithm = self.algorithm.partial_fit(new_matrix) return self.algorithm.predict(new_matrix) def _birch(self, new_matrix): """ Updates parameters and predicts """ self.algorithm = self.algorithm.partial_fit(new_matrix) return self.algorithm.predict(new_matrix) def _hdbscan(self, new_matrix): """ Prediction only, HDBSCAN requires training to be done on dense matrices for prediction to work properly. This makes training inefficient, though. 
""" try: labels, _ = approximate_predict(self.algorithm, self._safe_dense(new_matrix)) except AttributeError: try: self.algorithm.generate_prediction_data() labels, _ = approximate_predict(self.algorithm, self._safe_dense(new_matrix)) except ValueError: print('Must (inefficiently) re-train with prediction_data=True') return labels def _dbscan(self, new_matrix): """ Prediction only """ # Extract labels labels = self.algorithm.labels_ # Result is noise by default output = np.ones(shape=new_matrix.shape[0], dtype=int)*-1 # Iterate all input samples for a label for idx, row in enumerate(new_matrix): # Find a core sample closer than EPS for i, row in enumerate(self.algorithm.components_): # If it's below the threshold of the dbscan model if cosine(row, x_core) < self.algorithm.eps: # Assign label of x_core to the input sample output[idx] = labels[self.algorithm.core_sample_indices_[i]] break return output def _mean_shift(self, new_matrix): """ Prediction only, not efficient """ return self.algorithm.predict(new_matrix) def _transform(self, new_text): """ Transform text to tfidf representation. Assumes already vectorized if tfidf matrix has not been initialized. """ if self.using_tfidf: return self.vectorizer.transform(new_text) else: return self.vectorizer(new_text) return new_matrix def _update_results(self, labels): """ Update running dictionary """ new_results = self._organize(labels) for key in self.results.keys(): try: self.results[key] += new_results[key] except KeyError: continue from matplotlib import pyplot as plt import pandas as pd import string cluster_dict = {2:'dbscan', 3:'spectral', 4:'kmeans', 5:'affinity_prop', 6:'agglomerative', 7:'mean_shift', 8:'birch'} def clean(text): ''' Clean text before running clusterer ''' text = text.strip() text = text.lower() for punct in string.punctuation: text = text.replace(punct, ' ') lst = text.split() text = " ".join(lst) for t in text: if t not in string.printable: text = text.replace(t, '') return text def clust(): df = pd.read_csv('/content/drive/My Drive/data/NewsCluster.csv') data = df["Title"].tolist() data = [clean(dt) for dt in data ] # for dt in data: # data[data.index(dt)] = clean(dt) data = pd.DataFrame(data, columns=["text"]) data['text'].dropna(inplace=True) # %matplotlib inline clustering = Cluster(data.text) # results = clustering(method='dbscan', vectorizer=None, # reduce_dim=None, viz=True, eps=0.9) results = clustering(method='kmeans', vectorizer=None, reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='birch', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='agglomerative', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='spectral', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='affinity_prop', vectorizer=None, # reduce_dim=None, viz=True, damping=0.5) results = clustering(method='minikmeans', vectorizer=None, reduce_dim=None, viz=True, n_clusters=12) # clustering = Cluster(data.text) # for i in range(2,9): # print(cluster_dict[i]) # if i == 4: # result = clustering(cluster_dict[i]) # else: # result = clustering(cluster_dict[i]) # print(result) clust() ```
# Cross-asset skewness This notebook analyses cross-asset cross-sectional skewness strategy. The strategy takes long positions on contracts with most negative historical skewness and short positions on ones with most positive skewness. ``` %matplotlib inline from datetime import datetime import logging import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import pandas as pd import matplotlib.pyplot as plt import matplotlib.ticker as mticker plt.style.use('bmh') from vivace.backtest import signal from vivace.backtest import processing from vivace.backtest.contract import all_futures_baltas2019 from vivace.backtest.engine import BacktestEngine from vivace.backtest.enums import Strategy from vivace.backtest.stats import Performance ``` # Data Various futures contracts in commodity, currency, government bond futures and equity index futures are tested. Some contracts are missing in this data set due to data availability. ``` all_futures_baltas2019 all_futures_baltas2019.shape ``` # Performance ## Run backtest For each asset class, a simple portfolio is constructed by using trailing 1-year returns of each futures. Unlike studies in equities, the recent 1-month is included in the formation period. Positions are rebalanced on a monthly basis. ``` engine_commodity = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "commodity"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_commodity.run() commodity_portfolio_return = (engine_commodity.calculate_equity_curve(calculate_net=False) .rename('Commodity skewness portfolio')) engine_equity = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "equity"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_equity.run() equity_portfolio_return = (engine_equity.calculate_equity_curve(calculate_net=False) .rename('Equity skewness portfolio')) engine_fixed_income = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "fixed_income"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_fixed_income.run() fixed_income_portfolio_return = (engine_fixed_income.calculate_equity_curve(calculate_net=False) .rename('Fixed income skewness portfolio')) engine_currency = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "currency"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_currency.run() currency_portfolio_return = (engine_currency.calculate_equity_curve(calculate_net=False) .rename('Currency skewness portfolio')) fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True) commodity_portfolio_return.plot(ax=ax[0][0], logy=True) equity_portfolio_return.plot(ax=ax[0][1], logy=True) fixed_income_portfolio_return.plot(ax=ax[1][0], logy=True) currency_portfolio_return.plot(ax=ax[1][1], logy=True) ax[0][0].set_title('Commodity skewness portfolio') ax[0][1].set_title('Equity skewness portfolio') 
ax[1][0].set_title('Fixed income skewness portfolio') ax[1][1].set_title('Currency skewness portfolio') ax[0][0].set_ylabel('Cumulative returns'); ax[1][0].set_ylabel('Cumulative returns'); pd.concat(( commodity_portfolio_return.pipe(Performance).summary(), equity_portfolio_return.pipe(Performance).summary(), fixed_income_portfolio_return.pipe(Performance).summary(), currency_portfolio_return.pipe(Performance).summary(), ), axis=1) ``` ## Performance since 1990 In the original paper, performance since 1990 is reported. The result below confirms that all skewness based portfolios exhibited positive performance over time. Interestingly the equity portfolio somewhat performed weakly in the backtest. This could be due to the slightly different data set. ``` fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True) commodity_portfolio_return['1990':].plot(ax=ax[0][0], logy=True) equity_portfolio_return['1990':].plot(ax=ax[0][1], logy=True) fixed_income_portfolio_return['1990':].plot(ax=ax[1][0], logy=True) currency_portfolio_return['1990':].plot(ax=ax[1][1], logy=True) ax[0][0].set_title('Commodity skewness portfolio') ax[0][1].set_title('Equity skewness portfolio') ax[1][0].set_title('Fixed income skewness portfolio') ax[1][1].set_title('Currency skewness portfolio') ax[0][0].set_ylabel('Cumulative returns'); ax[1][0].set_ylabel('Cumulative returns'); ``` ## GSF The authors defines the global skewness factor (GSF) by combining the 4 asset classes with equal vol weighting. Here, the 4 backtests are simply combined with each ex-post realised volatility. ``` def get_leverage(equity_curve: pd.Series) -> float: return 0.1 / (equity_curve.pct_change().std() * (252 ** 0.5)) gsf = pd.concat(( commodity_portfolio_return.pct_change() * get_leverage(commodity_portfolio_return), equity_portfolio_return.pct_change() * get_leverage(equity_portfolio_return), fixed_income_portfolio_return.pct_change() * get_leverage(fixed_income_portfolio_return), currency_portfolio_return.pct_change() * get_leverage(currency_portfolio_return), ), axis=1).mean(axis=1) gsf = gsf.fillna(0).add(1).cumprod().rename('GSF') fig, ax = plt.subplots(1, 2, figsize=(14, 4)) gsf.plot(ax=ax[0], logy=True); gsf['1990':].plot(ax=ax[1], logy=True); ax[0].set_title('GSF portfolio') ax[1].set_title('Since 1990') ax[0].set_ylabel('Cumulative returns'); pd.concat(( gsf.pipe(Performance).summary(), gsf['1990':].pipe(Performance).summary().add_suffix(' (since 1990)') ), axis=1) ``` ## Post publication ``` publication_date = datetime(2019, 12, 16) fig, ax = plt.subplots(1, 2, figsize=(14, 4)) gsf.plot(ax=ax[0], logy=True); ax[0].set_title('GSF portfolio') ax[0].set_ylabel('Cumulative returns'); ax[0].axvline(publication_date, lw=1, ls='--', color='black') ax[0].text(publication_date, 0.6, 'Publication date ', ha='right') gsf.loc[publication_date:].plot(ax=ax[1], logy=True); ax[1].set_title('GSF portfolio (post publication)'); ``` ## Recent performance ``` fig, ax = plt.subplots(figsize=(8, 4.5)) gsf.tail(252 * 2).plot(ax=ax, logy=True); ax.set_title('GSF portfolio') ax.set_ylabel('Cumulative returns'); ``` # Reference - Baltas, N. and Salinas, G., 2019. Cross-Asset Skew. Available at SSRN. ``` print(f'Updated: {datetime.utcnow().strftime("%d-%b-%Y %H:%M")}') ```
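For reference, the sketch below spells out in plain pandas what the `XSSkewness` signal configured above amounts to: trailing 252-day skewness of daily returns, negated so the most negatively skewed contracts receive the largest signal, and held fixed within each month. The `prices` DataFrame (daily DatetimeIndex, one column per contract) is a placeholder, not an object from the `vivace` package.

```
import pandas as pd

def xs_skewness_signal(prices: pd.DataFrame, lookback: int = 252) -> pd.DataFrame:
    """Negated trailing skewness of daily returns, held fixed within each month."""
    daily_returns = prices.pct_change()
    signal = -daily_returns.rolling(lookback).skew()   # long negative skew, short positive
    monthly = signal.resample('M').last()              # value at each month-end
    return monthly.reindex(prices.index, method='pad') # pad forward until next rebalance

def long_short_weights(signal_row: pd.Series, quantile: float = 1 / 3) -> pd.Series:
    """Equal-weight long the top-signal tercile, short the bottom tercile."""
    ranks = signal_row.rank(pct=True)
    weights = pd.Series(0.0, index=signal_row.index)
    weights[ranks >= 1 - quantile] = 1.0   # most negative skew -> long
    weights[ranks <= quantile] = -1.0      # most positive skew -> short
    return weights / weights.abs().sum()
```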
# GFA Zero Calibration GFA calibrations should normally be updated in the following sequence: zeros, flats, darks. This notebook should be run using a DESI kernel, e.g. `DESI master`. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import os import sys import json import collections from pathlib import Path import scipy.interpolate import scipy.stats import fitsio ``` Install / upgrade the `desietcimg` package: ``` try: import desietcimg print('desietcimg already installed') except ImportError: print('Installing desietcimg...') !{sys.executable} -m pip install --user git+https://github.com/dkirkby/desietcimg upgrade = False if upgrade: print('Upgrading desietcimg...') !{sys.executable} -m pip install --upgrade --user git+https://github.com/dkirkby/desietcimg import desietcimg.util import desietcimg.plot import desietcimg.gfa ``` NERSC configuration: ``` assert os.getenv('NERSC_HOST', False) ROOT = Path('/project/projectdirs/desi/spectro/data/') assert ROOT.exists() ``` Initial GFA calibration: ``` CALIB = Path('/global/cscratch1/sd/dkirkby/GFA_calib.fits') assert CALIB.exists() ``` Directory for saving plots: ``` plotdir = Path('zerocal') plotdir.mkdir(exist_ok=True) ``` ## Process Zero Sequences Use a sequence of 200 zeros from [20191027](http://desi-www.kpno.noao.edu:8090/nightsum/nightsum-2019-10-27/nightsum.html). **Since this data has not yet been staged to its final location, we fetch it from the `lost+found` directory** (by overriding the definition of `ROOT` above): ``` ROOT = Path('/global/project/projectdirs/desi/spectro/staging/lost+found/') files = desietcimg.util.find_files(ROOT / '20191027' / '{N}/gfa-{N}.fits.fz', min=21968, max=22167) ``` Build master zero images: ``` def build_master_zero(): master_zero = {} GFA = desietcimg.gfa.GFACamera(calib_name=str(CALIB)) for k, gfa in enumerate(GFA.gfa_names): raw, meta = desietcimg.util.load_raw(files, 'EXPTIME', hdu=gfa) assert np.all(np.array(meta['EXPTIME']) == 0) GFA.setraw(raw, name=gfa, subtract_master_zero=False, apply_gain=False) master_zero[gfa] = np.median(GFA.data, axis=0) return master_zero %time master_zero = build_master_zero() ``` Estimate the readnoise in ADU for each amplifier, using the new master zero: ``` desietcimg.gfa.GFACamera.master_zero = master_zero def get_readnoise(hrange=70, hbins=141, nsig=6, save=None): GFA = desietcimg.gfa.GFACamera(calib_name=str(CALIB)) fig, axes = plt.subplots(5, 2, sharex=True, figsize=(18, 11)) bins = np.linspace(-hrange, +hrange, hbins) noise = {} for k, gfa in enumerate(GFA.gfa_names): GFA.name = gfa ax = axes[k // 2, k % 2] raw, meta = desietcimg.util.load_raw(files, 'EXPTIME', hdu=gfa) assert np.all(np.array(meta['EXPTIME']) == 0) GFA.setraw(raw, name=gfa, subtract_master_zero=True, apply_gain=False) noise[gfa] = {} for j, amp in enumerate(GFA.amp_names): # Extract data for this quadrant. qdata = GFA.data[GFA.quad[amp]] X = qdata.reshape(-1) # Clip for std dev calculation. 
Xclipped, lo, hi = scipy.stats.sigmaclip(X, low=nsig, high=nsig) noise[gfa][amp] = np.std(Xclipped) label = f'{amp} {noise[gfa][amp]:.2f}' c = plt.rcParams['axes.prop_cycle'].by_key()['color'][j] ax.hist(X, bins=bins, label=label, color=c, histtype='step') for x in lo, hi: ax.axvline(x, ls='-', c=c, alpha=0.5) ax.set_yscale('log') ax.set_yticks([]) if k in (8, 9): ax.set_xlabel('Zero Residual [ADU]') ax.set_xlim(bins[0], bins[-1]) ax.legend(ncol=2, title=f'{gfa}', loc='upper left') plt.subplots_adjust(left=0.03, right=0.99, bottom=0.04, top=0.99, wspace=0.07, hspace=0.04) if save: plt.savefig(save) return noise %time readnoise = get_readnoise(save=str(plotdir / 'GFA_readnoise.png')) repr(readnoise) ``` ## Save Updated Calibrations ``` desietcimg.gfa.save_calib_data('GFA_calib_zero.fits', master_zero=master_zero, readnoise=readnoise) ``` Use this for subsequent flat and dark calibrations: ``` !cp GFA_calib_zero.fits {CALIB} ``` ## Comparisons Compare with the read noise values from the lab studies and Aaron Meisner's [independent analysis](https://desi.lbl.gov/trac/wiki/Commissioning/Planning/gfachar/bias_readnoise_20191027): ``` ameisner_rdnoise = { 'GUIDE0': { 'E': 5.56, 'F': 5.46, 'G': 5.12, 'H': 5.24}, 'FOCUS1': { 'E': 5.21, 'F': 5.11, 'G': 4.88, 'H': 4.90}, 'GUIDE2': { 'E': 7.11, 'F': 6.23, 'G': 5.04, 'H': 5.29}, 'GUIDE3': { 'E': 5.28, 'F': 5.16, 'G': 4.89, 'H': 5.00}, 'FOCUS4': { 'E': 5.23, 'F': 5.12, 'G': 5.01, 'H': 5.11}, 'GUIDE5': { 'E': 5.11, 'F': 5.00, 'G': 4.80, 'H': 4.86}, 'FOCUS6': { 'E': 5.12, 'F': 5.09, 'G': 4.85, 'H': 5.07}, 'GUIDE7': { 'E': 5.00, 'F': 4.96, 'G': 4.63, 'H': 4.79}, 'GUIDE8': { 'E': 6.51, 'F': 5.58, 'G': 5.12, 'H': 5.47}, 'FOCUS9': { 'E': 6.85, 'F': 5.53, 'G': 5.07, 'H': 5.57}, } def compare_rdnoise(label='20191027', save=None): # Use the new calibrations written above. desietcimg.gfa.GFACamera.calib_data = None GFA = desietcimg.gfa.GFACamera(calib_name='GFA_calib_zero.fits') markers = '+xo.' fig, ax = plt.subplots(1, 2, figsize=(12, 5)) for k, gfa in enumerate(GFA.gfa_names): color = plt.rcParams['axes.prop_cycle'].by_key()['color'][k] ax[1].scatter([], [], marker='o', c=color, label=gfa) for j, amp in enumerate(desietcimg.gfa.GFACamera.amp_names): marker = markers[j] measured = GFA.calib_data[gfa][amp]['RDNOISE'] # Lab results are given in elec so use lab gains to convert back to ADU lab = GFA.lab_data[gfa][amp]['RDNOISE'] / GFA.lab_data[gfa][amp]['GAIN'] ax[0].scatter(lab, measured, marker=marker, c=color) ax[1].scatter(ameisner_rdnoise[gfa][amp], measured, marker=marker, c=color) for j, amp in enumerate(GFA.amp_names): ax[1].scatter([], [], marker=markers[j], c='k', label=amp) xylim = (4.3, 5.5) for axis in ax: axis.plot(xylim, xylim, 'k-', zorder=-10, alpha=0.25) axis.set_ylabel(f'{label} Read Noise [ADU]') axis.set_xlim(*xylim) axis.set_ylim(*xylim) ax[1].legend(ncol=3) ax[0].set_xlabel('Lab Data Read Noise [ADU]') ax[1].set_xlabel('ameisner Read Noise [ADU]') plt.tight_layout() if save: plt.savefig(save) compare_rdnoise(save=str(plotdir / 'rdnoise_compare.png')) ```
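The read-noise estimate in `get_readnoise` reduces to: median-stack the zeros, subtract that master zero, and take a sigma-clipped standard deviation of the residuals per amplifier. The sketch below isolates that calculation and checks it on synthetic Gaussian frames, so it can be run without any GFA data.

```
import numpy as np
import scipy.stats

def estimate_readnoise(zero_stack, nsig=6):
    """zero_stack: (n_frames, ny, nx) array of raw zero exposures in ADU."""
    master_zero = np.median(zero_stack, axis=0)
    residuals = (zero_stack - master_zero).reshape(-1)
    clipped, lo, hi = scipy.stats.sigmaclip(residuals, low=nsig, high=nsig)
    return np.std(clipped), master_zero

# Synthetic check: 200 frames of pure Gaussian noise with sigma = 5 ADU.
rng = np.random.default_rng(seed=123)
fake_zeros = 1000.0 + rng.normal(scale=5.0, size=(200, 64, 64))
noise, _ = estimate_readnoise(fake_zeros)
print(f'Recovered read noise: {noise:.2f} ADU (true value 5.00)')
```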
``` import numpy as np #UNITS #A = mol/cm^3 -s #n = none #Ea = kcal/k*mol #c = #d = #f = six_parameter_fit_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-13.37032086, 32.42060027, 19.23022032, 6.843287462 , 36.62853824 ,-0.220309785 ,-0.099366346, -4.134352081]), 'n':np.array([1.948532282, -5.341557065, -3.337497841, -1.025292166, -5.813524857, 0.011862923 ,0.061801326, 0.581628835]), 'Ea':np.array([-0.463042822, 1.529151218, 0.808025472 ,0.359889935, -0.021309254, -0.098013004, -0.102022118, -0.097024727]), 'c':np.array([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149 ,0.001263134, 0.001236963, -0.000390567]), 'd':np.array([1.071992802, -2.780550365, -1.71391034 ,-0.274481751, -4.491132406, -0.054960894, 0.049553379, 0.270885383]), 'f':np.array([-0.027060156, 0.056903076, 0.041102936 ,0.001361221, 0.144385439, 0.003136796 ,0.001374015, -0.006089248])}, '2 HO2 <=> H2O2 + O2': {'A':np.array([-12.93733217, 24.39245077 ,17.73177606, 4.37803475, 33.44985889, 0.381601192 ,3.748890308]), 'n':np.array([1.872602872, -4.096806067, -3.09439453 ,-0.63226683, -5.125008418, -0.061610462, -0.677953862]), 'Ea':np.array([-0.463903763 ,1.259537237, 0.826684258 ,0.257400116, 0.803882706 ,2.20E-05, 0.181336266]), 'c':np.array([0.002069572, -0.008314769, -0.00424128 ,-0.002016113, 0.000134642 ,0.000122049 ,-0.001026567]), 'd':np.array([0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -0.050708107, -0.526284003]), 'f':np.array([-0.022628436, 0.023558844, 0.031573523 ,-0.00732987, 0.096573278 ,0.001668073, 0.01033547])}, 'HO2 + OH <=> H2O + O2': {'A':np.array([-4.795727446, 6.426354909 ,4.878258417, 2.472791017, 7.856296474, 1.328033302 ,-3.457932692, -0.349839371, 2.331070924 ,2.403555921, -0.165397001, 0.246540172 ,0.722946077]), 'n':np.array([0.624241134, -1.321082842, -1.032242319, -0.36532386, -1.112545721, -0.188622956, 0.421083939 ,0.038859478 ,-0.360855106, -0.38989218, 0.029669899 ,-0.04371581, -0.130487515]), 'Ea':np.array([-0.259799111, 0.205620792 ,0.130799794, 0.137023666 ,0.379232542, 6.19E-02, -0.198196699, -0.023548432, 0.118069394 ,0.104383314 ,-0.003830947, 0.011566499 ,-0.073557828]), 'c':np.array([0.00161312, -0.001906694, -0.000863021, -0.00105112 ,-0.002185605, -0.000334461, 0.001817049 ,0.000170761, -0.000859313, -0.000653029, -3.11E-06 ,-6.37E-05, 0.00047058]), 'd':np.array([0.124499363, -0.645652135, -0.535188558, 0.052734001 ,-0.45181066, -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, 0.057288687, -0.012776017, -0.192422381]), 'f':np.array([0.002033109, -0.011099716, 0.005351213 ,-0.007623667, 0.005327017 ,0.001259485,0.00245957, 0.000976725 ,-0.004879845, 0.001903886 ,-0.001838669 ,0.000252269, 0.004691829])}, '2 OH <=> H2O + O': {'A': np.array([-5.40485067, 18.96061659 ,8.089301961, 6.953940096 ,-12.54280438, -3.264972401, 2.106487623 ,-1.657943467, 1.614935 ,-1.536463599]), 'n': np.array([0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, 0.466415264 ,-0.326136934, 0.355297684 ,-0.16618967, 0.253903734]), 'Ea': np.array([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -0.281992263, 0.099465537 ,0.030650483, 0.176069015 ,-0.056967886]), 'c': np.array([-0.003001658, -0.001870536, 0.003820535 ,-0.002753277, 0.014224162, 0.00032969 ,-0.000627241, -0.001081979, -0.002009835, 0.000255318]), 'd':np.array([0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728 ,0.229877892, -0.194395052, 1.033858025 ,0.527183366, 0.308743056]), 'f':np.array([-0.010053913, 0.025128322, 
0.035579811 ,0.00515753 ,-0.0083511, -0.00512885, 0.003954, -0.029711993 ,-0.01986861, -0.007691647])}, 'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]), 'n':np.array([-0.00104,-.36888,.154462]), 'Ea':np.array([.504278,-.44379,-0.03181]), 'c':np.array([0,0,0]), 'd':np.array([0,0,0]), 'f':np.array([0,0,0])}, 'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]), 'n':np.array([-.04282,.150846]), 'Ea':np.array([0.024285,-0.02956]), 'c':np.array([0,0]), 'd':np.array([0,0]), 'f':np.array([0,0])}} six_parameter_fit_nominal_parameters_dict = {'H2O2 + OH <=> H2O + HO2':{'A':4.64E-06,'n':5.605491008,'Ea':-5440.266692,'c':126875776.1,'d':0.000441194,'f':-5.35E-13}, '2 HO2 <=> H2O2 + O2':{'A':1.30E+04,'n':1.997152351,'Ea':-3628.04407,'c':93390973.44,'d':-0.000732521,'f':8.20E-12} , 'HO2 + OH <=> H2O + O2':{'A':1.41E+18,'n':-2.05344973,'Ea':-232.0064051,'c':15243859.12,'d':-0.001187694,'f':8.01E-12}, '2 OH <=> H2O + O':{'A':354.5770856,'n':2.938741717,'Ea':-1836.492972,'c':12010735.18,'d':-4.87E-05,'f':1.22E-12}, 'CH3 + HO2 <=> CH4 + O2':{'A':3.19e3,'n':2.670857,'Ea':-4080.73,'c':0.0,'d':0.0,'f':0.0}, 'CH3 + HO2 <=> CH3O + OH':{'A':8.38e11,'n':.29,'Ea':-785.45,'c':0.0,'d':0.0,'f':0.0}} def calculate_six_parameter_fit(reaction,dictonary,temperature): #finish editing this #calc Ea,c,d,F seprately A = dictonary[reaction]['A'] n = dictonary[reaction]['n'] Ea_temp = dictonary[reaction]['Ea']/(1.987*temperature) c_temp = dictonary[reaction]['c']/((1.987*temperature)**3) d_temp = dictonary[reaction]['d']*(1.987*temperature) f_temp = dictonary[reaction]['f']* ((1.987*temperature)**3) k = A*(temperature**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) return k xdata = [] ydata = [] for t in np.arange(200,2400): xdata.append(t) ydata.append(calculate_six_parameter_fit('2 HO2 <=> H2O2 + O2',six_parameter_fit_nominal_parameters_dict,t)) ydata = np.array(ydata) ydata = np.log(ydata) plt.scatter(xdata,ydata) #fitting sigmas import matplotlib.pyplot as plt from scipy.optimize import curve_fit def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata) print(popt) test_array = [] for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') #fitting sigmas import matplotlib.pyplot as plt from scipy.optimize import curve_fit def func2(x, A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #calculate original 3 parameter fit #fitting sigmas import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit nominal_rc_df = pd.read_csv('') xdata=nominal_rc_df['T'] ydata=nominal_rc_df['k'] def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata,maxfev=1000000) print(popt) A_nominal_3pf = popt[0] n_nominal_3pf = popt[1] Ea_nominal_3pf = popt[2]/(1.987*1000) test_array = [] 
for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') #looping over csvs and calculating sens coefficients for 3pf import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit A_list_3pf = [] n_list_3pf = [] Ea_list_3pf = [] for csv in csv list: df = pd.read_csv('') xdata=df['T'] ydata=df['k'] amount_perturbed = def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata,maxfev=1000000) print(popt) A = popt[0] n = popt[1] Ea = popt[2]/(1.987*1000) test_array = [] for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') sensitivty_A = (A - A_nominal_3pf)/amount_perturbed sensitivity_n = (n-n_nominal_3pf)/amount_perturbed sensitivty_Ea = (Ea - Ea_nominal_3pf)/amount_perturbed A_list_3pf.append(sensitivty_A) n_list_3pf.append(sensitivity_n) Ea_list_3pf.append(sensitivty_Ea) #calculating original 6 paramter fit #fitting sigmas import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit nominal_rc_df = pd.read_csv('') xdata=nominal_rc_df['T'] ydata=nominal_rc_df['k'] def func2(x, A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) A_nominal_spf = popt[0] n_nominal_spf = popt[1] Ea_nominal_spf = popt[2]/(1.987*1000) c_nominal_spf = popt[3]/((1.987*1000)**3) d_nominal_spf = popt[4]/((1.987*1000)**-1) f_nominal_spf = popt[5]/((1.987*1000)**-3) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #looping over csvs and calculating sens coefficients for 6pf import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit A_list_6pf = [] n_list_6pf = [] Ea_list_6pf = [] c_list_6pf = [] d_list_6pf = [] f_list_6pf = [] for csv in csv list: df = pd.read_csv('') xdata=df['T'] ydata=df['k'] amount_perturbed = def func2(x, A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) A = popt[0] n = popt[1] Ea = popt[2]/(1.987*1000) c = popt[3]/((1.987*1000)**3) d = popt[4]/((1.987*1000)**-1) f = popt[5]/((1.987*1000)**-3) sensitivty_A = (A - A_nominal_6pf)/amount_perturbed sensitivity_n = (n-n_nominal_6pf)/amount_perturbed sensitivty_Ea = (Ea - Ea_nominal_6pf)/amount_perturbed sensitivity_c = (c - c_nominal_6pf)/amount_perturbed sensitivity_d = (d - d_nominal_6pf)/amount_perturbed sensitivity_f = (f - 
f_nominal_6pf)/amount_perturbed A_list_6pf.append(sensitivty_A) n_list_6pf.append(sensitivity_n) Ea_list_6pf.append(sensitivty_Ea) c_list_6pf.append(sensitivity_c) d_list_6pf.append(sensitivity_d) f_list_6pf.append(sensitivity_f) ```
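The two sensitivity loops in the cell above are left unfinished: `for csv in csv list:` is not valid Python, `amount_perturbed` is never assigned, and the six-parameter loop references `*_nominal_6pf` values that were stored as `*_nominal_spf`. Below is a minimal runnable sketch of the three-parameter version; the glob pattern for the perturbed rate-constant CSVs and the 10% perturbation size are assumptions, and the fit is done in log space as in the synthetic-data fit earlier in the cell.

```
import glob
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit

# Assumptions: one CSV per perturbed run with columns 'T' and 'k',
# and each run perturbs the nominal rate constant by 10%.
csv_list = sorted(glob.glob('perturbed_rate_constants/*.csv'))
amount_perturbed = 0.1

def func(x, A, n, Ea):
    # ln k = ln A + n*ln T - Ea/(R*T), with R = 1.987 cal/(mol*K)
    return np.log(A) + np.log(x) * n + (-Ea / (1.987 * x))

A_list_3pf, n_list_3pf, Ea_list_3pf = [], [], []
for path in csv_list:
    df = pd.read_csv(path)
    xdata = df['T'].values
    ydata = np.log(df['k'].values)          # fit in log space
    popt, pcov = curve_fit(func, xdata, ydata, maxfev=1000000)
    A, n = popt[0], popt[1]
    Ea = popt[2] / (1.987 * 1000)           # same scaling used for the nominal fit
    # finite-difference sensitivities relative to the nominal 3-parameter fit
    A_list_3pf.append((A - A_nominal_3pf) / amount_perturbed)
    n_list_3pf.append((n - n_nominal_3pf) / amount_perturbed)
    Ea_list_3pf.append((Ea - Ea_nominal_3pf) / amount_perturbed)
```

The six-parameter loop follows the same pattern, with the `*_nominal_spf` values computed above standing in for the undefined `*_nominal_6pf` names.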
``` from music21 import * import numpy as np import torch import pretty_midi import os import sys import pickle import time import random import re class MusicData(object): def __init__(self, abc_file, culture= None): self.stream = None self.metadata = dict() self.description = None self.midi = None self.torch_matrix = None self.title = None self.key = None self.meter = None self.culture = culture self.gene = None self.valid = True self.set_proporties(abc_file) def set_proporties(self, abc_file): # print(abc_file.split('/')[-1]) step_list = ['stream','metadata','key','meter','others'] try: step_counter = 0 self.stream = converter.parse(abc_file) step_counter = 1 self.metadata = dict(self.stream.metadata.all()) step_counter = 2 self.key = self.metadata['key'] = str(self.stream.flat.getElementsByClass('Key')[0]) step_counter = 3 self.meter = self.metadata['meter'] = str(self.stream.flat.getElementsByClass('TimeSignature')[0])[1:-1].split()[-1] step_counter = 4 self.title = self.metadata['title'] self.midi = f"/gpfsnyu/home/yz6492/multimodal/data/midi/{self.title}.mid" if 'localeOfComposition' in self.metadata and self.culture is None: self.culture = self.culture_analyzer(self.metadata['localeOfComposition']) if 'gene' in self.metadata: pass except: self.valid = False print(f'Error in parsing: id - {step_list[step_counter]}') return try: mf = midi.translate.streamToMidiFile(self.stream) mf.open(self.midi, 'wb') mf.write() mf.close() self.torch_matrix = self.melody_to_numpy(fpath = self.midi) except Exception as e: self.stream, flag = self.emergence_fix(abc_file) # if flag is False: # self.stream, flag = self.emergence_fix(abc_file) print(f'Error in Matrix. Fixed? {flag}') self.description = self.generate_description() if self.torch_matrix is None: self.valid = False self.stream = None # for data size compression def emergence_fix(self, abc_file): with open(abc_file, 'r') as f: input_list = [line for line in f] output_list = input_list.copy() for i, line in enumerate(input_list): if 'L:' in line: if line[-3:] == '16\n': output_list[i] = 'L:1/8\n' elif line[-2:] == '8\n': output_list[i] = 'L:1/4\n' with open(abc_file, 'w') as f: f.writelines(output_list) # fix finished. now test try: self.stream = converter.parse(abc_file) mf = midi.translate.streamToMidiFile(self.stream) mf.open(self.midi, 'wb') mf.write() mf.close() self.torch_matrix = self.melody_to_numpy(fpath = self.midi) self.valid = True return stream, True except Exception as e: self.valid = False # do not use this object return stream, False def culture_analyzer(self, text): if 'china' in text.lower(): return 'Chinese' if 'irish' in text.lower(): return 'Irish' if 'english' in text.lower(): return 'English' def melody_to_numpy(self, fpath=None, unit_time=0.125, take_rhythm=False, ): music = pretty_midi.PrettyMIDI(fpath) notes = music.instruments[0].notes t = 0. roll = [] # print(notes[0], notes[-1]) for note in notes: # print(t, note) elapsed_time = note.start - t if elapsed_time > 0.: steps = torch.zeros((int(round(elapsed_time / unit_time)), 130)) steps[range(int(round(elapsed_time / unit_time))), 129] += 1. 
roll.append(steps) n_units = int(round((note.end - note.start) / unit_time)) steps = torch.zeros((n_units, 130)) if take_rhythm: steps[0, 60] += 1 else: steps[0, note.pitch] += 1 steps[range(1, n_units), 128] += 1 roll.append(steps) t = note.end return torch.cat(roll, 0) def generate_description(self): # order shuffle (total 6 possibilities) order = random.randint(0,5) # connector to decide grammar connecter = [random.randint(0,1), random.randint(0,1)] sequences = [ f'This is a song in {self.key}. It has a {self.meter} tempo. It is a {self.culture} song.', f'This is a song in {self.key}. This is in {self.culture} style with a beat of {self.meter}.', f'This is a song in {self.key}. This is a {self.culture} style song with a rhythm of {self.meter}.', f'This is a {self.key} album. They have got a {self.meter} tempo. It is a song from {self.culture}.', f'This is {self.key} song. This does have a tempo of {self.meter}. It is a song in {self.culture} style.', f'That is a {self.key} song. The tempo is {self.meter}. It is a song of the {self.culture} style.', f'That is a {self.key} hit. There is a pace of {self.meter}. It is a album in {self.culture} style.', f'This is a song in {self.key} with a {self.meter} tempo and it is a {self.culture} style song.', f'It is a {self.meter} pace {self.key} piece, and it is a {self.culture} type piece.', f'This is a {self.meter} tempo composition in {self.key} and is a {self.culture} hit.', f'It is a song of {self.culture} theme. It is a {self.meter} tempo song in {self.key}.', f'This is a song of {self.culture} theme. It is a {self.meter}-tempo composition in {self.key}.', f'This is an album about {self.culture} theme. This is a record of {self.meter} tempo in {self.key}', ] return sequences[random.randint(0, len(sequences)-1)] ```
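A minimal usage sketch for the `MusicData` class above. The ABC file path is a hypothetical example, and note that the class writes its intermediate MIDI file to a hard-coded cluster directory (`/gpfsnyu/home/...`), so `valid` will be `False` unless that directory, or an equivalent edit to `self.midi`, exists.

```
import torch

# Hypothetical input file; any monophonic ABC tune is assumed to work.
md = MusicData('data/abc/example_tune.abc', culture='Irish')

if md.valid:
    print(md.title, md.key, md.meter, md.culture)
    print(md.description)

    roll = md.torch_matrix  # shape: (time_steps, 130), one row per 0.125 s step
    # Columns 0-127: note onset at that MIDI pitch, 128: hold previous note, 129: rest.
    tokens = []
    for step in roll:
        idx = int(torch.argmax(step).item())
        if idx == 129:
            tokens.append('rest')
        elif idx == 128:
            tokens.append('hold')
        else:
            tokens.append('pitch %d' % idx)
    print(tokens[:16])
```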
``` import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm %matplotlib inline import datetime import cPickle as pickle import csv import numpy as np import random import sys maxInt = sys.maxsize decrement = True while decrement: # decrease the maxInt value by factor 10 # as long as the OverflowError occurs. decrement = False try: csv.field_size_limit(maxInt) except OverflowError: maxInt = int(maxInt/10) decrement = True ``` # get term-/document-frequency ``` csv_reader = csv.reader(open('../data/raw/NELA-17/train.csv', 'r')) tkn2tf = {} len_heads = [] #1 len_paras = [] #2 cnt_paras = [] #3 len_bodys = [] #4 # csv data: 0:id, 1:head, 2:body, 3:label print datetime.datetime.now().isoformat() for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, head = row[1].lower().strip() for tkn in head.split(): if tkn in tkn2tf: tkn2tf[tkn] += 1 else: tkn2tf[tkn] = 1 len_heads.append(len(head.split())) #1 body = row[2].lower().strip() tkn_para = [] for para in body.split('<eop>'): if para and para != ' ': _para = para + '<eop>' len_para = len(_para.split()) len_paras.append(len_para) #2 tkn_para.append(_para) cnt_paras.append(len(tkn_para)) #3 body_split = [] for tkn in body.split(): if tkn in tkn2tf: tkn2tf[tkn] += 1 else: tkn2tf[tkn] = 1 body_split.append(tkn) len_bodys.append(len(body_split)) #4 print n+1, 'Done' print datetime.datetime.now().isoformat() print 'voca size :', len(tkn2tf) sorted_token = sorted(tkn2tf.items(), key=lambda kv: kv[1], reverse=True) tkn2idx = {} for idx, (tkn, _) in tqdm(enumerate(sorted_token)): tkn2idx[tkn] = idx + 2 tkn2idx['<UNK>'] = 1 tkn2idx[''] = 0 if len(tkn2idx) == len(tkn2tf)+2: print len(tkn2idx), 'No problem' print print 'Show top-10 tkn:' for tkn, freq in sorted_token[:10]: print tkn,':',freq print '' with open('../data/nela-17/whole/dic_mincut0.txt', 'wb') as f: for key in tkn2idx.keys(): f.write(key+'\n') tkn2tf_mincut5 = {} for tkn, tf in tkn2tf.items(): if tf < 2: continue tkn2tf_mincut5[tkn] = tf print 'voca size :', len(tkn2tf_mincut5) tkn2tf_mincut5['<EOS>'] = tkn2tf_mincut5['<eos>'] tkn2tf_mincut5['<EOP>'] = tkn2tf_mincut5['<eop>'] del tkn2tf_mincut5['<eos>'] del tkn2tf_mincut5['<eop>'] import operator sorted_voca = sorted(tkn2tf_mincut5.items(), key=operator.itemgetter(1)) len(sorted_voca) list_voca_mincut = [] list_voca_mincut.append('') # PAD list_voca_mincut.append('<UNK>') # UNK list_voca_mincut.append('<EOS>') # EOS list_voca_mincut.append('<EOP>') # EOP for word, idx in sorted_voca: if word=='<UNK>' or word=='<EOP>' or word=='<EOS>': print("existing word", word) continue else: list_voca_mincut.append(word) len(list_voca_mincut) with open('../data/nela-17/whole/dic_mincutN.txt', 'wb') as f: for i in range(len(list_voca_mincut)): f.write(list_voca_mincut[i]+'\n') dic_voca = {} for voca in list_voca_mincut: dic_voca[voca] = len(dic_voca) print(dic_voca[''], dic_voca['<UNK>'], dic_voca['<EOS>'], dic_voca['<EOP>']) with open('../data/nela-17/whole/dic_mincutN.pkl', 'wb') as f: pickle.dump(dic_voca, f) ``` #### for data processing ``` import copy dic_voca_lower = copy.deepcopy(dic_voca) dic_voca_lower['<eos>'] = dic_voca_lower['<EOS>'] dic_voca_lower['<eop>'] = dic_voca_lower['<EOP>'] del dic_voca_lower['<EOS>'] del dic_voca_lower['<EOP>'] len(dic_voca_lower) print(dic_voca_lower[''], dic_voca_lower['<UNK>'], dic_voca_lower['<eos>'], dic_voca_lower['<eop>']) ``` ## stats ``` import csv import sys import numpy as np data= [] with open('../data/raw/NELA-17/train.csv', 'r') as f: data_csv = csv.reader(f, delimiter=',') 
for row in data_csv: data.append(row) def print_info(data): print("mean", np.average(data)) print("std", np.std(data)) print("max", np.max(data)) print("95.xx coverage", np.average(data) + 2*np.std(data) ) print("99.73 coverage", np.average(data) + 3*np.std(data) ) print("99.95 coverage", np.average(data) + 3.5*np.std(data) ) print("99.99 coverage", np.average(data) + 4*np.std(data) ) head = [x[1].strip() for x in data] head_len = [len(x.split()) for x in head] print('head_len') print_info(head_len) body = [x[2].strip() for x in data] body_len = [len(x.split()) for x in body ] print('body_len') print_info(body_len) context_len = [len(x.split('<EOP>')) for x in body] print('context_len') print_info(context_len) body_sentence = [] for sent in body: sent = sent.split('<EOP>') body_sentence.extend(sent) body_len = [ len(x.split()) for x in body_sentence ] print('body_len') print_info(body_len) ``` # encode to numpy ``` def fit_length(data, max_len_t, max_len_b): data_t, data_b = data list_zeros = np.zeros(max_len_b, 'int32').tolist() fl_data_t = [] for datum in data_t: try: datum = list(datum) except: pass _len = len(datum) if _len >= max_len_t: fl_data_t.append( datum[:max_len_t] ) else: fl_data_t.append( datum + list_zeros[:(max_len_t-_len)] ) fl_data_b = [] for datum in data_b: try: datum = list(datum) except: pass _len = len(datum) if _len >= max_len_b: fl_data_b.append( datum[:max_len_b] ) else: fl_data_b.append( datum + list_zeros[:(max_len_b-_len)] ) np_data_t = np.asarray(fl_data_t, dtype='int32') np_data_b = np.asarray(fl_data_b, dtype='int32') data = [np_data_t, np_data_b] return data csv_reader = csv.reader(open('../data/raw/NELA-17/train.csv', 'r')) print datetime.datetime.now().isoformat() ids = [] heads = [] bodys = [] labels = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids.append(row[0]) labels.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: head.append(dic_voca_lower[tkn]) else: head.append(1) # 0: <UNK> heads.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0: <UNK> bodys.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() # ~5 mins print datetime.datetime.now().isoformat() [np_heads, np_bodys] = fit_length([heads, bodys], 25, 2000) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/train/train_title.npy' np.save(t_trainpath, np_heads) b_trainpath = '../data/nela-17/whole/train/train_body.npy' np.save(b_trainpath, np_bodys) l_trainpath = '../data/nela-17/whole/train/train_label.npy' np.save(l_trainpath, labels) print datetime.datetime.now().isoformat() ``` # devset ``` csv_reader = csv.reader(open('../data/raw/NELA-17/dev.csv', 'r')) print datetime.datetime.now().isoformat() ids_dev = [] heads_dev = [] bodys_dev = [] labels_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_dev.append(row[0]) labels_dev.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: head.append(dic_voca_lower[tkn]) else: head.append(1) # 0: UNK heads_dev.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0: UNK bodys_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_dev, 
np_bodys_dev] = fit_length([heads_dev, bodys_dev], 25, 2000) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/dev/dev_title.npy' np.save(t_trainpath, np_heads_dev) b_trainpath = '../data/nela-17/whole/dev/dev_body.npy' np.save(b_trainpath, np_bodys_dev) l_trainpath = '../data/nela-17/whole/dev/dev_label.npy' np.save(l_trainpath, labels_dev) print datetime.datetime.now().isoformat() ``` # testset ``` csv_reader = csv.reader(open('../data/raw/NELA-17/test.csv', 'r')) print datetime.datetime.now().isoformat() ids_dev = [] heads_dev = [] bodys_dev = [] labels_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_dev.append(row[0]) labels_dev.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: head.append(dic_voca_lower[tkn]) else: head.append(1) # 0 - UNK heads_dev.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0 - UNK bodys_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_dev, np_bodys_dev] = fit_length([heads_dev, bodys_dev], 25, 2000) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/test/test_title.npy' np.save(t_trainpath, np_heads_dev) b_trainpath = '../data/nela-17/whole/test/test_body.npy' np.save(b_trainpath, np_bodys_dev) l_trainpath = '../data/nela-17/whole/test/test_label.npy' np.save(l_trainpath, labels_dev) print datetime.datetime.now().isoformat() ``` # debugset ``` print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17//whole/debug/debug_title.npy' np.save(t_trainpath, np_heads_dev[:200]) b_trainpath = '../data/nela-17/whole/debug/debug_body.npy' np.save(b_trainpath, np_bodys_dev[:200]) l_trainpath = '../data/nela-17/whole/debug/debug_label.npy' np.save(l_trainpath, labels_dev[:200]) print datetime.datetime.now().isoformat() with open('../data/nela-17/whole/dic_mincutN.txt') as f: test_list_voca = f.readlines() test_list_voca = [x.strip() for x in test_list_voca] from nlp_vocab import Vocab tt = Vocab(test_list_voca) print(tt.index2sent(np_heads_dev[100])) ``` # para ver. 
``` SEED = 448 random.seed(SEED) csv_reader = csv.reader(open('version2/data_para_train.csv', 'r')) print datetime.datetime.now().isoformat() data = [] true_data = [] for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, if row[3] == "1": data.append(row) else: true_data.append(row) random.shuffle(true_data) data += true_data[:len(data)] print datetime.datetime.now().isoformat() ids_para = [] heads_para = [] bodys_para = [] labels_para = [] for n, row in enumerate(data): if (n+1) % 10000 == 0: print n+1, ids_para.append(row[0]) labels_para.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: head.append(1) heads_para.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_para.append(body) print n+1, ': Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_para, np_bodys_para] = fit_length([heads_para, bodys_para], 49, 170) print 'numpy: Done' print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/train_para_head_mincut5' np.save(t_trainpath, np_heads_para) b_trainpath = 'nps/train_para_body_mincut5' np.save(b_trainpath, np_bodys_para) l_trainpath = 'nps/train_para_label_mincut5' np.save(l_trainpath, labels_para) print 'save: Done' print datetime.datetime.now().isoformat() import numpy as np l_trainpath = np.load('nps/train_para_label_mincut5.npy') l_trainpath.shape csv_reader = csv.reader(open('version2/data_para_dev.csv', 'r')) print datetime.datetime.now().isoformat() ids_para_dev = [] heads_para_dev = [] bodys_para_dev = [] labels_para_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_para_dev.append(row[0]) labels_para_dev.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: head.append(1) heads_para_dev.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_para_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_para_dev, np_bodys_para_dev] = fit_length([heads_para_dev, bodys_para_dev], 49, 170) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/valid_para_head_mincut5' np.save(t_trainpath, np_heads_para_dev) b_trainpath = 'nps/valid_para_body_mincut5' np.save(b_trainpath, np_bodys_para_dev) l_trainpath = 'nps/valid_para_label_mincut5' np.save(l_trainpath, labels_para_dev) print datetime.datetime.now().isoformat() ``` # testset ``` csv_reader = csv.reader(open('version2/data_whole_test.csv', 'r')) print datetime.datetime.now().isoformat() ids_test = [] heads_test = [] bodys_test = [] labels_test = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_test.append(row[0]) labels_test.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: head.append(1) heads_test.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_test.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_test, np_bodys_test] = fit_length([heads_test, bodys_test], 49, 1200) print 
datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/test_whole_head_mincut5' np.save(t_trainpath, np_heads_test) b_trainpath = 'nps/test_whole_body_mincut5' np.save(b_trainpath, np_bodys_test) l_trainpath = 'nps/test_whole_label_mincut5' np.save(l_trainpath, labels_test) print datetime.datetime.now().isoformat() ``` # test stats. ``` csv_reader = csv.reader(open('version2/data_whole_test.csv', 'r')) len_heads_test = [] #1 len_paras_test = [] #2 cnt_paras_test = [] #3 len_bodys_test = [] #4 labels_test = [] print datetime.datetime.now().isoformat() for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, labels_test.append(int(row[3])) head = row[1] len_heads_test.append(len(head.split())) #1 body = row[2] tkn_para = [] for para in body.split('<EOP>'): if para and para != ' ': _para = para + '<EOP>' len_para = len(_para.split()) len_paras_test.append(len_para) #2 tkn_para.append(_para) cnt_paras_test.append(len(tkn_para)) #3 body_split = body.split() len_bodys_test.append(len(body_split)) #4 print n+1, 'Done' print datetime.datetime.now().isoformat() #1 len_titles = np.array(len_heads_test) print len_titles.tolist().count(1) print np.max(len_titles), np.min(len_titles), np.mean(len_titles), np.std(len_titles) len_t = len(len_titles) cnt_t = sum(len_titles <= 49) print cnt_t, len_t, cnt_t*1.0/len_t #2 len_paras = np.array(len_paras_test) print len_paras.tolist().count(1) print np.max(len_paras), np.min(len_paras), np.mean(len_paras), np.std(len_paras) len_p = len(len_paras) cnt_p = sum(len_paras <= 170) print cnt_p, len_p, cnt_p*1.0/len_p #3 cnt_para = np.array(cnt_paras_test) print cnt_para.tolist().count(1) print np.max(cnt_para), np.min(cnt_para), np.mean(cnt_para), np.std(cnt_para), np.median(cnt_para) len_cp = len(cnt_para) cnt_cp = sum(cnt_para <= 20) print cnt_cp, len_cp, cnt_cp*1.0/len_cp #4 len_bodys = np.array(len_bodys_test) print len_bodys.tolist().count(2) print np.max(len_bodys), np.min(len_bodys), np.mean(len_bodys), np.std(len_bodys) len_b = len(len_bodys) cnt_b = sum(len_bodys <= 1200) print cnt_b, len_b, cnt_b*1.0/len_b plt.figure(1) plt.hist(len_paras, range=[0, 500], normed=False, bins=500) tkn2df = {} for tkn in tkn2tf.keys(): tkn2df[tkn] = 0 csv_reader = csv.reader(open('final_final/data_whole_training.csv', 'r')) print datetime.datetime.now().isoformat() for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, tmp_tkn = [] head = row[1] body = row[2] doc = ' '.join([head, body]) for tkn in doc.split(): if tkn in tmp_tkn: continue else: tkn2df[tkn] += 1 tmp_tkn.append(tkn) print n, 'Done' print datetime.datetime.now().isoformat() ```
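To spot-check the arrays written above, the vocabulary file and the saved `.npy` files can be reloaded and a sample decoded back to tokens, along the lines of the `Vocab.index2sent` call earlier. This is a small sketch: the paths mirror the save calls in this notebook, and the `%`-style print keeps it compatible with the Python 2 environment these cells use.

```
import numpy as np

# Rebuild the index -> token mapping from the vocabulary file written above
# (one token per line; the line order matches the indices used for encoding).
with open('../data/nela-17/whole/dic_mincutN.txt') as f:
    idx2tkn = [line.rstrip('\n') for line in f]

titles = np.load('../data/nela-17/whole/train/train_title.npy')
labels = np.load('../data/nela-17/whole/train/train_label.npy')

def decode(row):
    # index 0 is the padding token, index 1 is <UNK>
    return ' '.join(idx2tkn[int(i)] for i in row if i != 0)

print('label %d: %s' % (labels[0], decode(titles[0])))
```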
<table> <tr> <td ><h1><strong>NI SystemLink Analysis Automation</strong></h1></td> </tr> </table> This notebook is an example for how you can analyze your data with NI SystemLink Analysis Automation. It forms the core of the analysis procedure, which includes the notebook, the query, and the execution parameters (parallel or comparative). The [procedure is uploaded to Analysis Automation](https://www.ni.com/documentation/en/systemlink/latest/analysis/creating-anp-with-jupyter/). The output is a report in form of PDF documents or HTML pages. <br> <hr> ## Prerequisites Before you run this example, you need to [create a DataFinder search query](https://www.ni.com/documentation/en/systemlink/latest/datanavigation/finding-data-with-advanced-search/) in Data Navigation to find the example files (e.g. 'TR_M17_QT_42-1.tdms'). Save this query on the server. <hr> ## Summary This example exercises the SystemLink TDMReader API to access bulk data (see `data_api`) and/or descriptive data (see `metadata_api`). When the notebook executes, Analysis Automation provides data links which the API uses to access content. It also shows how to select channels from two channel groups and display the data in two graphs. The channel values from each channel group populate arrays, which you can use to further analyze and visualize your data. Furthermore, the example uses two procedure parameters that write a comment to the first graph and select a channel to display in the second graph (refer to __Plot Graph__ below). <hr> ## Imports This example uses the `TDMReader` API to work with the bulk data and meta data of the given files. `Matplotlib` is used for plotting the graph. The `scrapbook` is used to set and display the results in the analysis procedure results list. ``` import systemlink.clients.nitdmreader as tdmreader metadata_api = tdmreader.MetadataApi() data_api = tdmreader.DataApi() import matplotlib.pyplot as plt import scrapbook as sb def get_property(element, property_name): """Gets a property of the given element. The element can be a file, channel group, or channel. Args: element: Element to get the property from. property_name: Name of the property to get. Returns: The according property of the element or ``None`` if the property doesn't exist. """ return next((e.value for e in element.properties.properties if e.name == property_name), None) ``` ## Define Notebook Parameters a) In a code cell (*called __parameters cell__*), define the parameters. Fill in the needed values/content parameters in the code cell below. E.g. **Defined parameters:** - `comment_group_1`: Writes a comment into the box of the first group.<br> (Default value = `Checked`) - `shown_channel_index`: Any valid channel index of the second group. This channel is plotted in the second graph. <br> (Default value = `2`) Your code may look like the following: ``` comment_group_1 = "Checked" shown_channel_index = 2 ``` b) Select this code cell (*__parameters cell__*) and open on the __Property Inspector__ panel on the right sidebar to add the parameters, their default values, to the __Cell Metadata__ code block. For example, your code may look like the following: ```json { "papermill": { "parameters": { "comment_group_1": "Checked", "shown_channel_index": 2 } }, "tags": [ "parameters" ] } ``` You can use the variables of the __parameters__ cell content in all code cells below. ## Retrieve Metadata with a Data Link A data link is the input for each __Analysis Automation procedure__ that uses a query to collect specific data items. 
A `data_link` contains a list of one or more elements that point to a list of files, channel groups, or channels (depending on the query result type). This example shows how the Metadata API accesses the `file_info` structure from the file, through the `groups`, and down to the `channels` level. This example calculates the absolute minimum and absolute maximum value of all channels in each group and displays these values in the report. ``` data_links = ni_analysis_automation["data_links"] file_ids = [d["fileId"] for d in data_links] file_infos = await metadata_api.get_multiple_file_info(tdmreader.FileList(file_ids)) file_info = file_infos[0] test_file_name = get_property(file_info, "name") program_name = get_property(file_info, "Test~Procedure") group_names = [] channels = [] formatted_properties = [] for group in file_info.groups: group_names.append(group.name) channels.append(group.channels) max_values_of_group = [] min_values_of_group = [] mean_values_of_group = [] for channel in group.channels: minimum = float(get_property(channel, "minimum") or "NaN") maximum = float(get_property(channel, "maximum") or "NaN") mean_values_of_group.append((minimum + maximum) / 2) max_values_of_group.append(maximum) min_values_of_group.append(minimum) # Calculate statistical values from metadata abs_min = max(max_values_of_group) abs_max = min(max_values_of_group) abs_mean = sum(mean_values_of_group) / float(len(mean_values_of_group)) formatted_properties.append(f"Absolute Maximum: {abs_max:.3f} °C"+ f",Absolute Minimum: {abs_min:.3f} °C"+ f",Mean Value: {abs_mean:.3f} °C") # Populate the info box of the plot with the notebook parameters formatted_properties[1] += f",Parameter: {comment_group_1}" formatted_properties[0] += f",Channel #: {shown_channel_index}" ``` ## Retrieve Bulk Data with a Data Link Use the TDMReader API to work with bulk data. There are multiple ways for retrieving the data. The access path used in this example shows you how to loop over all groups and over all channels within the groups. The resulting channel specifiers (`chn_specs`) are used in the next step to `query` the bulk data and retrieve all channel `values` from the queried data. ``` bulk_data = [] file_id = data_links[0]['fileId'] for group in file_info.groups: chn_specs = [] for channel in group.channels: channel_specifier = tdmreader.OneChannelSpecifier( file_id=file_id, group_name=group.name, channel_name=channel.name) chn_specs.append(channel_specifier) xy_chns = tdmreader.ChannelSpecificationsXyChannels(y_channels=chn_specs) channel_specs = tdmreader.ChannelSpecifications(xy_channels=[xy_chns]) query = tdmreader.QueryDataSpecifier(channel_specs) data = await data_api.query_data(query) # get numeric y-data y_channels = data.data[0].y values = list(map(lambda c: c.numeric_data, y_channels)) bulk_data.append(values) ``` ## Plot Graph The next two cells plot a graph with two areas and two sub plots, using the Python `matplotlib.pyplot` module as `plt`. ``` # Helper method and constant for plotting data curr_fontsize = 18 axis_lable_fontsize = curr_fontsize - 5 def plot_area(subplot, area_bulk_data, area_meta_data, enable_channel_selector, area_properties): """ Plot a sub print area of a figure :param subplot: Object of the plot print area :param area_bulk_data: Channel bulk data to print :param area_meta_data: Channel metadata (name, properties, ...) 
:param enable_channel_selector: True, when property shown_channel_index should be used :param area_properties: String with comma-separated parts as content for the info box area e.g.: "Absolute Maximum: 12.6 °C,Absolute Minimum: -22.3 °C" """ # Place a text box below the legend subplot.text(1.05, 0.0, area_properties.replace(",", "\n"), transform=subplot.transAxes, ha="left", va="bottom") subplot.grid(True) subplot.set_xlabel('Time [s]', fontsize=axis_lable_fontsize) unit = get_property(area_meta_data[0], "unit_string") subplot.set_ylabel('Amplitudes ['+unit+']', fontsize=axis_lable_fontsize) i = 0 for channel in area_meta_data: if (enable_channel_selector): if (i == (shown_channel_index - 1)): subplot.plot(area_bulk_data[i], label=channel.name) # Lable => name of the curve = channel else: subplot.plot(area_bulk_data[i], label=channel.name) # Lable => name of the curve = channel i += 1 # Place a legend to the right of this subplot. subplot.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0., fontsize=axis_lable_fontsize) # Create plot and print data fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(15, 10)) fig.suptitle ('Temperature Monitoring File: '+ test_file_name + ' Test program: ' + program_name, fontsize=curr_fontsize, color='blue') ax1.set_title(group_names[1], fontsize=curr_fontsize) plot_area(ax1, bulk_data[1], channels[1], False, formatted_properties[1]) ax2.set_title(group_names[0], fontsize=curr_fontsize) plot_area(ax2, bulk_data[0], channels[0], True, formatted_properties[0]) plt.tight_layout() plt.show() ``` ## Add Result Summary Each Scrap recorded with `sb.glue()` is displayed for each procedure on the __History__ tab in Analysis Automation. ``` sb.glue("File", test_file_name) sb.glue("Test", program_name) sb.glue("Comment", comment_group_1) sb.glue("Displayed Channel #", shown_channel_index) ```
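As a cross-check of the metadata-derived statistics, the same minimum, maximum, and mean values can be recomputed directly from the bulk data retrieved above. This short sketch reuses the `group_names`, `channels`, and `bulk_data` variables from the previous cells and assumes every channel is numeric.

```
import numpy as np

for group_name, group_channels, group_values in zip(group_names, channels, bulk_data):
    print(f"Group: {group_name}")
    for channel, values in zip(group_channels, group_values):
        arr = np.asarray(values, dtype=float)
        print(f"  {channel.name}: min={np.nanmin(arr):.3f}, "
              f"max={np.nanmax(arr):.3f}, mean={np.nanmean(arr):.3f}")
```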
<a href="https://colab.research.google.com/github/Serbeld/ArtificialVisionForQualityControl/blob/master/Copia_de_Yolo_Step_by_Step.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> **Outline of Steps** + Initialization + Download COCO detection data from http://cocodataset.org/#download + http://images.cocodataset.org/zips/train2014.zip <= train images + http://images.cocodataset.org/zips/val2014.zip <= validation images + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations + Run this script to convert annotations in COCO format to VOC format + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py + Download pre-trained weights from https://pjreddie.com/darknet/yolo/ + https://pjreddie.com/media/files/yolo.weights + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder) + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder) + Specity the path of pre-trained weights by setting variable *wt_path* + Construct equivalent network in Keras + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg + Load the pretrained weights + Perform training + Perform detection on an image with newly trained weights + Perform detection on an video with newly trained weights # Initialization ``` !pip install h5py import h5py from google.colab import drive,files drive.mount('/content/drive') import sys sys.path.append('/content/drive/My Drive/keras-yolo2/') !pip install tensorflow-gpu==2.0.0-alpha0 from keras.models import Sequential, Model from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda from keras.layers.advanced_activations import LeakyReLU from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard from keras.optimizers import SGD, Adam, RMSprop from keras.layers.merge import concatenate import matplotlib.pyplot as plt import keras.backend as K import tensorflow as tf import imgaug as ia from tqdm import tqdm from imgaug import augmenters as iaa import numpy as np import pickle import os, cv2 from preprocessing import parse_annotation, BatchGenerator from utils import WeightReader, decode_netout, draw_boxes os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "" LABELS = ['COLOR HDPE', 'PET', 'WHITE HDPE'] IMAGE_H, IMAGE_W = 416, 416 GRID_H, GRID_W = 13 , 13 BOX = 5 CLASS = len(LABELS) CLASS_WEIGHTS = np.ones(CLASS, dtype='float32') OBJ_THRESHOLD = 0.2#0.5 NMS_THRESHOLD = 0.2#0.45 ANCHORS = [0.96,4.22, 1.52,4.79, 2.30,4.30, 2.76,2.35, 3.62,6.03] NO_OBJECT_SCALE = 1.0 OBJECT_SCALE = 5.0 COORD_SCALE = 1.0 CLASS_SCALE = 1.0 BATCH_SIZE = 16 WARM_UP_BATCHES = 0 TRUE_BOX_BUFFER = 50 wt_path = '/content/drive/My Drive/keras-yolo2/yolov2.weights' train_image_folder = '/content/drive/My Drive/dataset/images/' train_annot_folder = '/content/drive/My Drive/dataset/annotations/' valid_image_folder = '/content/drive/My Drive/dataset/images_val/' valid_annot_folder = '/content/drive/My Drive/dataset/annotattionsVAL/' #import os #print(os.listdir('/content/drive/My Drive/dataset/images')) train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS) val_imgs, seen_val_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS) train_batch = BatchGenerator(train_imgs, 
generator_config, norm=normalize) valid_batch = BatchGenerator(val_imgs, generator_config, norm=normalize) ``` **Sanity check: show a few images with ground truth boxes overlaid** ``` batches = BatchGenerator(train_imgs, generator_config) image = batches[0][0][0][0] image = cv2.resize(image,(680,340)) plt.imshow(image.astype('uint8')) ``` # Construct the network ``` # the function to implement the orgnization layer (thanks to github.com/allanzelener/YAD2K) def space_to_depth_x2(x): return tf.space_to_depth(x, block_size=2) input_image = Input(shape=(IMAGE_H, IMAGE_W, 3)) true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4)) # Layer 1 x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image) x = BatchNormalization(name='norm_1')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 2 x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x) x = BatchNormalization(name='norm_2')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 3 x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x) x = BatchNormalization(name='norm_3')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 4 x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x) x = BatchNormalization(name='norm_4')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 5 x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x) x = BatchNormalization(name='norm_5')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 6 x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x) x = BatchNormalization(name='norm_6')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 7 x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x) x = BatchNormalization(name='norm_7')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 8 x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x) x = BatchNormalization(name='norm_8')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 9 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x) x = BatchNormalization(name='norm_9')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 10 x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x) x = BatchNormalization(name='norm_10')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 11 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x) x = BatchNormalization(name='norm_11')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 12 x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x) x = BatchNormalization(name='norm_12')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 13 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x) x = BatchNormalization(name='norm_13')(x) x = LeakyReLU(alpha=0.1)(x) skip_connection = x x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 14 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x) x = BatchNormalization(name='norm_14')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 15 x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x) x = BatchNormalization(name='norm_15')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 16 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x) x = BatchNormalization(name='norm_16')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 
17 x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x) x = BatchNormalization(name='norm_17')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 18 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x) x = BatchNormalization(name='norm_18')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 19 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x) x = BatchNormalization(name='norm_19')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 20 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x) x = BatchNormalization(name='norm_20')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 21 skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection) skip_connection = BatchNormalization(name='norm_21')(skip_connection) skip_connection = LeakyReLU(alpha=0.1)(skip_connection) skip_connection = Lambda(space_to_depth_x2)(skip_connection) x = concatenate([skip_connection, x]) # Layer 22 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x) x = BatchNormalization(name='norm_22')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 23 x = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x) output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x) # small hack to allow true_boxes to be registered when Keras build the model # for more information: https://github.com/fchollet/keras/issues/2790 output = Lambda(lambda args: args[0])([output, true_boxes]) model = Model([input_image, true_boxes], output) model.summary() ``` # Load pretrained weights **Load the weights originally provided by YOLO** ``` weight_reader = WeightReader(wt_path) weight_reader.reset() nb_conv = 23 for i in range(1, nb_conv+1): conv_layer = model.get_layer('conv_' + str(i)) if i < nb_conv: norm_layer = model.get_layer('norm_' + str(i)) size = np.prod(norm_layer.get_weights()[0].shape) beta = weight_reader.read_bytes(size) gamma = weight_reader.read_bytes(size) mean = weight_reader.read_bytes(size) var = weight_reader.read_bytes(size) weights = norm_layer.set_weights([gamma, beta, mean, var]) if len(conv_layer.get_weights()) > 1: bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape)) kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape)) kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape))) kernel = kernel.transpose([2,3,1,0]) conv_layer.set_weights([kernel, bias]) else: kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape)) kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape))) kernel = kernel.transpose([2,3,1,0]) conv_layer.set_weights([kernel]) ``` **Randomize weights of the last layer** ``` layer = model.layers[-4] # the last convolutional layer weights = layer.get_weights() new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W) new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W) layer.set_weights([new_kernel, new_bias]) ``` # Perform training **Loss function** $$\begin{multline} \lambda_\textbf{coord} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} \left[ \left( x_i - \hat{x}_i \right)^2 + \left( y_i - \hat{y}_i \right)^2 \right] \\ + \lambda_\textbf{coord} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} \left[ \left( \sqrt{w_i} - \sqrt{\hat{w}_i} \right)^2 + \left( \sqrt{h_i} - \sqrt{\hat{h}_i} \right)^2 \right] \\ + \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} 
\left( C_i - \hat{C}_i \right)^2 \\ + \lambda_\textrm{noobj} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{noobj}} \left( C_i - \hat{C}_i \right)^2 \\ + \sum_{i = 0}^{S^2} L_i^{\text{obj}} \sum_{c \in \textrm{classes}} \left( p_i(c) - \hat{p}_i(c) \right)^2 \end{multline}$$ ``` def custom_loss(y_true, y_pred): mask_shape = tf.shape(y_true)[:4] cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1))) cell_y = tf.transpose(cell_x, (0,2,1,3,4)) cell_grid = tf.tile(tf.concat([cell_x,cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1]) coord_mask = tf.zeros(mask_shape) conf_mask = tf.zeros(mask_shape) class_mask = tf.zeros(mask_shape) seen = tf.Variable(0.) total_recall = tf.Variable(0.) """ Adjust prediction """ ### adjust x and y pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid ### adjust w and h pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1,1,1,BOX,2]) ### adjust confidence pred_box_conf = tf.sigmoid(y_pred[..., 4]) ### adjust class probabilities pred_box_class = y_pred[..., 5:] """ Adjust ground truth """ ### adjust x and y true_box_xy = y_true[..., 0:2] # relative position to the containing cell ### adjust w and h true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically ### adjust confidence true_wh_half = true_box_wh / 2. true_mins = true_box_xy - true_wh_half true_maxes = true_box_xy + true_wh_half pred_wh_half = pred_box_wh / 2. pred_mins = pred_box_xy - pred_wh_half pred_maxes = pred_box_xy + pred_wh_half intersect_mins = tf.maximum(pred_mins, true_mins) intersect_maxes = tf.minimum(pred_maxes, true_maxes) intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.) intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1] true_areas = true_box_wh[..., 0] * true_box_wh[..., 1] pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1] union_areas = pred_areas + true_areas - intersect_areas iou_scores = tf.truediv(intersect_areas, union_areas) true_box_conf = iou_scores * y_true[..., 4] ### adjust class probabilities true_box_class = tf.argmax(y_true[..., 5:], -1) """ Determine the masks """ ### coordinate mask: simply the position of the ground truth boxes (the predictors) coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE ### confidence mask: penelize predictors + penalize boxes with low IOU # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6 true_xy = true_boxes[..., 0:2] true_wh = true_boxes[..., 2:4] true_wh_half = true_wh / 2. true_mins = true_xy - true_wh_half true_maxes = true_xy + true_wh_half pred_xy = tf.expand_dims(pred_box_xy, 4) pred_wh = tf.expand_dims(pred_box_wh, 4) pred_wh_half = pred_wh / 2. pred_mins = pred_xy - pred_wh_half pred_maxes = pred_xy + pred_wh_half intersect_mins = tf.maximum(pred_mins, true_mins) intersect_maxes = tf.minimum(pred_maxes, true_maxes) intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.) 
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1] true_areas = true_wh[..., 0] * true_wh[..., 1] pred_areas = pred_wh[..., 0] * pred_wh[..., 1] union_areas = pred_areas + true_areas - intersect_areas iou_scores = tf.truediv(intersect_areas, union_areas) best_ious = tf.reduce_max(iou_scores, axis=4) conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE # penalize the confidence of the boxes, which are reponsible for corresponding ground truth box conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE ### class mask: simply the position of the ground truth boxes (the predictors) class_mask = y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE """ Warm-up training """ no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE/2.) seen = tf.assign_add(seen, 1.) true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES), lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask, true_box_wh + tf.ones_like(true_box_wh) * np.reshape(ANCHORS, [1,1,1,BOX,2]) * no_boxes_mask, tf.ones_like(coord_mask)], lambda: [true_box_xy, true_box_wh, coord_mask]) """ Finalize the loss """ nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0)) nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0)) nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0)) loss_xy = tf.reduce_sum(tf.square(true_box_xy-pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2. loss_wh = tf.reduce_sum(tf.square(true_box_wh-pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2. loss_conf = tf.reduce_sum(tf.square(true_box_conf-pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2. loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class) loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6) loss = loss_xy + loss_wh + loss_conf + loss_class nb_true_box = tf.reduce_sum(y_true[..., 4]) nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3)) """ Debugging code """ current_recall = nb_pred_box/(nb_true_box + 1e-6) total_recall = tf.assign_add(total_recall, current_recall) #loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t', summarize=1000) #loss = tf.Print(loss, [loss_xy], message='Loss XY \t', summarize=1000) #loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000) #loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000) #loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000) #loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000) #loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000) #loss = tf.Print(loss, [total_recall/seen], message='Average Recall \t', summarize=1000) loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t') loss = tf.Print(loss, [loss_xy], message='Loss XY \t') loss = tf.Print(loss, [loss_wh], message='Loss WH \t') loss = tf.Print(loss, [loss_conf], message='Loss Conf \t') loss = tf.Print(loss, [loss_class], message='Loss Class \t') loss = tf.Print(loss, [loss], message='Total Loss \t') loss = tf.Print(loss, [current_recall], message='Current Recall \t') loss = tf.Print(loss, [total_recall/seen], message='Average Recall \t') return loss ``` **Parse the annotations to construct train generator and validation generator** ``` generator_config = { 'IMAGE_H' : IMAGE_H, 'IMAGE_W' : IMAGE_W, 'GRID_H' : GRID_H, 'GRID_W' : GRID_W, 'BOX' : BOX, 'LABELS' : LABELS, 'CLASS' : len(LABELS), 'ANCHORS' : ANCHORS, 
'BATCH_SIZE' : BATCH_SIZE, 'TRUE_BOX_BUFFER' : 50, } def normalize(image): return image / 255. print(train_annot_folder) ``` **Setup a few callbacks and start the training** ``` early_stop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3, mode='min', verbose=1) checkpoint = ModelCheckpoint('botellas.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min', period=1) #tb_counter = len([log for log in os.listdir(os.path.expanduser('~/logs/')) if 'coco_' in log]) + 1 #tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/') + 'coco_' + '_' + str(tb_counter), # histogram_freq=0, # write_graph=True, # write_images=False) optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9) #optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(loss=custom_loss, optimizer=optimizer,metrics=['accuracy']) #'loss_xy','loss_wh','loss_conf','loss_classloss','current_recall','total_recall/seen' stad = model.fit_generator(generator = train_batch, steps_per_epoch = len(train_batch), epochs = 3, verbose = 1, validation_data = valid_batch, validation_steps = len(valid_batch), callbacks = [early_stop, checkpoint], max_queue_size = 3) #model.fit_generator(generator = train_batch, # steps_per_epoch = len(train_batch), # epochs = 100, # verbose = 1, # validation_data = valid_batch, # validation_steps = len(valid_batch), # callbacks = [early_stop, checkpoint, tensorboard], # max_queue_size = 3) image = batches[0][0][0][0] plt.imshow(image.astype('uint8'))plt.figure(0) plt.plot(stad.history['acc'],'r') plt.plot(stad.history['val_acc'],'g') plt.xlabel("Num of Epochs") plt.ylabel("Accuracy") plt.title("Training Accuracy vs Validation Accuracy") plt.legend(['train','validation']) plt.savefig("Grafica_1.jpg", bbox_inches = 'tight') plt.figure(1) plt.plot(stad.history['loss'],'r') plt.plot(stad.history['val_loss'],'g') plt.xlabel("Num of Epochs") plt.ylabel("Loss") plt.title("Training Loss vs Validation Loss") plt.legend(['train','validation']) plt.savefig("Grafica_2.jpg", bbox_inches = 'tight') plt.show() ``` # Perform detection on image ``` model.load_weights("botellas.h5") import cv2 import matplotlib.pyplot as plt plt.figure() input_image = cv2.imread("/content/drive/My Drive/dataset/images/1.png") input_image = cv2.resize(input_image, (416, 416)) dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4)) input_image = input_image / 255. 
input_image = input_image[:,:,::-1] input_image = np.expand_dims(input_image, 0) netout = model.predict([input_image, dummy_array]) boxes = decode_netout(netout[0], obj_threshold=OBJ_THRESHOLD, nms_threshold=NMS_THRESHOLD, anchors=ANCHORS, nb_class=CLASS) imagen = draw_boxes(imagen, boxes, labels=LABELS) imagen = cv2.resize(imagen,(640,380)) plt.imshow(imagen[:,:,::-1]); plt.show() ``` # Perform detection on video ``` #model.load_weights("weights_coco.h5") #dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4)) #video_inp = '../basic-yolo-keras/images/phnom_penh.mp4' #video_out = '../basic-yolo-keras/images/phnom_penh_bbox.mp4' #video_reader = cv2.VideoCapture(video_inp) #nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT)) #frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT)) #frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH)) #video_writer = cv2.VideoWriter(video_out, # cv2.VideoWriter_fourcc(*'XVID'), # 50.0, # (frame_w, frame_h)) #for i in tqdm(range(nb_frames)): # ret, image = video_reader.read() # input_image = cv2.resize(image, (416, 416)) # input_image = input_image / 255. # input_image = input_image[:,:,::-1] # input_image = np.expand_dims(input_image, 0) # netout = model.predict([input_image, dummy_array]) # boxes = decode_netout(netout[0], # obj_threshold=0.3, # nms_threshold=NMS_THRESHOLD, # anchors=ANCHORS, # nb_class=CLASS) # image = draw_boxes(image, boxes, labels=LABELS) # video_writer.write(np.uint8(image)) #video_reader.release() #video_writer.release() ```
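The confidence terms of `custom_loss` above revolve around the IoU between predicted and ground-truth boxes. The same min/max intersection arithmetic, stripped down to two single boxes in plain NumPy (boxes given as centre x, centre y, width, height, the convention used in the loss), may make it easier to follow.

```
import numpy as np

def iou_xywh(box_a, box_b):
    """IoU of two boxes given as (center_x, center_y, width, height)."""
    a_xy, a_wh = np.array(box_a[:2]), np.array(box_a[2:])
    b_xy, b_wh = np.array(box_b[:2]), np.array(box_b[2:])

    a_min, a_max = a_xy - a_wh / 2., a_xy + a_wh / 2.
    b_min, b_max = b_xy - b_wh / 2., b_xy + b_wh / 2.

    # intersection box, clamped at zero when the boxes do not overlap
    inter_wh = np.maximum(np.minimum(a_max, b_max) - np.maximum(a_min, b_min), 0.)
    inter = inter_wh[0] * inter_wh[1]
    union = a_wh[0] * a_wh[1] + b_wh[0] * b_wh[1] - inter
    return inter / union

# Example: a predicted box shifted half a cell from the ground truth.
print(iou_xywh((6.5, 6.5, 2.0, 3.0), (7.0, 6.5, 2.0, 3.0)))  # ~0.6
```

In the loss, boxes whose best IoU with every ground-truth box is below 0.6 are pushed toward zero confidence, while the assigned predictors are pulled toward their actual IoU.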
``` import numpy as np import librosa import os import random import tflearn import tensorflow as tf lr = 0.001 iterations_train = 30 bsize = 64 audio_features = 20 utterance_length = 35 ndigits = 10 def get_mfcc_features(fpath): raw_w,sampling_rate = librosa.load(fpath,mono=True) mfcc_features = librosa.feature.mfcc(raw_w,sampling_rate) if(mfcc_features.shape[1]>utterance_length): mfcc_features = mfcc_features[:,0:utterance_length] else: mfcc_features=np.pad(mfcc_features,((0,0),(0,utterance_length-mfcc_features.shape[1])), mode='constant', constant_values=0) return mfcc_features import matplotlib.pyplot as plt import librosa.display %matplotlib inline mfcc_features = get_mfcc_features('../../speech_dset/recordings/train/5_theo_45.wav') plt.figure(figsize=(10, 6)) plt.subplot(2, 1, 1) librosa.display.specshow(mfcc_features, x_axis='time') print("Feature shape: ", mfcc_features.shape) print("Features: ", mfcc_features[:,0]) def get_batch_mfcc(fpath,batch_size=256): ft_batch = [] labels_batch = [] files = os.listdir(fpath) while True: print("Total %d files" % len(files)) random.shuffle(files) for fname in files: if not fname.endswith(".wav"): continue mfcc_features = get_mfcc_features(fpath+fname) label = np.eye(10)[int(fname[0])] labels_batch.append(label) ft_batch.append(mfcc_features) if len(ft_batch) >= batch_size: yield ft_batch, labels_batch ft_batch = [] labels_batch = [] train_batch = get_batch_mfcc('../../speech_dset/recordings/train/') sp_network = tflearn.input_data([None, audio_features, utterance_length]) sp_network = tflearn.lstm(sp_network, 128*4, dropout=0.5) sp_network = tflearn.fully_connected(sp_network, ndigits, activation='softmax') sp_network = tflearn.regression(sp_network, optimizer='adam', learning_rate=lr, loss='categorical_crossentropy') sp_model = tflearn.DNN(sp_network, tensorboard_verbose=0) while iterations_train > 0: X_tr, y_tr = next(train_batch) X_test, y_test = next(train_batch) sp_model.fit(X_tr, y_tr, n_epoch=10, validation_set=(X_test, y_test), show_metric=True, batch_size=bsize) iterations_train-=1 sp_model.save("/tmp/speech_recognition.lstm") sp_model.load('/tmp/speech_recognition.lstm') mfcc_features = get_mfcc_features('../../speech_dset/recordings/test/4_jackson_40.wav') mfcc_features = mfcc_features.reshape((1,mfcc_features.shape[0],mfcc_features.shape[1])) prediction_digit = sp_model.predict(mfcc_features) print(prediction_digit) print("Digit predicted: ", np.argmax(prediction_digit)) ```
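The cell above scores a single test file; below is a hedged sketch for scoring the whole test folder, assuming the same `<digit>_<speaker>_<index>.wav` naming convention and folder layout as the recordings used above.

```
import os
import numpy as np

test_dir = '../../speech_dset/recordings/test/'
correct, total = 0, 0
for fname in os.listdir(test_dir):
    if not fname.endswith('.wav'):
        continue
    feats = get_mfcc_features(test_dir + fname)
    feats = feats.reshape((1, feats.shape[0], feats.shape[1]))
    pred = int(np.argmax(sp_model.predict(feats)))
    correct += int(pred == int(fname[0]))  # the label is the leading digit of the file name
    total += 1
print("Test accuracy: %.3f (%d files)" % (float(correct) / total, total))
```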
``` import sys sys.path.append('../../../GraphGallery/') sys.path.append('../../../GraphAdv/') import tensorflow as tf import numpy as np import networkx as nx import scipy.sparse as sp from graphgallery.nn.models import GCN from graphgallery.nn.functions import softmax from graphadv.attack.targeted import IGA import matplotlib.pyplot as plt plt.style.use(['no-latex', 'ieee']) from graphgallery.data import NPZDataset data = NPZDataset('citeseer', root="~/GraphData/datasets/", verbose=False, standardize=True) adj, x, labels = data.graph.unpack() idx_train, idx_val, idx_test = data.split(random_state=15) n_classes = labels.max() + 1 target = 0 assert target in idx_test print(f"Attack target {target} with class label {labels[target]}") attacker = IGA(adj, x, labels, idx_train, seed=None, surrogate_args={'idx_val':idx_val}) attacker.reset() attacker.attack(target, direct_attack=True, structure_attack=True, feature_attack=False) # show logs attacker.show_edge_flips(detail=True) ``` # Before Attack ``` model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=42) model.build() his = model.train(idx_train, idx_val, verbose=1, epochs=100) softmax(model.predict(target).ravel()) ``` # After Attack ``` model = GCN(attacker.A, x, labels, device='GPU', norm_x=None, seed=42) model.build() his = model.train(idx_train, idx_val, verbose=1, epochs=100) softmax(model.predict(target).ravel()) ``` # Visulation ``` def evaluate(adj, x, retrain_iters=5): classification_margins = [] class_distrs = [] for _ in range(retrain_iters): print(f"... {_+1}/{retrain_iters} ") model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=None) model.build() his = model.train(idx_train, idx_val, verbose=0, epochs=100) logit = softmax(model.predict(target).ravel()) class_distrs.append(logit) best_second_class_before = (logit - labels[target]).argmax() margin = logit[labels[target]] - logit[best_second_class_before] classification_margins.append(margin) model.close del model class_distrs = np.array(class_distrs) return class_distrs retrain_iters = 5 print("Before Attack") class_distrs_clean = evaluate(adj, x, retrain_iters=retrain_iters) print(f"After {attacker.name} Attack") class_distrs_retrain = evaluate(attacker.A, x, retrain_iters=retrain_iters) def make_xlabel(ix, correct): if ix == correct: return "Class {}\n(correct)".format(ix) return "Class {}".format(ix) figure = plt.figure(figsize=(12, 4)) plt.subplot(1, 2, 1) center_ixs_clean = [] for ix, block in enumerate(class_distrs_clean.T): x_ixs = np.arange(len(block)) + ix*(len(block)+2) center_ixs_clean.append(np.mean(x_ixs)) color = '#555555' if ix == labels[target]: color = 'darkgreen' plt.bar(x_ixs, block, color=color) ax = plt.gca() plt.ylim((-.05, 1.05)) plt.ylabel("Predicted probability") ax.set_xticks(center_ixs_clean) ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)]) ax.set_title(f"Predicted class probabilities for node {target} on clean data\n({retrain_iters} re-trainings)") fig = plt.subplot(1, 2, 2) center_ixs_retrain = [] for ix, block in enumerate(class_distrs_retrain.T): x_ixs = np.arange(len(block)) + ix*(len(block)+2) center_ixs_retrain.append(np.mean(x_ixs)) color = '#555555' if ix == labels[target]: color = 'darkgreen' plt.bar(x_ixs, block, color=color) ax = plt.gca() plt.ylim((-.05, 1.05)) ax.set_xticks(center_ixs_retrain) ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)]) ax.set_title(f"Predicted class probabilities for node {target} after {attacker.n_perturbations} 
perturbations\n({retrain_iters} re-trainings)") plt.tight_layout() plt.show() ```
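The bar plots compare full probability distributions; the same `class_distrs` arrays can also be reduced to a single number. This short sketch reports the mean classification margin (true-class probability minus the highest other-class probability) before and after the attack, reusing the variables defined above.

```
import numpy as np

def mean_margin(class_distrs):
    margins = []
    for probs in class_distrs:
        true_p = probs[labels[target]]
        others = np.delete(probs, labels[target])
        margins.append(true_p - others.max())
    return float(np.mean(margins))

print(f"Mean margin on clean graph:    {mean_margin(class_distrs_clean):+.3f}")
print(f"Mean margin after {attacker.name}:  {mean_margin(class_distrs_retrain):+.3f}")
```

A positive margin means the target node is still classified correctly on average; a strongly negative margin after the attack indicates the perturbed edges flipped its prediction.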
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Synthetic simulation of historical TCs parameters using Gaussian copulas (Rueda et al. 2016) and subsequent selection of representative cases using Maximum Dissimilarity (MaxDiss) algorithm (Camus et al. 2011) inputs required: * Historical TC parameters that affect the site (output of *notebook 05*) * number of synthetic simulations to run * number of representative cases to be selected using MaxDiss in this notebook: * synthetic generation of TCs tracks based on gaussian copulas of the TC parameters * MDA selection of representative number of events ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # pip import xarray as xr import numpy as np # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..')) # teslakit from teslakit.database import Database from teslakit.statistical import CopulaSimulation from teslakit.mda import MaxDiss_Simplified_NoThreshold from teslakit.plotting.storms import Plot_TCs_Params_MDAvsSIM, \ Plot_TCs_Params_HISTvsSIM, Plot_TCs_Params_HISTvsSIM_histogram ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/nico/Projects/TESLA-kit/TeslaKit/data' db = Database(p_data) # set site db.SetSite('ROI') # -------------------------------------- # load data and set parameters _, TCs_r2_params = db.Load_TCs_r2_hist() # TCs parameters inside radius 2 # TCs random generation and MDA parameters num_sim_rnd = 100000 num_sel_mda = 1000 ``` ## Historical TCs - Probabilistic Simulation ``` # -------------------------------------- # Probabilistic simulation Historical TCs # aux functions def adjust_to_pareto(var): 'Fix data. It needs to start at 0 for Pareto adjustment ' var = var.astype(float) var_pareto = np.amax(var) - var + 0.00001 return var_pareto def adjust_from_pareto(var_base, var_pareto): 'Returns data from pareto adjustment' var = np.amax(var_base) - var_pareto + 0.00001 return var # use small radius parameters (4º) pmean = TCs_r2_params.pressure_mean.values[:] pmin = TCs_r2_params.pressure_min.values[:] gamma = TCs_r2_params.gamma.values[:] delta = TCs_r2_params.delta.values[:] vmean = TCs_r2_params.velocity_mean.values[:] # fix pressure for p pmean_p = adjust_to_pareto(pmean) pmin_p = adjust_to_pareto(pmin) # join storm parameters for copula simulation storm_params = np.column_stack( (pmean_p, pmin_p, gamma, delta, vmean) ) # statistical simulate PCs using copulas kernels = ['GPareto', 'GPareto', 'ECDF', 'ECDF', 'ECDF'] storm_params_sim = CopulaSimulation(storm_params, kernels, num_sim_rnd) # adjust back pressures from pareto pmean_sim = adjust_from_pareto(pmean, storm_params_sim[:,0]) pmin_sim = adjust_from_pareto(pmin, storm_params_sim[:,1]) # store simulated storms - parameters TCs_r2_sim_params = xr.Dataset( { 'pressure_mean':(('storm'), pmean_sim), 'pressure_min':(('storm'), pmin_sim), 'gamma':(('storm'), storm_params_sim[:,2]), 'delta':(('storm'), storm_params_sim[:,3]), 'velocity_mean':(('storm'), storm_params_sim[:,4]), }, coords = { 'storm':(('storm'), np.arange(num_sim_rnd)) }, ) print(TCs_r2_sim_params) db.Save_TCs_r2_sim_params(TCs_r2_sim_params) # Historical vs Simulated: scatter plot parameters Plot_TCs_Params_HISTvsSIM(TCs_r2_params, TCs_r2_sim_params); # Historical vs Simulated: histogram parameters Plot_TCs_Params_HISTvsSIM_histogram(TCs_r2_params, TCs_r2_sim_params); ``` ## Simulated TCs - MaxDiss classification ``` # 
-------------------------------------- # MaxDiss classification # get simulated parameters pmean_s = TCs_r2_sim_params.pressure_mean.values[:] pmin_s = TCs_r2_sim_params.pressure_min.values[:] gamma_s = TCs_r2_sim_params.gamma.values[:] delta_s = TCs_r2_sim_params.delta.values[:] vmean_s = TCs_r2_sim_params.velocity_mean.values[:] # subset, scalar and directional indexes data_mda = np.column_stack((pmean_s, pmin_s, vmean_s, delta_s, gamma_s)) ix_scalar = [0,1,2] ix_directional = [3,4] centroids = MaxDiss_Simplified_NoThreshold( data_mda, num_sel_mda, ix_scalar, ix_directional ) # store MDA storms - parameters TCs_r2_MDA_params = xr.Dataset( { 'pressure_mean':(('storm'), centroids[:,0]), 'pressure_min':(('storm'), centroids[:,1]), 'velocity_mean':(('storm'), centroids[:,2]), 'delta':(('storm'), centroids[:,3]), 'gamma':(('storm'), centroids[:,4]), }, coords = { 'storm':(('storm'), np.arange(num_sel_mda)) }, ) print(TCs_r2_MDA_params) #db.Save_TCs_r2_mda_params(TCs_r2_MDA_params) # Historical vs Simulated: scatter plot parameters Plot_TCs_Params_MDAvsSIM(TCs_r2_MDA_params, TCs_r2_sim_params); ``` ## Historical TCs (MDA centroids) Waves Simulation Waves data is generated by numerically simulating selected storms. This methodology is not included inside teslakit python library. This step needs to be done before continuing with notebook 07
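For reference, the `CopulaSimulation` call used above (from teslakit) pairs per-parameter marginal fits (the 'GPareto'/'ECDF' kernels) with a copula that preserves the dependence between parameters. The sketch below shows the core Gaussian-copula idea using only NumPy/SciPy and empirical marginals; it is not the teslakit implementation, it ignores the Pareto-tail kernels and any circular treatment that directional parameters such as `gamma`/`delta` may need, and the function name is illustrative.

```
import numpy as np
from scipy import stats

def gaussian_copula_sim(data, n_sim, seed=0):
    """Draw synthetic samples that keep each column's empirical marginal
    distribution and the (Gaussian-copula) dependence between columns.

    data  : (n_obs, n_vars) array of historical parameters
    n_sim : number of synthetic samples to draw
    """
    rng = np.random.default_rng(seed)
    n_obs, n_vars = data.shape

    # 1) map each marginal to standard normal through its empirical CDF
    u = (stats.rankdata(data, axis=0) - 0.5) / n_obs
    z = stats.norm.ppf(u)

    # 2) the Gaussian copula is just the correlation matrix in normal space
    corr = np.corrcoef(z, rowvar=False)

    # 3) draw correlated normals and map back through each empirical quantile
    z_sim = rng.multivariate_normal(np.zeros(n_vars), corr, size=n_sim)
    u_sim = stats.norm.cdf(z_sim)
    return np.column_stack(
        [np.quantile(data[:, j], u_sim[:, j]) for j in range(n_vars)]
    )

# e.g. sim_params = gaussian_copula_sim(storm_params, num_sim_rnd)
```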
# Preprocessing Part ## Author: Xiaochi (George) Li Input: "data.xlsx" provided by the professor Output: "processed_data.pickle" with target variable "Salary" as the last column. And all the missing value should be imputed or dropped. ### Summary In this part, we read the data from the file, did some exploratory data analysis on the data and processed the data for further analysis and synthesis. #### Exploratory Data Analysis * Correlation analysis * Missing value analysis * Unique percentage analysis #### Process * Removed 1. Need NLP: "MOU", "MOU Title", "Title", "Department", 2. No meaning:"Record Number", 3. \>50% missing: "POBP" * Imputed 1. p_dep: mean 2. p_grade: add new category 3. Lump Sum Pay:0 4. benefit: add new category 5. Rate:mean 6. o_pay:median ``` import numpy as np import pandas as pd import sklearn import seaborn as sns import matplotlib.pyplot as plt np.random.seed(42) df = pd.read_excel("data.xlsx",thousands=",") #seperations in thousands df.info() """Correlation analysis""" corr = df.corr() mask = np.zeros_like(corr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True f, ax = plt.subplots(figsize=(11, 9)) cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, vmin=-1, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) corr """Missing rate for each feature""" null_rate = df.isnull().sum(axis = 0).sort_values(ascending = False)/float((len(df))) null_rate """Unique Rate for each feature""" unique_rate = df.apply(lambda x: len(pd.unique(x)),axis = 0).sort_values(ascending = False) #unique rate and sort print(unique_rate) def column_analyse(x,df = df): #print count for columns that only has few uniques print(df[x].value_counts(),"\n",df[x].value_counts().sum() ,"\n",df[x].value_counts()/len(df[x]), "\n-----------------------") column_analyse("e_type") column_analyse("benefit") column_analyse("Time") column_analyse("p_grade") """Feature selection""" categotical_features = ["e_type", "benefit", "Time", "p_grade"] not_include_features = ["MOU", "MOU Title", "Title", "Department", "Record Number", "POBP"] selected_features = [i for i in df.columns if i not in not_include_features] X_selected = df.loc[:,selected_features] X_selected["p_dep"].hist(bins=50) X_selected["p_dep"].describe() X_selected["Lump Sum Pay"].hist(bins=50) X_selected["Lump Sum Pay"].describe() X_selected["Rate"].hist(bins=50) X_selected["Rate"].describe() X_selected["o_pay"].hist(bins=50) X_selected["o_pay"].describe() ``` |Feature Name|Missing Rate|Imputation Method| |----|----|----| |p_dep|0.189287|Mean| |p_grade|0.189287|add new category| |Lump Sum Pay|0.185537|0| |benefit|0.178262|add new category| |Rate|0.058162|mean| |o_pay|0.003750|median| ``` """imputation""" X_selected["p_dep"] = X_selected["p_dep"].fillna(X_selected["p_dep"].mean()) X_selected["Lump Sum Pay"] = X_selected["Lump Sum Pay"].fillna(0) X_selected["Rate"] = X_selected["Rate"].fillna(X_selected["Rate"].mean()) X_selected["o_pay"] = X_selected["o_pay"].fillna(X_selected["o_pay"].median()) X_selected["p_grade"] = X_selected["p_grade"].fillna(-1) X_selected["benefit"] = X_selected["benefit"].fillna(-1) X_selected.head() X_selected.to_pickle("processed_data.pickle") ```
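The imputation above is applied column by column with `fillna`. An equivalent, reusable way to express the same per-column strategies is scikit-learn's `SimpleImputer` inside a `ColumnTransformer`; the sketch below assumes the same column names and is an alternative pattern, not what this notebook actually ran.

```
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer

# Same per-column strategies as the table above, expressed as transformers
imputer = ColumnTransformer(
    transformers=[
        ("mean",     SimpleImputer(strategy="mean"),                    ["p_dep", "Rate"]),
        ("zero",     SimpleImputer(strategy="constant", fill_value=0),  ["Lump Sum Pay"]),
        ("median",   SimpleImputer(strategy="median"),                  ["o_pay"]),
        ("category", SimpleImputer(strategy="constant", fill_value=-1), ["p_grade", "benefit"]),
    ],
    remainder="passthrough",
)

X_imputed = imputer.fit_transform(X_selected)
```

Because the imputer is fitted, the learned means and medians can later be reapplied to unseen data with `imputer.transform(...)`, which avoids leaking statistics from a held-out split.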
# MPIJob and Horovod Runtime ## Running distributed workloads Training a Deep Neural Network is a hard task. With growing datasets, wider and deeper networks, training our Neural Network can require a lot of resources (CPUs / GPUs / Mem and Time). There are two main reasons why we would like to distribute our Deep Learning workloads: 1. **Model Parallelism** &mdash; The **Model** is too big to fit a single GPU. In this case the model contains too many parameters to hold within a single GPU. To negate this we can use strategies like **Parameter Server** or slicing the model into slices of consecutive layers which we can fit in a single GPU. Both strategies require **Synchronization** between the layers held on different GPUs / Parameter Server shards. 2. **Data Parallelism** &mdash; The **Dataset** is too big to fit a single GPU. Using methods like **Stochastic Gradient Descent** we can send batches of data to our models for gradient estimation. This comes at the cost of longer time to converge since the estimated gradient may not fully represent the actual gradient. To increase the likelihood of estimating the actual gradient we could use bigger batches, by sending small batches to different GPUs running the same Neural Network, calculating the batch gradient and then running a **Synchronization Step** to calculate the average gradient over the batches and update the Neural Networks running on the different GPUs. > It is important to understand that the act of distribution adds extra **Synchronization Costs** which may vary according to your cluster's configuration. > <br> > As the gradients and NN needs to be propagated to each GPU in the cluster every epoch (or a number of steps), Networking can become a bottleneck and sometimes different configurations need to be used for optimal performance. > <br> > **Scaling Efficiency** is the metric used to show by how much each additional GPU should benefit the training process with Horovod showing up to 90% (When running with a well written code and good parameters). ![Horovod scaling](https://user-images.githubusercontent.com/16640218/38965607-bf5c46ca-4332-11e8-895a-b9c137e86013.png) ## How can we distribute our training There are two different cluster configurations (which can be combined) we need to take into account. - **Multi Node** &mdash; GPUs are distributed over multiple nodes in the cluster. - **Multi GPU** &mdash; GPUs are within a single Node. In this demo we show a **Multi Node Multi GPU** &mdash; **Data Parallel** enabled training using Horovod. However, you should always try and use the best distribution strategy for your use case (due to the added costs of the distribution itself, ability to run in an optimized way on specific hardware or other considerations that may arise). ## How Horovod works? Horovod's primary motivation is to make it easy to take a single-GPU training script and successfully scale it to train across many GPUs in parallel. This has two aspects: - How much modification does one have to make to a program to make it distributed, and how easy is it to run it? - How much faster would it run in distributed mode? Horovod Supports TensorFlow, Keras, PyTorch, and Apache MXNet. in MLRun we use Horovod with MPI in order to create cluster resources and allow for optimized networking. **Note:** Horovod and MPI may use [NCCL](https://developer.nvidia.com/nccl) when applicable which may require some specific configuration arguments to run optimally. 
Horovod uses this MPI and NCCL concepts for distributed computation and messaging to quickly and easily synchronize between the different nodes or GPUs. ![Ring Allreduce Strategy](https://miro.medium.com/max/700/1*XdMlfmOgPCUG9ZOYLTeP9w.jpeg) Horovod will run your code on all the given nodes (Specific node can be addressed via `hvd.rank()`) while using an `hvd.DistributedOptimizer` wrapper to run the **synchronization cycles** between the copies of your Neural Network running at each node. **Note:** Since all the copies of your Neural Network must be the same, Your workers will adjust themselves to the rate of the slowest worker (simply by waiting for it to finish the epoch and receive its updates). Thus try not to make a specific worker do a lot of additional work on each epoch (Like a lot of saving, extra calculations, etc...) since this can affect the overall training time. ## How do we integrate TF2 with Horovod? As it's one of the main motivations, integration is fairly easy and requires only a few steps: ([You can read the full instructions for all the different frameworks on Horovod's documentation website](https://horovod.readthedocs.io/en/stable/tensorflow.html)). 1. Run `hvd.init()`. 2. Pin each GPU to a single process. With the typical setup of one GPU per process, set this to local rank. The first process on the server will be allocated the first GPU, the second process will be allocated the second GPU, and so forth. ``` gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') ``` 3. Scale the learning rate by the number of workers. Effective batch size in synchronous distributed training is scaled by the number of workers. An increase in learning rate compensates for the increased batch size. 4. Wrap the optimizer in `hvd.DistributedOptimizer`. The distributed optimizer delegates gradient computation to the original optimizer, averages gradients using allreduce or allgather, and then applies those averaged gradients. For TensorFlow v2, when using a `tf.GradientTape`, wrap the tape in `hvd.DistributedGradientTape` instead of wrapping the optimizer. 1. Broadcast the initial variable states from rank 0 to all other processes. This is necessary to ensure consistent initialization of all workers when training is started with random weights or restored from a checkpoint. For TensorFlow v2, use `hvd.broadcast_variables` after models and optimizers have been initialized. 1. Modify your code to save checkpoints only on worker 0 to prevent other workers from corrupting them. For TensorFlow v2, construct a `tf.train.Checkpoint` and only call `checkpoint.save()` when `hvd.rank() == 0`. You can go to [Horovod's Documentation](https://horovod.readthedocs.io/en/stable) to read more about horovod. ## Image classification use case See the end to end [**Image Classification with Distributed Training Demo**](https://github.com/mlrun/demos/tree/0.6.x/image-classification-with-distributed-training)
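The integration steps listed above translate into only a few lines of code. The sketch below uses the Keras wrapper (`horovod.tensorflow.keras`) rather than the `tf.GradientTape` route, purely for brevity; the model, optimizer, and file names are placeholders, and it is not taken from the MLRun demo linked above.

```
import tensorflow as tf
import horovod.tensorflow.keras as hvd

# 1) initialise Horovod and 2) pin one GPU per local process
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
    tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')

# toy model - stands in for your real network
model = tf.keras.Sequential([tf.keras.layers.Dense(10, activation='softmax')])

# 3) scale the learning rate by the number of workers
opt = tf.keras.optimizers.SGD(learning_rate=0.01 * hvd.size())

# 4) wrap the optimizer so gradients are averaged across workers
opt = hvd.DistributedOptimizer(opt)

model.compile(loss='sparse_categorical_crossentropy', optimizer=opt)

callbacks = [
    # 5) broadcast initial weights from rank 0 so every worker starts identical
    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
]
# 6) only worker 0 writes checkpoints
if hvd.rank() == 0:
    callbacks.append(tf.keras.callbacks.ModelCheckpoint('./checkpoint.h5'))

# model.fit(dataset, callbacks=callbacks, epochs=..., verbose=1 if hvd.rank() == 0 else 0)
```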
# Amazon Augmented AI(A2I) Integrated with AWS Marketplace ML Models Sometimes, for some payloads, machine learning (ML) model predictions are just not confident enough and you want more than a machine. Furthermore, training a model can be complicated, time-consuming, and expensive. This is where [AWS Marketplace](https://aws.amazon.com/marketplace/b/6297422012?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao) and [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/) (Amazon A2I) come in. By combining a pretrained ML model in AWS Marketplace with Amazon Augmented AI, you can quickly reap the benefits of pretrained models with validating and augmenting the model's accuracy with human intelligence. AWS Marketplace contains over 400 pretrained ML models. Some models are general purpose. For example, the [GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?qid=1605041213915&sr=0-5&ref_=sa_campaign_pbrao) can detect objects in an image and place bounding boxes around the objects. AWS Marketplace also offers many purpose-built models such as a [Background Noise Classifier](https://aws.amazon.com/marketplace/pp/prodview-vpd6qdjm4d7u4?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), a [Hard Hat Detector for Worker Safety](https://aws.amazon.com/marketplace/pp/prodview-jd5tj2egpxxum?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), and a [Person in Water](https://aws.amazon.com/marketplace/pp/prodview-wlndemzv5pxhw?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao). Amazon A2I provides a human-in-loop workflow to review ML predictions. Its configurable human-review workflow solution and customizable user-review console enable you to focus on ML tasks and increase the accuracy of the predictions with human input. ## Overview In this notebook, you will use a pre-trained AWS Marketplace machine learning model with Amazon A2I to detect images as well as trigger a human-in-loop workflow to review, update and add additional labeled objects to an individual image. Furthermore, you can specify configurable threshold criteria for triggering the human-in-loop workflow in Amazon A2I. For example, you can trigger a human-in-loop workflow if there are no objects that are detected with an accuracy of 90% or greater. The following diagram shows the AWS services that are used in this notebook and the steps that you will perform. Here are the high level steps in this notebook: 1. Configure the human-in-loop review using Amazon A2I 1. Select, deploy, and invoke an AWS Marketplace ML model 1. Trigger the human review workflow in Amazon A2I. 1. The private workforce that was created in Amazon SageMaker Ground Truth reviews and edits the objects detected in the image. 
<img style="float: center;" src="./img/a2i_diagram.png" width="700" height="500"> ## Contents * [Prerequisites](#Prerequisites) * [Step 1 Configure Amazon A2I service](#step1) * [Step 1.1 Creating human review Workteam or Workforce](#step1_1) * [Step 1.2 Create Human Task UI](#step1_2) * [Step 1.3 Create the Flow Definition](#step1_3) * [Step 2 Deploy and invoke AWS Marketplace model](#step2) * [Step 2.1 Create an endpoint](#step2_1) * [Step 2.2 Create input payload](#step2_2) * [Step 2.3 Perform real-time inference](#step2_3) * [Step3 Starting Human Loops](#step3) * [Step 3.1 View Task Results](#step3_1) * [Step 4 Next steps](#step4) * [Step 4.1 Additional resources](#step4_1) * [Step 5 Cleanup Resources](#step5) ### Usage instructions You can run this notebook one cell at a time (By using Shift+Enter for running a cell). ## Prerequisites <a class="anchor" id="prerequisites"></a> This sample notebook requires a subscription to **[GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao)**, a pre-trained machine learning model package from AWS Marketplace. If your AWS account has not been subscribed to this listing, here is the process you can follow: 1. Open the [listing](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao) from AWS Marketplace 1. Read the Highlights section and then product overview section of the listing. 1. View usage information and then additional resources. 1. Note the supported instance types. 1. Next, click on **Continue to subscribe.** 1. Review End-user license agreement, support terms, as well as pricing information. 1. The **Accept Offer** button needs to be selected if your organization agrees with EULA, pricing information as well as support terms. If the Continue to configuration button is active, it means your account already has a subscription to this listing. Once you select the **Continue to configuration** button and then choose **region**, you will see that a Product Arn will appear. This is the **model package ARN** that you need to specify in the following cell. ``` model_package_arn = "arn:aws:sagemaker:us-east-1:865070037744:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0" # Update as needed ``` 8. This notebook requires the IAM role associated with this notebook to have *AmazonSageMakerFullAccess* IAM permission. 8. Note: If you want to run this notebook on AWS SageMaker Studio - please use Classic Jupyter mode to be able correctly render visualization. Pick instance type **'ml.m4.xlarge'** or larger. Set kernel to **'Data Science'**. <img style="float: left;" src="./img/classicjupyter.png"> ### Installing Dependencies Import the libraries that are needed for this notebook. 
``` # Import necessary libraries import boto3 import json import pandas as pd import pprint import requests import sagemaker import shutil import time import uuid import PIL.Image from IPython.display import Image from IPython.display import Markdown as md from sagemaker import get_execution_role from sagemaker import ModelPackage ``` #### Setup Variables, Bucket and Paths ``` # Setting Role to the default SageMaker Execution Role role = get_execution_role() # Instantiate the SageMaker session and client that will be used throughout the notebook sagemaker_session = sagemaker.Session() sagemaker_client = sagemaker_session.sagemaker_client # Fetch the region region = sagemaker_session.boto_region_name # Create S3 and A2I clients s3 = boto3.client("s3", region) a2i = boto3.client("sagemaker-a2i-runtime", region) # Retrieve the current timestamp timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) # endpoint_name = '<ENDPOINT_NAME>' endpoint_name = "gluoncv-object-detector" # content_type='<CONTENT_TYPE>' content_type = "image/jpeg" # Instance size type to be used for making real-time predictions real_time_inference_instance_type = "ml.m4.xlarge" # Task UI name - this value is unique per account and region. You can also provide your own value here. # task_ui_name = '<TASK_UI_NAME>' task_ui_name = "ui-aws-marketplace-gluon-model-" + timestamp # Flow definition name - this value is unique per account and region. You can also provide your own value here. flow_definition_name = "fd-aws-marketplace-gluon-model-" + timestamp # Name of the image file that will be used in object detection image_file_name = "image.jpg" # Create the sub-directory in the default S3 bucket # that will store the results of the human-in-loop A2I review bucket = sagemaker_session.default_bucket() key = "a2i-results" s3.put_object(Bucket=bucket, Key=(key + "/")) output_path = f"s3://{bucket}/a2i-results" print(f"Results of A2I will be stored in {output_path}.") ``` ## Step 1 Configure Amazon A2I service<a class="anchor" id="step1"></a> In this section, you will create 3 resources: 1. Private workforce 2. Human-in-loop Console UI 3. Workflow definition ### Step 1.1 Creating human review Workteam or Workforce <a class="anchor" id="step1_1"></a> If you have already created a private work team, replace <WORKTEAM_ARN> with the ARN of your work team. If you have never created a private work team, use the instructions below to create one. To learn more about using and managing private work teams, see [Use a Private Workforce](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html)). 1. In the Amazon SageMaker console in the left sidebar under the Ground Truth heading, open the **Labeling Workforces**. 1. Choose **Private**, and then choose **Create private team**. 1. If you are a new user of SageMaker workforces, it is recommended you select **Create a private work team with AWS Cognito**. 1. For team name, enter "MyTeam". 1. To add workers by email, select **Invite new workers by email** and paste or type a list of up to 50 email addresses, separated by commas, into the email addresses box. If you are following this notebook, specify an email account that you have access to. The system sends an invitation email, which allows users to authenticate and set up their profile for performing human-in-loop review. 1. Enter an organization name - this will be used to customize emails sent to your workers. 1. For contact email, enter an email address you have access to. 1. Select **Create private team**. 
This will bring you back to the Private tab under labeling workforces, where you can view and manage your private teams and workers. ### **IMPORTANT: After you have created your workteam, from the Team summary section copy the value of the ARN and uncomment and replace `<WORKTEAM_ARN>` below:** ``` # workteam_arn = '<WORKTEAM_ARN>' ``` ### Step 1.2 Create Human Task UI <a class="anchor" id="step1_2"></a> Create a human task UI resource, giving a UI template in liquid HTML. This template will be rendered to the human workers whenever human loop is required. For additional UI templates, check out this repository: https://github.com/aws-samples/amazon-a2i-sample-task-uis. You will be using a slightly modified version of the [object detection UI](https://github.com/aws-samples/amazon-a2i-sample-task-uis/blob/master/images/bounding-box.liquid.html) that provides support for the `initial-value` and `labels` variables in the template. ``` # Create task UI # Read in the template from a local file template = open("./src/worker-task-template.html").read() human_task_ui_response = sagemaker_client.create_human_task_ui( HumanTaskUiName=task_ui_name, UiTemplate={"Content": template} ) human_task_ui_arn = human_task_ui_response["HumanTaskUiArn"] print(human_task_ui_arn) ``` ### Step 1.3 Create the Flow Definition <a class="anchor" id="step1_3"></a> In this section, you will create a flow definition. Flow Definitions allow you to specify: * The workforce that your tasks will be sent to. * The instructions that your workforce will receive. This is called a worker task template. * The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks. * Where your output data will be stored. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html. ``` create_workflow_definition_response = sagemaker_client.create_flow_definition( FlowDefinitionName=flow_definition_name, RoleArn=role, HumanLoopConfig={ "WorkteamArn": workteam_arn, "HumanTaskUiArn": human_task_ui_arn, "TaskCount": 1, "TaskDescription": "Identify and locate the object in an image.", "TaskTitle": "Object detection Amazon A2I demo", }, OutputConfig={"S3OutputPath": output_path}, ) flow_definition_arn = create_workflow_definition_response[ "FlowDefinitionArn" ] # let's save this ARN for future use %%time # Describe flow definition - status should be active for x in range(60): describe_flow_definition_response = sagemaker_client.describe_flow_definition( FlowDefinitionName=flow_definition_name ) print(describe_flow_definition_response["FlowDefinitionStatus"]) if describe_flow_definition_response["FlowDefinitionStatus"] == "Active": print("Flow Definition is active") break time.sleep(2) ``` ## Step 2 Deploy and invoke AWS Marketplace model <a class="anchor" id="step2"></a> In this section, you will stand up an Amazon SageMaker endpoint. Each endpoint must have a unique name which you can use for performing inference. ### Step 2.1 Create an Endpoint <a class="anchor" id="step2_1"></a> ``` %%time # Create a deployable model from the model package. model = ModelPackage( role=role, model_package_arn=model_package_arn, sagemaker_session=sagemaker_session, predictor_cls=sagemaker.predictor.Predictor, ) # Deploy the model predictor = model.deploy( initial_instance_count=1, instance_type=real_time_inference_instance_type, endpoint_name=endpoint_name, ) ``` It will take anywhere between 5 to 10 minutes to create the endpoint. 
Once the endpoint has been created, you would be able to perform real-time inference. ### Step 2.2 Create input payload <a class="anchor" id="step2_2"></a> In this step, you will prepare a payload to perform a prediction. ``` # Download the image file # Open the url image, set stream to True, this will return the stream content. r = requests.get("https://images.pexels.com/photos/763398/pexels-photo-763398.jpeg", stream=True) # Open a local file with wb ( write binary ) permission to save it locally. with open(image_file_name, "wb") as f: shutil.copyfileobj(r.raw, f) ``` Resize the image and upload the file to S3 so that the image can be referenced from the worker console UI. ``` # Load the image image = PIL.Image.open(image_file_name) # Resize the image resized_image = image.resize((600, 400)) # Save the resized image file locally resized_image.save(image_file_name) # Save file to S3 s3 = boto3.client("s3") with open(image_file_name, "rb") as f: s3.upload_fileobj(f, bucket, image_file_name) # Display the image from IPython.core.display import Image, display Image(filename=image_file_name, width=600, height=400) ``` ### Step 2.3 Perform real-time inference <a class="anchor" id="step2_3"></a> Submit the image file to the model and it will detect the objects in the image. ``` with open(image_file_name, "rb") as f: payload = f.read() response = sagemaker_session.sagemaker_runtime_client.invoke_endpoint( EndpointName=endpoint_name, ContentType=content_type, Accept="json", Body=payload ) result = json.loads(response["Body"].read().decode()) # Convert list to JSON json_result = json.dumps(result) df = pd.read_json(json_result) # Display confidence scores < 0.90 df = df[df.score < 0.90] print(df.head()) ``` ## Step 3 Starting Human Loops <a class="anchor" id="step3"></a> In a previous step, you have already submitted your image to the model for prediction and stored the output in JSON format in the `result` variable. You simply need to modify the X, Y coordinates of the bounding boxes. Additionally, you can filter out all predictions that are less than 90% accurate before submitting it to your human-in-loop review. This will insure that your model's predictions are highly accurate and any additional detections of objects will be made by a human. 
``` # Helper function to update X,Y coordinates and labels for the bounding boxes def fix_boundingboxes(prediction_results, threshold=0.8): bounding_boxes = [] labels = set() for data in prediction_results: label = data["id"] labels.add(label) if data["score"] > threshold: width = data["right"] - data["left"] height = data["bottom"] - data["top"] top = data["top"] left = data["left"] bounding_boxes.append( {"height": height, "width": width, "top": top, "left": left, "label": label} ) return bounding_boxes, list(labels) bounding_boxes, labels = fix_boundingboxes(result, threshold=0.9) # Define the content that is passed into the human-in-loop workflow and console human_loop_name = str(uuid.uuid4()) input_content = { "initialValue": bounding_boxes, # the bounding box values that have been detected by model prediction "taskObject": f"s3://{bucket}/" + image_file_name, # the s3 object will be passed to the worker task UI to render "labels": labels, # the labels that are displayed in the legend } # Trigger the human-in-loop workflow start_loop_response = a2i.start_human_loop( HumanLoopName=human_loop_name, FlowDefinitionArn=flow_definition_arn, HumanLoopInput={"InputContent": json.dumps(input_content)}, ) ``` Now that the human-in-loop review has been triggered, you can log into the worker console to work on the task and make edits and additions to the object detection bounding boxes from the image. ``` # Fetch the URL for the worker console UI workteam_name = workteam_arn.split("/")[-1] my_workteam = sagemaker_session.sagemaker_client.list_workteams(NameContains=workteam_name) worker_console_url = "https://" + my_workteam["Workteams"][0]["SubDomain"] md( "### Click on the [Worker Console]({}) to begin reviewing the object detection".format( worker_console_url ) ) ``` The below image shows the objects that were detected for the sample image that was used in this notebook by your model and displayed in the worker console. <img src='./img/rain_biker_bb.png' align='center' height=600 width=800/> You can now make edits to the image to detect other objects. For example, in the image above, the model failed to detect the bicycle in the foreground with an accuracy of 90% or greater. However, as a human reviewer, you can clearly see the bicycle and can make a bounding box around it. Once you have finished with your edits, you can submit the result. ### Step 3.1 View Task Results <a class="anchor" id="step3_1"></a> Once work is completed, Amazon A2I stores results in your S3 bucket and sends a Cloudwatch event. Your results should be available in the S3 `output_path` that you specified when all work is completed. Note that the human answer, the label and the bounding box, is returned and saved in the JSON file. **NOTE: You must edit/submit the image in the Worker console so that its status is `Completed`.** ``` # Fetch the details about the human loop review in order to locate the JSON output on S3 resp = a2i.describe_human_loop(HumanLoopName=human_loop_name) # Wait for the human-in-loop review to be completed while True: resp = a2i.describe_human_loop(HumanLoopName=human_loop_name) print("-", sep="", end="", flush=True) if resp["HumanLoopStatus"] == "Completed": print("!") break time.sleep(2) ``` Once its status is `Completed`, you can execute the below cell to view the JSON output that is stored in S3. Under `annotatedResult`, any new bounding boxes will be included along with those that the model predicted, will be included. 
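When you parse that output document programmatically, the reviewer-drawn boxes typically sit under `humanAnswers[*].answerContent.annotatedResult.boundingBoxes`; the exact keys depend on the worker task template from Step 1.2, so treat the names below as assumptions. A small helper sketch, to be applied to the `json_output` object loaded in the next cell:

```
# Hypothetical helper: the key names mirror the worker template used above
# ('annotatedResult', 'boundingBoxes') and may differ for other templates.
def get_human_boxes(a2i_output):
    boxes = []
    for answer in a2i_output.get("humanAnswers", []):
        annotated = answer.get("answerContent", {}).get("annotatedResult", {})
        boxes.extend(annotated.get("boundingBoxes", []))
    return boxes

# Example (run after the next cell has defined json_output):
# for box in get_human_boxes(json_output):
#     print(box.get("label"), box.get("left"), box.get("top"),
#           box.get("width"), box.get("height"))
```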
To learn more about the output data schema, please refer to the documentation about [Output Data From Custom Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-output-data.html#sms-output-data-custom). ``` # Once the image has been submitted, display the JSON output that was sent to S3 bucket, key = resp["HumanLoopOutput"]["OutputS3Uri"].replace("s3://", "").split("/", 1) response = s3.get_object(Bucket=bucket, Key=key) content = response["Body"].read() json_output = json.loads(content) print(json.dumps(json_output, indent=1)) ``` ## Step 4 Next Steps <a class="anchor" id="step4"></a> ### Step 4.1 Additional Resources <a class="anchor" id="step4_1"></a> * You can explore additional machine learning models in [AWS Marketplace - Machine Learning](https://aws.amazon.com/marketplace/b/c3714653-8485-4e34-b35b-82c2203e81c1?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao). * Learn more about [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/) * Other AWS blogs that may be of interest are: * [Using AWS Marketplace for machine learning workloads](https://aws.amazon.com/blogs/awsmarketplace/using-aws-marketplace-for-machine-learning-workloads/) * [Adding AI to your applications with ready-to-use models from AWS Marketplace](https://aws.amazon.com/blogs/machine-learning/adding-ai-to-your-applications-with-ready-to-use-models-from-aws-marketplace/) * [Building an end-to-end intelligent document processing solution using AWS](https://aws.amazon.com/blogs/machine-learning/building-an-end-to-end-intelligent-document-processing-solution-using-aws/) ## Step 5 Clean up resources <a class="anchor" id="step5"></a> In order to clean up the resources from this notebook,simply execute the below cells. ``` # Delete Workflow definition sagemaker_client.delete_flow_definition(FlowDefinitionName=flow_definition_name) # Delete Human Task UI sagemaker_client.delete_human_task_ui(HumanTaskUiName=task_ui_name) # Delete Endpoint predictor.delete_endpoint() # Delete Model predictor.delete_model() ``` #### Cancel AWS Marketplace subscription (Optional) Finally, if you subscribed to AWS Marketplace model for an experiment and would like to unsubscribe, you can follow the steps below. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model. **Steps to unsubscribe from the product on AWS Marketplace:** Navigate to Machine Learning tab on Your [Software subscriptions page](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=lbr_tab_ml). Locate the listing that you would need to cancel, and click Cancel Subscription.
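Before cancelling the subscription, the note above asks you to confirm that no deployable models still reference the model package. Below is a hedged boto3 sketch of that check; whether a container definition exposes `ModelPackageName` depends on how the model was created, so the lookups are defensive and the output is only indicative.

```
import boto3

sm = boto3.client("sagemaker")

# Walk the models in this account/region and flag any whose container
# definition appears to reference a model package.
paginator = sm.get_paginator("list_models")
for page in paginator.paginate():
    for summary in page["Models"]:
        detail = sm.describe_model(ModelName=summary["ModelName"])
        containers = detail.get("Containers") or [detail.get("PrimaryContainer", {})]
        for container in containers:
            package = container.get("ModelPackageName")
            if package:
                print(f"{summary['ModelName']} still references {package}")
```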
# NLP - Using spaCy library - **Created by Andrés Segura Tinoco** - **Created on June 04, 2019** - **Updated on October 29, 2021** **Natural language processing (NLP):** is a discipline where computer science, artificial intelligence and cognitive logic are intercepted, with the objective that machines can read and understand our language for decision making <a href="#link_one">[1]</a>. **spaCy:** features fast statistical NER as well as an open-source named-entity visualizer <a href="#link_two">[2]</a>. ## Example with a document in Spanish ``` # Load Python libraries import io import random from collections import Counter # Load NLP libraries from spacy import spacy # Verify installed spacy version spacy.__version__ ``` ### Step 1 - Read natural text from a book ``` # Util function to read a plain text file def read_text_file(file_path, encoding='ISO-8859-1'): text = "" with open(file_path, 'r', encoding=encoding) as f: text = f.read() return text # Get text sample file_path = "../data/es/El Grillo del Hogar - Charles Dickens.txt" book_text = read_text_file(file_path) # Show first 1000 raw characters of document book_text[:1000] ``` ### Step 2 - Create a NLP model ``` # Create NLP model for spanish language nlp = spacy.load('es_core_news_sm') doc_es = nlp(book_text) ``` **- Vocabulary:** unique words of the document. ``` # Get vocabulary vocabulary_es = set(str(token).lower() for token in doc_es if not token.is_stop and token.is_alpha) len(vocabulary_es) # Show 100 random words of the vocabulary print(random.sample(vocabulary_es, 100)) ``` **- Stopwords:** refers to the most common words in a language, which do not significantly affect the meaning of the text. ``` # Get unique stop-words stop_words_es = set(str(token).lower() for token in doc_es if token.is_stop) len(stop_words_es) # Show unique stop-words print(stop_words_es) ``` **- Entity:** can be any word or series of words that consistently refers to the same thing. ``` # Returns a text with data quality def text_quality(text): new_text = text.replace('\n', '') return new_text.strip('\r\n') # Print out named first 50 entities for ix in range(50): ent = doc_es.ents[ix] ent_text = text_quality(ent.text) if len(ent_text) > 3: print((ix + 1), '- Entity:', ent_text, ', Label:', ent.label_) ``` ### Step 3 - Working with POS, NER and sentences **- POS:** the parts of speech explain how a word is used in a sentence. ``` # Part of speech (POS) used in this document set(token.pos_ for token in doc_es) ``` **- Sentences:** a set of words that is complete in itself and typically containing a subject and predicate. ``` # How many sentences are in this text? sentences = [s for s in doc_es.sents] len(sentences) # Show first 10 sentences sentences[1:11] # Get the sentences in which the 'grillo' appears pattern = 'grillo' cricket_sent = [sent for sent in doc_es.sents if pattern in sent.text] len(cricket_sent) # Show the first 10 sentences in which the 'grillo' appears for sent in cricket_sent[1:11]: print('-', sent) ``` **- NER:** Named Entity Recognition. 
``` # Returns the most common entities and their quantity def find_entities(doc, ent_type, n): entities = Counter() for ent in doc.ents: if ent.label_ == ent_type: ent_name = text_quality(ent.lemma_) entities[ent_name] += 1 return entities.most_common(n) # Show entities of type PERSON find_entities(doc_es, 'PER', 20) # Returns persons adjectives def get_person_adj(doc, person): adjectives = [] for ent in doc.ents: if ent.lemma_ == person: for token in ent.subtree: if token.pos_ == 'ADJ': # Adjective adjectives.append(token.lemma_) for ent in doc.ents: if ent.lemma_ == person: if ent.root.dep_ == 'nsubj': # Nominal subject for child in ent.root.head.children: if child.dep_ == 'acomp': # Adjectival complement adjectives.append(child.lemma_) return set(adjectives) # Show the adjectives used for John (most common entity) curr_person = 'John' print(get_person_adj(doc_es, curr_person)) # Returns the people who use a certain verb def verb_persons(doc, verb, n): verb_count = Counter() for ent in doc.ents: if ent.label_ == 'PER' and ent.root.head.lemma_ == verb: verb_count[ent.text] += 1 return verb_count.most_common(n) # Show the people who use a certain verb curr_verb = 'hacer' verb_persons(doc_es, curr_verb, 10) # Get ADJ type labels adj_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'ADJ') len(adj_tokens) # Show 50 random ADJ type labels print(random.sample(adj_tokens, 50)) # Get PROPN type labels propn_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'PROPN') len(adj_tokens) # Show 50 random PROPN type labels print(random.sample(propn_tokens, 50)) ``` ## Reference <a name='link_one' href='https://en.wikipedia.org/wiki/Natural_language_processing' target='_blank' >[1]</a> Wikipedia - Natural language processing. <a name='link_two' href='https://spacy.io/' target='_blank' >[2]</a> spaCy website. <hr> <p><a href="https://ansegura7.github.io/NLP/">« Home</a></p>
# Tutorial Part 11: Learning Unsupervised Embeddings for Molecules In this example, we will use a `SeqToSeq` model to generate fingerprints for classifying molecules. This is based on the following paper, although some of the implementation details are different: Xu et al., "Seq2seq Fingerprint: An Unsupervised Deep Molecular Embedding for Drug Discovery" (https://doi.org/10.1145/3107411.3107424). Many types of models require their inputs to have a fixed shape. Since molecules can vary widely in the numbers of atoms and bonds they contain, this makes it hard to apply those models to them. We need a way of generating a fixed length "fingerprint" for each molecule. Various ways of doing this have been designed, such as Extended-Connectivity Fingerprints (ECFPs). But in this example, instead of designing a fingerprint by hand, we will let a `SeqToSeq` model learn its own method of creating fingerprints. A `SeqToSeq` model performs sequence to sequence translation. For example, they are often used to translate text from one language to another. It consists of two parts called the "encoder" and "decoder". The encoder is a stack of recurrent layers. The input sequence is fed into it, one token at a time, and it generates a fixed length vector called the "embedding vector". The decoder is another stack of recurrent layers that performs the inverse operation: it takes the embedding vector as input, and generates the output sequence. By training it on appropriately chosen input/output pairs, you can create a model that performs many sorts of transformations. In this case, we will use SMILES strings describing molecules as the input sequences. We will train the model as an autoencoder, so it tries to make the output sequences identical to the input sequences. For that to work, the encoder must create embedding vectors that contain all information from the original sequence. That's exactly what we want in a fingerprint, so perhaps those embedding vectors will then be useful as a way to represent molecules in other models! ## Colab This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb) ## Setup To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment. This notebook will take a few hours to run on a GPU machine, so we encourage you to run it on Google colab unless you have a good GPU machine available. ``` !wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh !chmod +x Anaconda3-2019.10-Linux-x86_64.sh !bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local !conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0 import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') import deepchem as dc ``` Let's start by loading the data. We will use the MUV dataset. It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with. 
``` import deepchem as dc tasks, datasets, transformers = dc.molnet.load_muv() train_dataset, valid_dataset, test_dataset = datasets train_smiles = train_dataset.ids valid_smiles = valid_dataset.ids ``` We need to define the "alphabet" for our `SeqToSeq` model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) Make a list of every character that appears in any training sequence. ``` tokens = set() for s in train_smiles: tokens = tokens.union(set(c for c in s)) tokens = sorted(list(tokens)) ``` Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an `ExponentialDecay` to multiply the learning rate by 0.9 after each epoch. ``` from deepchem.models.optimizers import Adam, ExponentialDecay max_length = max(len(s) for s in train_smiles) batch_size = 100 batches_per_epoch = len(train_smiles)/batch_size model = dc.models.SeqToSeq(tokens, tokens, max_length, encoder_layers=2, decoder_layers=2, embedding_dimension=256, model_dir='fingerprint', batch_size=batch_size, learning_rate=ExponentialDecay(0.004, 0.9, batches_per_epoch)) ``` Let's train it! The input to `fit_sequences()` is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less. ``` def generate_sequences(epochs): for i in range(epochs): for s in train_smiles: yield (s, s) model.fit_sequences(generate_sequences(40)) ``` Let's see how well it works as an autoencoder. We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced. ``` predicted = model.predict_from_sequences(valid_smiles[:500]) count = 0 for s,p in zip(valid_smiles[:500], predicted): if ''.join(p) == s: count += 1 print('reproduced', count, 'of 500 validation SMILES strings') ``` Now we'll trying using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory. ``` train_embeddings = model.predict_embeddings(train_smiles) train_embeddings_dataset = dc.data.NumpyDataset(train_embeddings, train_dataset.y, train_dataset.w, train_dataset.ids) valid_embeddings = model.predict_embeddings(valid_smiles) valid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings, valid_dataset.y, valid_dataset.w, valid_dataset.ids) ``` For classification, we'll use a simple fully connected network with one hidden layer. ``` classifier = dc.models.MultitaskClassifier(n_tasks=len(tasks), n_features=256, layer_sizes=[512]) classifier.fit(train_embeddings_dataset, nb_epoch=10) ``` Find out how well it worked. Compute the ROC AUC for the training and validation datasets. ``` import numpy as np metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification") train_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers) valid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers) print('Training set ROC AUC:', train_score) print('Validation set ROC AUC:', valid_score) ``` # Congratulations! Time to join the Community! Congratulations on completing this tutorial notebook! 
If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways: ## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem) This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build. ## Join the DeepChem Gitter The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
``` import argparse import logging from operator import mul import time import os import pubweb.singlecell # import AnnDataSparse from pubweb.hdf5 import Hdf5 from pubweb.commands.convert.singlecell.anndata import ImportAnndata from pubweb.commands.convert.singlecell.cellranger import ImportCellRanger from pubweb.commands.validate.dimensions import ValidateDimensions from pubweb.commands.annotate.geneid import AnnotateGeneId from pubweb.commands.annotate.geneset import AnnotateGeneset from pubweb.commands.export.lists import ExportLists from pubweb.commands.export.attributes import ExportAttributes from pubweb.commands.export.tables import ExportTables from pubweb.commands.export.projections import ExportProjections from pubweb.commands.export.spatial import ExportSpatial from pubweb.commands.export.matrix_sparse import ExportMatrixSparse from pubweb.commands.export.matrix_dense import ExportMatrixDense from pubweb.commands.summarize.genes import SummarizeGenes from pubweb.commands.summarize.genemap import SummarizeGeneMap from pubweb.commands.summarize.colors import SummarizeColors from pubweb.commands.summarize.manifest import SummerizeManifest import importlib importlib.reload(pubweb.singlecell) importlib.reload(pubweb.hdf5) importlib.reload(pubweb.commands.convert.singlecell.anndata) importlib.reload(pubweb.commands.convert.singlecell.cellranger) importlib.reload(pubweb.commands.validate.dimensions) importlib.reload(pubweb.commands.annotate.geneid) importlib.reload(pubweb.commands.annotate.geneset) importlib.reload(pubweb.commands.export) importlib.reload(pubweb.commands.export.lists) importlib.reload(pubweb.commands.export.attributes) importlib.reload(pubweb.commands.export.tables) importlib.reload(pubweb.commands.export.projections) importlib.reload(pubweb.commands.export.spatial) importlib.reload(pubweb.commands.export.matrix_sparse) importlib.reload(pubweb.commands.export.matrix_dense) importlib.reload(pubweb.commands.summarize.genes) importlib.reload(pubweb.commands.summarize.genemap) importlib.reload(pubweb.commands.summarize.colors) importlib.reload(pubweb.commands.summarize.manifest) logging.basicConfig(level='DEBUG') datasetName='lung-upper-airway-h1299' inputFile = '/data/notebooks/input/convert.hdf5' outputFolder = '/data/notebooks/pubweb' species = 'human' overwriteHdf5 = True python_wd = '/opt/pubweb' #dir(pubweb.singlecell) ``` ``` # anndatasparse outputFile = f'{outputFolder}/pubweb.hdf5' if os.path.exists(outputFile) and overwriteHdf5: os.remove(outputFile) hdf5 = Hdf5.load(outputFile, "a") hdf5.uri %time hdf5 | ImportAnndata(inputFile, datasetName) # 345 hdf5.getDatasets() hdf5.h5py['pubweb/lung-upper-airway-h1299/matrix'] %time hdf5 | AnnotateGeneId(species=species) # 1min28s # save hdf5_geneid print(type(hdf5)) hdf5.getDatasetsWithPath('pubweb/lung-upper-airway-h1299') hdf5.getDatasets() %time hdf5 | ExportMatrixDense(outputFolder) # 14.1s %time hdf5 | ExportProjections(outputFolder) # 3min3s %time hdf5 | ExportTables(outputFolder) # 426us %time hdf5 | ExportLists(outputFolder) #480us %time hdf5 | ExportAttributes(outputFolder) # 2min 7 s %time hdf5 | SummarizeColors(outputFolder) # 59.4ms %time hdf5 | SummerizeManifest(outputFolder) # 4.2ms ```
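The cells above chain processing steps with expressions like `hdf5 | ImportAnndata(...)`. That style reads naturally when the command objects implement Python's reflected or-operator. The sketch below is a minimal, hypothetical illustration of the pattern; it is not the pubweb implementation and the class names are made up.

```
class Command:
    """Minimal pipeline command: `target | Command(...)` runs the command
    against the target and returns the target so calls can be repeated."""

    def execute(self, target):
        raise NotImplementedError

    def __ror__(self, target):          # invoked for `target | command`
        self.execute(target)
        return target


class PrintDatasets(Command):
    """Illustrative command that just lists the dataset names on a container."""

    def execute(self, target):
        for name in target.getDatasets():
            print(name)


class FakeHdf5:
    """Stand-in object with the same method name used in this notebook."""

    def getDatasets(self):
        return ["pubweb/example/matrix", "pubweb/example/attributes"]


FakeHdf5() | PrintDatasets()            # prints the two dataset names
```

Returning the target from `__ror__` is also what would allow several commands to be applied in a single chained expression.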
# Best-practices for Cloud-Optimized Geotiffs **Part 2. Multiple COGs** This notebook goes over ways to construct a multidimensional xarray DataArray from many 2D COGS ``` import dask import s3fs import intake import os import xarray as xr import pandas as pd # use the same GDAL environment settings as we did for the single COG case env = dict(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR', AWS_NO_SIGN_REQUEST='YES', GDAL_MAX_RAW_BLOCK_CACHE_SIZE='200000000', GDAL_SWATH_SIZE='200000000', VSI_CURL_CACHE_SIZE='200000000') os.environ.update(env) # set up a connection with credentials and other settings s3 = s3fs.S3FileSystem(anon=True) objects = s3.ls('sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/') images = ['s3://' + obj + '/Gamma0_VV.tif' for obj in objects] print(len(images)) images[:6] #january 2020 scenes ``` ## GDAL VRT A GDAL VRT file is an XML format that can group together many separate files into separate bands. It's common to create such a file with a the GDAL command line tool `gdalbuildvrt`, illustrated below: ``` #step 1) write a file list that points to the data. GDAL requires special prefixes for this /vsis3/ or /vsicurl/ with open('files.txt', 'w') as f: lines = [x.replace('s3://', '/vsis3/') + '\n' for x in images[:6]] f.writelines(lines) %%time # step 2) create a VRT file !gdalbuildvrt stack.vrt -separate -input_file_list files.txt %%time # step 4) open with xarray chunks=dict(band=1, x=2745, y=2745) da = xr.open_rasterio('stack.vrt', chunks=chunks) da # step 5) optionally modify coordinates (e.g. time dimension extracted from file name) da = da.rename({'band':'time'}) da['time'] = [pd.to_datetime(x[60:68]) for x in images[:6]] ``` #### Recap 1. `xr.open_rasterio(stack.vrt)` stores band coordinates as sequential integers (we lose file name and metadata from each individual COG, so it's common to alter the coordinates after opening the dataset) 2. data is tied to a reference to a local file ('stack.vrt'), which can cause problems with distributed computing if you don't have access to the local filesystem ## intake-xarray [intake-xarray](https://github.com/intake/intake-xarray) is a plugin for the intake library. It uses fsspec/s3fs under the hood to facilitate loading data into python objects. the function `intake.open_rasterio()` accepts a list of paths. it returns an intake object with a `to_dask()` function that returns an xarray DataArray ``` %%time # ~ 1s for 6 files # this loads the image ID into xarray's band coordinates. pattern = 's3://sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/{band}/Gamma0_VV.tif' chunks=dict(band=1, x=2745, y=2745) sources = intake.open_rasterio(images[:6], chunks=chunks, path_as_pattern=pattern, concat_dim='band') da = sources.to_dask() da ``` #### recap: * This is a convient way to avoid constructing a VRT and load a bunch of COGs. It works well as long as the COG urls follow a distinct pattern. Metadata is also lost (we have attributes from the first COG, not others) ## Custom You can also just use xarray and dask to construct a larger datacube from many COGS. 
```
%%time
# 4 - 8 s

# Load all the images
chunks=dict(band=1, x=2745, y=2745)
dataArrays = [xr.open_rasterio(url, chunks=chunks) for url in images]

# note use of join='override' b/c we know these COGS have the same coordinates
da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop')
da = da.rename({'band':'time'})
da['time'] = [pd.to_datetime(x[60:68]) for x in images]
da
```

#### recap:
* The cell above is essentially a for-loop that iterates over each COG in sequence: at roughly 50-200 ms per file, 80 files take about 4-16 seconds. The next notebook will look at using Dask to speed things up by opening the files in parallel (a `dask.delayed` version of the same idea is sketched at the end of this section).

## Visualize

Here is an example of interactive visualization again using hvplot. Since we're using full resolution arrays it's key to set the `rasterize=True` keyword argument. That uses the datashader library to pre-render images before sending them to the browser. This is extremely powerful because resolution updates as you zoom in, and you can scrub through the data cube with an interactive slider widget.

```
import hvplot.xarray
da.hvplot.image(rasterize=True, aspect='equal', cmap='gray', clim=(0,0.4))
```
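Following up on the recap above: one way to parallelize the per-file metadata reads is to wrap the open call in `dask.delayed` and compute all of them at once. This is only an illustration of the idea deferred to the next notebook, not its actual contents; it reuses the `images` list and chunk sizes defined earlier.

```
import dask
import xarray as xr
import pandas as pd

chunks = dict(band=1, x=2745, y=2745)

@dask.delayed
def lazy_open(url):
    # each call only reads COG metadata; the pixel data stays lazy
    return xr.open_rasterio(url, chunks=chunks)

# open all files in parallel instead of one after another
dataArrays = dask.compute(*[lazy_open(url) for url in images])

da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop')
da = da.rename({'band': 'time'})
da['time'] = [pd.to_datetime(x[60:68]) for x in images]
da
```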
Cassandra delete script on a multi-domain cluster

```
!pip install mysql-connector==2.1.7
!pip install pandas
!pip install sqlalchemy
# requires additional installation, see https://github.com/PyMySQL/mysqlclient
!pip install mysqlclient
!pip install numpy
!pip install pymysql

import pandas as pd
import numpy as np
import os
import json
import random
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
import time
from pprint import pprint
import psutil
import uuid
from cassandra.query import tuple_factory
from cassandra.query import dict_factory
from cassandra.query import BatchStatement, SimpleStatement
from cassandra.policies import RetryPolicy

# The timing measurements for the per-domain load are stored in these objects.
# We iterate 100 times to obtain averages
# repetitions
repeats = 100

# Output files
resultados_etl_delete = '../Results/Cassandra/CassandraDelete_test_{}.csv'

def save_results_to_csv(results, file):
    # Save the results to csv
    from datetime import datetime
    csv_df = pd.DataFrame(results, columns=['Registros', 'Tiempo', 'CPU', 'Memoria'])
    dia = datetime.now().strftime("%d%m%Y_%H_%M_%S")
    print(file.format(str(dia)))
    csv_df.to_csv(file.format(str(dia)))

from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import WhiteListRoundRobinPolicy, DowngradingConsistencyRetryPolicy
from cassandra.query import tuple_factory
from cassandra import ConsistencyLevel

profile = ExecutionProfile(
    load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']),
    retry_policy=DowngradingConsistencyRetryPolicy(),
    consistency_level=ConsistencyLevel.ALL,
    serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL,
    request_timeout=3600,
    row_factory=tuple_factory
)
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()
print(session.execute("SELECT release_version FROM system.local").one())

session.execute('USE currentaccountkeyspace')

partyid_list = []
partyid_list_deleted = []

result = session.execute("SELECT partyid FROM customerprofilekeyspace.customerprofile LIMIT 10000;")
for partyid in result:
    partyid_list.append(partyid[0])
print(partyid_list[10:15])
```

# Delete test multidomain

```
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from cassandra import concurrent
from statistics import mean

registers = []
batch = BatchStatement(consistency_level=ConsistencyLevel.ALL)
account_id_list = []

# Bulk load with Many
def deletePartyOnCascade():
    SELECT_CURRENT_ACCOUNT_STMT = "SELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = '{}';"
    DELETE_CURRENT_ACCOUNT_STMT = "DELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '{}';"
    DELETE_CUSTOMER_PROFILE_STMT = "DELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = '{}';"
    DELETE_CUSTOMER_PROFILE_ADDRESS_STMT = "DELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = '{}';"
    DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT = "DELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '{}';"
    DELETE_POSITIONKEEPING_SMT = "DELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '{}';"

    session = cluster.connect('customerprofilekeyspace')
    iter = 0; i = 1
    for i in range(0, repeats):
        time_inicial = time.time()
        accounts_to_delete = []
        partyid = random.choice(partyid_list)
        print(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))
        result_ca = session.execute(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))
        for accountid in result_ca:
            accounts_to_delete.append(accountid[0])
            #print("accounts_to_delete:", accountid[0])

        # Delete from customerprofile
        print(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))
        batch.add(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))
        # Delete from customerprofile_address
        print(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))
        batch.add(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))

        # Delete account info
        for accountid in accounts_to_delete:
            # Delete account
            print(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))
            batch.add(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))
            print(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))
            batch.add(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))
            print(DELETE_POSITIONKEEPING_SMT.format(accountid))
            batch.add(DELETE_POSITIONKEEPING_SMT.format(accountid))

        # Cascading delete
        session.execute(batch)
        batch.clear()
        partyid_list.remove(partyid)

        time_final = time.time()
        data_time_collection = round(time_final - time_inicial, 3)
        used_cpu = psutil.cpu_percent()
        mem_used = psutil.virtual_memory().percent
        registers.append((iter, data_time_collection, used_cpu, mem_used))
        print((iter, data_time_collection, used_cpu, mem_used))
        iter += 1
        time_inicial = time.time()
        i = i + 1
    return registers

registers = deletePartyOnCascade()

# Save the Customer Profile results
save_results_to_csv(registers, resultados_etl_delete)

cluster.shutdown()
print('Connection closed')
```
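As a small follow-up (not part of the original script), the CSV written by `save_results_to_csv` can be reloaded to summarize the benchmark; the exact filename below is hypothetical, since it embeds the run timestamp:

```
# Sketch: summarizing one benchmark run; the timestamped filename is a placeholder.
import pandas as pd

run_file = '../Results/Cassandra/CassandraDelete_test_01012024_12_00_00.csv'  # hypothetical timestamp
results_df = pd.read_csv(run_file, index_col=0)

# Mean and 95th-percentile delete latency (seconds), plus average CPU / memory usage
print(results_df['Tiempo'].mean(), results_df['Tiempo'].quantile(0.95))
print(results_df[['CPU', 'Memoria']].mean())
```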
``` %matplotlib inline from mpl_toolkits.mplot3d import Axes3D import scipy.io as io import numpy as np import matplotlib.pyplot as plt from math import ceil from scipy.optimize import curve_fit realization = 1000 import seaborn as sns from matplotlib import cm from array_response import * import itertools mat = io.loadmat('boundary.mat') bound1_para = mat['bound1_para'][0,:] bound2_para = mat['bound2_para'][0,:] bound3_para = mat['bound3_para'][0,:] bound4_1para = mat['bound4_1para'][0,:] bound4_2para = mat['bound4_2para'][0,:] bound4_3para = mat['bound4_3para'][0,:] xlim_4_1 = mat['xlim_4_1'][0,0] xlim_4_2 = mat['xlim_4_2'][0,:] xlim_4_3 = mat['xlim_4_3'][0,0] azi_rot = np.linspace(0,2*np.pi,50) def func_sin(x, c, d): return np.sin(2*np.pi*x*0.312 + c)*0.23 + d test_1 = func_sin(azi_rot, *bound1_para) test_2 = func_sin(azi_rot, *bound2_para) bound3 = np.poly1d(bound3_para) boud4_13 = np.poly1d(bound4_1para) bound4_2 = np.poly1d(bound4_2para) plt.plot(azi_rot,test_1) plt.plot(azi_rot,test_2) plt.plot(azi_rot,bound3(azi_rot)) plt.plot(azi_rot,boud4_13(azi_rot)) plt.plot(azi_rot,bound4_2(azi_rot)) plt.ylim(0,3.14) def check_cate(_azi,_ele): _index = "" if ((_ele - bound3(_azi)) > 0): if (((_azi<xlim_4_1) and ((_ele - boud4_13(_azi))<0)) or ((_azi>xlim_4_2[0]) and (_azi<xlim_4_2[1]) and ((_ele - bound4_2(_azi))<0)) or ((_azi>xlim_4_3) and ((_ele - boud4_13(_azi))<0))): _index = "samecluster" else: _index = "diffclus_samepol" else: if ((_ele - func_sin(_azi, *bound2_para)) > 0): _index = "diffclus_crosspol" else: if ((_ele - func_sin(_azi, *bound1_para)) > 0): _index = "samecluster" else: _index = "diffclus_samepol" return _index ``` ### Parameters declaration Declare parameters needed for channel realization ``` Ns = 1 # number of streams Nc = 6 # number of cluster Nray = 1 # number of rays in each cluster Nt = 64 # number of transmit antennas Nr = 16 # number of receive antennas angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx gamma = np.sqrt((Nt*Nr)/(Nc*Nray)) realization = 1000 # equivalent to number of taking sample count = 0 eps = 0.1 # 20dB isolation sigma = np.sqrt(8/(1+eps**2))*1.37/1.14 # according to the normalization condition of H ``` ### Channel Realization Realize channel H for Dual-Polarized antenna array ``` H_pol = np.zeros((2*Nr,2*Nt,realization),dtype=complex) At = np.zeros((Nt,Nc*Nray,realization),dtype=complex) Ar = np.zeros((Nr,Nc*Nray,realization),dtype=complex) alpha_hh = np.zeros((Nc*Nray,realization),dtype=complex) alpha_hv = np.zeros((Nc*Nray,realization),dtype=complex) alpha_vh = np.zeros((Nc*Nray,realization),dtype=complex) alpha_vv = np.zeros((Nc*Nray,realization),dtype=complex) AoD = np.zeros((2,Nc*Nray),dtype=complex) AoA = np.zeros((2,Nc*Nray),dtype=complex) H = np.zeros((2*Nr,2*Nt,realization),dtype=complex) azi_rot = np.random.normal(1.7,0.3,realization) ele_rot = np.random.normal(2.3,0.3,realization) # Why PI/2 ?? # azi_rot = np.random.uniform(0,2*np.pi,realization) # ele_rot = np.random.uniform(0,np.pi,realization) # Why PI/2 ?? 
R = np.array([[np.cos(ele_rot)*np.cos(azi_rot),np.sin(ele_rot)],[-np.sin(ele_rot)*np.cos(azi_rot),np.cos(ele_rot)]]) # rotation matrix for reali in range(realization): for c in range(1,Nc+1): AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray)) AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray)) AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray)) AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray)) for j in range(Nc*Nray): At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt)/np.sqrt(2) # UPA array response Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)/np.sqrt(2) var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2)) alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2)) alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2)) alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2)) alpha = np.vstack((np.hstack((alpha_hh[j,reali],alpha_hv[j,reali])),np.hstack((alpha_vh[j,reali],alpha_vv[j,reali])))) H_pol[:,:,reali] = H_pol[:,:,reali] + np.kron(alpha,Ar[:,[j],reali]@At[:,[j],reali].conj().T) H_pol[:,:,reali] = 2*gamma* H_pol[:,:,reali] H[:,:,reali] = (np.kron(R[:,:,reali],np.eye(Nr)))@H_pol[:,:,reali] H[:,:,reali] = np.sqrt(4/3)* H[:,:,reali] ``` ### Check normalized condition ``` channel_fro_2 = np.zeros(realization) for reali in range(realization): channel_fro_2[reali] = np.linalg.norm(H[:,:,reali],'fro') print("4*Nt*Nr =", 4*Nt*Nr , " Frobenius norm =", np.mean(channel_fro_2**2)) cluster = np.arange(Nc) print(cluster) c = list(itertools.combinations(cluster, 2)) num_path = (2*Nc-1)*Nc path_combi = np.zeros((num_path,4),dtype=int) print(path_combi.shape) path_combi[0:Nc,:]=np.arange(Nc).reshape(Nc,1).repeat(4,axis=1) count = 0 for i in range(int(Nc*(Nc-1)/2)): path_combi[Nc+4*i,:] = np.array([c[count][0],c[count][0],c[count][1],c[count][1]]) path_combi[Nc+4*i+1,:] = np.array([c[count][1],c[count][1],c[count][0],c[count][0]]) path_combi[Nc+4*i+2,:] = np.array([c[count][0],c[count][1],c[count][1],c[count][0]]) path_combi[Nc+4*i+3,:] = np.array([c[count][1],c[count][0],c[count][0],c[count][1]]) count = count+1 cross_index = [] samepolar_index = [] count = Nc-1 while (count<num_path-4): cross_index.extend([count+3,count+4]) samepolar_index.extend([count+1,count+2]) count = count + 4 cross_index = np.array(cross_index) samepolar_index = np.array(samepolar_index) sameclus_index = np.arange(0,Nc) print(cross_index) print(samepolar_index) print(sameclus_index) # print(path_combi) path_gain = np.zeros((num_path,realization)) # 2 to save the position and maximum value for reali in range(realization): for combi in range(num_path): path_gain[combi,reali] =\ (np.abs\ 
((np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.sin(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,1])+\ (np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.sin(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,1])+\ (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.cos(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,3])+\ (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.cos(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,3]) ))**2 print(np.max(path_gain[0:Nc,2])) print(path_gain[0:Nc,2]) print(path_gain[samepolar_index,2]) print(np.max(path_gain[samepolar_index,2])) ``` __Check maximum gain from combination of path in each realization__ ``` index = np.zeros(realization,dtype=int) for reali in range(realization): index[reali] = np.argmax(path_gain[:,reali]) ``` __Same Cluster__ ``` index_sameclus = np.zeros(realization,dtype=int) for reali in range(realization): index_sameclus[reali] = np.argmax(path_gain[0:Nc,reali]) gain_sameclus = np.zeros(realization,dtype=float) for reali in range(realization): gain_sameclus[reali] = path_gain[index_sameclus[reali],reali] ``` __Chosen Category before check__ ``` choosen_cate = ["" for x in range(realization)] index_checkcate = np.zeros(realization,dtype=int) cate = "" temp = 0 for reali in range(realization): cate = check_cate(azi_rot[reali],ele_rot[reali]) if (cate == "samecluster"): index_checkcate[reali] = np.argmax(path_gain[0:Nc,reali]) if (cate == "diffclus_samepol"): temp = np.argmax(path_gain[samepolar_index,reali]) index_checkcate[reali] = int(temp+(np.floor(temp/2))*2+Nc) # index_checkcate[reali] = np.argmax(path_gain[samepolar_index,reali]) if (cate == "diffclus_crosspol"): # index_checkcate[reali] = np.argmax(path_gain[cross_index,reali]) temp = np.argmax(path_gain[cross_index,reali]) index_checkcate[reali] = int(temp+(np.floor(temp/2)+1)*2+Nc) choosen_cate[reali] = cate temp = 0 ``` ### Plot Spectral Efficiency ``` SNR_dB = np.arange(-35,10,5) SNR = 10**(SNR_dB/10) smax = SNR.shape[0] R_cross = np.zeros([smax, realization],dtype=complex) # R_steer = np.zeros([smax, realization],dtype=complex) R_samecl = np.zeros([smax, realization],dtype=complex) R_checkcate = np.zeros([smax, realization],dtype=complex) for reali in range(realization): _chosen_combi_path = path_combi[index[reali]] _chosen_checkcate_path = path_combi[index_checkcate[reali]] # _chosen_checkcate_path = path_combi[:,reali] _chosen_sameclus_path = path_combi[index_sameclus[reali]] W_cross = np.vstack((Ar[:,[_chosen_combi_path[1]],reali],Ar[:,[_chosen_combi_path[3]],reali])) F_cross = np.vstack((At[:,[_chosen_combi_path[0]],reali],At[:,[_chosen_combi_path[2]],reali])) W_checkcate = np.vstack((Ar[:,[_chosen_checkcate_path[1]],reali],Ar[:,[_chosen_checkcate_path[3]],reali])) F_checkcate = np.vstack((At[:,[_chosen_checkcate_path[0]],reali],At[:,[_chosen_checkcate_path[2]],reali])) # W_steer = np.vstack((Ar[:,[_chosen_steer_path[0]],reali],Ar[:,[_chosen_steer_path[1]],reali])) # F_steer = np.vstack((At[:,[_chosen_steer_path[0]],reali],At[:,[_chosen_steer_path[1]],reali])) W_samecl = np.vstack((Ar[:,[_chosen_sameclus_path[1]],reali],Ar[:,[_chosen_sameclus_path[3]],reali])) F_samecl = 
np.vstack((At[:,[_chosen_sameclus_path[0]],reali],At[:,[_chosen_sameclus_path[2]],reali])) for s in range(smax): R_cross[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_cross)@H[:,:,reali]@F_cross@F_cross.conj().T@H[:,:,reali].conj().T@W_cross)) R_checkcate[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_checkcate)@H[:,:,reali]@F_checkcate@F_checkcate.conj().T@H[:,:,reali].conj().T@W_checkcate)) R_samecl[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_samecl)@H[:,:,reali]@F_samecl@F_samecl.conj().T@H[:,:,reali].conj().T@W_samecl)) x = np.linalg.norm(F_cross,'fro') print("Ns", Ns , " Frobenius norm FRF*FBB=", x**2) plt.plot(SNR_dB, (np.sum(R_cross,axis=1).real)/realization, label='joint polarization beam steering') plt.plot(SNR_dB, (np.sum(R_checkcate,axis=1).real)/realization, label='one category beam steering') plt.plot(SNR_dB, (np.sum(R_samecl,axis=1).real)/realization, label='same ray beam steering') plt.legend(loc='upper left',prop={'size': 9}) plt.xlabel('SNR(dB)',fontsize=11) plt.ylabel('Spectral Efficiency (bits/s/Hz)',fontsize=11) plt.tick_params(axis='both', which='major', labelsize=9) plt.ylim(0,11) plt.grid() plt.show() ```
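For reference, the per-realization quantity evaluated in the loop above (and averaged over realizations in the plot) is the spectral efficiency; written out to match the code, with $W^{+}$ the Moore-Penrose pseudo-inverse of the analog combiner $W$ and $F$ the analog precoder built from the selected array response vectors:

$$ R = \log_2 \left| \, I_{N_s} + \frac{\mathrm{SNR}}{N_s}\, W^{+} H F F^{H} H^{H} W \, \right| $$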
<h1 align="center">Introduction to SimpleITKv4 Registration</h1> <table width="100%"> <tr style="background-color: red;"><td><font color="white">SimpleITK conventions:</font></td></tr> <tr><td> <ul> <li>Dimensionality and pixel type of registered images is required to be the same (2D/2D or 3D/3D).</li> <li>Supported pixel types are sitkFloat32 and sitkFloat64 (use the SimpleITK <a href="http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#af8c9d7cc96a299a05890e9c3db911885">Cast()</a> function if your image's pixel type is something else). </ul> </td></tr> </table> ## Registration Components <img src="ITKv4RegistrationComponentsDiagram.svg" style="width:700px"/><br><br> There are many options for creating an instance of the registration framework, all of which are configured in SimpleITK via methods of the <a href="http://www.itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ImageRegistrationMethod.html">ImageRegistrationMethod</a> class. This class encapsulates many of the components available in ITK for constructing a registration instance. Currently, the available choices from the following groups of ITK components are: ### Optimizers The SimpleITK registration framework supports several optimizer types via the SetOptimizerAsX() methods, these include: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ExhaustiveOptimizerv4.html">Exhaustive</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1AmoebaOptimizerv4.html">Nelder-Mead downhill simplex</a>, a.k.a. Amoeba. </li> <li> <a href="https://itk.org/Doxygen/html/classitk_1_1PowellOptimizerv4.html">Powell optimizer</a>. </li> <li> <a href="https://itk.org/Doxygen/html/classitk_1_1OnePlusOneEvolutionaryOptimizerv4.html">1+1 evolutionary optimizer</a>. </li> <li> Variations on gradient descent: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentOptimizerv4Template.html">GradientDescent</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentLineSearchOptimizerv4Template.html">GradientDescentLineSearch</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1RegularStepGradientDescentOptimizerv4.html">RegularStepGradientDescent</a> </li> </ul> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ConjugateGradientLineSearchOptimizerv4Template.html">ConjugateGradientLineSearch</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1LBFGSBOptimizerv4.html">L-BFGS-B</a> (Limited memory Broyden, Fletcher,Goldfarb,Shannon-Bound Constrained) - supports the use of simple constraints ($l\leq x \leq u$) </li> </ul> ### Similarity metrics The SimpleITK registration framework supports several metric types via the SetMetricAsX() methods, these include: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1MeanSquaresImageToImageMetricv4.html">MeanSquares</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1DemonsImageToImageMetricv4.html">Demons</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1CorrelationImageToImageMetricv4.html">Correlation</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ANTSNeighborhoodCorrelationImageToImageMetricv4.html">ANTSNeighborhoodCorrelation</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1JointHistogramMutualInformationImageToImageMetricv4.html">JointHistogramMutualInformation</a> </li> <li> <a 
href="http://www.itk.org/Doxygen/html/classitk_1_1MattesMutualInformationImageToImageMetricv4.html">MattesMutualInformation</a> </li> </ul> ### Interpolators The SimpleITK registration framework supports several interpolators via the SetInterpolator() method, which receives one of the <a href="http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#a7cb1ef8bd02c669c02ea2f9f5aa374e5">following enumerations</a>: <ul> <li> sitkNearestNeighbor </li> <li> sitkLinear </li> <li> sitkBSpline </li> <li> sitkGaussian </li> <li> sitkHammingWindowedSinc </li> <li> sitkCosineWindowedSinc </li> <li> sitkWelchWindowedSinc </li> <li> sitkLanczosWindowedSinc </li> <li> sitkBlackmanWindowedSinc </li> </ul> ## Data - Retrospective Image Registration Evaluation We will be using part of the training data from the Retrospective Image Registration Evaluation (<a href="http://www.insight-journal.org/rire/">RIRE</a>) project. ``` import SimpleITK as sitk # Utility method that either downloads data from the MIDAS repository or # if already downloaded returns the file name for reading from disk (cached data). %run update_path_to_download_script from downloaddata import fetch_data as fdata # Always write output to a separate directory, we don't want to pollute the source directory. import os OUTPUT_DIR = 'Output' ``` ## Utility functions A number of utility callback functions for image display and for plotting the similarity metric during registration. ``` import matplotlib.pyplot as plt %matplotlib inline from ipywidgets import interact, fixed from IPython.display import clear_output # Callback invoked by the interact IPython method for scrolling through the image stacks of # the two images (moving and fixed). def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa): # Create a figure with two subplots and the specified size. plt.subplots(1,2,figsize=(10,8)) # Draw the fixed image in the first subplot. plt.subplot(1,2,1) plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r); plt.title('fixed image') plt.axis('off') # Draw the moving image in the second subplot. plt.subplot(1,2,2) plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r); plt.title('moving image') plt.axis('off') plt.show() # Callback invoked by the IPython interact method for scrolling and modifying the alpha blending # of an image stack of two images that occupy the same physical space. def display_images_with_alpha(image_z, alpha, fixed, moving): img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z] plt.imshow(sitk.GetArrayViewFromImage(img),cmap=plt.cm.Greys_r); plt.axis('off') plt.show() # Callback invoked when the StartEvent happens, sets up our new data. def start_plot(): global metric_values, multires_iterations metric_values = [] multires_iterations = [] # Callback invoked when the EndEvent happens, do cleanup of data and figure. def end_plot(): global metric_values, multires_iterations del metric_values del multires_iterations # Close figure, we don't want to get a duplicate of the plot latter on. plt.close() # Callback invoked when the IterationEvent happens, update our data and display new figure. 
def plot_values(registration_method): global metric_values, multires_iterations metric_values.append(registration_method.GetMetricValue()) # Clear the output area (wait=True, to reduce flickering), and plot current data clear_output(wait=True) # Plot the similarity metric values plt.plot(metric_values, 'r') plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*') plt.xlabel('Iteration Number',fontsize=12) plt.ylabel('Metric Value',fontsize=12) plt.show() # Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the # metric_values list. def update_multires_iterations(): global metric_values, multires_iterations multires_iterations.append(len(metric_values)) ``` ## Read images We first read the images, casting the pixel type to that required for registration (Float32 or Float64) and look at them. ``` fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32) moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32) interact(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayViewFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayViewFromImage(moving_image))); ``` ## Initial Alignment Use the CenteredTransformInitializer to align the centers of the two volumes and set the center of rotation to the center of the fixed image. ``` initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.Euler3DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY) moving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID()) interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled)); ``` ## Registration The specific registration task at hand estimates a 3D rigid transformation between images of different modalities. There are multiple components from each group (optimizers, similarity metrics, interpolators) that are appropriate for the task. Note that each component selection requires setting some parameter values. We have made the following choices: <ul> <li>Similarity metric, mutual information (Mattes MI): <ul> <li>Number of histogram bins, 50.</li> <li>Sampling strategy, random.</li> <li>Sampling percentage, 1%.</li> </ul> </li> <li>Interpolator, sitkLinear.</li> <li>Optimizer, gradient descent: <ul> <li>Learning rate, step size along traversal direction in parameter space, 1.0 .</li> <li>Number of iterations, maximal number of iterations, 100.</li> <li>Convergence minimum value, value used for convergence checking in conjunction with the energy profile of the similarity metric that is estimated in the given window size, 1e-6.</li> <li>Convergence window size, number of values of the similarity metric which are used to estimate the energy profile of the similarity metric, 10.</li> </ul> </li> </ul> Perform registration using the settings given above, and take advantage of the built in multi-resolution framework, use a three tier pyramid. In this example we plot the similarity metric's value during registration. Note that the change of scales in the multi-resolution framework is readily visible. ``` registration_method = sitk.ImageRegistrationMethod() # Similarity metric settings. 
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) # Optimizer settings. registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10) registration_method.SetOptimizerScalesFromPhysicalShift() # Setup for the multi-resolution framework. registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1]) registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0]) registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() # Don't optimize in-place, we would possibly like to run this cell multiple times. registration_method.SetInitialTransform(initial_transform, inPlace=False) # Connect all of the observers so that we can perform plotting during registration. registration_method.AddCommand(sitk.sitkStartEvent, start_plot) registration_method.AddCommand(sitk.sitkEndEvent, end_plot) registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations) registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method)) final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)) ``` ## Post registration analysis Query the registration method to see the metric value and the reason the optimization terminated. The metric value allows us to compare multiple registration runs as there is a probabilistic aspect to our registration, we are using random sampling to estimate the similarity metric. Always remember to query why the optimizer terminated. This will help you understand whether termination is too early, either due to thresholds being too tight, early termination due to small number of iterations - numberOfIterations, or too loose, early termination due to large value for minimal change in similarity measure - convergenceMinimumValue) ``` print('Final metric value: {0}'.format(registration_method.GetMetricValue())) print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription())) ``` Now visually inspect the results. ``` moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID()) interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled)); ``` If we are satisfied with the results, save them to file. ``` sitk.WriteImage(moving_resampled, os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha')) sitk.WriteTransform(final_transform, os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm')) ```
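As an optional sanity check (not in the original notebook), the files written above can be read back to confirm they round-trip from disk; the paths match the `WriteImage`/`WriteTransform` calls:

```
# Optional: reload the saved resampled image and transform.
import os
import SimpleITK as sitk

OUTPUT_DIR = 'Output'

reloaded_transform = sitk.ReadTransform(os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm'))
reloaded_image = sitk.ReadImage(os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha'))

print(reloaded_transform)
print(reloaded_image.GetSize(), reloaded_image.GetPixelIDTypeAsString())
```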
# Fit $k_{ij}$ and $r_c^{ABij}$ interactions parameter of Ethanol and CPME --- Let's call $\underline{\xi}$ the optimization parameters of a mixture. In order to optimize them, you need to provide experimental phase equilibria data. This can include VLE, LLE and VLLE data. The objective function used for each equilibria type are shown below: ### Vapor-Liquid Equilibria Data $$ OF_{VLE}(\underline{\xi}) = w_y \sum_{j=1}^{Np} \left[ \sum_{i=1}^c (y_{i,j}^{cal} - y_{i,j}^{exp})^2 \right] + w_P \sum_{j=1}^{Np} \left[ \frac{P_{j}^{cal} - P_{j}^{exp}}{P_{j}^{exp}} \right]^2$$ Where, $Np$ is the number of experimental data points, $y_i$ is the vapor molar fraction of the component $i$ and $P$ is the bubble pressure. The superscripts $cal$ and $exp$ refers to the computed and experimental values, respectively. Finally, $w_y$ is the weight for the vapor composition error and $w_P$ is the weight for the bubble pressure error. ### Liquid-Liquid Equilibria Data $$ OF_{LLE}(\underline{\xi}) = w_x \sum_{j=1}^{Np} \sum_{i=1}^c \left[x_{i,j} - x_{i,j}^{exp}\right]^2 + w_w \sum_{j=1}^{Np} \sum_{i=1}^c \left[ w_{i,j} - w_{i,j}^{exp} \right]^2 $$ Where, $Np$ is the number of experimental data points, $x_i$ and $w_i$ are the molar fraction of the component $i$ on the liquids phases. The superscripts $cal$ and $exp$ refers to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error. ### Vapor-Liquid-Liquid Equilibria Data $$ OF_{VLLE}(\underline{\xi}) = w_x \sum_{j=1}^{Np} \sum_{i=1}^c \left[x_{i,j}^{cal} - x_{i,j}^{exp}\right]^2 + w_w \sum_{j=1}^{Np} \sum_{i=1}^c \left[w_{i,j}^{cal} - w_{i,j}^{exp}\right]^2 + w_y \sum_{j=1}^{Np} \sum_{i=1}^c \left[y_{i,j}^{cal} - y_{i,j}^{exp}\right]^2 + w_P \sum_{j=1}^{Np} \left[ \frac{P_{j}^{cal}}{P_{j}^{exp}} - 1\right]^2 $$ Where, $Np$ is the number of experimental data points, $y_i$, $x_i$ and $w_i$ are the molar fraction of the component $i$ on the vapor and liquids phases, respectively. The superscripts $cal$ and $exp$ refers to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error, $w_y$ is the weight for vapor composition error and $w_P$ is weight for three phase equilibria pressure error. If there is data for more than one equilibria type, the errors can be added accordinly. So the objective funcion becomes: $$ OF(\underline{\xi}) =OF_{ELV}(\underline{\xi}) + OF_{ELL}(\underline{\xi}) + OF_{ELLV}(\underline{\xi})$$ --- This notebook has te purpose of showing how to optimize the $k_{ij}$ and $r_c^{ABij}$ for a mixture with induced association. For these mixtures the interactions parameters are shown below: $$ \epsilon_{ij} = (1-k_{ij}) \frac{\sqrt{\sigma_i^3 \sigma_j^3}}{\sigma_{ij}^3} \sqrt{\epsilon_i \epsilon_j} ;\quad\epsilon_{ij}^{AB} = \frac{\epsilon^{AB} (self-associating)}{2} ;\quad r^{ABij}_c (fitted)$$ First, it's needed to import the necessary modules ``` import numpy as np from sgtpy import component, mixture, saftvrmie from sgtpy.fit import fit_cross ``` Now that the functions are available it is necessary to create the mixture. 
``` ethanol = component('ethanol2C', ms = 1.7728, sigma = 3.5592 , eps = 224.50, lambda_r = 11.319, lambda_a = 6., eAB = 3018.05, rcAB = 0.3547, rdAB = 0.4, sites = [1,0,1], cii= 5.3141080872882285e-20) cpme = component('cpme', ms = 2.32521144, sigma = 4.13606074, eps = 343.91193798, lambda_r = 14.15484877, lambda_a = 6.0, npol = 1.91990385,mupol = 1.27, sites =[0,0,1], cii = 3.5213681817448466e-19) mix = mixture(ethanol, cpme) ``` Now the experimental equilibria data is read and a tuple is created. It includes the experimental liquid composition, vapor composition, equilibrium temperature and pressure. This is done with ```datavle = (Xexp, Yexp, Texp, Pexp)``` ``` # Experimental data obtained from Mejia, Cartes, J. Chem. Eng. Data, vol. 64, no. 5, pp. 1970–1977, 2019 # Experimental temperature saturation in K Texp = np.array([355.77, 346.42, 342.82, 340.41, 338.95, 337.78, 336.95, 336.29, 335.72, 335.3 , 334.92, 334.61, 334.35, 334.09, 333.92, 333.79, 333.72, 333.72, 333.81, 334.06, 334.58]) # Experimental pressure in Pa Pexp = np.array([50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000.]) # Experimental liquid composition Xexp = np.array([[0. , 0.065, 0.11 , 0.161, 0.203, 0.253, 0.301, 0.351, 0.402, 0.446, 0.497, 0.541, 0.588, 0.643, 0.689, 0.743, 0.785, 0.837, 0.893, 0.947, 1. ], [1. , 0.935, 0.89 , 0.839, 0.797, 0.747, 0.699, 0.649, 0.598, 0.554, 0.503, 0.459, 0.412, 0.357, 0.311, 0.257, 0.215, 0.163, 0.107, 0.053, 0. ]]) # Experimental vapor composition Yexp = np.array([[0. , 0.302, 0.411, 0.48 , 0.527, 0.567, 0.592, 0.614, 0.642, 0.657, 0.678, 0.694, 0.71 , 0.737, 0.753, 0.781, 0.801, 0.837, 0.883, 0.929, 1. ], [1. , 0.698, 0.589, 0.52 , 0.473, 0.433, 0.408, 0.386, 0.358, 0.343, 0.322, 0.306, 0.29 , 0.263, 0.247, 0.219, 0.199, 0.163, 0.117, 0.071, 0. ]]) datavle = (Xexp, Yexp, Texp, Pexp) ``` The function ```fit_cross``` optimize the $k_{ij}$ correction and $r_c^{ABij}$ distance. An initial guess is needed, as well as the mixture object, the index of the self-associating component and the equilibria data. Optionally, the ```minimize_options``` option allows modifying the minimizer default parameters. ``` #initial guesses for kij and rcij x0 = [0.01015194, 2.23153033] fit_cross(x0, mix, assoc=0, datavle=datavle) ``` If the mixture exhibits other equilibria types you can supply this experimental data to the ``datalle`` or ``datavlle`` parameters. - ``datalle``: (Xexp, Wexp, Texp, Pexp) - ``datavlle``: (Xexp, Wexp, Yexp, Texp, Pexp) You can specify the weights for each objetive function through the following parameters: - ``weights_vle``: list or array_like, weights for the VLE objective function. - weights_vle[0] = weight for Y composition error, default to 1. - weights_vle[1] = weight for bubble pressure error, default to 1. - ``weights_lle``: list or array_like, weights for the LLE objective function. - weights_lle[0] = weight for X (liquid 1) composition error, default to 1. - weights_lle[1] = weight for W (liquid 2) composition error, default to 1. - ``weights_vlle``: list or array_like, weights for the VLLE objective function. - weights_vlle[0] = weight for X (liquid 1) composition error, default to 1. - weights_vlle[1] = weight for W (liquid 2) composition error, default to 1. - weights_vlle[2] = weight for Y (vapor) composition error, default to 1. - weights_vlle[3] = weight for equilibrium pressure error, default to 1. 
Additionally, you can pass options to SciPy's ``minimize`` function through the ``minimize_options`` parameter. For more information, run: ```fit_cross?```
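To make the weighting concrete, here is a hedged sketch (not from the original notebook) of the same fit with the bubble-pressure error weighted twice as heavily as the vapor-composition error; the weight values themselves are illustrative assumptions:

```
# Sketch: same fit as above, but with explicit VLE weights.
# weights_vle[0] -> weight on vapor composition error, weights_vle[1] -> weight on bubble pressure error.
x0 = [0.01015194, 2.23153033]
fit_cross(x0, mix, assoc=0, datavle=datavle, weights_vle=[1.0, 2.0])
```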
<table width=60%> <tr style="background-color: white;"> <td><img src='https://www.creativedestructionlab.com/wp-content/uploads/2018/05/xanadu.jpg'></td>></td> </tr> </table> --- <img src='https://raw.githubusercontent.com/XanaduAI/strawberryfields/master/doc/_static/strawberry-fields-text.png'> --- <br> <center> <h1> Gaussian boson sampling tutorial </h1></center> To get a feel for how Strawberry Fields works, let's try coding a quantum program, Gaussian boson sampling. ## Background information: Gaussian states A Gaussian state is one that can be described by a [Gaussian function](https://en.wikipedia.org/wiki/Gaussian_function) in the phase space. For example, for a single mode Gaussian state, squeezed in the $x$ quadrature by squeezing operator $S(r)$, could be described by the following [Wigner quasiprobability distribution](Wigner quasiprobability distribution): $$W(x,p) = \frac{2}{\pi}e^{-2\sigma^2(x-\bar{x})^2 - 2(p-\bar{p})^2/\sigma^2}$$ where $\sigma$ represents the **squeezing**, and $\bar{x}$ and $\bar{p}$ are the mean **displacement**, respectively. For multimode states containing $N$ modes, this can be generalised; Gaussian states are uniquely defined by a [multivariate Gaussian function](https://en.wikipedia.org/wiki/Multivariate_normal_distribution), defined in terms of the **vector of means** ${\mu}$ and a **covariance matrix** $\sigma$. ### The position and momentum basis For example, consider a single mode in the position and momentum quadrature basis (the default for Strawberry Fields). Assuming a Gaussian state with displacement $\alpha = \bar{x}+i\bar{p}$ and squeezing $\xi = r e^{i\phi}$ in the phase space, it has a vector of means and a covariance matrix given by: $$ \mu = (\bar{x},\bar{p}),~~~~~~\sigma = SS\dagger=R(\phi/2)\begin{bmatrix}e^{-2r} & 0 \\0 & e^{2r} \\\end{bmatrix}R(\phi/2)^T$$ where $S$ is the squeezing operator, and $R(\phi)$ is the standard two-dimensional rotation matrix. For multiple modes, in Strawberry Fields we use the convention $$ \mu = (\bar{x}_1,\bar{x}_2,\dots,\bar{x}_N,\bar{p}_1,\bar{p}_2,\dots,\bar{p}_N)$$ and therefore, considering $\phi=0$ for convenience, the multimode covariance matrix is simply $$\sigma = \text{diag}(e^{-2r_1},\dots,e^{-2r_N},e^{2r_1},\dots,e^{2r_N})\in\mathbb{C}^{2N\times 2N}$$ If a continuous-variable state *cannot* be represented in the above form (for example, a single photon Fock state or a cat state), then it is non-Gaussian. ### The annihilation and creation operator basis If we are instead working in the creation and annihilation operator basis, we can use the transformation of the single mode squeezing operator $$ S(\xi) \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right] = \left[\begin{matrix}\cosh(r)&-e^{i\phi}\sinh(r)\\-e^{-i\phi}\sinh(r)&\cosh(r)\end{matrix}\right] \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right]$$ resulting in $$\sigma = SS^\dagger = \left[\begin{matrix}\cosh(2r)&-e^{i\phi}\sinh(2r)\\-e^{-i\phi}\sinh(2r)&\cosh(2r)\end{matrix}\right]$$ For multiple Gaussian states with non-zero squeezing, the covariance matrix in this basis simply generalises to $$\sigma = \text{diag}(S_1S_1^\dagger,\dots,S_NS_N^\dagger)\in\mathbb{C}^{2N\times 2N}$$ ## Introduction to Gaussian boson sampling <div class="alert alert-info"> “If you need to wait exponential time for your single photon sources to emit simultaneously, then there would seem to be no advantage over classical computation. This is the reason why so far, boson sampling has only been demonstrated with 3-4 photons. 
When faced with these problems, until recently, all we could do was shrug our shoulders.” - [Scott Aaronson](https://www.scottaaronson.com/blog/?p=1579) </div> While [boson sampling](https://en.wikipedia.org/wiki/Boson_sampling) allows the experimental implementation of a quantum sampling problem that it countably hard classically, one of the main issues it has in experimental setups is one of **scalability**, due to its dependence on an array of simultaneously emitting single photon sources. Currently, most physical implementations of boson sampling make use of a process known as [Spontaneous Parametric Down-Conversion](http://en.wikipedia.org/wiki/Spontaneous_parametric_down-conversion) to generate the single photon source inputs. Unfortunately, this method is non-deterministic - as the number of modes in the apparatus increases, the average time required until every photon source emits a simultaneous photon increases *exponentially*. In order to simulate a *deterministic* single photon source array, several variations on boson sampling have been proposed; the most well known being scattershot boson sampling ([Lund, 2014](https://link.aps.org/doi/10.1103/PhysRevLett.113.100502)). However, a recent boson sampling variation by [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) negates the need for single photon Fock states altogether, by showing that **incident Gaussian states** - in this case, single mode squeezed states - can produce problems in the same computational complexity class as boson sampling. Even more significantly, this negates the scalability problem with single photon sources, as single mode squeezed states can be easily simultaneously generated experimentally. Aside from changing the input states from single photon Fock states to Gaussian states, the Gaussian boson sampling scheme appears quite similar to that of boson sampling: 1. $N$ single mode squeezed states $\left|{\xi_i}\right\rangle$, with squeezing parameters $\xi_i=r_ie^{i\phi_i}$, enter an $N$ mode linear interferometer with unitary $U$. <br> 2. The output of the interferometer is denoted $\left|{\psi'}\right\rangle$. Each output mode is then measured in the Fock basis, $\bigotimes_i n_i\left|{n_i}\middle\rangle\middle\langle{n_i}\right|$. Without loss of generality, we can absorb the squeezing parameter $\phi$ into the interferometer, and set $\phi=0$ for convenience. The covariance matrix **in the creation and annihilation operator basis** at the output of the interferometer is then given by: $$\sigma_{out} = \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right]\sigma_{in} \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right]$$ Using phase space methods, [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) showed that the probability of measuring a Fock state is given by $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(U\bigoplus_i\tanh(r_i)U^T)]_{st}\right|^2}{n_1!n_2!\cdots n_N!\sqrt{|\sigma_{out}+I/2|}},$$ i.e. the sampled single photon probability distribution is proportional to the **Hafnian** of a submatrix of $U\bigoplus_i\tanh(r_i)U^T$, dependent upon the output covariance matrix. 
<div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**The Hafnian**</p> The Hafnian of a matrix is defined by <br><br> $$\text{Haf}(A) = \frac{1}{n!2^n}\sum_{\sigma=S_{2N}}\prod_{i=1}^N A_{\sigma(2i-1)\sigma(2i)}$$ <br> $S_{2N}$ is the set of all permutations of $2N$ elements. In graph theory, the Hafnian calculates the number of perfect <a href="https://en.wikipedia.org/wiki/Matching_(graph_theory)">matchings</a> in an **arbitrary graph** with adjacency matrix $A$. <br> Compare this to the permanent, which calculates the number of perfect matchings on a *bipartite* graph - the Hafnian turns out to be a generalisation of the permanent, with the relationship $$\begin{align} \text{Per(A)} = \text{Haf}\left(\left[\begin{matrix} 0&A\\ A^T&0 \end{matrix}\right]\right) \end{align}$$ As any algorithm that could calculate (or even approximate) the Hafnian could also calculate the permanent - a #P problem - it follows that calculating or approximating the Hafnian must also be a classically hard problem. </div> ### Equally squeezed input states In the case where all the input states are squeezed equally with squeezing factor $\xi=r$ (i.e. so $\phi=0$), we can simplify the denominator into a much nicer form. It can be easily seen that, due to the unitarity of $U$, $$\left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}UU^\dagger&0\\0&U^*U^T\end{matrix} \right] =I$$ Thus, we have $$\begin{align} \sigma_{out} +\frac{1}{2}I &= \sigma_{out} + \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \frac{1}{2} \left(\sigma_{in}+I\right) \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] \end{align}$$ where we have subtituted in the expression for $\sigma_{out}$. Taking the determinants of both sides, the two block diagonal matrices containing $U$ are unitary, and thus have determinant 1, resulting in $$\left|\sigma_{out} +\frac{1}{2}I\right| =\left|\frac{1}{2}\left(\sigma_{in}+I\right)\right|=\left|\frac{1}{2}\left(SS^\dagger+I\right)\right| $$ By expanding out the right hand side, and using various trig identities, it is easy to see that this simply reduces to $\cosh^{2N}(r)$ where $N$ is the number of modes; thus the Gaussian boson sampling problem in the case of equally squeezed input modes reduces to $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)},$$ ## The Gaussian boson sampling circuit The multimode linear interferometer can be decomposed into two-mode beamsplitters (`BSgate`) and single-mode phase shifters (`Rgate`) (<a href="https://doi.org/10.1103/physrevlett.73.58">Reck, 1994</a>), allowing for an almost trivial translation into a continuous-variable quantum circuit. For example, in the case of a 4 mode interferometer, with arbitrary $4\times 4$ unitary $U$, the continuous-variable quantum circuit for Gaussian boson sampling is given by <img src="https://s3.amazonaws.com/xanadu-img/gaussian_boson_sampling.svg" width=70%/> In the above, * the single mode squeeze states all apply identical squeezing $\xi=r$, * the detectors perform Fock state measurements (i.e. 
measuring the photon number of each mode), * the parameters of the beamsplitters and the rotation gates determines the unitary $U$. For $N$ input modes, we must have a minimum of $N$ columns in the beamsplitter array ([Clements, 2016](https://arxiv.org/abs/1603.08788)). ## Simulating boson sampling in Strawberry Fields ``` import strawberryfields as sf from strawberryfields.ops import * from strawberryfields.utils import random_interferometer ``` Strawberry Fields makes this easy; there is an `Interferometer` quantum operation, and a utility function that allows us to generate the matrix representing a random interferometer. ``` U = random_interferometer(4) ``` The lack of Fock states and non-linear operations means we can use the Gaussian backend to simulate Gaussian boson sampling. In this example program, we are using input states with squeezing parameter $\xi=1$, and the randomly chosen interferometer generated above. ``` eng = sf.Engine('gaussian') gbs = sf.Program(4) with gbs.context as q: # prepare the input squeezed states S = Sgate(1) All(S) | q # interferometer Interferometer(U) | q MeasureFock() | q results = eng.run(gbs, run_options={"shots":10}) state = results.state # Note: Running this cell will generate a warning. This is just the Gaussian backend of Strawberryfields telling us # that, although it can carry out the MeasureFock operation, it will not update the state of the circuit after doing so, # since the resulting state would be non-Gaussian. For this notebook, the warning can be safely ignored. ``` We can see the decomposed beamsplitters and rotation gates, by calling `eng.print_applied()`: ``` eng.print_applied() ``` <div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**Available decompositions**</p> Check out our <a href="https://strawberryfields.readthedocs.io/en/stable/conventions/decompositions.html">documentation</a> to see the available CV decompositions available in Strawberry Fields. </div> We can also see some of the measurement samples from this circuit within `results.samples`. These correspond to independent runs of the Gaussian Boson Sampling circuit. ``` results.samples ``` ## Analysis Let's now verify the Gaussian boson sampling result, by comparing the output Fock state probabilities to the Hafnian, using the relationship $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)}$$ ### Calculating the Hafnian For the right hand side numerator, we first calculate the submatrix $[(UU^T\tanh(r))]_{st}$: ``` import numpy as np B = (np.dot(U, U.T) * np.tanh(1)) ``` In Gaussian boson sampling, we determine the submatrix by taking the rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix in the case of the output measurement $\left|{1,1,0,0}\right\rangle$, ``` B[:,[0,1]][[0,1]] ``` To calculate the Hafnian in Python, we can use the direct definition $$\text{Haf}(A) = \frac{1}{n!2^n} \sum_{\sigma \in S_{2n}} \prod_{j=1}^n A_{\sigma(2j - 1), \sigma(2j)}$$ Notice that this function counts each term in the definition multiple times, and renormalizes to remove the multiple counts by dividing by a factor $\frac{1}{n!2^n}$. 
**This function is extremely slow!** ``` from itertools import permutations from scipy.special import factorial def Haf(M): n = len(M) m = int(n/2) haf = 0.0 for i in permutations(range(n)): prod = 1.0 for j in range(m): prod *= M[i[2 * j], i[2 * j + 1]] haf += prod return haf / (factorial(m) * (2 ** m)) ``` ## Comparing to the SF result In Strawberry Fields, both Fock and Gaussian states have the method `fock_prob()`, which returns the probability of measuring that particular Fock state. #### Let's compare the case of measuring at the output state $\left|0,1,0,1\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [1, 3]][[1, 3]] np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([0, 1, 0, 1]) ``` #### For the measurement result $\left|2,0,0,0\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 0]][[0, 0]] np.abs(Haf(B)) ** 2 / (2 * np.cosh(1) ** 4) state.fock_prob([2, 0, 0, 0]) ``` #### For the measurement result $\left|1,1,0,0\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]] np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([1, 1, 0, 0]) ``` #### For the measurement result $\left|1,1,1,1\right\rangle$, this corresponds to the full matrix $B$: ``` B = (np.dot(U,U.T) * np.tanh(1)) np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([1, 1, 1, 1]) ``` #### For the measurement result $\left|0,0,0,0\right\rangle$, this corresponds to a **null** submatrix, which has a Hafnian of 1: ``` 1 / np.cosh(1) ** 4 state.fock_prob([0, 0, 0, 0]) ``` As you can see, like in the boson sampling tutorial, they agree with almost negligable difference. <div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**Exercises**</p> Repeat this notebook with <ol> <li> A higher value for <tt>shots</tt> in <tt>eng.run()</tt>, and compare the relative probabilties of events with the expected values.</li> <li> A Fock backend such as NumPy, instead of the Gaussian backend</li> <li> Different beamsplitter and rotation parameters</li> <li> Input states with *differing* squeezed values $r_i$. You will need to modify the code to take into account the fact that the output covariance matrix determinant must now be calculated! </ol> </div>
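A practical aside that is not part of the original tutorial: the permutation-based `Haf` function above scales factorially, and Xanadu's companion library The Walrus provides an optimized `hafnian()` that can be dropped in. A minimal sketch, assuming the same interferometer matrix `U` and squeezing $r=1$ as above:

```
# Sketch: replacing the slow permutation-based Hafnian with thewalrus.hafnian
# (requires `pip install thewalrus`; assumes U is the interferometer matrix defined above).
import numpy as np
from thewalrus import hafnian

B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]]
print(np.abs(hafnian(B)) ** 2 / np.cosh(1) ** 4)  # compare with state.fock_prob([1, 1, 0, 0])
```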
This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org. Copyright (c) $\omega radlib$ developers. Distributed under the MIT License. See LICENSE.txt for more info. # How to use wradlib's ipol module for interpolation tasks? ``` import wradlib.ipol as ipol from wradlib.util import get_wradlib_data_file from wradlib.vis import plot_ppi import numpy as np import matplotlib.pyplot as pl import datetime as dt import warnings warnings.filterwarnings('ignore') try: get_ipython().magic("matplotlib inline") except: pl.ion() ``` ### 1-dimensional example Includes Nearest Neighbours, Inverse Distance Weighting, and Ordinary Kriging. ``` # Synthetic observations xsrc = np.arange(10)[:, None] vals = np.sin(xsrc).ravel() # Define target coordinates xtrg = np.linspace(0, 20, 100)[:, None] # Set up interpolation objects # IDW idw = ipol.Idw(xsrc, xtrg) # Nearest Neighbours nn = ipol.Nearest(xsrc, xtrg) # Linear ok = ipol.OrdinaryKriging(xsrc, xtrg) # Plot results pl.figure(figsize=(10,5)) pl.plot(xsrc.ravel(), vals, 'bo', label="Observation") pl.plot(xtrg.ravel(), idw(vals), 'r-', label="IDW interpolation") pl.plot(xtrg.ravel(), nn(vals), 'k-', label="Nearest Neighbour interpolation") pl.plot(xtrg.ravel(), ok(vals), 'g-', label="Ordinary Kriging") pl.xlabel("Distance", fontsize="large") pl.ylabel("Value", fontsize="large") pl.legend(loc="bottomright") ``` ### 2-dimensional example Includes Nearest Neighbours, Inverse Distance Weighting, Linear Interpolation, and Ordinary Kriging. ``` # Synthetic observations and source coordinates src = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3]))).transpose() np.random.seed(1319622840) vals = np.random.uniform(size=len(src)) # Target coordinates xtrg = np.linspace(0, 20, 40) ytrg = np.linspace(0, 20, 40) trg = np.meshgrid(xtrg, ytrg) trg = np.vstack( (trg[0].ravel(), trg[1].ravel()) ).T # Interpolation objects idw = ipol.Idw(src, trg) nn = ipol.Nearest(src, trg) linear = ipol.Linear(src, trg) ok = ipol.OrdinaryKriging(src, trg) # Subplot layout def gridplot(interpolated, title=""): pm = ax.pcolormesh(xtrg, ytrg, interpolated.reshape( (len(xtrg), len(ytrg)) ) ) pl.axis("tight") ax.scatter(src[:, 0], src[:, 1], facecolor="None", s=50, marker='s') pl.title(title) pl.xlabel("x coordinate") pl.ylabel("y coordinate") # Plot results fig = pl.figure(figsize=(8,8)) ax = fig.add_subplot(221, aspect="equal") gridplot(idw(vals), "IDW") ax = fig.add_subplot(222, aspect="equal") gridplot(nn(vals), "Nearest Neighbours") ax = fig.add_subplot(223, aspect="equal") gridplot(np.ma.masked_invalid(linear(vals)), "Linear interpolation") ax = fig.add_subplot(224, aspect="equal") gridplot(ok(vals), "Ordinary Kriging") pl.tight_layout() ``` ### Using the convenience function ipol.interpolation in order to deal with missing values **(1)** Exemplified for one dimension in space and two dimensions of the source value array (could e.g. be two time steps). ``` # Synthetic observations (e.g. two time steps) src = np.arange(10)[:, None] vals = np.hstack((1.+np.sin(src), 5. 
+ 2.*np.sin(src))) # Target coordinates trg = np.linspace(0, 20, 100)[:, None] # Here we introduce missing values in the second dimension of the source value array vals[3:5, 1] = np.nan # interpolation using the convenience function "interpolate" idw_result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4) nn_result = ipol.interpolate(src, trg, vals, ipol.Nearest) # Plot results fig = pl.figure(figsize=(10,5)) ax = fig.add_subplot(111) pl1 = ax.plot(trg, idw_result, 'b-', label="IDW") pl2 = ax.plot(trg, nn_result, 'k-', label="Nearest Neighbour") pl3 = ax.plot(src, vals, 'ro', label="Observations") ``` **(2)** Exemplified for two dimensions in space and two dimensions of the source value array (e.g. time steps), containing also NaN values (here we only use IDW interpolation) ``` # Just a helper function for repeated subplots def plotall(ax, trgx, trgy, src, interp, pts, title, vmin, vmax): ix = np.where(np.isfinite(pts)) ax.pcolormesh(trgx, trgy, interp.reshape( (len(trgx),len(trgy) ) ), vmin=vmin, vmax=vmax ) ax.scatter(src[ix, 0].ravel(), src[ix, 1].ravel(), c=pts.ravel()[ix], s=20, marker='s', vmin=vmin, vmax=vmax) ax.set_title(title) pl.axis("tight") # Synthetic observations src = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3])) ).T np.random.seed(1319622840 + 1) vals = np.round(np.random.uniform(size=(len(src), 2)), 1) # Target coordinates trgx = np.linspace(0, 20, 100) trgy = np.linspace(0, 20, 100) trg = np.meshgrid(trgx, trgy) trg = np.vstack((trg[0].ravel(), trg[1].ravel())).transpose() result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4) # Now introduce NaNs in the observations vals_with_nan = vals.copy() vals_with_nan[1, 0] = np.nan vals_with_nan[1:3, 1] = np.nan result_with_nan = ipol.interpolate(src, trg, vals_with_nan, ipol.Idw, nnearest=4) vmin = np.concatenate((vals.ravel(), result.ravel())).min() vmax = np.concatenate((vals.ravel(), result.ravel())).max() fig = pl.figure(figsize=(8,8)) ax = fig.add_subplot(221) plotall(ax, trgx, trgy, src, result[:, 0], vals[:, 0], '1st dim: no NaNs', vmin, vmax) ax = fig.add_subplot(222) plotall(ax, trgx, trgy, src, result[:, 1], vals[:, 1], '2nd dim: no NaNs', vmin, vmax) ax = fig.add_subplot(223) plotall(ax, trgx, trgy, src, result_with_nan[:, 0], vals_with_nan[:, 0], '1st dim: one NaN', vmin, vmax) ax = fig.add_subplot(224) plotall(ax, trgx, trgy, src, result_with_nan[:, 1], vals_with_nan[:, 1], '2nd dim: two NaN', vmin, vmax) pl.tight_layout() ``` ### How to use interpolation for gridding data in polar coordinates? 
Read polar coordinates and corresponding rainfall intensity from file ``` filename = get_wradlib_data_file('misc/bin_coords_tur.gz') src = np.loadtxt(filename) filename = get_wradlib_data_file('misc/polar_R_tur.gz') vals = np.loadtxt(filename) src.shape ``` Define target grid coordinates ``` xtrg = np.linspace(src[:,0].min(), src[:,0].max(), 200) ytrg = np.linspace(src[:,1].min(), src[:,1].max(), 200) trg = np.meshgrid(xtrg, ytrg) trg = np.vstack((trg[0].ravel(), trg[1].ravel())).T ``` Linear Interpolation ``` ip_lin = ipol.Linear(src, trg) result_lin = ip_lin(vals.ravel(), fill_value=np.nan) ``` IDW interpolation ``` ip_near = ipol.Nearest(src, trg) maxdist = trg[1,0] - trg[0,0] result_near = ip_near(vals.ravel(), maxdist=maxdist) ``` Plot results ``` fig = pl.figure(figsize=(15, 6)) fig.subplots_adjust(wspace=0.4) ax = fig.add_subplot(131, aspect="equal") plot_ppi(vals, ax=ax) ax = fig.add_subplot(132, aspect="equal") pl.pcolormesh(xtrg, ytrg, result_lin.reshape( (len(xtrg), len(ytrg)) ) ) ax = fig.add_subplot(133, aspect="equal") pl.pcolormesh(xtrg, ytrg, result_near.reshape( (len(xtrg), len(ytrg)) ) ) ```
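For comparison (not in the original notebook), the same polar data can also be gridded with IDW, reusing the `src` and `trg` coordinates defined above; `nnearest=4` is an illustrative choice:

```
# Sketch: IDW gridding of the polar rainfall field onto the same Cartesian target grid.
ip_idw = ipol.Idw(src, trg, nnearest=4)
result_idw = ip_idw(vals.ravel())

fig = pl.figure(figsize=(6, 6))
ax = fig.add_subplot(111, aspect="equal")
pl.pcolormesh(xtrg, ytrg, result_idw.reshape((len(xtrg), len(ytrg))))
```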
## Exploratory Data Analysis ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import warnings warnings.filterwarnings('ignore') # read dataset df = pd.read_csv('../datasets/winequality/winequality-red.csv',sep=';') # check data dimensions print(df.shape) # check length print(len(df)) # check number of dimensions of your DataFrame or Series print(df.ndim) # show the first five rows print(df.head(5)) # show the last five rows print(df.tail(5)) # print column names df.dtypes # return the number of non-missing values for each column of the DataFrame print(df.count) # change direction to get count of non-missing values for each each row df.count(axis='columns') # To print the metadata, use info() print(df.info()) # show the columns df.columns ``` ### Sorting A DataFrame can be sorted by the value of one of the variables (i.e columns). For example, we can sort by Total day charge (use ascending=False to sort in descending order): ``` df.sort_values(by='alcohol', ascending=False).head() ``` Alternatively, we can also sort by multiple columns: ``` df.sort_values(by=['alcohol', 'quality'], ascending=[True, False]).head() ``` ### Indexing and retrieving data DataFrame can be indexed in different ways. To get a single column, you can use a DataFrame['Name'] construction. Let's use this to answer a question about that column alone: **what is the proportion of alcohol in our dataframe?** ``` df['alcohol'].mean() ``` ### Applying Functions to Cells, Columns and Rows **To apply functions to each column, use `apply():`** ``` df.apply(np.max) ``` The apply method can also be used to apply a function to each row. To do this, specify `axis=1`. `lambda` functions are very convenient in such scenarios. For example, if we need to select all wines with alcohol content greater than 6, we can do it like this: ``` df[df['alcohol'].apply(lambda alcohol: alcohol > 6)].head() ``` The `map` method can be used to **replace values in a column** by passing a dictionary of the form `{old_value: new_value}` as its argument: ``` d = {'9.4' : 100, '9.8' : 200} df['alcohol'] = df['alcohol'].map(d) df.head() ``` The same thing can be done with the `replace` method: ### Grouping In general, grouping data in Pandas goes as follows: df.groupby(by=grouping_columns)[columns_to_show].function() 1. First, the `groupby` method divides the grouping_columns by their values. They become a new index in the resulting dataframe. 2. Then, columns of interest are selected (`columns_to_show`). If columns_to_show is not included, all non groupby clauses will be included. 3. Finally, one or several functions are applied to the obtained groups per selected columns. Here is an example where we group the data according to the values of the `sulphates` variable and display statistics of three columns in each group: ``` columns_to_show = ['pH', 'chlorides', 'citric acid'] df.groupby(['sulphates'])[columns_to_show].describe(percentiles=[]).head() ``` Let’s do the same thing, but slightly differently by passing a list of functions to `agg()`: ``` columns_to_show = ['pH', 'chlorides', 'citric acid'] df.groupby(['sulphates'])[columns_to_show].agg([np.mean, np.std, np.min, np.max]).head() ``` ### Summary tables Suppose we want to see how the observations in our sample are distributed in the context of two variables - `sulphates` and `quality`. 
To do so, we can build a contingency table using the `crosstab` method:

```
pd.crosstab(df['sulphates'], df['quality']).head()

pd.crosstab(df['sulphates'], df['quality'], normalize=True).head()
```

## First attempt at predicting wine quality

Let's see how wine quality is related to other characteristics of the wine, such as its pH and density. We’ll do this using a crosstab contingency table and also through visual analysis with Seaborn (however, visual analysis will be covered more thoroughly in the next article).

```
pd.crosstab(df['pH'], df['quality'], margins=True).head()

sns.countplot(x='density', hue='quality', data=df);
```

### Histogram

```
# create histogram
bin_edges = np.arange(0, df['residual sugar'].max() + 1, 1)
fig = plt.hist(df['residual sugar'], bins=bin_edges)

# add plot labels
plt.xlabel('residual sugar')
plt.ylabel('count')
plt.show()
```

### Scatterplot for continuous variables

```
# create scatterplot
fig = plt.scatter(df['pH'], df['residual sugar'])

# add plot labels
plt.xlabel('pH')
plt.ylabel('residual sugar')
plt.show()
```

### Scatterplot Matrix

```
# show columns
df.columns

# create scatterplot matrix (the grid labels its own axes)
fig = sns.pairplot(data=df[['alcohol', 'pH', 'residual sugar', 'quality']], hue='quality')
plt.show()
```

### Boxplots
- Distribution of data in terms of median and percentiles (median is the 50th percentile)

##### manual approach

```
percentiles = np.percentile(df['alcohol'], q=[25, 50, 75])
percentiles

for p in percentiles:
    plt.axhline(p, color='black', linestyle='-')

plt.scatter(np.zeros(df.shape[0]) + 0.5, df['alcohol'])

iqr = percentiles[-1] - percentiles[0]
upper_whisker = min(df['alcohol'].max(), percentiles[-1] + iqr * 1.5)
lower_whisker = max(df['alcohol'].min(), percentiles[0] - iqr * 1.5)

plt.axhline(upper_whisker, color='black', linestyle='--')
plt.axhline(lower_whisker, color='black', linestyle='--')

plt.ylim([8, 16])
plt.ylabel('alcohol')
fig = plt.gca()
fig.axes.get_xaxis().set_ticks([])
plt.show()
```

#### using matplotlib.pyplot.boxplot approach

```
plt.boxplot(df['alcohol'])
plt.ylim([8, 16])
plt.ylabel('alcohol')
fig = plt.gca()
fig.axes.get_xaxis().set_ticks([])
plt.show()

# Assume density is the target variable

# descriptive statistics summary
df['density'].describe()

# histogram
sns.distplot(df['density']);

# skewness and kurtosis
print("Skewness: %f" % df['density'].skew())
print("Kurtosis: %f" % df['density'].kurt())
```

### Relationship with other continuous variables

```
# other continuous variables: 'fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
# 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'pH', 'sulphates', 'alcohol'
var = 'pH'
data = pd.concat([df['density'], df[var]], axis=1)
data.plot.scatter(x=var, y='density');
```

### Relationship with categorical variable

```
var = 'quality'
data = pd.concat([df['density'], df[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="density", data=data)
```

#### Correlation matrix (heatmap style)

```
# correlation matrix
corrmat = df.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
```

#### `density` correlation matrix (zoomed heatmap style)

```
k = 10  # number of variables for heatmap
cols = corrmat.nlargest(k, 'density')['density'].index
cm = np.corrcoef(df[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f',
                 annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
```

From the above heatmap plot
we can see that the variable `density` is highly correlated with `fixed acidity`, `citric acid`, `total sulfur dioxide`, and `free sulfur dioxide`

```
df.columns

# scatterplot
sns.set()
cols = ['fixed acidity', 'citric acid', 'total sulfur dioxide', 'free sulfur dioxide']
sns.pairplot(df[cols], height=2.5)
plt.show();
```

### Missing data

Important questions when thinking about missing data:
- How prevalent is the missing data?
- Is missing data random or does it have a pattern?

```
# missing data
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
```

### Detailed Statistical Analysis

According to Hair et al. (2013), four assumptions should be tested:

**Normality** - When we talk about normality what we mean is that the data should look like a normal distribution. This is important because several statistical tests rely on it (e.g. t-statistics). In this exercise we'll just check univariate normality for 'density' (which is a limited approach). Remember that univariate normality doesn't ensure multivariate normality (which is what we would like to have), but it helps. Another detail to take into account is that in big samples (>200 observations) normality is not such an issue. However, if we solve normality, we avoid a lot of other problems (e.g. heteroscedasticity), so that's the main reason why we are doing this analysis.

**Homoscedasticity** - I just hope I wrote it right. Homoscedasticity refers to the 'assumption that dependent variable(s) exhibit equal levels of variance across the range of predictor variable(s)' (Hair et al., 2013). Homoscedasticity is desirable because we want the error term to be the same across all values of the independent variables.

**Linearity** - The most common way to assess linearity is to examine scatter plots and search for linear patterns. If patterns are not linear, it would be worthwhile to explore data transformations. However, we'll not get into this because most of the scatter plots we've seen appear to have linear relationships.

**Absence of correlated errors** - Correlated errors, as the definition suggests, happen when one error is correlated to another. For instance, if a positive error systematically produces a negative error, it means that there's a relationship between these variables. This occurs often in time series, where some patterns are time related. We'll also not get into this. However, if you detect something, try to add a variable that can explain the effect you're getting. That's the most common solution for correlated errors.

**Normality**
- Histogram - Kurtosis and skewness.
- Normal probability plot - Data distribution should closely follow the diagonal that represents the normal distribution.

```
# histogram
sns.set_style('darkgrid')
sns.distplot(df['density']);

# Add labels
plt.title('Histogram of Density')
plt.xlabel('Density')
plt.ylabel('Count')

sns.distplot(df['density'], hist=True, kde=False)

help(sns.distplot)
```
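The checklist above mentions a normal probability plot, but the cell only draws histograms. A minimal sketch of that plot is shown below; it assumes `scipy` is available alongside the libraries already imported and reuses the `df['density']` column — the choice of `scipy.stats.probplot` is ours, not something prescribed by the original notebook.

```
from scipy import stats

# normal probability (Q-Q) plot: points should hug the diagonal if 'density' is ~normal
plt.figure(figsize=(6, 6))
stats.probplot(df['density'], dist="norm", plot=plt)
plt.title('Normal probability plot of density')
plt.show()

# the skewness and kurtosis printed earlier quantify the same departure from normality
```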
github_jupyter
# Tutorial for Geoseg

> __version__ == 0.1.0
> __author__ == Go-Hiroaki

# Overview:

## 1. Evaluating with pretrained models
> Test model performance using the provided pretrained models

## 2. Re-training with provided dataset
> Train new models with the provided training dataset

## 3. Training with personal dataset
> Train and test models with your own dataset

```
ls
```

## 1. Evaluating with pretrained models

### 1.1 Prepare and load dataset

#### > Prepare dataset

```
YOUR_DATASET/
|-- img
|   |-- train_1.png
|   |-- train_2.png
|   `--
|-- msk
|   |-- train_1.png
|   |-- train_2.png
|   `--
|-- ref.csv
|-- statistic.csv
|-- train.csv
|-- val.csv
```

#### > Modify src/datasets.py to point at YOUR_DATASET

```
if __name__ == "__main__":
    # ====================== parameter initialization ======================= #
    parser = argparse.ArgumentParser(description='ArgumentParser')
    parser.add_argument('-idx', type=int, default=0,
                        help='index of sample image')
    args = parser.parse_args()
    idx = args.idx
    for root in ['YOUR_DATASET']:
        for mode in ["IM", "IMS", "IME"]:
            print("Load {}/{}.".format(root, mode))
            trainset, valset = load_dataset(root, mode)
            # print("Load train set = {} examples, val set = {} examples".format(
            #     len(trainset), len(valset)))
            sample = trainset[idx]
            trainset.show(idx)
            sample = valset[idx]
            valset.show(idx)
            print("\tsrc:", sample["src"].shape,
                  "tar:", sample["tar"].shape,)
```

#### > Run src/datasets.py

> python src/datasets.py

If successful, sample images will show up in example/

### 1.2 Download pretrained models

> 1. FCN8s_iter_5000.pth [LINK](https://drive.google.com/open?id=1KHs7coyXAipz8t5cN_lbTC4MOYi8FddI)
> 2. FCN16s_iter_5000.pth [LINK](https://drive.google.com/open?id=1wlORkMx_ykmHysShUKY4UcCYs-fVaen6)
> 3. FCN32s_iter_5000.pth [LINK](https://drive.google.com/open?id=1OR_Sk66RAGtKrp0quvqazRkL0xtAH8RY)
> 4. SegNet_iter_5000.pth [LINK](https://drive.google.com/open?id=1J0aRjFG-zOSSXnynm02VaYxjw1tjx-qC)
> 5. UNet_iter_5000.pth [LINK](https://drive.google.com/open?id=17X0aCgRx3XXgH1fcfLoLwgcbWIzxZe5K)
> 6. FPN_iter_5000.pth [LINK](https://drive.google.com/open?id=1fWrCnGQJBZTw7m5OZlQvH5-R_JJlBA-r)
> 7. ResUNet_iter_5000.pth [LINK](https://drive.google.com/open?id=1jGs_PxEMXCshOzXdg9LuFJxe8kO39oxT)
> 8. MC-FCN_iter_5000.pth [LINK](https://drive.google.com/open?id=1Kt_JmR0ZGXvK9kuTmDOek5l1SsHX4xhz)
> 9. BR-Net_iter_5000.pth [LINK](https://drive.google.com/open?id=1rytD9tzAq2mne5yf3XEh-jTSHlvQvedT)
> * Upcoming ...

After downloading the corresponding pretrained models, save them to checkpoints/ .

```
ls ./checkpoint/
```

### 1.3 Run evaluation scripts

* single model

```
visSingle.py -h
optional arguments:
  -h, --help            show this help message and exit
  -checkpoints CHECKPOINTS [CHECKPOINTS ...]
                        checkpoints used for making prediction
  -spaces SPACES [SPACES ...]
                        barrier space for merging
  -direction {horizontal,vertical}
                        merge image direction
  -disp_cols DISP_COLS  cols for displaying image
  -edge_fn {shift,canny}
                        method used for edge extraction
  -gen_nb GEN_NB        number of generated image
  -color COLOR          background color for generated rgb result
  -partition PARTITION  partition of dataset for loading
  -disk DISK            dilation level
  -cuda CUDA            using cuda for optimization
```

The generated results will show up in result/single

- BR-Net
![time](./result/single/BR-Net_canny_segmap_edge_0.png)

* multiple models

```
visSingleComparison.py -h
optional arguments:
  -h, --help            show this help message and exit
  -checkpoints CHECKPOINTS [CHECKPOINTS ...]
                        checkpoints used for making prediction
  -spaces SPACES [SPACES ...]
                        barrier spaces for merging
  -direction {horizontal,vertical}
                        merge image direction
  -disp_cols DISP_COLS  cols for displaying image
  -target {segmap,edge}
                        target for model prediction [segmap, edge]
  -edge_fn {shift,canny}
                        method used for edge extraction
  -gen_nb GEN_NB        number of generated image
  -eval_fn {ov,precision,recall,f1_score,jaccard,kappa}
                        method used for evaluate performance
  -significance SIGNIFICANCE
                        significant different level between methods
  -color COLOR          background color for generated rgb result
  -partition PARTITION  partition of dataset for loading
  -disk DISK            dilation level
  -batch_size BATCH_SIZE
                        batch size for model prediction
  -cuda CUDA            using cuda for optimization
```

The generated results will show up in result/single-comparison

- Segmap FCN32s_FCN16s_FCN8s
![time](./result/single-comparison/segmap_FCN32s_FCN16s_FCN8s_1.png)

- Edge FCN32s_FCN16s_FCN8s
![time](./result/single-comparison/edge_FCN32s_FCN16s_FCN8s_1.png)

## 2. Re-train with provided dataset

### 2.1 Download training dataset

> Training dataset [LINK](https://drive.google.com/file/d/1boGcJz9TyK9XB4GUhjCHVu8XGtbgjjbi/view?usp=sharing).

Unzip it and place it in datasets/

### 2.2 Run training scripts

```
python src/train.py -h
usage: train.py [-h] [-root ROOT] [-net NET] [-base_kernel BASE_KERNEL]
                [-trigger {epoch,iter}] [-interval INTERVAL]
                [-terminal TERMINAL] [-batch_size BATCH_SIZE] [-lr LR]
                [-cuda CUDA]

ArgumentParser

optional arguments:
  -h, --help            show this help message and exit
  -root ROOT            root dir of dataset for training models
  -net NET              network type for training
  -base_kernel BASE_KERNEL
                        base number of kernels
  -trigger {epoch,iter}
                        trigger type for logging
  -interval INTERVAL    interval for logging
  -terminal TERMINAL    terminal for training
  -batch_size BATCH_SIZE
                        batch_size for training
  -lr LR                learning rate for optimization
  -cuda CUDA            using cuda for optimization
```

## 3. Training with personal dataset

### 3.1 Prepare your own dataset

### 3.2 Run training scripts

```
Step
```
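The step above is left as a stub in the original tutorial. As an illustration only, a training run on a personal dataset could reuse the flags documented in section 2.2; the dataset name, network choice, and hyperparameter values below are placeholders, not settings taken from the repository.

```
# illustrative only: flag names come from `python src/train.py -h`,
# the values are placeholders for your own setup
python src/train.py -root YOUR_DATASET -net FCN8s -base_kernel 24 \
                    -trigger iter -interval 100 -terminal 5000 \
                    -batch_size 24 -lr 1e-4 -cuda True
```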
github_jupyter
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

import warnings
warnings.filterwarnings("ignore")

##### Functions

# 1st function: scatter the selected variable against TransactionDT, split by isFraud
def scatter(column):
    fr, no_fr = (train[train['isFraud'] == 1], train[train['isFraud'] == 0])
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,3))
    ax1.title.set_text('Scatter of ' + column + ' when isFraud == 0')
    ax1.set_ylim(train[column].min() - 1, train[column].max() + 1)
    ax1.scatter(x = no_fr['TransactionDT'], y = no_fr[column], color = 'blue', marker='o')
    ax2.title.set_text('Scatter of ' + column + ' when isFraud == 1')
    ax2.set_ylim(train[column].min() - 1, train[column].max() + 1)
    ax2.scatter(x = fr['TransactionDT'], y = fr[column], color = 'red', marker='o')
    plt.show()

# 2nd function: to show a ranking of pearson correlation with the variable selected
def corr(data, column):
    print('Correlation with ' + column)
    print(train[data].corrwith(train[column]).abs().sort_values(ascending = False)[1:])

# 3rd function: to reduce each group (built from the NaN grouping and pearson correlation)
# to its column with the largest number of unique values
def reduce(groups):
    result = list()
    for values in groups:
        maxval = 0
        val = values[0]
        for value in values:
            unique_values = train[value].nunique()
            if unique_values > maxval:
                maxval = unique_values
                val = value
        result.append(val)
    return result

# 4th function: to sort the columns in ascending order based on their number
def order_finalcolumns(final_Xcolumns):
    return sorted(final_Xcolumns, key=lambda x: int("".join([i for i in x if i.isdigit()])))

##### Load the input files.
print('Downloading datasets...')
print(' ')
train = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/train_mred.pkl')
print('Train has been downloaded... (1/2)')
test = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/test_mred.pkl')
print('Test has been downloaded... (2/2)')
print(' ')
print('All files are downloaded')

##### All the columns of train dataset.
print(list(train))
```

# NaNs Exploration

We will search all the columns to determine which ones are related by the number of NaNs they contain. After grouping them, we keep from each group the column with the largest number of unique values (it is assumed to be the most explanatory variable).

## Transaction columns

```
# These columns are the first ones in the transaction dataset.
columns = list(train.columns[:17])
columns

for col in columns:
    print(f'{col} NaNs: {train[col].isna().sum()} | {train[col].isna().sum()/train.shape[0]:.2%}')

# Looking closely at the % of NaNs, most columns have little missing information.
# We keep all the columns where the NaN ratio is < 0.7
final_transactioncolumns = list()
for col in columns:
    if train[col].isna().sum()/train.shape[0] < 0.7:
        final_transactioncolumns.append(col)

print('Final Transaction columns:', final_transactioncolumns)
```

## C columns

```
##### Group the C columns to determine which columns are related by the number of NANs present and analyze its groups independently.
columns = ['C' + str(i) for i in range(1,15)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` ##### Time series graph based on TransactionDT # There is no column that does not have NaNs values so we get all the columns in the same group group_list = ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['C1','C11','C2','C6','C8','C4','C10','C14','C12','C7','C13'], ['C3'], ['C5','C9']] result = reduce(reduce_groups) print('Final C columns:',result) final_ccolumns = result ``` ## D columns ``` ##### Group the D columns + Dachr columns to determine which columns are related by the number of NANs present and analyze its groups independently. columns = ['D' + str(i) for i in range(1,16)] columns.extend(['D1achr','D2achr','D4achr','D6achr','D10achr','D11achr','D12achr','D13achr','D14achr','D15achr']) df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` ##### Time series graph based on TransactionDT. # Despite having different number of NaNs, we are analyzing it as a single group. But due to NaNs low number in D1, we keep it as a final column. group_list = ['D1achr', 'D2achr', 'D3', 'D4achr', 'D5', 'D6achr', 'D7', 'D8', 'D9', 'D10achr', 'D11achr', 'D12achr', 'D13achr', 'D14achr', 'D15achr'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 # On the first group, D1achr vs D2achr --> we keep D1achr due to the low number of NaNs. reduce_groups = [['D3','D7','D5'],['D4achr','D12achr','D6achr','D15achr','D10achr', 'D11achr'], ['D8'], ['D9'], ['D13achr'],['D14achr']] result = reduce(reduce_groups) result.append('D1achr') print('Final D columns:',result) final_dcolumns = result ``` ## M columns ``` ##### Group the M columns to determine which columns are related by the number of NANs present and analyze its groups independently. 
columns = ['M' + str(i) for i in range(1,10)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` # To analize M columns, we need to transform strings to numbers. Instead of using Label Encoder, we use a dictionary. T_F_num = dict({'F': 0, 'T': 1, 'M0': 0, 'M1': 1, 'M2': 2}) for column in ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9']: print(f'{column}:', train[column].unique()) print('Transforming strings to numbers...') train[column] = train[column].replace(T_F_num) print(f'{column}:', train[column].unique()) print('') ##### Time series graph based on TransactionDT. # Despite having different number of NaNs, we are analyzing it as a single group. group_list = ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() #### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, We grouped together the columns with corr > 0.7 but in this case, no correlation is bigger than 0.7 # That's why, in this particular case we grouped together the columns with corr > 0.5 reduce_groups = ['M1'], ['M2','M3'], ['M4'], ['M5'], ['M6'], ['M7', 'M8'], ['M9'] result = reduce(reduce_groups) print('Final M columns:',result) final_mcolumns = result ``` ## V columns ``` ##### Group the V columns to determine which columns are related by the number of NANs present and analyze its groups independently. columns = ['V' + str(i) for i in range(1,340)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 final_vcolumns = list() ``` ### Group 1 ``` ##### Time series graph based on TransactionDT. group_list = ['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V1'], ['V2','V3'], ['V4','V5'], ['V6','V7'], ['V8','V9'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group1 columns:',result) ``` ### Group 2 ``` ##### Time series graph based on TransactionDT. group_list = ['V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V12','V13'], ['V14'], ['V15','V16','V33','V34','V31','V32','V21','V22','V17','V18'], ['V19','V20'],['V23','V24'],['V25','V26'],['V27','V28'],['V29','V30']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group2 columns:',result) ``` ### Group 3 ``` ##### Time series graph based on TransactionDT. group_list = ['V35', 'V36', 'V37', 'V38', 'V39', 'V40', 'V41', 'V42', 'V43', 'V44', 'V45', 'V46', 'V47', 'V48', 'V49', 'V50', 'V51', 'V52'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V35','V36'], ['V37','V38'], ['V39','V40','V42','V43','V50','V51','V52'], ['V41'], ['V44','V45'],['V46','V47'],['V48','V49']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group3 columns:',result) ``` ### Group 4 ``` ##### Time series graph based on TransactionDT. group_list = ['V53', 'V54', 'V55', 'V56', 'V57', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V65', 'V66', 'V67', 'V68', 'V69', 'V70', 'V71', 'V72', 'V73', 'V74'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V53','V54'], ['V55','V56'], ['V57','V58','V71','V73','V72','V74','V63','V59','V64','V60'],['V61','V62'],['V65'], ['V66','V67'],['V68'], ['V69','V70']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group4 columns:',result) ``` ### Group 5 ``` ##### Time series graph based on TransactionDT. group_list = ['V75', 'V76', 'V77', 'V78', 'V79', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'V92', 'V93', 'V94'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V75','V76'],['V77','V78'], ['V79', 'V94', 'V93', 'V92', 'V84', 'V85', 'V80', 'V81'],['V82','V83'],['V86','V87'],['V88'],['V89'],['V90','V91']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group5 columns:',result) ``` ### Group 6 ``` ##### Time series graph based on TransactionDT. 
group_list = ['V95', 'V96', 'V97', 'V98', 'V99', 'V100', 'V101', 'V102', 'V103', 'V104', 'V105', 'V106', 'V107', 'V108', 'V109', 'V110', 'V111', 'V112', 'V113', 'V114', 'V115', 'V116', 'V117', 'V118', 'V119', 'V120', 'V121', 'V122', 'V123', 'V124', 'V125', 'V126', 'V127', 'V128', 'V129', 'V130', 'V131', 'V132', 'V133', 'V134', 'V135', 'V136', 'V137'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 # We omit V107 since there is no info about corr with other columns and its unique values are 1. reduce_groups = [['V95','V101'],['V96','V102','V97','V99','V100','V103'],['V98'],['V104','V106','V105'],['V108','V110','V114','V109','V111','V113','V112','V115','V116'], ['V117','V119','V118'],['V120','V122','V121'],['V123','V125','V124'],['V126','V128','V132'],['V127','V133','V134'],['V129','V131','V130'], ['V135','V137','V136']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group6 columns:',result) ``` ### Group 7 ``` ##### Time series graph based on TransactionDT. group_list = ['V138', 'V139', 'V140', 'V141', 'V142', 'V143', 'V144', 'V145', 'V146', 'V147', 'V148', 'V149', 'V150', 'V151', 'V152', 'V153', 'V154', 'V155', 'V156', 'V157', 'V158', 'V159', 'V160', 'V161', 'V162', 'V163', 'V164', 'V165', 'V166'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V138'],['V139','V140'],['V141','V142'],['V143','V159','V150','V151','V165','V144','V145','V160','V152','V164','V166'],['V146','V147'], ['V148','V155','V149','V153','V154','V156','V157','V158'],['V161','V163','V162']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group7 columns:',result) ``` ### Group 8 ``` ##### Time series graph based on TransactionDT. group_list = ['V167', 'V168', 'V172', 'V173', 'V176', 'V177', 'V178', 'V179', 'V181', 'V182', 'V183', 'V186', 'V187', 'V190', 'V191', 'V192', 'V193', 'V196', 'V199', 'V202', 'V203', 'V204', 'V205', 'V206', 'V207', 'V211', 'V212', 'V213', 'V214', 'V215', 'V216'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V167','V176','V199','V179','V190','V177','V186','V168','V172','V178','V196','V191','V204','V213','V207','V173'],['V181','V183','V182', 'V187','V192','V203','V215','V178','V193','V212','V204'],['V202','V216','V204','V214'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group8 columns:',result) ``` ### Group 9 ``` ##### Time series graph based on TransactionDT. 
group_list = ['V169', 'V170', 'V171', 'V174', 'V175', 'V180', 'V184', 'V185', 'V188', 'V189', 'V194', 'V195', 'V197', 'V198', 'V200', 'V201', 'V208', 'V209', 'V210'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V169'],['V170','V171','V200','V201'],['V174','V175'],['V180'],['V184','V185'],['V188','V189'],['V194','V197','V195','V198'], ['V208','V210','V209']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group9 columns:',result) ``` ### Group 10 ``` ##### Time series graph based on TransactionDT. group_list = ['V217', 'V218', 'V219', 'V223', 'V224', 'V225', 'V226', 'V228', 'V229', 'V230', 'V231', 'V232', 'V233', 'V235', 'V236', 'V237','V240', 'V241', 'V242', 'V243', 'V244', 'V246', 'V247', 'V248', 'V249', 'V252', 'V253', 'V254', 'V257', 'V258', 'V260', 'V261', 'V262', 'V263', 'V264', 'V265', 'V266', 'V267', 'V268', 'V269', 'V273', 'V274', 'V275', 'V276', 'V277', 'V278'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V217','V231','V233','V228','V257','V219','V232','V246'],['V218','V229','V224','V225','V253','V243','V254','V248','V264','V261','V249','V258', 'V267','V274','V230','V236','V247','V262','V223','V252','V260'],['V226','V263','V276','V278'], ['V235','V237'],['V240','V241'],['V242','V244'], ['V265','V275','V277','V268','V273'],['V269','V266']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group10 columns:',result) ``` ### Group 11 ``` ##### Time series graph based on TransactionDT. group_list = ['V220', 'V221', 'V222', 'V227', 'V234', 'V238', 'V239', 'V245', 'V250', 'V251', 'V255', 'V256', 'V259', 'V270', 'V271', 'V272'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V220'],['V221','V222','V259','V245','V227','V255','V256'],['V234'],['V238','V239'],['V250','V251'],['V270','V272','V271'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group11 columns:',result) ``` ### Group 12 ``` ##### Time series graph based on TransactionDT. group_list = ['V279', 'V280', 'V284', 'V285', 'V286', 'V287', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V297', 'V298', 'V299', 'V302', 'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list:
    corr(group_list,column)
    print(' ')

##### Based on pearson correlation, we grouped together the columns with corr > 0.7
reduce_groups = [['V279','V293','V290','V280','V295','V294','V292','V291','V317','V307','V318'],['V284'],['V285','V287'],['V286'],['V297','V299','V298'],
                 ['V302','V304','V303'],['V305'],['V306','V308','V316','V319'],['V309','V311','V312','V310'],['V320','V321']]
result = reduce(reduce_groups)
final_vcolumns.extend(result)
print('Final V_Group12 columns:',result)
```

### Group 13

```
##### Time series graph based on TransactionDT.
group_list = ['V281', 'V282', 'V283', 'V288', 'V289', 'V296', 'V300', 'V301', 'V313', 'V314', 'V315']
for column in group_list:
    scatter(column)

##### Heatmap
plt.figure(figsize = (15,15))
sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)
plt.show()

##### Ranking of pearson correlation.
for column in group_list:
    corr(group_list,column)
    print(' ')

##### Based on pearson correlation, we grouped together the columns with corr > 0.7
reduce_groups = ['V281','V282','V283'],['V288','V289'],['V296'],['V300','V301'],['V313','V315','V314']
result = reduce(reduce_groups)
final_vcolumns.extend(result)
print('Final V_Group13 columns:',result)
```

### Group 14

```
##### Time series graph based on TransactionDT.
group_list = ['V322', 'V323', 'V324', 'V325', 'V326', 'V327', 'V328', 'V329', 'V330', 'V331', 'V332', 'V333', 'V334', 'V335', 'V336', 'V337', 'V338', 'V339']
for column in group_list:
    scatter(column)

##### Heatmap
plt.figure(figsize = (15,15))
sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0)
plt.show()

##### Ranking of pearson correlation.
for column in group_list:
    corr(group_list,column)
    print(' ')

##### Based on pearson correlation, we grouped together the columns with corr > 0.7
reduce_groups = ['V322','V324'],['V323','V326','V324','V327','V326'],['V325'],['V328','V330','V329'],['V331','V333','V332','V337'],['V334','V336','V335']
result = reduce(reduce_groups)
final_vcolumns.extend(result)
print('Final V_Group14 columns:',result)
```

### Final V columns

```
print('Number of V columns:', len(final_vcolumns))
print(final_vcolumns)
```

# Conclusions

Based on the previous process, we suggest keeping as final columns the ones described below:

```
##### 1st we sort them (ascending order) with a function
final_ccolumns = order_finalcolumns(final_ccolumns)
final_dcolumns = order_finalcolumns(final_dcolumns)
final_mcolumns = order_finalcolumns(final_mcolumns)
final_vcolumns = order_finalcolumns(final_vcolumns)

##### Final columns
print(f'Final Transaction columns ({len(final_transactioncolumns)}): {final_transactioncolumns}')
print(' ')
print(f'Final C columns ({len(final_ccolumns)}): {final_ccolumns}')
print(' ')
print(f'Final D columns ({len(final_dcolumns)}): {final_dcolumns}')
print(' ')
print(f'Final M columns ({len(final_mcolumns)}): {final_mcolumns}')
print(' ')
print(f'Final V columns ({len(final_vcolumns)}): {final_vcolumns}')
print(' ')
print('#' * 50)

final_columns = final_transactioncolumns + final_ccolumns + final_dcolumns + final_mcolumns + final_vcolumns
print(' ')
print('Final columns:', final_columns)
print(' ')
print('Length of final columns:', len(final_columns))
```
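As a possible next step (not part of the original notebook), the selected columns can be applied to both datasets before modelling. The sketch below assumes `train` still contains the `isFraud` target and that most of the engineered columns also exist in the `test` pickle loaded at the top of this notebook; the output file names are arbitrary placeholders.

```
# keep only the selected columns (plus the target in train) for the modelling stage
train_reduced = train[final_columns + ['isFraud']]
test_reduced = test[[col for col in final_columns if col in test.columns]]

train_reduced.to_pickle('train_reduced.pkl')
test_reduced.to_pickle('test_reduced.pkl')

print('train_reduced:', train_reduced.shape, '| test_reduced:', test_reduced.shape)
```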
github_jupyter
# Dropout Dropout [1] is a technique for regularizing neural networks by randomly setting some features to zero during the forward pass. In this exercise you will implement a dropout layer and modify your fully-connected network to optionally use dropout. [1] [Geoffrey E. Hinton et al, "Improving neural networks by preventing co-adaptation of feature detectors", arXiv 2012](https://arxiv.org/abs/1207.0580) ``` # As usual, a bit of setup from __future__ import print_function import time import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.fc_net import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.items(): print('%s: ' % k, v.shape) ``` # Dropout forward pass In the file `cs231n/layers.py`, implement the forward pass for dropout. Since dropout behaves differently during training and testing, make sure to implement the operation for both modes. Once you have done so, run the cell below to test your implementation. ``` np.random.seed(231) x = np.random.randn(500, 500) + 10 for p in [0.25, 0.4, 0.7]: out, _ = dropout_forward(x, {'mode': 'train', 'p': p}) out_test, _ = dropout_forward(x, {'mode': 'test', 'p': p}) print('Running tests with p = ', p) print('Mean of input: ', x.mean()) print('Mean of train-time output: ', out.mean()) print('Mean of test-time output: ', out_test.mean()) print('Fraction of train-time output set to zero: ', (out == 0).mean()) print('Fraction of test-time output set to zero: ', (out_test == 0).mean()) print() ``` # Dropout backward pass In the file `cs231n/layers.py`, implement the backward pass for dropout. After doing so, run the following cell to numerically gradient-check your implementation. ``` np.random.seed(231) x = np.random.randn(10, 10) + 10 dout = np.random.randn(*x.shape) dropout_param = {'mode': 'train', 'p': 0.2, 'seed': 123} out, cache = dropout_forward(x, dropout_param) dx = dropout_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda xx: dropout_forward(xx, dropout_param)[0], x, dout) # Error should be around e-10 or less print('dx relative error: ', rel_error(dx, dx_num)) ``` ## Inline Question 1: What happens if we do not divide the values being passed through inverse dropout by `p` in the dropout layer? Why does that happen? ## Answer: # Fully-connected nets with Dropout In the file `cs231n/classifiers/fc_net.py`, modify your implementation to use dropout. Specifically, if the constructor of the net receives a value that is not 1 for the `dropout` parameter, then the net should add dropout immediately after every ReLU nonlinearity. After doing so, run the following to numerically gradient-check your implementation. 
``` np.random.seed(231) N, D, H1, H2, C = 2, 15, 20, 30, 10 X = np.random.randn(N, D) y = np.random.randint(C, size=(N,)) for dropout in [1, 0.75, 0.5]: print('Running check with dropout = ', dropout) model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C, weight_scale=5e-2, dtype=np.float64, dropout=dropout, seed=123) loss, grads = model.loss(X, y) print('Initial loss: ', loss) # Relative errors should be around e-6 or less; Note that it's fine # if for dropout=1 you have W2 error be on the order of e-5. for name in sorted(grads): f = lambda _: model.loss(X, y)[0] grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5) print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))) print() ``` # Regularization experiment As an experiment, we will train a pair of two-layer networks on 500 training examples: one will use no dropout, and one will use a keep probability of 0.25. We will then visualize the training and validation accuracies of the two networks over time. ``` # Train two identical nets, one with dropout and one without np.random.seed(231) num_train = 500 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } solvers = {} dropout_choices = [1, 0.25] for dropout in dropout_choices: model = FullyConnectedNet([500], dropout=dropout) print(dropout) solver = Solver(model, small_data, num_epochs=25, batch_size=100, update_rule='adam', optim_config={ 'learning_rate': 5e-4, }, verbose=True, print_every=100) solver.train() solvers[dropout] = solver # Plot train and validation accuracies of the two models train_accs = [] val_accs = [] for dropout in dropout_choices: solver = solvers[dropout] train_accs.append(solver.train_acc_history[-1]) val_accs.append(solver.val_acc_history[-1]) plt.subplot(3, 1, 1) for dropout in dropout_choices: plt.plot(solvers[dropout].train_acc_history, 'o', label='%.2f dropout' % dropout) plt.title('Train accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(ncol=2, loc='lower right') plt.subplot(3, 1, 2) for dropout in dropout_choices: plt.plot(solvers[dropout].val_acc_history, 'o', label='%.2f dropout' % dropout) plt.title('Val accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(ncol=2, loc='lower right') plt.gcf().set_size_inches(15, 15) plt.show() ``` ## Inline Question 2: Compare the validation and training accuracies with and without dropout -- what do your results suggest about dropout as a regularizer? ## Answer: ## Inline Question 3: Suppose we are training a deep fully-connected network for image classification, with dropout after hidden layers (parameterized by keep probability p). How should we modify p, if at all, if we decide to decrease the size of the hidden layers (that is, the number of nodes in each layer)? ## Answer:
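For reference while working through the cells above, here is one common way to write inverted dropout that is consistent with the checks in this notebook (`p` acts as the keep probability, with scaling by `1/p` at train time). It is a sketch of the idea under those assumptions, not necessarily the exact implementation expected in `cs231n/layers.py`, and the `_sketch` function names are ours.

```
import numpy as np

def dropout_forward_sketch(x, dropout_param):
    """Inverted dropout: p is the probability of KEEPING a unit."""
    p, mode = dropout_param['p'], dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])

    if mode == 'train':
        mask = (np.random.rand(*x.shape) < p) / p   # scale at train time ...
        out = x * mask
    else:  # 'test'
        mask = None
        out = x                                     # ... so test time is a no-op
    return out, (dropout_param, mask)

def dropout_backward_sketch(dout, cache):
    dropout_param, mask = cache
    if dropout_param['mode'] == 'train':
        return dout * mask
    return dout
```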
github_jupyter