Dataset schema (column, dtype, and observed minimum and maximum):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | sequence | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | sequence | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | sequence | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | sequence | | |
| cell_types | sequence | | |
| cell_type_groups | sequence | | |
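To make the schema concrete, here is a minimal sketch of loading and inspecting a row with the Hugging Face `datasets` library. The dataset id `user/jupyter-notebooks` is a hypothetical placeholder (the actual hub id is not given in this section), and streaming is assumed to avoid downloading the full corpus.

```python
# Minimal sketch, assuming a hub dataset with the schema above.
# "user/jupyter-notebooks" is a placeholder id, not the real one.
from datasets import load_dataset

ds = load_dataset("user/jupyter-notebooks", split="train", streaming=True)

row = next(iter(ds))
print(row["hexsha"], row["size"], row["max_stars_repo_name"])
# "cells" groups [source, output] pairs; "cell_type_groups" mirrors that grouping
print(len(row["cells"]), "cell groups;", row["cell_types"][:3])
```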
**Row 1: `analytics/analytics.ipynb` from `shawlu95/Grocery_Matter`**

| Field | Value |
| --- | --- |
| hexsha | d06d93cb84006288496ec405442609c152a3d191 |
| size | 173,440 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | analytics/analytics.ipynb |
| max_stars_repo_name | shawlu95/Grocery_Matter |
| max_stars_repo_head_hexsha | d52176312bc8cfabd9f8c194bb0ee6bcee0d255f |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2019-05-05T01:54:11.000Z |
| max_stars_repo_stars_event_max_datetime | 2019-05-05T01:54:11.000Z |
| max_issues_repo_path | analytics/analytics.ipynb |
| max_issues_repo_name | shawlu95/Grocery_Matter |
| max_issues_repo_head_hexsha | d52176312bc8cfabd9f8c194bb0ee6bcee0d255f |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | analytics/analytics.ipynb |
| max_forks_repo_name | shawlu95/Grocery_Matter |
| max_forks_repo_head_hexsha | d52176312bc8cfabd9f8c194bb0ee6bcee0d255f |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2020-10-01T10:29:22.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-10-01T10:29:22.000Z |
| avg_line_length | 148.112724 |
| max_line_length | 75,568 |
| alphanum_fraction | 0.830593 |

Notebook cells (outputs shown where the record captured them):

### Import Libraries

```python
import sys
!{sys.executable} -m pip install -r requirements.txt
```

Output: `Requirement already satisfied: mysqlclient in /Users/shawlu/anaconda3/lib/python3.5/site-packages (from -r requirements.txt (line 1)) (1.3.13)`

```python
import numpy as np
import matplotlib.pyplot as plt
from analytics import SQLClient
```

### Connect to MySQL database

```python
username = "privateuser"
password = "1234567"
port = 7777

client = SQLClient(username, password, port)
```

```python
sql_tmp = """
    SELECT
        id
        ,userID
        ,name
        ,type
        ,-priceCNY * count / 6.9 AS price
        ,count
        ,currency
        ,-priceCNY * count AS priceCNY
        ,time
    FROM items
    WHERE userID LIKE '%%shawlu%%'
    AND time BETWEEN '$start_dt$ 00:00:00' AND '$end_dt$ 00:00:00'
    AND deleted = 0
    ORDER BY time;"""
```

### Analytics: Nov. 2018

```python
start_dt = '2018-11-01'
end_dt = '2018-12-01'
df = client.query(sql_tmp.replace('$start_dt$', start_dt).replace("$end_dt$", end_dt))
```

```python
df = df.groupby(['type']).sum()
total = np.sum(df.price)
df["pct"] = df.price / total
df["category"] = client.categories
df = df.sort_values("pct")[::-1]
df
```

```python
labels = ["%s: $%.2f" % (df.category.values[i],
                         df.price.values[i]) for i in range(len(df))]

plt.figure(figsize=(8, 8))

title = "Total expense %s\n%s-%s" % ('$ {:,.2f}'.format(total), start_dt, end_dt)
plt.title(title)

_ = plt.pie(x=df.price.values,
            labels=labels,
            autopct='%1.1f%%',
            labeldistance=1.1)

centre_circle = plt.Circle((0, 0), 0.70, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)

plt.savefig("month.png")
```

### Analytics: Year of 2018

```python
start_dt = '2018-01-01'
end_dt = '2018-12-01'
df = client.query(sql_tmp.replace('$start_dt$', start_dt).replace("$end_dt$", end_dt))
```

```python
df[df.type == 'COM']
```

```python
df = df.groupby(['type']).sum()
total = np.sum(df.price)
df["pct"] = df.price / total
df["category"] = client.categories
df = df.sort_values("pct")[::-1]
df
```

```python
labels = ["%s: $%.2f" % (df.category.values[i],
                         df.price.values[i]) for i in range(len(df))]

plt.figure(figsize=(8, 8))

title = "Total expense %s\n%s-%s" % ('$ {:,.2f}'.format(total), start_dt, end_dt)
plt.title(title)

_ = plt.pie(x=df.price.values,
            labels=labels,
            autopct='%1.1f%%',
            labeldistance=1.1)

centre_circle = plt.Circle((0, 0), 0.70, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)

plt.savefig("year.png")
```

cell_types: `["markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code"]`

cell_type_groups: `[["markdown"], ["code", "code"], ["markdown"], ["code", "code"], ["markdown"], ["code", "code", "code"], ["markdown"], ["code", "code", "code", "code"]]`
**Row 2: `Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb` from `ThinkBricks/APTOS2019BlindnessDetection`**

| Field | Value |
| --- | --- |
| hexsha | d06d9c8a6195881478d55c85396fa990b6b227cb |
| size | 430,031 |
| ext | ipynb |
| lang | Jupyter Notebook |
| max_stars_repo_path | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb |
| max_stars_repo_name | ThinkBricks/APTOS2019BlindnessDetection |
| max_stars_repo_head_hexsha | e524fd69f83a1252710076c78b6a5236849cd885 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 23 |
| max_stars_repo_stars_event_min_datetime | 2019-09-08T17:19:16.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-02T16:20:09.000Z |
| max_issues_repo_path | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb |
| max_issues_repo_name | ThinkBricks/APTOS2019BlindnessDetection |
| max_issues_repo_head_hexsha | e524fd69f83a1252710076c78b6a5236849cd885 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2020-03-10T18:42:12.000Z |
| max_issues_repo_issues_event_max_datetime | 2020-09-18T22:02:38.000Z |
| max_forks_repo_path | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb |
| max_forks_repo_name | ThinkBricks/APTOS2019BlindnessDetection |
| max_forks_repo_head_hexsha | e524fd69f83a1252710076c78b6a5236849cd885 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 16 |
| max_forks_repo_forks_event_min_datetime | 2019-09-21T12:29:59.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-21T00:42:26.000Z |
| avg_line_length | 130.748252 |
| max_line_length | 69,432 |
| alphanum_fraction | 0.743058 |

Notebook cells (the record is truncated partway through the final cell's output):
[ [ [ "## Dependencies", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport cv2\nimport shutil\nimport random\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport multiprocessing as mp\nimport matplotlib.pyplot as plt\nfrom tensorflow import set_random_seed\nfrom sklearn.utils import class_weight\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, cohen_kappa_score\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.utils import to_categorical\nfrom keras import optimizers, applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler, ModelCheckpoint\n\ndef seed_everything(seed=0):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n set_random_seed(0)\n\nseed = 0\nseed_everything(seed)\n%matplotlib inline\nsns.set(style=\"whitegrid\")\nwarnings.filterwarnings(\"ignore\")\nsys.path.append(os.path.abspath('../input/efficientnet/efficientnet-master/efficientnet-master/'))\nfrom efficientnet import *", "/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be 
understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\nUsing TensorFlow backend.\n" ] ], [ [ "## Load data", "_____no_output_____" ] ], [ [ "fold_set = pd.read_csv('../input/aptos-split-oldnew/5-fold.csv')\nX_train = fold_set[fold_set['fold_2'] == 'train']\nX_val = fold_set[fold_set['fold_2'] == 'validation']\ntest = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')\n\n# Preprocecss data\ntest[\"id_code\"] = test[\"id_code\"].apply(lambda x: x + \".png\")\n\nprint('Number of train samples: ', X_train.shape[0])\nprint('Number of validation samples: ', X_val.shape[0])\nprint('Number of test samples: ', test.shape[0])\ndisplay(X_train.head())", "Number of train samples: 18697\nNumber of validation samples: 733\nNumber of test samples: 1928\n" ] ], [ [ "# Model parameters", "_____no_output_____" ] ], [ [ "# Model parameters\nmodel_path = '../working/effNetB4_img256_noBen_fold3.h5'\nFACTOR = 4\nBATCH_SIZE = 8 * FACTOR\nEPOCHS = 20\nWARMUP_EPOCHS = 5\nLEARNING_RATE = 1e-3/2 * FACTOR\nWARMUP_LEARNING_RATE = 1e-3/2 * FACTOR\nHEIGHT = 256\nWIDTH = 256\nCHANNELS = 3\nTTA_STEPS = 5\nES_PATIENCE = 5\nLR_WARMUP_EPOCHS = 5\nSTEP_SIZE = len(X_train) // BATCH_SIZE\nTOTAL_STEPS = EPOCHS * STEP_SIZE\nWARMUP_STEPS = LR_WARMUP_EPOCHS * STEP_SIZE", "_____no_output_____" ] ], [ [ "# Pre-procecess images", "_____no_output_____" ] ], [ [ "old_data_base_path = '../input/diabetic-retinopathy-resized/resized_train/resized_train/'\nnew_data_base_path = '../input/aptos2019-blindness-detection/train_images/'\ntest_base_path = '../input/aptos2019-blindness-detection/test_images/'\ntrain_dest_path = 'base_dir/train_images/'\nvalidation_dest_path = 'base_dir/validation_images/'\ntest_dest_path = 'base_dir/test_images/'\n\n# Making sure directories don't exist\nif os.path.exists(train_dest_path):\n shutil.rmtree(train_dest_path)\nif os.path.exists(validation_dest_path):\n shutil.rmtree(validation_dest_path)\nif os.path.exists(test_dest_path):\n shutil.rmtree(test_dest_path)\n \n# Creating train, validation and test directories\nos.makedirs(train_dest_path)\nos.makedirs(validation_dest_path)\nos.makedirs(test_dest_path)\n\ndef crop_image(img, tol=7):\n if img.ndim ==2:\n mask = img>tol\n return img[np.ix_(mask.any(1),mask.any(0))]\n elif img.ndim==3:\n gray_img = 
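For concreteness, plugging in the record's own sample counts: with `FACTOR = 4` the effective batch size is 32, so one epoch over the 18,697 training images is 584 steps, giving 11,680 total steps with a 2,920-step warm-up and a 0.002 peak learning rate. A quick check, derived purely from the constants above:

```python
# Derived schedule constants, computed from the record's parameters.
BATCH_SIZE = 8 * 4                 # 32
STEP_SIZE = 18697 // BATCH_SIZE    # 584 steps per epoch
TOTAL_STEPS = 20 * STEP_SIZE       # 11680
WARMUP_STEPS = 5 * STEP_SIZE       # 2920
LEARNING_RATE = 1e-3 / 2 * 4       # 0.002
print(STEP_SIZE, TOTAL_STEPS, WARMUP_STEPS, LEARNING_RATE)
```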
# Pre-process images

```python
old_data_base_path = '../input/diabetic-retinopathy-resized/resized_train/resized_train/'
new_data_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'

# Making sure directories don't exist
if os.path.exists(train_dest_path):
    shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
    shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
    shutil.rmtree(test_dest_path)

# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)

def crop_image(img, tol=7):
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    elif img.ndim == 3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img > tol
        check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
        if check_shape == 0:  # image is so dark that we would crop out everything,
            return img        # so return the original image
        else:
            img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
            img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
            img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
            img = np.stack([img1, img2, img3], axis=-1)

    return img

def circle_crop(img):
    img = crop_image(img)

    height, width, depth = img.shape
    largest_side = np.max((height, width))
    img = cv2.resize(img, (largest_side, largest_side))

    height, width, depth = img.shape

    x = width // 2
    y = height // 2
    r = np.amin((x, y))

    circle_img = np.zeros((height, width), np.uint8)
    cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
    img = cv2.bitwise_and(img, img, mask=circle_img)
    img = crop_image(img)

    return img

def preprocess_image(image_id, base_path, save_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    image = cv2.imread(base_path + image_id)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = circle_crop(image)
    image = cv2.resize(image, (HEIGHT, WIDTH))
#     image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4, 128)
    cv2.imwrite(save_path + image_id, image)

def preprocess_data(df, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    df = df.reset_index()
    for i in range(df.shape[0]):
        item = df.iloc[i]
        image_id = item['id_code']
        item_set = item['fold_2']
        item_data = item['data']
        if item_set == 'train':
            if item_data == 'new':
                preprocess_image(image_id, new_data_base_path, train_dest_path)
            if item_data == 'old':
                preprocess_image(image_id, old_data_base_path, train_dest_path)
        if item_set == 'validation':
            if item_data == 'new':
                preprocess_image(image_id, new_data_base_path, validation_dest_path)
            if item_data == 'old':
                preprocess_image(image_id, old_data_base_path, validation_dest_path)

def preprocess_test(df, base_path=test_base_path, save_path=test_dest_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
    df = df.reset_index()
    for i in range(df.shape[0]):
        image_id = df.iloc[i]['id_code']
        preprocess_image(image_id, base_path, save_path)

n_cpu = mp.cpu_count()
train_n_cnt = X_train.shape[0] // n_cpu
val_n_cnt = X_val.shape[0] // n_cpu
test_n_cnt = test.shape[0] // n_cpu

# Pre-process old data train set
pool = mp.Pool(n_cpu)
dfs = [X_train.iloc[train_n_cnt*i:train_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_train.iloc[train_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()

# Pre-process validation set
pool = mp.Pool(n_cpu)
dfs = [X_val.iloc[val_n_cnt*i:val_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_val.iloc[val_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()

# Pre-process test set
pool = mp.Pool(n_cpu)
dfs = [test.iloc[test_n_cnt*i:test_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = test.iloc[test_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_test, [x_df for x_df in dfs])
pool.close()
```
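As a quick illustration of the crop pipeline above: `crop_image` trims borders darker than the tolerance, and `circle_crop` then masks the retina to a centered disc before the final resize. The commented-out `cv2.addWeighted` line is the Ben Graham contrast-enhancement step, disabled here, which matches the `noBen` tag in `model_path`. A minimal usage sketch (`sample.png` is a placeholder path):

```python
# Illustrative only; "sample.png" is a hypothetical file name.
img = cv2.imread("sample.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the pipeline expects RGB
cropped = circle_crop(img)                   # trim dark borders, then apply circular mask
print(img.shape, "->", cropped.shape)
```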
# Data generator

```python
datagen = ImageDataGenerator(rescale=1./255,
                             rotation_range=360,
                             horizontal_flip=True,
                             vertical_flip=True)

train_generator = datagen.flow_from_dataframe(
    dataframe=X_train,
    directory=train_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)

valid_generator = datagen.flow_from_dataframe(
    dataframe=X_val,
    directory=validation_dest_path,
    x_col="id_code",
    y_col="diagnosis",
    class_mode="raw",
    batch_size=BATCH_SIZE,
    target_size=(HEIGHT, WIDTH),
    seed=seed)

test_generator = datagen.flow_from_dataframe(
    dataframe=test,
    directory=test_dest_path,
    x_col="id_code",
    batch_size=1,
    class_mode=None,
    shuffle=False,
    target_size=(HEIGHT, WIDTH),
    seed=seed)
```

Output:

```
Found 18697 validated image filenames.
Found 733 validated image filenames.
Found 1928 validated image filenames.
```

```python
def classify(x):
    if x < 0.5:
        return 0
    elif x < 1.5:
        return 1
    elif x < 2.5:
        return 2
    elif x < 3.5:
        return 3
    return 4

labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
    train_labels, train_preds = train
    validation_labels, validation_preds = validation
    fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
    train_cnf_matrix = confusion_matrix(train_labels, train_preds)
    validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)

    train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
    validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]

    train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
    validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)

    sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues", ax=ax1).set_title('Train')
    sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8), ax=ax2).set_title('Validation')
    plt.show()

def plot_metrics(history, figsize=(20, 14)):
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=figsize)

    ax1.plot(history['loss'], label='Train loss')
    ax1.plot(history['val_loss'], label='Validation loss')
    ax1.legend(loc='best')
    ax1.set_title('Loss')

    ax2.plot(history['acc'], label='Train accuracy')
    ax2.plot(history['val_acc'], label='Validation accuracy')
    ax2.legend(loc='best')
    ax2.set_title('Accuracy')

    plt.xlabel('Epochs')
    sns.despine()
    plt.show()

def apply_tta(model, generator, steps=10):
    step_size = generator.n // generator.batch_size
    preds_tta = []
    for i in range(steps):
        generator.reset()
        preds = model.predict_generator(generator, steps=step_size)
        preds_tta.append(preds)

    return np.mean(preds_tta, axis=0)

def evaluate_model(train, validation):
    train_labels, train_preds = train
    validation_labels, validation_preds = validation
    print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
    print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
    print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
```
```python
def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
    """
    Cosine decay schedule with warm-up period.
    The learning rate grows linearly from warmup_learning_rate to
    learning_rate_base over warmup_steps, then follows a cosine decay.
    :param global_step {int}: global step.
    :param learning_rate_base {float}: base learning rate.
    :param total_steps {int}: total number of training steps.
    :param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
    :param warmup_steps {int}: number of warmup steps. (default: {0}).
    :param hold_base_rate_steps {int}: optional number of steps to hold the base learning rate before decaying. (default: {0}).
    :returns: a float representing the learning rate.
    :raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
    """

    if total_steps < warmup_steps:
        raise ValueError('total_steps must be larger or equal to warmup_steps.')
    learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
        np.pi *
        (global_step - warmup_steps - hold_base_rate_steps
         ) / float(total_steps - warmup_steps - hold_base_rate_steps)))
    if hold_base_rate_steps > 0:
        learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
                                 learning_rate, learning_rate_base)
    if warmup_steps > 0:
        if learning_rate_base < warmup_learning_rate:
            raise ValueError('learning_rate_base must be larger or equal to warmup_learning_rate.')
        slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
        warmup_rate = slope * global_step + warmup_learning_rate
        learning_rate = np.where(global_step < warmup_steps, warmup_rate,
                                 learning_rate)
    return np.where(global_step > total_steps, 0.0, learning_rate)


class WarmUpCosineDecayScheduler(Callback):
    """Cosine decay with warmup learning rate scheduler."""

    def __init__(self,
                 learning_rate_base,
                 total_steps,
                 global_step_init=0,
                 warmup_learning_rate=0.0,
                 warmup_steps=0,
                 hold_base_rate_steps=0,
                 verbose=0):
        """
        Constructor for cosine decay with warmup learning rate scheduler.
        :param learning_rate_base {float}: base learning rate.
        :param total_steps {int}: total number of training steps.
        :param global_step_init {int}: initial global step, e.g. from a previous checkpoint.
        :param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
        :param warmup_steps {int}: number of warmup steps. (default: {0}).
        :param hold_base_rate_steps {int}: optional number of steps to hold the base learning rate before decaying. (default: {0}).
        :param verbose {int}: 0: quiet, 1: update messages. (default: {0}).
        """

        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        self.global_step = global_step_init
        self.warmup_learning_rate = warmup_learning_rate
        self.warmup_steps = warmup_steps
        self.hold_base_rate_steps = hold_base_rate_steps
        self.verbose = verbose
        self.learning_rates = []

    def on_batch_end(self, batch, logs=None):
        self.global_step = self.global_step + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)

    def on_batch_begin(self, batch, logs=None):
        lr = cosine_decay_with_warmup(global_step=self.global_step,
                                      learning_rate_base=self.learning_rate_base,
                                      total_steps=self.total_steps,
                                      warmup_learning_rate=self.warmup_learning_rate,
                                      warmup_steps=self.warmup_steps,
                                      hold_base_rate_steps=self.hold_base_rate_steps)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nBatch %02d: setting learning rate to %s.'
                  % (self.global_step + 1, lr))
```
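In formula form, the schedule implemented by `cosine_decay_with_warmup` (ignoring the optional hold period) is, for global step $t$, warm-up length $w$, total steps $T$, warm-up rate $\eta_w$, and base rate $\eta_b$:

$$
\eta(t) =
\begin{cases}
\eta_w + \dfrac{\eta_b - \eta_w}{w}\, t, & t < w \\[1ex]
\dfrac{\eta_b}{2}\left(1 + \cos\left(\pi \dfrac{t - w}{T - w}\right)\right), & w \le t \le T \\[1ex]
0, & t > T
\end{cases}
$$

With this record's constants ($\eta_b = 0.002$, $w = 2920$, $T = 11680$), the rate climbs linearly from 0 to 0.002 over the first five epochs, then decays along the cosine to 0 at step 11,680.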
# Model

```python
def create_model(input_shape):
    input_tensor = Input(shape=input_shape)
    base_model = EfficientNetB4(weights=None,
                                include_top=False,
                                input_tensor=input_tensor)
    base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b4_imagenet_1000_notop.h5')

    x = GlobalAveragePooling2D()(base_model.output)
    final_output = Dense(1, activation='linear', name='final_output')(x)
    model = Model(input_tensor, final_output)

    return model
```

# Train top layers

```python
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))

for layer in model.layers:
    layer.trainable = False

for i in range(-2, 0):
    model.layers[i].trainable = True

metric_list = ["accuracy"]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
```

Output (condensed): the full `model.summary()` table for EfficientNetB4, starting from the 256×256×3 input and listing the repeated MBConv stages (Conv2D, DepthwiseConv2D, BatchNormalization, Swish, and squeeze-and-excite Lambda/Multiply layers with residual Add and DropConnect blocks). The record is truncated partway through this listing.
conv2d_63[0][0] \n__________________________________________________________________________________________________\ndrop_connect_12 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_47[0][0] \n__________________________________________________________________________________________________\nadd_12 (Add) (None, 16, 16, 112) 0 drop_connect_12[0][0] \n add_11[0][0] \n__________________________________________________________________________________________________\nconv2d_64 (Conv2D) (None, 16, 16, 672) 75264 add_12[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_48 (BatchNo (None, 16, 16, 672) 2688 conv2d_64[0][0] \n__________________________________________________________________________________________________\nswish_48 (Swish) (None, 16, 16, 672) 0 batch_normalization_48[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_17 (DepthwiseC (None, 16, 16, 672) 16800 swish_48[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_49 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_17[0][0] \n__________________________________________________________________________________________________\nswish_49 (Swish) (None, 16, 16, 672) 0 batch_normalization_49[0][0] \n__________________________________________________________________________________________________\nlambda_17 (Lambda) (None, 1, 1, 672) 0 swish_49[0][0] \n__________________________________________________________________________________________________\nconv2d_65 (Conv2D) (None, 1, 1, 28) 18844 lambda_17[0][0] \n__________________________________________________________________________________________________\nswish_50 (Swish) (None, 1, 1, 28) 0 conv2d_65[0][0] \n__________________________________________________________________________________________________\nconv2d_66 (Conv2D) (None, 1, 1, 672) 19488 swish_50[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 1, 1, 672) 0 conv2d_66[0][0] \n__________________________________________________________________________________________________\nmultiply_17 (Multiply) (None, 16, 16, 672) 0 activation_17[0][0] \n swish_49[0][0] \n__________________________________________________________________________________________________\nconv2d_67 (Conv2D) (None, 16, 16, 160) 107520 multiply_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_50 (BatchNo (None, 16, 16, 160) 640 conv2d_67[0][0] \n__________________________________________________________________________________________________\nconv2d_68 (Conv2D) (None, 16, 16, 960) 153600 batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_51 (BatchNo (None, 16, 16, 960) 3840 conv2d_68[0][0] \n__________________________________________________________________________________________________\nswish_51 (Swish) (None, 16, 16, 960) 0 batch_normalization_51[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_18 (DepthwiseC (None, 16, 16, 960) 24000 swish_51[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_52 
(BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_18[0][0] \n__________________________________________________________________________________________________\nswish_52 (Swish) (None, 16, 16, 960) 0 batch_normalization_52[0][0] \n__________________________________________________________________________________________________\nlambda_18 (Lambda) (None, 1, 1, 960) 0 swish_52[0][0] \n__________________________________________________________________________________________________\nconv2d_69 (Conv2D) (None, 1, 1, 40) 38440 lambda_18[0][0] \n__________________________________________________________________________________________________\nswish_53 (Swish) (None, 1, 1, 40) 0 conv2d_69[0][0] \n__________________________________________________________________________________________________\nconv2d_70 (Conv2D) (None, 1, 1, 960) 39360 swish_53[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 1, 1, 960) 0 conv2d_70[0][0] \n__________________________________________________________________________________________________\nmultiply_18 (Multiply) (None, 16, 16, 960) 0 activation_18[0][0] \n swish_52[0][0] \n__________________________________________________________________________________________________\nconv2d_71 (Conv2D) (None, 16, 16, 160) 153600 multiply_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_53 (BatchNo (None, 16, 16, 160) 640 conv2d_71[0][0] \n__________________________________________________________________________________________________\ndrop_connect_13 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_53[0][0] \n__________________________________________________________________________________________________\nadd_13 (Add) (None, 16, 16, 160) 0 drop_connect_13[0][0] \n batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nconv2d_72 (Conv2D) (None, 16, 16, 960) 153600 add_13[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_54 (BatchNo (None, 16, 16, 960) 3840 conv2d_72[0][0] \n__________________________________________________________________________________________________\nswish_54 (Swish) (None, 16, 16, 960) 0 batch_normalization_54[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_19 (DepthwiseC (None, 16, 16, 960) 24000 swish_54[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_55 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_19[0][0] \n__________________________________________________________________________________________________\nswish_55 (Swish) (None, 16, 16, 960) 0 batch_normalization_55[0][0] \n__________________________________________________________________________________________________\nlambda_19 (Lambda) (None, 1, 1, 960) 0 swish_55[0][0] \n__________________________________________________________________________________________________\nconv2d_73 (Conv2D) (None, 1, 1, 40) 38440 lambda_19[0][0] \n__________________________________________________________________________________________________\nswish_56 (Swish) (None, 1, 1, 40) 0 conv2d_73[0][0] \n__________________________________________________________________________________________________\nconv2d_74 (Conv2D) 
(None, 1, 1, 960) 39360 swish_56[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 1, 1, 960) 0 conv2d_74[0][0] \n__________________________________________________________________________________________________\nmultiply_19 (Multiply) (None, 16, 16, 960) 0 activation_19[0][0] \n swish_55[0][0] \n__________________________________________________________________________________________________\nconv2d_75 (Conv2D) (None, 16, 16, 160) 153600 multiply_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_56 (BatchNo (None, 16, 16, 160) 640 conv2d_75[0][0] \n__________________________________________________________________________________________________\ndrop_connect_14 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_56[0][0] \n__________________________________________________________________________________________________\nadd_14 (Add) (None, 16, 16, 160) 0 drop_connect_14[0][0] \n add_13[0][0] \n__________________________________________________________________________________________________\nconv2d_76 (Conv2D) (None, 16, 16, 960) 153600 add_14[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_57 (BatchNo (None, 16, 16, 960) 3840 conv2d_76[0][0] \n__________________________________________________________________________________________________\nswish_57 (Swish) (None, 16, 16, 960) 0 batch_normalization_57[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_20 (DepthwiseC (None, 16, 16, 960) 24000 swish_57[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_58 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_20[0][0] \n__________________________________________________________________________________________________\nswish_58 (Swish) (None, 16, 16, 960) 0 batch_normalization_58[0][0] \n__________________________________________________________________________________________________\nlambda_20 (Lambda) (None, 1, 1, 960) 0 swish_58[0][0] \n__________________________________________________________________________________________________\nconv2d_77 (Conv2D) (None, 1, 1, 40) 38440 lambda_20[0][0] \n__________________________________________________________________________________________________\nswish_59 (Swish) (None, 1, 1, 40) 0 conv2d_77[0][0] \n__________________________________________________________________________________________________\nconv2d_78 (Conv2D) (None, 1, 1, 960) 39360 swish_59[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 1, 1, 960) 0 conv2d_78[0][0] \n__________________________________________________________________________________________________\nmultiply_20 (Multiply) (None, 16, 16, 960) 0 activation_20[0][0] \n swish_58[0][0] \n__________________________________________________________________________________________________\nconv2d_79 (Conv2D) (None, 16, 16, 160) 153600 multiply_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_59 (BatchNo (None, 16, 16, 160) 640 conv2d_79[0][0] 
\n__________________________________________________________________________________________________\ndrop_connect_15 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_59[0][0] \n__________________________________________________________________________________________________\nadd_15 (Add) (None, 16, 16, 160) 0 drop_connect_15[0][0] \n add_14[0][0] \n__________________________________________________________________________________________________\nconv2d_80 (Conv2D) (None, 16, 16, 960) 153600 add_15[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_60 (BatchNo (None, 16, 16, 960) 3840 conv2d_80[0][0] \n__________________________________________________________________________________________________\nswish_60 (Swish) (None, 16, 16, 960) 0 batch_normalization_60[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_21 (DepthwiseC (None, 16, 16, 960) 24000 swish_60[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_61 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_21[0][0] \n__________________________________________________________________________________________________\nswish_61 (Swish) (None, 16, 16, 960) 0 batch_normalization_61[0][0] \n__________________________________________________________________________________________________\nlambda_21 (Lambda) (None, 1, 1, 960) 0 swish_61[0][0] \n__________________________________________________________________________________________________\nconv2d_81 (Conv2D) (None, 1, 1, 40) 38440 lambda_21[0][0] \n__________________________________________________________________________________________________\nswish_62 (Swish) (None, 1, 1, 40) 0 conv2d_81[0][0] \n__________________________________________________________________________________________________\nconv2d_82 (Conv2D) (None, 1, 1, 960) 39360 swish_62[0][0] \n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 1, 1, 960) 0 conv2d_82[0][0] \n__________________________________________________________________________________________________\nmultiply_21 (Multiply) (None, 16, 16, 960) 0 activation_21[0][0] \n swish_61[0][0] \n__________________________________________________________________________________________________\nconv2d_83 (Conv2D) (None, 16, 16, 160) 153600 multiply_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_62 (BatchNo (None, 16, 16, 160) 640 conv2d_83[0][0] \n__________________________________________________________________________________________________\ndrop_connect_16 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_62[0][0] \n__________________________________________________________________________________________________\nadd_16 (Add) (None, 16, 16, 160) 0 drop_connect_16[0][0] \n add_15[0][0] \n__________________________________________________________________________________________________\nconv2d_84 (Conv2D) (None, 16, 16, 960) 153600 add_16[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_63 (BatchNo (None, 16, 16, 960) 3840 conv2d_84[0][0] \n__________________________________________________________________________________________________\nswish_63 (Swish) (None, 16, 16, 960) 0 
batch_normalization_63[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_22 (DepthwiseC (None, 16, 16, 960) 24000 swish_63[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_64 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_22[0][0] \n__________________________________________________________________________________________________\nswish_64 (Swish) (None, 16, 16, 960) 0 batch_normalization_64[0][0] \n__________________________________________________________________________________________________\nlambda_22 (Lambda) (None, 1, 1, 960) 0 swish_64[0][0] \n__________________________________________________________________________________________________\nconv2d_85 (Conv2D) (None, 1, 1, 40) 38440 lambda_22[0][0] \n__________________________________________________________________________________________________\nswish_65 (Swish) (None, 1, 1, 40) 0 conv2d_85[0][0] \n__________________________________________________________________________________________________\nconv2d_86 (Conv2D) (None, 1, 1, 960) 39360 swish_65[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 1, 1, 960) 0 conv2d_86[0][0] \n__________________________________________________________________________________________________\nmultiply_22 (Multiply) (None, 16, 16, 960) 0 activation_22[0][0] \n swish_64[0][0] \n__________________________________________________________________________________________________\nconv2d_87 (Conv2D) (None, 16, 16, 160) 153600 multiply_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_65 (BatchNo (None, 16, 16, 160) 640 conv2d_87[0][0] \n__________________________________________________________________________________________________\ndrop_connect_17 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_65[0][0] \n__________________________________________________________________________________________________\nadd_17 (Add) (None, 16, 16, 160) 0 drop_connect_17[0][0] \n add_16[0][0] \n__________________________________________________________________________________________________\nconv2d_88 (Conv2D) (None, 16, 16, 960) 153600 add_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_66 (BatchNo (None, 16, 16, 960) 3840 conv2d_88[0][0] \n__________________________________________________________________________________________________\nswish_66 (Swish) (None, 16, 16, 960) 0 batch_normalization_66[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_23 (DepthwiseC (None, 8, 8, 960) 24000 swish_66[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_67 (BatchNo (None, 8, 8, 960) 3840 depthwise_conv2d_23[0][0] \n__________________________________________________________________________________________________\nswish_67 (Swish) (None, 8, 8, 960) 0 batch_normalization_67[0][0] \n__________________________________________________________________________________________________\nlambda_23 (Lambda) (None, 1, 1, 960) 0 swish_67[0][0] \n__________________________________________________________________________________________________\nconv2d_89 (Conv2D) (None, 1, 
1, 40) 38440 lambda_23[0][0] \n__________________________________________________________________________________________________\nswish_68 (Swish) (None, 1, 1, 40) 0 conv2d_89[0][0] \n__________________________________________________________________________________________________\nconv2d_90 (Conv2D) (None, 1, 1, 960) 39360 swish_68[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 1, 1, 960) 0 conv2d_90[0][0] \n__________________________________________________________________________________________________\nmultiply_23 (Multiply) (None, 8, 8, 960) 0 activation_23[0][0] \n swish_67[0][0] \n__________________________________________________________________________________________________\nconv2d_91 (Conv2D) (None, 8, 8, 272) 261120 multiply_23[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_68 (BatchNo (None, 8, 8, 272) 1088 conv2d_91[0][0] \n__________________________________________________________________________________________________\nconv2d_92 (Conv2D) (None, 8, 8, 1632) 443904 batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_69 (BatchNo (None, 8, 8, 1632) 6528 conv2d_92[0][0] \n__________________________________________________________________________________________________\nswish_69 (Swish) (None, 8, 8, 1632) 0 batch_normalization_69[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_24 (DepthwiseC (None, 8, 8, 1632) 40800 swish_69[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_70 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_24[0][0] \n__________________________________________________________________________________________________\nswish_70 (Swish) (None, 8, 8, 1632) 0 batch_normalization_70[0][0] \n__________________________________________________________________________________________________\nlambda_24 (Lambda) (None, 1, 1, 1632) 0 swish_70[0][0] \n__________________________________________________________________________________________________\nconv2d_93 (Conv2D) (None, 1, 1, 68) 111044 lambda_24[0][0] \n__________________________________________________________________________________________________\nswish_71 (Swish) (None, 1, 1, 68) 0 conv2d_93[0][0] \n__________________________________________________________________________________________________\nconv2d_94 (Conv2D) (None, 1, 1, 1632) 112608 swish_71[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 1, 1, 1632) 0 conv2d_94[0][0] \n__________________________________________________________________________________________________\nmultiply_24 (Multiply) (None, 8, 8, 1632) 0 activation_24[0][0] \n swish_70[0][0] \n__________________________________________________________________________________________________\nconv2d_95 (Conv2D) (None, 8, 8, 272) 443904 multiply_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_71 (BatchNo (None, 8, 8, 272) 1088 conv2d_95[0][0] \n__________________________________________________________________________________________________\ndrop_connect_18 (DropConnect) (None, 8, 8, 272) 0 
batch_normalization_71[0][0] \n__________________________________________________________________________________________________\nadd_18 (Add) (None, 8, 8, 272) 0 drop_connect_18[0][0] \n batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nconv2d_96 (Conv2D) (None, 8, 8, 1632) 443904 add_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_72 (BatchNo (None, 8, 8, 1632) 6528 conv2d_96[0][0] \n__________________________________________________________________________________________________\nswish_72 (Swish) (None, 8, 8, 1632) 0 batch_normalization_72[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_25 (DepthwiseC (None, 8, 8, 1632) 40800 swish_72[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_73 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_25[0][0] \n__________________________________________________________________________________________________\nswish_73 (Swish) (None, 8, 8, 1632) 0 batch_normalization_73[0][0] \n__________________________________________________________________________________________________\nlambda_25 (Lambda) (None, 1, 1, 1632) 0 swish_73[0][0] \n__________________________________________________________________________________________________\nconv2d_97 (Conv2D) (None, 1, 1, 68) 111044 lambda_25[0][0] \n__________________________________________________________________________________________________\nswish_74 (Swish) (None, 1, 1, 68) 0 conv2d_97[0][0] \n__________________________________________________________________________________________________\nconv2d_98 (Conv2D) (None, 1, 1, 1632) 112608 swish_74[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 1, 1, 1632) 0 conv2d_98[0][0] \n__________________________________________________________________________________________________\nmultiply_25 (Multiply) (None, 8, 8, 1632) 0 activation_25[0][0] \n swish_73[0][0] \n__________________________________________________________________________________________________\nconv2d_99 (Conv2D) (None, 8, 8, 272) 443904 multiply_25[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_74 (BatchNo (None, 8, 8, 272) 1088 conv2d_99[0][0] \n__________________________________________________________________________________________________\ndrop_connect_19 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_74[0][0] \n__________________________________________________________________________________________________\nadd_19 (Add) (None, 8, 8, 272) 0 drop_connect_19[0][0] \n add_18[0][0] \n__________________________________________________________________________________________________\nconv2d_100 (Conv2D) (None, 8, 8, 1632) 443904 add_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_75 (BatchNo (None, 8, 8, 1632) 6528 conv2d_100[0][0] \n__________________________________________________________________________________________________\nswish_75 (Swish) (None, 8, 8, 1632) 0 batch_normalization_75[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_26 
(DepthwiseC (None, 8, 8, 1632) 40800 swish_75[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_76 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_26[0][0] \n__________________________________________________________________________________________________\nswish_76 (Swish) (None, 8, 8, 1632) 0 batch_normalization_76[0][0] \n__________________________________________________________________________________________________\nlambda_26 (Lambda) (None, 1, 1, 1632) 0 swish_76[0][0] \n__________________________________________________________________________________________________\nconv2d_101 (Conv2D) (None, 1, 1, 68) 111044 lambda_26[0][0] \n__________________________________________________________________________________________________\nswish_77 (Swish) (None, 1, 1, 68) 0 conv2d_101[0][0] \n__________________________________________________________________________________________________\nconv2d_102 (Conv2D) (None, 1, 1, 1632) 112608 swish_77[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 1, 1, 1632) 0 conv2d_102[0][0] \n__________________________________________________________________________________________________\nmultiply_26 (Multiply) (None, 8, 8, 1632) 0 activation_26[0][0] \n swish_76[0][0] \n__________________________________________________________________________________________________\nconv2d_103 (Conv2D) (None, 8, 8, 272) 443904 multiply_26[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_77 (BatchNo (None, 8, 8, 272) 1088 conv2d_103[0][0] \n__________________________________________________________________________________________________\ndrop_connect_20 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_77[0][0] \n__________________________________________________________________________________________________\nadd_20 (Add) (None, 8, 8, 272) 0 drop_connect_20[0][0] \n add_19[0][0] \n__________________________________________________________________________________________________\nconv2d_104 (Conv2D) (None, 8, 8, 1632) 443904 add_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_78 (BatchNo (None, 8, 8, 1632) 6528 conv2d_104[0][0] \n__________________________________________________________________________________________________\nswish_78 (Swish) (None, 8, 8, 1632) 0 batch_normalization_78[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_27 (DepthwiseC (None, 8, 8, 1632) 40800 swish_78[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_79 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_27[0][0] \n__________________________________________________________________________________________________\nswish_79 (Swish) (None, 8, 8, 1632) 0 batch_normalization_79[0][0] \n__________________________________________________________________________________________________\nlambda_27 (Lambda) (None, 1, 1, 1632) 0 swish_79[0][0] \n__________________________________________________________________________________________________\nconv2d_105 (Conv2D) (None, 1, 1, 68) 111044 lambda_27[0][0] \n__________________________________________________________________________________________________\nswish_80 
(Swish) (None, 1, 1, 68) 0 conv2d_105[0][0] \n__________________________________________________________________________________________________\nconv2d_106 (Conv2D) (None, 1, 1, 1632) 112608 swish_80[0][0] \n__________________________________________________________________________________________________\nactivation_27 (Activation) (None, 1, 1, 1632) 0 conv2d_106[0][0] \n__________________________________________________________________________________________________\nmultiply_27 (Multiply) (None, 8, 8, 1632) 0 activation_27[0][0] \n swish_79[0][0] \n__________________________________________________________________________________________________\nconv2d_107 (Conv2D) (None, 8, 8, 272) 443904 multiply_27[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_80 (BatchNo (None, 8, 8, 272) 1088 conv2d_107[0][0] \n__________________________________________________________________________________________________\ndrop_connect_21 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_80[0][0] \n__________________________________________________________________________________________________\nadd_21 (Add) (None, 8, 8, 272) 0 drop_connect_21[0][0] \n add_20[0][0] \n__________________________________________________________________________________________________\nconv2d_108 (Conv2D) (None, 8, 8, 1632) 443904 add_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_81 (BatchNo (None, 8, 8, 1632) 6528 conv2d_108[0][0] \n__________________________________________________________________________________________________\nswish_81 (Swish) (None, 8, 8, 1632) 0 batch_normalization_81[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_28 (DepthwiseC (None, 8, 8, 1632) 40800 swish_81[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_82 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_28[0][0] \n__________________________________________________________________________________________________\nswish_82 (Swish) (None, 8, 8, 1632) 0 batch_normalization_82[0][0] \n__________________________________________________________________________________________________\nlambda_28 (Lambda) (None, 1, 1, 1632) 0 swish_82[0][0] \n__________________________________________________________________________________________________\nconv2d_109 (Conv2D) (None, 1, 1, 68) 111044 lambda_28[0][0] \n__________________________________________________________________________________________________\nswish_83 (Swish) (None, 1, 1, 68) 0 conv2d_109[0][0] \n__________________________________________________________________________________________________\nconv2d_110 (Conv2D) (None, 1, 1, 1632) 112608 swish_83[0][0] \n__________________________________________________________________________________________________\nactivation_28 (Activation) (None, 1, 1, 1632) 0 conv2d_110[0][0] \n__________________________________________________________________________________________________\nmultiply_28 (Multiply) (None, 8, 8, 1632) 0 activation_28[0][0] \n swish_82[0][0] \n__________________________________________________________________________________________________\nconv2d_111 (Conv2D) (None, 8, 8, 272) 443904 multiply_28[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_83 (BatchNo (None, 8, 8, 272) 1088 conv2d_111[0][0] \n__________________________________________________________________________________________________\ndrop_connect_22 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_83[0][0] \n__________________________________________________________________________________________________\nadd_22 (Add) (None, 8, 8, 272) 0 drop_connect_22[0][0] \n add_21[0][0] \n__________________________________________________________________________________________________\nconv2d_112 (Conv2D) (None, 8, 8, 1632) 443904 add_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_84 (BatchNo (None, 8, 8, 1632) 6528 conv2d_112[0][0] \n__________________________________________________________________________________________________\nswish_84 (Swish) (None, 8, 8, 1632) 0 batch_normalization_84[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_29 (DepthwiseC (None, 8, 8, 1632) 40800 swish_84[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_85 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_29[0][0] \n__________________________________________________________________________________________________\nswish_85 (Swish) (None, 8, 8, 1632) 0 batch_normalization_85[0][0] \n__________________________________________________________________________________________________\nlambda_29 (Lambda) (None, 1, 1, 1632) 0 swish_85[0][0] \n__________________________________________________________________________________________________\nconv2d_113 (Conv2D) (None, 1, 1, 68) 111044 lambda_29[0][0] \n__________________________________________________________________________________________________\nswish_86 (Swish) (None, 1, 1, 68) 0 conv2d_113[0][0] \n__________________________________________________________________________________________________\nconv2d_114 (Conv2D) (None, 1, 1, 1632) 112608 swish_86[0][0] \n__________________________________________________________________________________________________\nactivation_29 (Activation) (None, 1, 1, 1632) 0 conv2d_114[0][0] \n__________________________________________________________________________________________________\nmultiply_29 (Multiply) (None, 8, 8, 1632) 0 activation_29[0][0] \n swish_85[0][0] \n__________________________________________________________________________________________________\nconv2d_115 (Conv2D) (None, 8, 8, 272) 443904 multiply_29[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_86 (BatchNo (None, 8, 8, 272) 1088 conv2d_115[0][0] \n__________________________________________________________________________________________________\ndrop_connect_23 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_86[0][0] \n__________________________________________________________________________________________________\nadd_23 (Add) (None, 8, 8, 272) 0 drop_connect_23[0][0] \n add_22[0][0] \n__________________________________________________________________________________________________\nconv2d_116 (Conv2D) (None, 8, 8, 1632) 443904 add_23[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_87 (BatchNo (None, 8, 8, 1632) 
6528 conv2d_116[0][0] \n__________________________________________________________________________________________________\nswish_87 (Swish) (None, 8, 8, 1632) 0 batch_normalization_87[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_30 (DepthwiseC (None, 8, 8, 1632) 40800 swish_87[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_88 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_30[0][0] \n__________________________________________________________________________________________________\nswish_88 (Swish) (None, 8, 8, 1632) 0 batch_normalization_88[0][0] \n__________________________________________________________________________________________________\nlambda_30 (Lambda) (None, 1, 1, 1632) 0 swish_88[0][0] \n__________________________________________________________________________________________________\nconv2d_117 (Conv2D) (None, 1, 1, 68) 111044 lambda_30[0][0] \n__________________________________________________________________________________________________\nswish_89 (Swish) (None, 1, 1, 68) 0 conv2d_117[0][0] \n__________________________________________________________________________________________________\nconv2d_118 (Conv2D) (None, 1, 1, 1632) 112608 swish_89[0][0] \n__________________________________________________________________________________________________\nactivation_30 (Activation) (None, 1, 1, 1632) 0 conv2d_118[0][0] \n__________________________________________________________________________________________________\nmultiply_30 (Multiply) (None, 8, 8, 1632) 0 activation_30[0][0] \n swish_88[0][0] \n__________________________________________________________________________________________________\nconv2d_119 (Conv2D) (None, 8, 8, 272) 443904 multiply_30[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_89 (BatchNo (None, 8, 8, 272) 1088 conv2d_119[0][0] \n__________________________________________________________________________________________________\ndrop_connect_24 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_89[0][0] \n__________________________________________________________________________________________________\nadd_24 (Add) (None, 8, 8, 272) 0 drop_connect_24[0][0] \n add_23[0][0] \n__________________________________________________________________________________________________\nconv2d_120 (Conv2D) (None, 8, 8, 1632) 443904 add_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_90 (BatchNo (None, 8, 8, 1632) 6528 conv2d_120[0][0] \n__________________________________________________________________________________________________\nswish_90 (Swish) (None, 8, 8, 1632) 0 batch_normalization_90[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_31 (DepthwiseC (None, 8, 8, 1632) 14688 swish_90[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_91 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_31[0][0] \n__________________________________________________________________________________________________\nswish_91 (Swish) (None, 8, 8, 1632) 0 batch_normalization_91[0][0] \n__________________________________________________________________________________________________\nlambda_31 (Lambda) 
(None, 1, 1, 1632) 0 swish_91[0][0] \n__________________________________________________________________________________________________\nconv2d_121 (Conv2D) (None, 1, 1, 68) 111044 lambda_31[0][0] \n__________________________________________________________________________________________________\nswish_92 (Swish) (None, 1, 1, 68) 0 conv2d_121[0][0] \n__________________________________________________________________________________________________\nconv2d_122 (Conv2D) (None, 1, 1, 1632) 112608 swish_92[0][0] \n__________________________________________________________________________________________________\nactivation_31 (Activation) (None, 1, 1, 1632) 0 conv2d_122[0][0] \n__________________________________________________________________________________________________\nmultiply_31 (Multiply) (None, 8, 8, 1632) 0 activation_31[0][0] \n swish_91[0][0] \n__________________________________________________________________________________________________\nconv2d_123 (Conv2D) (None, 8, 8, 448) 731136 multiply_31[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_92 (BatchNo (None, 8, 8, 448) 1792 conv2d_123[0][0] \n__________________________________________________________________________________________________\nconv2d_124 (Conv2D) (None, 8, 8, 2688) 1204224 batch_normalization_92[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_93 (BatchNo (None, 8, 8, 2688) 10752 conv2d_124[0][0] \n__________________________________________________________________________________________________\nswish_93 (Swish) (None, 8, 8, 2688) 0 batch_normalization_93[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_32 (DepthwiseC (None, 8, 8, 2688) 24192 swish_93[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_94 (BatchNo (None, 8, 8, 2688) 10752 depthwise_conv2d_32[0][0] \n__________________________________________________________________________________________________\nswish_94 (Swish) (None, 8, 8, 2688) 0 batch_normalization_94[0][0] \n__________________________________________________________________________________________________\nlambda_32 (Lambda) (None, 1, 1, 2688) 0 swish_94[0][0] \n__________________________________________________________________________________________________\nconv2d_125 (Conv2D) (None, 1, 1, 112) 301168 lambda_32[0][0] \n__________________________________________________________________________________________________\nswish_95 (Swish) (None, 1, 1, 112) 0 conv2d_125[0][0] \n__________________________________________________________________________________________________\nconv2d_126 (Conv2D) (None, 1, 1, 2688) 303744 swish_95[0][0] \n__________________________________________________________________________________________________\nactivation_32 (Activation) (None, 1, 1, 2688) 0 conv2d_126[0][0] \n__________________________________________________________________________________________________\nmultiply_32 (Multiply) (None, 8, 8, 2688) 0 activation_32[0][0] \n swish_94[0][0] \n__________________________________________________________________________________________________\nconv2d_127 (Conv2D) (None, 8, 8, 448) 1204224 multiply_32[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_95 (BatchNo (None, 
conv2d_128 (Conv2D)             (None, 8, 8, 1792)   802816      add_25[0][0]
__________________________________________________________________________________________________
batch_normalization_96 (BatchNo (None, 8, 8, 1792)   7168        conv2d_128[0][0]
__________________________________________________________________________________________________
swish_96 (Swish)                (None, 8, 8, 1792)   0           batch_normalization_96[0][0]
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 1792)         0           swish_96[0][0]
__________________________________________________________________________________________________
final_output (Dense)            (None, 1)            1793        global_average_pooling2d_1[0][0]
==================================================================================================
Total params: 17,675,609
Trainable params: 1,793
Non-trainable params: 17,673,816
__________________________________________________________________________________________________

STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size

# Warm-up phase: the backbone is still frozen, so only the 1,793
# parameters of the final Dense head are updated for WARMUP_EPOCHS epochs.
history_warmup = model.fit_generator(generator=train_generator,
                                     steps_per_epoch=STEP_SIZE_TRAIN,
                                     validation_data=valid_generator,
                                     validation_steps=STEP_SIZE_VALID,
                                     epochs=WARMUP_EPOCHS,
                                     verbose=2).history

Epoch 1/5
 - 282s - loss: 1.2568 - acc: 0.3093 - val_loss: 1.4649 - val_acc: 0.4148
Epoch 2/5
 - 269s - loss: 1.0960 - acc: 0.3228 - val_loss: 1.2900 - val_acc: 0.2810
Epoch 3/5
 - 267s - loss: 1.0743 - acc: 0.3280 - val_loss: 1.3244 - val_acc: 0.2967
Epoch 4/5
 - 266s - loss: 1.0808 - acc: 0.3231 - val_loss: 1.2172 - val_acc: 0.3338
Epoch 5/5
 - 267s - loss: 1.0612 - acc: 0.3245 - val_loss: 1.1476 - val_acc: 0.2853

# Fine-tune the model

# Unfreeze every layer so the pretrained backbone is updated together
# with the head during fine-tuning.
for layer in model.layers:
    layer.trainable = True

# Keep only the weights with the best validation loss, and stop early
# if val_loss does not improve for ES_PATIENCE epochs.
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min',
                             save_best_only=True, save_weights_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE,
                   restore_best_weights=True, verbose=1)

# Cosine learning-rate decay with a linear warm-up, holding the base
# rate for three epochs' worth of batches before the decay begins.
cosine_lr = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE,
                                       total_steps=TOTAL_STEPS,
                                       warmup_learning_rate=0.0,
                                       warmup_steps=WARMUP_STEPS,
                                       hold_base_rate_steps=(3 * STEP_SIZE))

callback_list = [checkpoint, es, cosine_lr]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
# The single linear output unit is trained as a regression head, hence
# the mean-squared-error loss.
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()

__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to
==================================================================================================
input_1 (InputLayer)            (None, 256, 256, 3)  0
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 128, 128, 48) 1296        input_1[0][0]
... [output truncated: this second model.summary() repeats the layer listing above unchanged --
unfreezing alters no shapes or per-layer parameter counts, only the trainable/non-trainable split
(the BatchNormalization moving statistics stay non-trainable).] ...
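The WarmUpCosineDecayScheduler used above is a custom callback defined earlier in the kernel; its body is not shown in this section. As a rough, non-authoritative sketch of what such a callback typically does (the class below is an illustration written for Keras 2.x, not the kernel's actual implementation; step counts are assumed to be in batches):

import numpy as np
from keras import backend as K
from keras.callbacks import Callback

class WarmUpCosineDecayScheduler(Callback):
    # Illustrative sketch: linear warm-up, optional hold at the base
    # rate, then cosine decay to zero over the remaining steps.
    def __init__(self, learning_rate_base, total_steps,
                 warmup_learning_rate=0.0, warmup_steps=0,
                 hold_base_rate_steps=0):
        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        self.warmup_learning_rate = warmup_learning_rate
        self.warmup_steps = warmup_steps
        self.hold_base_rate_steps = hold_base_rate_steps
        self.global_step = 0

    def on_batch_begin(self, batch, logs=None):
        # Set the optimizer's learning rate for the coming batch.
        K.set_value(self.model.optimizer.lr, self.schedule(self.global_step))

    def on_batch_end(self, batch, logs=None):
        self.global_step += 1

    def schedule(self, step):
        if step < self.warmup_steps:
            # Linear ramp from warmup_learning_rate up to learning_rate_base.
            slope = (self.learning_rate_base - self.warmup_learning_rate) / self.warmup_steps
            return self.warmup_learning_rate + slope * step
        if step < self.warmup_steps + self.hold_base_rate_steps:
            return self.learning_rate_base  # hold phase
        # Cosine decay over whatever training steps remain.
        decay_steps = max(1, self.total_steps - self.warmup_steps - self.hold_base_rate_steps)
        progress = min(1.0, float(step - self.warmup_steps - self.hold_base_rate_steps) / decay_steps)
        return 0.5 * self.learning_rate_base * (1.0 + np.cos(np.pi * progress))

With hold_base_rate_steps=(3 * STEP_SIZE) as in the cell above, a scheduler of this shape would keep the rate at LEARNING_RATE for roughly three epochs after the warm-up before the cosine decay begins, which protects the freshly unfrozen pretrained weights early in fine-tuning.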
\n__________________________________________________________________________________________________\nconv2d_23 (Conv2D) (None, 64, 64, 32) 6144 multiply_6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_17 (BatchNo (None, 64, 64, 32) 128 conv2d_23[0][0] \n__________________________________________________________________________________________________\ndrop_connect_4 (DropConnect) (None, 64, 64, 32) 0 batch_normalization_17[0][0] \n__________________________________________________________________________________________________\nadd_4 (Add) (None, 64, 64, 32) 0 drop_connect_4[0][0] \n add_3[0][0] \n__________________________________________________________________________________________________\nconv2d_24 (Conv2D) (None, 64, 64, 192) 6144 add_4[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_18 (BatchNo (None, 64, 64, 192) 768 conv2d_24[0][0] \n__________________________________________________________________________________________________\nswish_18 (Swish) (None, 64, 64, 192) 0 batch_normalization_18[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_7 (DepthwiseCo (None, 32, 32, 192) 4800 swish_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_19 (BatchNo (None, 32, 32, 192) 768 depthwise_conv2d_7[0][0] \n__________________________________________________________________________________________________\nswish_19 (Swish) (None, 32, 32, 192) 0 batch_normalization_19[0][0] \n__________________________________________________________________________________________________\nlambda_7 (Lambda) (None, 1, 1, 192) 0 swish_19[0][0] \n__________________________________________________________________________________________________\nconv2d_25 (Conv2D) (None, 1, 1, 8) 1544 lambda_7[0][0] \n__________________________________________________________________________________________________\nswish_20 (Swish) (None, 1, 1, 8) 0 conv2d_25[0][0] \n__________________________________________________________________________________________________\nconv2d_26 (Conv2D) (None, 1, 1, 192) 1728 swish_20[0][0] \n__________________________________________________________________________________________________\nactivation_7 (Activation) (None, 1, 1, 192) 0 conv2d_26[0][0] \n__________________________________________________________________________________________________\nmultiply_7 (Multiply) (None, 32, 32, 192) 0 activation_7[0][0] \n swish_19[0][0] \n__________________________________________________________________________________________________\nconv2d_27 (Conv2D) (None, 32, 32, 56) 10752 multiply_7[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_20 (BatchNo (None, 32, 32, 56) 224 conv2d_27[0][0] \n__________________________________________________________________________________________________\nconv2d_28 (Conv2D) (None, 32, 32, 336) 18816 batch_normalization_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_21 (BatchNo (None, 32, 32, 336) 1344 conv2d_28[0][0] \n__________________________________________________________________________________________________\nswish_21 (Swish) (None, 32, 32, 336) 0 batch_normalization_21[0][0] 
\n__________________________________________________________________________________________________\ndepthwise_conv2d_8 (DepthwiseCo (None, 32, 32, 336) 8400 swish_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_22 (BatchNo (None, 32, 32, 336) 1344 depthwise_conv2d_8[0][0] \n__________________________________________________________________________________________________\nswish_22 (Swish) (None, 32, 32, 336) 0 batch_normalization_22[0][0] \n__________________________________________________________________________________________________\nlambda_8 (Lambda) (None, 1, 1, 336) 0 swish_22[0][0] \n__________________________________________________________________________________________________\nconv2d_29 (Conv2D) (None, 1, 1, 14) 4718 lambda_8[0][0] \n__________________________________________________________________________________________________\nswish_23 (Swish) (None, 1, 1, 14) 0 conv2d_29[0][0] \n__________________________________________________________________________________________________\nconv2d_30 (Conv2D) (None, 1, 1, 336) 5040 swish_23[0][0] \n__________________________________________________________________________________________________\nactivation_8 (Activation) (None, 1, 1, 336) 0 conv2d_30[0][0] \n__________________________________________________________________________________________________\nmultiply_8 (Multiply) (None, 32, 32, 336) 0 activation_8[0][0] \n swish_22[0][0] \n__________________________________________________________________________________________________\nconv2d_31 (Conv2D) (None, 32, 32, 56) 18816 multiply_8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_23 (BatchNo (None, 32, 32, 56) 224 conv2d_31[0][0] \n__________________________________________________________________________________________________\ndrop_connect_5 (DropConnect) (None, 32, 32, 56) 0 batch_normalization_23[0][0] \n__________________________________________________________________________________________________\nadd_5 (Add) (None, 32, 32, 56) 0 drop_connect_5[0][0] \n batch_normalization_20[0][0] \n__________________________________________________________________________________________________\nconv2d_32 (Conv2D) (None, 32, 32, 336) 18816 add_5[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_24 (BatchNo (None, 32, 32, 336) 1344 conv2d_32[0][0] \n__________________________________________________________________________________________________\nswish_24 (Swish) (None, 32, 32, 336) 0 batch_normalization_24[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_9 (DepthwiseCo (None, 32, 32, 336) 8400 swish_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_25 (BatchNo (None, 32, 32, 336) 1344 depthwise_conv2d_9[0][0] \n__________________________________________________________________________________________________\nswish_25 (Swish) (None, 32, 32, 336) 0 batch_normalization_25[0][0] \n__________________________________________________________________________________________________\nlambda_9 (Lambda) (None, 1, 1, 336) 0 swish_25[0][0] \n__________________________________________________________________________________________________\nconv2d_33 (Conv2D) (None, 1, 1, 14) 4718 lambda_9[0][0] 
\n__________________________________________________________________________________________________\nswish_26 (Swish) (None, 1, 1, 14) 0 conv2d_33[0][0] \n__________________________________________________________________________________________________\nconv2d_34 (Conv2D) (None, 1, 1, 336) 5040 swish_26[0][0] \n__________________________________________________________________________________________________\nactivation_9 (Activation) (None, 1, 1, 336) 0 conv2d_34[0][0] \n__________________________________________________________________________________________________\nmultiply_9 (Multiply) (None, 32, 32, 336) 0 activation_9[0][0] \n swish_25[0][0] \n__________________________________________________________________________________________________\nconv2d_35 (Conv2D) (None, 32, 32, 56) 18816 multiply_9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_26 (BatchNo (None, 32, 32, 56) 224 conv2d_35[0][0] \n__________________________________________________________________________________________________\ndrop_connect_6 (DropConnect) (None, 32, 32, 56) 0 batch_normalization_26[0][0] \n__________________________________________________________________________________________________\nadd_6 (Add) (None, 32, 32, 56) 0 drop_connect_6[0][0] \n add_5[0][0] \n__________________________________________________________________________________________________\nconv2d_36 (Conv2D) (None, 32, 32, 336) 18816 add_6[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_27 (BatchNo (None, 32, 32, 336) 1344 conv2d_36[0][0] \n__________________________________________________________________________________________________\nswish_27 (Swish) (None, 32, 32, 336) 0 batch_normalization_27[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_10 (DepthwiseC (None, 32, 32, 336) 8400 swish_27[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_28 (BatchNo (None, 32, 32, 336) 1344 depthwise_conv2d_10[0][0] \n__________________________________________________________________________________________________\nswish_28 (Swish) (None, 32, 32, 336) 0 batch_normalization_28[0][0] \n__________________________________________________________________________________________________\nlambda_10 (Lambda) (None, 1, 1, 336) 0 swish_28[0][0] \n__________________________________________________________________________________________________\nconv2d_37 (Conv2D) (None, 1, 1, 14) 4718 lambda_10[0][0] \n__________________________________________________________________________________________________\nswish_29 (Swish) (None, 1, 1, 14) 0 conv2d_37[0][0] \n__________________________________________________________________________________________________\nconv2d_38 (Conv2D) (None, 1, 1, 336) 5040 swish_29[0][0] \n__________________________________________________________________________________________________\nactivation_10 (Activation) (None, 1, 1, 336) 0 conv2d_38[0][0] \n__________________________________________________________________________________________________\nmultiply_10 (Multiply) (None, 32, 32, 336) 0 activation_10[0][0] \n swish_28[0][0] \n__________________________________________________________________________________________________\nconv2d_39 (Conv2D) (None, 32, 32, 56) 18816 multiply_10[0][0] 
\n__________________________________________________________________________________________________\nbatch_normalization_29 (BatchNo (None, 32, 32, 56) 224 conv2d_39[0][0] \n__________________________________________________________________________________________________\ndrop_connect_7 (DropConnect) (None, 32, 32, 56) 0 batch_normalization_29[0][0] \n__________________________________________________________________________________________________\nadd_7 (Add) (None, 32, 32, 56) 0 drop_connect_7[0][0] \n add_6[0][0] \n__________________________________________________________________________________________________\nconv2d_40 (Conv2D) (None, 32, 32, 336) 18816 add_7[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_30 (BatchNo (None, 32, 32, 336) 1344 conv2d_40[0][0] \n__________________________________________________________________________________________________\nswish_30 (Swish) (None, 32, 32, 336) 0 batch_normalization_30[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_11 (DepthwiseC (None, 16, 16, 336) 3024 swish_30[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_31 (BatchNo (None, 16, 16, 336) 1344 depthwise_conv2d_11[0][0] \n__________________________________________________________________________________________________\nswish_31 (Swish) (None, 16, 16, 336) 0 batch_normalization_31[0][0] \n__________________________________________________________________________________________________\nlambda_11 (Lambda) (None, 1, 1, 336) 0 swish_31[0][0] \n__________________________________________________________________________________________________\nconv2d_41 (Conv2D) (None, 1, 1, 14) 4718 lambda_11[0][0] \n__________________________________________________________________________________________________\nswish_32 (Swish) (None, 1, 1, 14) 0 conv2d_41[0][0] \n__________________________________________________________________________________________________\nconv2d_42 (Conv2D) (None, 1, 1, 336) 5040 swish_32[0][0] \n__________________________________________________________________________________________________\nactivation_11 (Activation) (None, 1, 1, 336) 0 conv2d_42[0][0] \n__________________________________________________________________________________________________\nmultiply_11 (Multiply) (None, 16, 16, 336) 0 activation_11[0][0] \n swish_31[0][0] \n__________________________________________________________________________________________________\nconv2d_43 (Conv2D) (None, 16, 16, 112) 37632 multiply_11[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_32 (BatchNo (None, 16, 16, 112) 448 conv2d_43[0][0] \n__________________________________________________________________________________________________\nconv2d_44 (Conv2D) (None, 16, 16, 672) 75264 batch_normalization_32[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_33 (BatchNo (None, 16, 16, 672) 2688 conv2d_44[0][0] \n__________________________________________________________________________________________________\nswish_33 (Swish) (None, 16, 16, 672) 0 batch_normalization_33[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_12 (DepthwiseC (None, 16, 16, 672) 6048 
swish_33[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_34 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_12[0][0] \n__________________________________________________________________________________________________\nswish_34 (Swish) (None, 16, 16, 672) 0 batch_normalization_34[0][0] \n__________________________________________________________________________________________________\nlambda_12 (Lambda) (None, 1, 1, 672) 0 swish_34[0][0] \n__________________________________________________________________________________________________\nconv2d_45 (Conv2D) (None, 1, 1, 28) 18844 lambda_12[0][0] \n__________________________________________________________________________________________________\nswish_35 (Swish) (None, 1, 1, 28) 0 conv2d_45[0][0] \n__________________________________________________________________________________________________\nconv2d_46 (Conv2D) (None, 1, 1, 672) 19488 swish_35[0][0] \n__________________________________________________________________________________________________\nactivation_12 (Activation) (None, 1, 1, 672) 0 conv2d_46[0][0] \n__________________________________________________________________________________________________\nmultiply_12 (Multiply) (None, 16, 16, 672) 0 activation_12[0][0] \n swish_34[0][0] \n__________________________________________________________________________________________________\nconv2d_47 (Conv2D) (None, 16, 16, 112) 75264 multiply_12[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_35 (BatchNo (None, 16, 16, 112) 448 conv2d_47[0][0] \n__________________________________________________________________________________________________\ndrop_connect_8 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_35[0][0] \n__________________________________________________________________________________________________\nadd_8 (Add) (None, 16, 16, 112) 0 drop_connect_8[0][0] \n batch_normalization_32[0][0] \n__________________________________________________________________________________________________\nconv2d_48 (Conv2D) (None, 16, 16, 672) 75264 add_8[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_36 (BatchNo (None, 16, 16, 672) 2688 conv2d_48[0][0] \n__________________________________________________________________________________________________\nswish_36 (Swish) (None, 16, 16, 672) 0 batch_normalization_36[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_13 (DepthwiseC (None, 16, 16, 672) 6048 swish_36[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_37 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_13[0][0] \n__________________________________________________________________________________________________\nswish_37 (Swish) (None, 16, 16, 672) 0 batch_normalization_37[0][0] \n__________________________________________________________________________________________________\nlambda_13 (Lambda) (None, 1, 1, 672) 0 swish_37[0][0] \n__________________________________________________________________________________________________\nconv2d_49 (Conv2D) (None, 1, 1, 28) 18844 lambda_13[0][0] \n__________________________________________________________________________________________________\nswish_38 (Swish) (None, 1, 1, 28) 0 
conv2d_49[0][0] \n__________________________________________________________________________________________________\nconv2d_50 (Conv2D) (None, 1, 1, 672) 19488 swish_38[0][0] \n__________________________________________________________________________________________________\nactivation_13 (Activation) (None, 1, 1, 672) 0 conv2d_50[0][0] \n__________________________________________________________________________________________________\nmultiply_13 (Multiply) (None, 16, 16, 672) 0 activation_13[0][0] \n swish_37[0][0] \n__________________________________________________________________________________________________\nconv2d_51 (Conv2D) (None, 16, 16, 112) 75264 multiply_13[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_38 (BatchNo (None, 16, 16, 112) 448 conv2d_51[0][0] \n__________________________________________________________________________________________________\ndrop_connect_9 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_38[0][0] \n__________________________________________________________________________________________________\nadd_9 (Add) (None, 16, 16, 112) 0 drop_connect_9[0][0] \n add_8[0][0] \n__________________________________________________________________________________________________\nconv2d_52 (Conv2D) (None, 16, 16, 672) 75264 add_9[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_39 (BatchNo (None, 16, 16, 672) 2688 conv2d_52[0][0] \n__________________________________________________________________________________________________\nswish_39 (Swish) (None, 16, 16, 672) 0 batch_normalization_39[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_14 (DepthwiseC (None, 16, 16, 672) 6048 swish_39[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_40 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_14[0][0] \n__________________________________________________________________________________________________\nswish_40 (Swish) (None, 16, 16, 672) 0 batch_normalization_40[0][0] \n__________________________________________________________________________________________________\nlambda_14 (Lambda) (None, 1, 1, 672) 0 swish_40[0][0] \n__________________________________________________________________________________________________\nconv2d_53 (Conv2D) (None, 1, 1, 28) 18844 lambda_14[0][0] \n__________________________________________________________________________________________________\nswish_41 (Swish) (None, 1, 1, 28) 0 conv2d_53[0][0] \n__________________________________________________________________________________________________\nconv2d_54 (Conv2D) (None, 1, 1, 672) 19488 swish_41[0][0] \n__________________________________________________________________________________________________\nactivation_14 (Activation) (None, 1, 1, 672) 0 conv2d_54[0][0] \n__________________________________________________________________________________________________\nmultiply_14 (Multiply) (None, 16, 16, 672) 0 activation_14[0][0] \n swish_40[0][0] \n__________________________________________________________________________________________________\nconv2d_55 (Conv2D) (None, 16, 16, 112) 75264 multiply_14[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_41 (BatchNo (None, 16, 16, 112) 448 
conv2d_55[0][0] \n__________________________________________________________________________________________________\ndrop_connect_10 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_41[0][0] \n__________________________________________________________________________________________________\nadd_10 (Add) (None, 16, 16, 112) 0 drop_connect_10[0][0] \n add_9[0][0] \n__________________________________________________________________________________________________\nconv2d_56 (Conv2D) (None, 16, 16, 672) 75264 add_10[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_42 (BatchNo (None, 16, 16, 672) 2688 conv2d_56[0][0] \n__________________________________________________________________________________________________\nswish_42 (Swish) (None, 16, 16, 672) 0 batch_normalization_42[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_15 (DepthwiseC (None, 16, 16, 672) 6048 swish_42[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_43 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_15[0][0] \n__________________________________________________________________________________________________\nswish_43 (Swish) (None, 16, 16, 672) 0 batch_normalization_43[0][0] \n__________________________________________________________________________________________________\nlambda_15 (Lambda) (None, 1, 1, 672) 0 swish_43[0][0] \n__________________________________________________________________________________________________\nconv2d_57 (Conv2D) (None, 1, 1, 28) 18844 lambda_15[0][0] \n__________________________________________________________________________________________________\nswish_44 (Swish) (None, 1, 1, 28) 0 conv2d_57[0][0] \n__________________________________________________________________________________________________\nconv2d_58 (Conv2D) (None, 1, 1, 672) 19488 swish_44[0][0] \n__________________________________________________________________________________________________\nactivation_15 (Activation) (None, 1, 1, 672) 0 conv2d_58[0][0] \n__________________________________________________________________________________________________\nmultiply_15 (Multiply) (None, 16, 16, 672) 0 activation_15[0][0] \n swish_43[0][0] \n__________________________________________________________________________________________________\nconv2d_59 (Conv2D) (None, 16, 16, 112) 75264 multiply_15[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_44 (BatchNo (None, 16, 16, 112) 448 conv2d_59[0][0] \n__________________________________________________________________________________________________\ndrop_connect_11 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_44[0][0] \n__________________________________________________________________________________________________\nadd_11 (Add) (None, 16, 16, 112) 0 drop_connect_11[0][0] \n add_10[0][0] \n__________________________________________________________________________________________________\nconv2d_60 (Conv2D) (None, 16, 16, 672) 75264 add_11[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_45 (BatchNo (None, 16, 16, 672) 2688 conv2d_60[0][0] \n__________________________________________________________________________________________________\nswish_45 (Swish) (None, 16, 16, 
672) 0 batch_normalization_45[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_16 (DepthwiseC (None, 16, 16, 672) 6048 swish_45[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_46 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_16[0][0] \n__________________________________________________________________________________________________\nswish_46 (Swish) (None, 16, 16, 672) 0 batch_normalization_46[0][0] \n__________________________________________________________________________________________________\nlambda_16 (Lambda) (None, 1, 1, 672) 0 swish_46[0][0] \n__________________________________________________________________________________________________\nconv2d_61 (Conv2D) (None, 1, 1, 28) 18844 lambda_16[0][0] \n__________________________________________________________________________________________________\nswish_47 (Swish) (None, 1, 1, 28) 0 conv2d_61[0][0] \n__________________________________________________________________________________________________\nconv2d_62 (Conv2D) (None, 1, 1, 672) 19488 swish_47[0][0] \n__________________________________________________________________________________________________\nactivation_16 (Activation) (None, 1, 1, 672) 0 conv2d_62[0][0] \n__________________________________________________________________________________________________\nmultiply_16 (Multiply) (None, 16, 16, 672) 0 activation_16[0][0] \n swish_46[0][0] \n__________________________________________________________________________________________________\nconv2d_63 (Conv2D) (None, 16, 16, 112) 75264 multiply_16[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_47 (BatchNo (None, 16, 16, 112) 448 conv2d_63[0][0] \n__________________________________________________________________________________________________\ndrop_connect_12 (DropConnect) (None, 16, 16, 112) 0 batch_normalization_47[0][0] \n__________________________________________________________________________________________________\nadd_12 (Add) (None, 16, 16, 112) 0 drop_connect_12[0][0] \n add_11[0][0] \n__________________________________________________________________________________________________\nconv2d_64 (Conv2D) (None, 16, 16, 672) 75264 add_12[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_48 (BatchNo (None, 16, 16, 672) 2688 conv2d_64[0][0] \n__________________________________________________________________________________________________\nswish_48 (Swish) (None, 16, 16, 672) 0 batch_normalization_48[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_17 (DepthwiseC (None, 16, 16, 672) 16800 swish_48[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_49 (BatchNo (None, 16, 16, 672) 2688 depthwise_conv2d_17[0][0] \n__________________________________________________________________________________________________\nswish_49 (Swish) (None, 16, 16, 672) 0 batch_normalization_49[0][0] \n__________________________________________________________________________________________________\nlambda_17 (Lambda) (None, 1, 1, 672) 0 swish_49[0][0] \n__________________________________________________________________________________________________\nconv2d_65 (Conv2D) 
(None, 1, 1, 28) 18844 lambda_17[0][0] \n__________________________________________________________________________________________________\nswish_50 (Swish) (None, 1, 1, 28) 0 conv2d_65[0][0] \n__________________________________________________________________________________________________\nconv2d_66 (Conv2D) (None, 1, 1, 672) 19488 swish_50[0][0] \n__________________________________________________________________________________________________\nactivation_17 (Activation) (None, 1, 1, 672) 0 conv2d_66[0][0] \n__________________________________________________________________________________________________\nmultiply_17 (Multiply) (None, 16, 16, 672) 0 activation_17[0][0] \n swish_49[0][0] \n__________________________________________________________________________________________________\nconv2d_67 (Conv2D) (None, 16, 16, 160) 107520 multiply_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_50 (BatchNo (None, 16, 16, 160) 640 conv2d_67[0][0] \n__________________________________________________________________________________________________\nconv2d_68 (Conv2D) (None, 16, 16, 960) 153600 batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_51 (BatchNo (None, 16, 16, 960) 3840 conv2d_68[0][0] \n__________________________________________________________________________________________________\nswish_51 (Swish) (None, 16, 16, 960) 0 batch_normalization_51[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_18 (DepthwiseC (None, 16, 16, 960) 24000 swish_51[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_52 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_18[0][0] \n__________________________________________________________________________________________________\nswish_52 (Swish) (None, 16, 16, 960) 0 batch_normalization_52[0][0] \n__________________________________________________________________________________________________\nlambda_18 (Lambda) (None, 1, 1, 960) 0 swish_52[0][0] \n__________________________________________________________________________________________________\nconv2d_69 (Conv2D) (None, 1, 1, 40) 38440 lambda_18[0][0] \n__________________________________________________________________________________________________\nswish_53 (Swish) (None, 1, 1, 40) 0 conv2d_69[0][0] \n__________________________________________________________________________________________________\nconv2d_70 (Conv2D) (None, 1, 1, 960) 39360 swish_53[0][0] \n__________________________________________________________________________________________________\nactivation_18 (Activation) (None, 1, 1, 960) 0 conv2d_70[0][0] \n__________________________________________________________________________________________________\nmultiply_18 (Multiply) (None, 16, 16, 960) 0 activation_18[0][0] \n swish_52[0][0] \n__________________________________________________________________________________________________\nconv2d_71 (Conv2D) (None, 16, 16, 160) 153600 multiply_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_53 (BatchNo (None, 16, 16, 160) 640 conv2d_71[0][0] \n__________________________________________________________________________________________________\ndrop_connect_13 (DropConnect) (None, 
16, 16, 160) 0 batch_normalization_53[0][0] \n__________________________________________________________________________________________________\nadd_13 (Add) (None, 16, 16, 160) 0 drop_connect_13[0][0] \n batch_normalization_50[0][0] \n__________________________________________________________________________________________________\nconv2d_72 (Conv2D) (None, 16, 16, 960) 153600 add_13[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_54 (BatchNo (None, 16, 16, 960) 3840 conv2d_72[0][0] \n__________________________________________________________________________________________________\nswish_54 (Swish) (None, 16, 16, 960) 0 batch_normalization_54[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_19 (DepthwiseC (None, 16, 16, 960) 24000 swish_54[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_55 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_19[0][0] \n__________________________________________________________________________________________________\nswish_55 (Swish) (None, 16, 16, 960) 0 batch_normalization_55[0][0] \n__________________________________________________________________________________________________\nlambda_19 (Lambda) (None, 1, 1, 960) 0 swish_55[0][0] \n__________________________________________________________________________________________________\nconv2d_73 (Conv2D) (None, 1, 1, 40) 38440 lambda_19[0][0] \n__________________________________________________________________________________________________\nswish_56 (Swish) (None, 1, 1, 40) 0 conv2d_73[0][0] \n__________________________________________________________________________________________________\nconv2d_74 (Conv2D) (None, 1, 1, 960) 39360 swish_56[0][0] \n__________________________________________________________________________________________________\nactivation_19 (Activation) (None, 1, 1, 960) 0 conv2d_74[0][0] \n__________________________________________________________________________________________________\nmultiply_19 (Multiply) (None, 16, 16, 960) 0 activation_19[0][0] \n swish_55[0][0] \n__________________________________________________________________________________________________\nconv2d_75 (Conv2D) (None, 16, 16, 160) 153600 multiply_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_56 (BatchNo (None, 16, 16, 160) 640 conv2d_75[0][0] \n__________________________________________________________________________________________________\ndrop_connect_14 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_56[0][0] \n__________________________________________________________________________________________________\nadd_14 (Add) (None, 16, 16, 160) 0 drop_connect_14[0][0] \n add_13[0][0] \n__________________________________________________________________________________________________\nconv2d_76 (Conv2D) (None, 16, 16, 960) 153600 add_14[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_57 (BatchNo (None, 16, 16, 960) 3840 conv2d_76[0][0] \n__________________________________________________________________________________________________\nswish_57 (Swish) (None, 16, 16, 960) 0 batch_normalization_57[0][0] 
\n__________________________________________________________________________________________________\ndepthwise_conv2d_20 (DepthwiseC (None, 16, 16, 960) 24000 swish_57[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_58 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_20[0][0] \n__________________________________________________________________________________________________\nswish_58 (Swish) (None, 16, 16, 960) 0 batch_normalization_58[0][0] \n__________________________________________________________________________________________________\nlambda_20 (Lambda) (None, 1, 1, 960) 0 swish_58[0][0] \n__________________________________________________________________________________________________\nconv2d_77 (Conv2D) (None, 1, 1, 40) 38440 lambda_20[0][0] \n__________________________________________________________________________________________________\nswish_59 (Swish) (None, 1, 1, 40) 0 conv2d_77[0][0] \n__________________________________________________________________________________________________\nconv2d_78 (Conv2D) (None, 1, 1, 960) 39360 swish_59[0][0] \n__________________________________________________________________________________________________\nactivation_20 (Activation) (None, 1, 1, 960) 0 conv2d_78[0][0] \n__________________________________________________________________________________________________\nmultiply_20 (Multiply) (None, 16, 16, 960) 0 activation_20[0][0] \n swish_58[0][0] \n__________________________________________________________________________________________________\nconv2d_79 (Conv2D) (None, 16, 16, 160) 153600 multiply_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_59 (BatchNo (None, 16, 16, 160) 640 conv2d_79[0][0] \n__________________________________________________________________________________________________\ndrop_connect_15 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_59[0][0] \n__________________________________________________________________________________________________\nadd_15 (Add) (None, 16, 16, 160) 0 drop_connect_15[0][0] \n add_14[0][0] \n__________________________________________________________________________________________________\nconv2d_80 (Conv2D) (None, 16, 16, 960) 153600 add_15[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_60 (BatchNo (None, 16, 16, 960) 3840 conv2d_80[0][0] \n__________________________________________________________________________________________________\nswish_60 (Swish) (None, 16, 16, 960) 0 batch_normalization_60[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_21 (DepthwiseC (None, 16, 16, 960) 24000 swish_60[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_61 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_21[0][0] \n__________________________________________________________________________________________________\nswish_61 (Swish) (None, 16, 16, 960) 0 batch_normalization_61[0][0] \n__________________________________________________________________________________________________\nlambda_21 (Lambda) (None, 1, 1, 960) 0 swish_61[0][0] \n__________________________________________________________________________________________________\nconv2d_81 (Conv2D) (None, 1, 1, 40) 38440 
lambda_21[0][0] \n__________________________________________________________________________________________________\nswish_62 (Swish) (None, 1, 1, 40) 0 conv2d_81[0][0] \n__________________________________________________________________________________________________\nconv2d_82 (Conv2D) (None, 1, 1, 960) 39360 swish_62[0][0] \n__________________________________________________________________________________________________\nactivation_21 (Activation) (None, 1, 1, 960) 0 conv2d_82[0][0] \n__________________________________________________________________________________________________\nmultiply_21 (Multiply) (None, 16, 16, 960) 0 activation_21[0][0] \n swish_61[0][0] \n__________________________________________________________________________________________________\nconv2d_83 (Conv2D) (None, 16, 16, 160) 153600 multiply_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_62 (BatchNo (None, 16, 16, 160) 640 conv2d_83[0][0] \n__________________________________________________________________________________________________\ndrop_connect_16 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_62[0][0] \n__________________________________________________________________________________________________\nadd_16 (Add) (None, 16, 16, 160) 0 drop_connect_16[0][0] \n add_15[0][0] \n__________________________________________________________________________________________________\nconv2d_84 (Conv2D) (None, 16, 16, 960) 153600 add_16[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_63 (BatchNo (None, 16, 16, 960) 3840 conv2d_84[0][0] \n__________________________________________________________________________________________________\nswish_63 (Swish) (None, 16, 16, 960) 0 batch_normalization_63[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_22 (DepthwiseC (None, 16, 16, 960) 24000 swish_63[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_64 (BatchNo (None, 16, 16, 960) 3840 depthwise_conv2d_22[0][0] \n__________________________________________________________________________________________________\nswish_64 (Swish) (None, 16, 16, 960) 0 batch_normalization_64[0][0] \n__________________________________________________________________________________________________\nlambda_22 (Lambda) (None, 1, 1, 960) 0 swish_64[0][0] \n__________________________________________________________________________________________________\nconv2d_85 (Conv2D) (None, 1, 1, 40) 38440 lambda_22[0][0] \n__________________________________________________________________________________________________\nswish_65 (Swish) (None, 1, 1, 40) 0 conv2d_85[0][0] \n__________________________________________________________________________________________________\nconv2d_86 (Conv2D) (None, 1, 1, 960) 39360 swish_65[0][0] \n__________________________________________________________________________________________________\nactivation_22 (Activation) (None, 1, 1, 960) 0 conv2d_86[0][0] \n__________________________________________________________________________________________________\nmultiply_22 (Multiply) (None, 16, 16, 960) 0 activation_22[0][0] \n swish_64[0][0] \n__________________________________________________________________________________________________\nconv2d_87 (Conv2D) (None, 16, 16, 160) 153600 
multiply_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_65 (BatchNo (None, 16, 16, 160) 640 conv2d_87[0][0] \n__________________________________________________________________________________________________\ndrop_connect_17 (DropConnect) (None, 16, 16, 160) 0 batch_normalization_65[0][0] \n__________________________________________________________________________________________________\nadd_17 (Add) (None, 16, 16, 160) 0 drop_connect_17[0][0] \n add_16[0][0] \n__________________________________________________________________________________________________\nconv2d_88 (Conv2D) (None, 16, 16, 960) 153600 add_17[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_66 (BatchNo (None, 16, 16, 960) 3840 conv2d_88[0][0] \n__________________________________________________________________________________________________\nswish_66 (Swish) (None, 16, 16, 960) 0 batch_normalization_66[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_23 (DepthwiseC (None, 8, 8, 960) 24000 swish_66[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_67 (BatchNo (None, 8, 8, 960) 3840 depthwise_conv2d_23[0][0] \n__________________________________________________________________________________________________\nswish_67 (Swish) (None, 8, 8, 960) 0 batch_normalization_67[0][0] \n__________________________________________________________________________________________________\nlambda_23 (Lambda) (None, 1, 1, 960) 0 swish_67[0][0] \n__________________________________________________________________________________________________\nconv2d_89 (Conv2D) (None, 1, 1, 40) 38440 lambda_23[0][0] \n__________________________________________________________________________________________________\nswish_68 (Swish) (None, 1, 1, 40) 0 conv2d_89[0][0] \n__________________________________________________________________________________________________\nconv2d_90 (Conv2D) (None, 1, 1, 960) 39360 swish_68[0][0] \n__________________________________________________________________________________________________\nactivation_23 (Activation) (None, 1, 1, 960) 0 conv2d_90[0][0] \n__________________________________________________________________________________________________\nmultiply_23 (Multiply) (None, 8, 8, 960) 0 activation_23[0][0] \n swish_67[0][0] \n__________________________________________________________________________________________________\nconv2d_91 (Conv2D) (None, 8, 8, 272) 261120 multiply_23[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_68 (BatchNo (None, 8, 8, 272) 1088 conv2d_91[0][0] \n__________________________________________________________________________________________________\nconv2d_92 (Conv2D) (None, 8, 8, 1632) 443904 batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_69 (BatchNo (None, 8, 8, 1632) 6528 conv2d_92[0][0] \n__________________________________________________________________________________________________\nswish_69 (Swish) (None, 8, 8, 1632) 0 batch_normalization_69[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_24 (DepthwiseC (None, 
8, 8, 1632) 40800 swish_69[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_70 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_24[0][0] \n__________________________________________________________________________________________________\nswish_70 (Swish) (None, 8, 8, 1632) 0 batch_normalization_70[0][0] \n__________________________________________________________________________________________________\nlambda_24 (Lambda) (None, 1, 1, 1632) 0 swish_70[0][0] \n__________________________________________________________________________________________________\nconv2d_93 (Conv2D) (None, 1, 1, 68) 111044 lambda_24[0][0] \n__________________________________________________________________________________________________\nswish_71 (Swish) (None, 1, 1, 68) 0 conv2d_93[0][0] \n__________________________________________________________________________________________________\nconv2d_94 (Conv2D) (None, 1, 1, 1632) 112608 swish_71[0][0] \n__________________________________________________________________________________________________\nactivation_24 (Activation) (None, 1, 1, 1632) 0 conv2d_94[0][0] \n__________________________________________________________________________________________________\nmultiply_24 (Multiply) (None, 8, 8, 1632) 0 activation_24[0][0] \n swish_70[0][0] \n__________________________________________________________________________________________________\nconv2d_95 (Conv2D) (None, 8, 8, 272) 443904 multiply_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_71 (BatchNo (None, 8, 8, 272) 1088 conv2d_95[0][0] \n__________________________________________________________________________________________________\ndrop_connect_18 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_71[0][0] \n__________________________________________________________________________________________________\nadd_18 (Add) (None, 8, 8, 272) 0 drop_connect_18[0][0] \n batch_normalization_68[0][0] \n__________________________________________________________________________________________________\nconv2d_96 (Conv2D) (None, 8, 8, 1632) 443904 add_18[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_72 (BatchNo (None, 8, 8, 1632) 6528 conv2d_96[0][0] \n__________________________________________________________________________________________________\nswish_72 (Swish) (None, 8, 8, 1632) 0 batch_normalization_72[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_25 (DepthwiseC (None, 8, 8, 1632) 40800 swish_72[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_73 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_25[0][0] \n__________________________________________________________________________________________________\nswish_73 (Swish) (None, 8, 8, 1632) 0 batch_normalization_73[0][0] \n__________________________________________________________________________________________________\nlambda_25 (Lambda) (None, 1, 1, 1632) 0 swish_73[0][0] \n__________________________________________________________________________________________________\nconv2d_97 (Conv2D) (None, 1, 1, 68) 111044 lambda_25[0][0] \n__________________________________________________________________________________________________\nswish_74 (Swish) (None, 1, 
1, 68) 0 conv2d_97[0][0] \n__________________________________________________________________________________________________\nconv2d_98 (Conv2D) (None, 1, 1, 1632) 112608 swish_74[0][0] \n__________________________________________________________________________________________________\nactivation_25 (Activation) (None, 1, 1, 1632) 0 conv2d_98[0][0] \n__________________________________________________________________________________________________\nmultiply_25 (Multiply) (None, 8, 8, 1632) 0 activation_25[0][0] \n swish_73[0][0] \n__________________________________________________________________________________________________\nconv2d_99 (Conv2D) (None, 8, 8, 272) 443904 multiply_25[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_74 (BatchNo (None, 8, 8, 272) 1088 conv2d_99[0][0] \n__________________________________________________________________________________________________\ndrop_connect_19 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_74[0][0] \n__________________________________________________________________________________________________\nadd_19 (Add) (None, 8, 8, 272) 0 drop_connect_19[0][0] \n add_18[0][0] \n__________________________________________________________________________________________________\nconv2d_100 (Conv2D) (None, 8, 8, 1632) 443904 add_19[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_75 (BatchNo (None, 8, 8, 1632) 6528 conv2d_100[0][0] \n__________________________________________________________________________________________________\nswish_75 (Swish) (None, 8, 8, 1632) 0 batch_normalization_75[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_26 (DepthwiseC (None, 8, 8, 1632) 40800 swish_75[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_76 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_26[0][0] \n__________________________________________________________________________________________________\nswish_76 (Swish) (None, 8, 8, 1632) 0 batch_normalization_76[0][0] \n__________________________________________________________________________________________________\nlambda_26 (Lambda) (None, 1, 1, 1632) 0 swish_76[0][0] \n__________________________________________________________________________________________________\nconv2d_101 (Conv2D) (None, 1, 1, 68) 111044 lambda_26[0][0] \n__________________________________________________________________________________________________\nswish_77 (Swish) (None, 1, 1, 68) 0 conv2d_101[0][0] \n__________________________________________________________________________________________________\nconv2d_102 (Conv2D) (None, 1, 1, 1632) 112608 swish_77[0][0] \n__________________________________________________________________________________________________\nactivation_26 (Activation) (None, 1, 1, 1632) 0 conv2d_102[0][0] \n__________________________________________________________________________________________________\nmultiply_26 (Multiply) (None, 8, 8, 1632) 0 activation_26[0][0] \n swish_76[0][0] \n__________________________________________________________________________________________________\nconv2d_103 (Conv2D) (None, 8, 8, 272) 443904 multiply_26[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_77 (BatchNo (None, 
8, 8, 272) 1088 conv2d_103[0][0] \n__________________________________________________________________________________________________\ndrop_connect_20 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_77[0][0] \n__________________________________________________________________________________________________\nadd_20 (Add) (None, 8, 8, 272) 0 drop_connect_20[0][0] \n add_19[0][0] \n__________________________________________________________________________________________________\nconv2d_104 (Conv2D) (None, 8, 8, 1632) 443904 add_20[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_78 (BatchNo (None, 8, 8, 1632) 6528 conv2d_104[0][0] \n__________________________________________________________________________________________________\nswish_78 (Swish) (None, 8, 8, 1632) 0 batch_normalization_78[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_27 (DepthwiseC (None, 8, 8, 1632) 40800 swish_78[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_79 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_27[0][0] \n__________________________________________________________________________________________________\nswish_79 (Swish) (None, 8, 8, 1632) 0 batch_normalization_79[0][0] \n__________________________________________________________________________________________________\nlambda_27 (Lambda) (None, 1, 1, 1632) 0 swish_79[0][0] \n__________________________________________________________________________________________________\nconv2d_105 (Conv2D) (None, 1, 1, 68) 111044 lambda_27[0][0] \n__________________________________________________________________________________________________\nswish_80 (Swish) (None, 1, 1, 68) 0 conv2d_105[0][0] \n__________________________________________________________________________________________________\nconv2d_106 (Conv2D) (None, 1, 1, 1632) 112608 swish_80[0][0] \n__________________________________________________________________________________________________\nactivation_27 (Activation) (None, 1, 1, 1632) 0 conv2d_106[0][0] \n__________________________________________________________________________________________________\nmultiply_27 (Multiply) (None, 8, 8, 1632) 0 activation_27[0][0] \n swish_79[0][0] \n__________________________________________________________________________________________________\nconv2d_107 (Conv2D) (None, 8, 8, 272) 443904 multiply_27[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_80 (BatchNo (None, 8, 8, 272) 1088 conv2d_107[0][0] \n__________________________________________________________________________________________________\ndrop_connect_21 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_80[0][0] \n__________________________________________________________________________________________________\nadd_21 (Add) (None, 8, 8, 272) 0 drop_connect_21[0][0] \n add_20[0][0] \n__________________________________________________________________________________________________\nconv2d_108 (Conv2D) (None, 8, 8, 1632) 443904 add_21[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_81 (BatchNo (None, 8, 8, 1632) 6528 conv2d_108[0][0] \n__________________________________________________________________________________________________\nswish_81 (Swish) 
(None, 8, 8, 1632) 0 batch_normalization_81[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_28 (DepthwiseC (None, 8, 8, 1632) 40800 swish_81[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_82 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_28[0][0] \n__________________________________________________________________________________________________\nswish_82 (Swish) (None, 8, 8, 1632) 0 batch_normalization_82[0][0] \n__________________________________________________________________________________________________\nlambda_28 (Lambda) (None, 1, 1, 1632) 0 swish_82[0][0] \n__________________________________________________________________________________________________\nconv2d_109 (Conv2D) (None, 1, 1, 68) 111044 lambda_28[0][0] \n__________________________________________________________________________________________________\nswish_83 (Swish) (None, 1, 1, 68) 0 conv2d_109[0][0] \n__________________________________________________________________________________________________\nconv2d_110 (Conv2D) (None, 1, 1, 1632) 112608 swish_83[0][0] \n__________________________________________________________________________________________________\nactivation_28 (Activation) (None, 1, 1, 1632) 0 conv2d_110[0][0] \n__________________________________________________________________________________________________\nmultiply_28 (Multiply) (None, 8, 8, 1632) 0 activation_28[0][0] \n swish_82[0][0] \n__________________________________________________________________________________________________\nconv2d_111 (Conv2D) (None, 8, 8, 272) 443904 multiply_28[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_83 (BatchNo (None, 8, 8, 272) 1088 conv2d_111[0][0] \n__________________________________________________________________________________________________\ndrop_connect_22 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_83[0][0] \n__________________________________________________________________________________________________\nadd_22 (Add) (None, 8, 8, 272) 0 drop_connect_22[0][0] \n add_21[0][0] \n__________________________________________________________________________________________________\nconv2d_112 (Conv2D) (None, 8, 8, 1632) 443904 add_22[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_84 (BatchNo (None, 8, 8, 1632) 6528 conv2d_112[0][0] \n__________________________________________________________________________________________________\nswish_84 (Swish) (None, 8, 8, 1632) 0 batch_normalization_84[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_29 (DepthwiseC (None, 8, 8, 1632) 40800 swish_84[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_85 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_29[0][0] \n__________________________________________________________________________________________________\nswish_85 (Swish) (None, 8, 8, 1632) 0 batch_normalization_85[0][0] \n__________________________________________________________________________________________________\nlambda_29 (Lambda) (None, 1, 1, 1632) 0 swish_85[0][0] 
\n__________________________________________________________________________________________________\nconv2d_113 (Conv2D) (None, 1, 1, 68) 111044 lambda_29[0][0] \n__________________________________________________________________________________________________\nswish_86 (Swish) (None, 1, 1, 68) 0 conv2d_113[0][0] \n__________________________________________________________________________________________________\nconv2d_114 (Conv2D) (None, 1, 1, 1632) 112608 swish_86[0][0] \n__________________________________________________________________________________________________\nactivation_29 (Activation) (None, 1, 1, 1632) 0 conv2d_114[0][0] \n__________________________________________________________________________________________________\nmultiply_29 (Multiply) (None, 8, 8, 1632) 0 activation_29[0][0] \n swish_85[0][0] \n__________________________________________________________________________________________________\nconv2d_115 (Conv2D) (None, 8, 8, 272) 443904 multiply_29[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_86 (BatchNo (None, 8, 8, 272) 1088 conv2d_115[0][0] \n__________________________________________________________________________________________________\ndrop_connect_23 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_86[0][0] \n__________________________________________________________________________________________________\nadd_23 (Add) (None, 8, 8, 272) 0 drop_connect_23[0][0] \n add_22[0][0] \n__________________________________________________________________________________________________\nconv2d_116 (Conv2D) (None, 8, 8, 1632) 443904 add_23[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_87 (BatchNo (None, 8, 8, 1632) 6528 conv2d_116[0][0] \n__________________________________________________________________________________________________\nswish_87 (Swish) (None, 8, 8, 1632) 0 batch_normalization_87[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_30 (DepthwiseC (None, 8, 8, 1632) 40800 swish_87[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_88 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_30[0][0] \n__________________________________________________________________________________________________\nswish_88 (Swish) (None, 8, 8, 1632) 0 batch_normalization_88[0][0] \n__________________________________________________________________________________________________\nlambda_30 (Lambda) (None, 1, 1, 1632) 0 swish_88[0][0] \n__________________________________________________________________________________________________\nconv2d_117 (Conv2D) (None, 1, 1, 68) 111044 lambda_30[0][0] \n__________________________________________________________________________________________________\nswish_89 (Swish) (None, 1, 1, 68) 0 conv2d_117[0][0] \n__________________________________________________________________________________________________\nconv2d_118 (Conv2D) (None, 1, 1, 1632) 112608 swish_89[0][0] \n__________________________________________________________________________________________________\nactivation_30 (Activation) (None, 1, 1, 1632) 0 conv2d_118[0][0] \n__________________________________________________________________________________________________\nmultiply_30 (Multiply) (None, 8, 8, 1632) 0 activation_30[0][0] \n swish_88[0][0] 
\n__________________________________________________________________________________________________\nconv2d_119 (Conv2D) (None, 8, 8, 272) 443904 multiply_30[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_89 (BatchNo (None, 8, 8, 272) 1088 conv2d_119[0][0] \n__________________________________________________________________________________________________\ndrop_connect_24 (DropConnect) (None, 8, 8, 272) 0 batch_normalization_89[0][0] \n__________________________________________________________________________________________________\nadd_24 (Add) (None, 8, 8, 272) 0 drop_connect_24[0][0] \n add_23[0][0] \n__________________________________________________________________________________________________\nconv2d_120 (Conv2D) (None, 8, 8, 1632) 443904 add_24[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_90 (BatchNo (None, 8, 8, 1632) 6528 conv2d_120[0][0] \n__________________________________________________________________________________________________\nswish_90 (Swish) (None, 8, 8, 1632) 0 batch_normalization_90[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_31 (DepthwiseC (None, 8, 8, 1632) 14688 swish_90[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_91 (BatchNo (None, 8, 8, 1632) 6528 depthwise_conv2d_31[0][0] \n__________________________________________________________________________________________________\nswish_91 (Swish) (None, 8, 8, 1632) 0 batch_normalization_91[0][0] \n__________________________________________________________________________________________________\nlambda_31 (Lambda) (None, 1, 1, 1632) 0 swish_91[0][0] \n__________________________________________________________________________________________________\nconv2d_121 (Conv2D) (None, 1, 1, 68) 111044 lambda_31[0][0] \n__________________________________________________________________________________________________\nswish_92 (Swish) (None, 1, 1, 68) 0 conv2d_121[0][0] \n__________________________________________________________________________________________________\nconv2d_122 (Conv2D) (None, 1, 1, 1632) 112608 swish_92[0][0] \n__________________________________________________________________________________________________\nactivation_31 (Activation) (None, 1, 1, 1632) 0 conv2d_122[0][0] \n__________________________________________________________________________________________________\nmultiply_31 (Multiply) (None, 8, 8, 1632) 0 activation_31[0][0] \n swish_91[0][0] \n__________________________________________________________________________________________________\nconv2d_123 (Conv2D) (None, 8, 8, 448) 731136 multiply_31[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_92 (BatchNo (None, 8, 8, 448) 1792 conv2d_123[0][0] \n__________________________________________________________________________________________________\nconv2d_124 (Conv2D) (None, 8, 8, 2688) 1204224 batch_normalization_92[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_93 (BatchNo (None, 8, 8, 2688) 10752 conv2d_124[0][0] \n__________________________________________________________________________________________________\nswish_93 (Swish) (None, 8, 8, 2688) 0 
batch_normalization_93[0][0] \n__________________________________________________________________________________________________\ndepthwise_conv2d_32 (DepthwiseC (None, 8, 8, 2688) 24192 swish_93[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_94 (BatchNo (None, 8, 8, 2688) 10752 depthwise_conv2d_32[0][0] \n__________________________________________________________________________________________________\nswish_94 (Swish) (None, 8, 8, 2688) 0 batch_normalization_94[0][0] \n__________________________________________________________________________________________________\nlambda_32 (Lambda) (None, 1, 1, 2688) 0 swish_94[0][0] \n__________________________________________________________________________________________________\nconv2d_125 (Conv2D) (None, 1, 1, 112) 301168 lambda_32[0][0] \n__________________________________________________________________________________________________\nswish_95 (Swish) (None, 1, 1, 112) 0 conv2d_125[0][0] \n__________________________________________________________________________________________________\nconv2d_126 (Conv2D) (None, 1, 1, 2688) 303744 swish_95[0][0] \n__________________________________________________________________________________________________\nactivation_32 (Activation) (None, 1, 1, 2688) 0 conv2d_126[0][0] \n__________________________________________________________________________________________________\nmultiply_32 (Multiply) (None, 8, 8, 2688) 0 activation_32[0][0] \n swish_94[0][0] \n__________________________________________________________________________________________________\nconv2d_127 (Conv2D) (None, 8, 8, 448) 1204224 multiply_32[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_95 (BatchNo (None, 8, 8, 448) 1792 conv2d_127[0][0] \n__________________________________________________________________________________________________\ndrop_connect_25 (DropConnect) (None, 8, 8, 448) 0 batch_normalization_95[0][0] \n__________________________________________________________________________________________________\nadd_25 (Add) (None, 8, 8, 448) 0 drop_connect_25[0][0] \n batch_normalization_92[0][0] \n__________________________________________________________________________________________________\nconv2d_128 (Conv2D) (None, 8, 8, 1792) 802816 add_25[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_96 (BatchNo (None, 8, 8, 1792) 7168 conv2d_128[0][0] \n__________________________________________________________________________________________________\nswish_96 (Swish) (None, 8, 8, 1792) 0 batch_normalization_96[0][0] \n__________________________________________________________________________________________________\nglobal_average_pooling2d_1 (Glo (None, 1792) 0 swish_96[0][0] \n__________________________________________________________________________________________________\nfinal_output (Dense) (None, 1) 1793 global_average_pooling2d_1[0][0] \n==================================================================================================\nTotal params: 17,675,609\nTrainable params: 17,550,409\nNon-trainable params: 125,200\n__________________________________________________________________________________________________\n" ], [ "history = model.fit_generator(generator=train_generator,\n steps_per_epoch=STEP_SIZE_TRAIN,\n validation_data=valid_generator,\n 
validation_steps=STEP_SIZE_VALID,\n                              epochs=EPOCHS,\n                              callbacks=callback_list,\n                              verbose=2).history", "Epoch 1/20\n - 467s - loss: 0.9229 - acc: 0.3720 - val_loss: 0.4854 - val_acc: 0.5078\nEpoch 2/20\n - 429s - loss: 0.7291 - acc: 0.4226 - val_loss: 0.3720 - val_acc: 0.6990\nEpoch 3/20\n - 429s - loss: 0.6848 - acc: 0.4571 - val_loss: 0.3860 - val_acc: 0.7304\nEpoch 4/20\n - 429s - loss: 0.6829 - acc: 0.4571 - val_loss: 0.4546 - val_acc: 0.6890\nEpoch 5/20\n - 429s - loss: 0.6728 - acc: 0.4615 - val_loss: 0.3843 - val_acc: 0.7061\nEpoch 6/20\n - 429s - loss: 0.6571 - acc: 0.4761 - val_loss: 0.3779 - val_acc: 0.7703\nEpoch 7/20\n - 430s - loss: 0.6442 - acc: 0.4803 - val_loss: 0.4148 - val_acc: 0.6519\nRestoring model weights from the end of the best epoch\nEpoch 00007: early stopping\n" ], [ "fig, ax = plt.subplots(1, 1, sharex='col', figsize=(20, 4))\n\nax.plot(cosine_lr.learning_rates)\nax.set_title('Fine-tune learning rates')\n\nplt.xlabel('Steps')\nplt.ylabel('Learning rate')\nsns.despine()\nplt.show()", "_____no_output_____" ] ] ], [ [ "# Model loss graph ", "_____no_output_____" ] ], [ [ "plot_metrics(history)", "_____no_output_____" ], [ "# Create empty arrays to keep the predictions and labels\ndf_preds = pd.DataFrame(columns=['label', 'pred', 'set'])\ntrain_generator.reset()\nvalid_generator.reset()\n\n# Add train predictions and labels\nfor i in range(STEP_SIZE_TRAIN + 1):\n    im, lbl = next(train_generator)\n    preds = model.predict(im, batch_size=train_generator.batch_size)\n    for index in range(len(preds)):\n        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']\n\n# Add validation predictions and labels\nfor i in range(STEP_SIZE_VALID + 1):\n    im, lbl = next(valid_generator)\n    preds = model.predict(im, batch_size=valid_generator.batch_size)\n    for index in range(len(preds)):\n        df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']\n\ndf_preds['label'] = df_preds['label'].astype('int')", "_____no_output_____" ], [ "# Classify predictions\ndf_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))\n\ntrain_preds = df_preds[df_preds['set'] == 'train']\nvalidation_preds = df_preds[df_preds['set'] == 'validation']", "_____no_output_____" ] ], [ [ "# Model Evaluation", "_____no_output_____", "## Confusion Matrix\n\n### Original thresholds", "_____no_output_____" ] ], [ [ "plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))", "_____no_output_____" ] ], [ [ "## Quadratic Weighted Kappa", "_____no_output_____" ] ], [ [ "evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions']))", "Train Cohen Kappa score: 0.738\nValidation Cohen Kappa score: 0.899\nComplete set Cohen Kappa score: 0.746\n" ] ], [ [ "## Apply model to test set and output predictions", "_____no_output_____" ] ], [ [ "preds = apply_tta(model, test_generator, TTA_STEPS)\npredictions = [classify(x) for x in preds]\n\nresults = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})\nresults['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])", "_____no_output_____" ], [ "# Cleaning created directories\nif os.path.exists(train_dest_path):\n    shutil.rmtree(train_dest_path)\nif os.path.exists(validation_dest_path):\n    shutil.rmtree(validation_dest_path)\nif os.path.exists(test_dest_path):\n    shutil.rmtree(test_dest_path)", "_____no_output_____" ] ], [ [ "# Predictions class distribution", "_____no_output_____" ] ], [ [ 
"fig = plt.subplots(sharex='col', figsize=(24, 8.7))\nsns.countplot(x=\"diagnosis\", data=results, palette=\"GnBu_d\").set_title('Test')\nsns.despine()\nplt.show()", "_____no_output_____" ], [ "results.to_csv('submission.csv', index=False)\ndisplay(results.head())", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06da2cb9b901c10cc39873cef7f2b67c56b3279
3,254
ipynb
Jupyter Notebook
ksStatsPy.ipynb
tastiz/story_scape.html
9376c422d1d65f251edd39bbc0d55b45f7fdce31
[ "CC-BY-3.0" ]
null
null
null
ksStatsPy.ipynb
tastiz/story_scape.html
9376c422d1d65f251edd39bbc0d55b45f7fdce31
[ "CC-BY-3.0" ]
null
null
null
ksStatsPy.ipynb
tastiz/story_scape.html
9376c422d1d65f251edd39bbc0d55b45f7fdce31
[ "CC-BY-3.0" ]
null
null
null
30.990476
231
0.478795
[ [ [ "<a href=\"https://colab.research.google.com/github/tastiz/story_scape.html/blob/master/ksStatsPy.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "# To keep the page organized do all imports here\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom scipy import stats\n\n# Database credentials\npostgres_user = 'dabc_student'\npostgres_pw = '7*.8G9QH21'\npostgres_host = '142.93.121.174'\npostgres_port = '5432'\npostgres_db = 'kickstarterprojects'\n\n# use the credentials to start a connection\nengine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(\n postgres_user, postgres_pw, postgres_host, postgres_port, postgres_db))\n\nprojects_df = pd.read_sql_table('ksprojects', con=engine)\n\n# remove the connection\nengine.dispose()\n\n#projects_df.shape\n\n#describes column name and fill tyope\n#projects_df.info()\n\n#projects_df.head(2)\n\n# count the number of unique values in this column\nprojects_df['category'].nunique()\n\n# find the frequency of each value in the column\ncategory_counts = projects_df['category'].value_counts()\n\n# only print the first 10, because 158 are too many to print\n#category_counts.head(10)\n\nd", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d06dab8312c528adcab1f9f3a1f1de70d1a52bd8
20,329
ipynb
Jupyter Notebook
translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb
kw90/ctw_translation_toolchain
749c1ce4c06e428e19efe24c9901a067b04e8082
[ "BSD-3-Clause" ]
4
2021-06-06T15:51:15.000Z
2021-12-20T17:07:47.000Z
translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb
kw90/ctw_translation_toolchain
749c1ce4c06e428e19efe24c9901a067b04e8082
[ "BSD-3-Clause" ]
null
null
null
translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb
kw90/ctw_translation_toolchain
749c1ce4c06e428e19efe24c9901a067b04e8082
[ "BSD-3-Clause" ]
2
2021-06-06T15:51:22.000Z
2021-07-27T06:05:57.000Z
20,329
20,329
0.670815
[ [ [ "# Translate `dzn` to `smt2` for z3", "_____no_output_____" ], [ "### Check Versions of Tools", "_____no_output_____" ] ], [ [ "import os\nimport subprocess\n\nmy_env = os.environ.copy()\noutput = subprocess.check_output(f'''/home/{my_env['USER']}/optimathsat/bin/optimathsat -version''', shell=True, universal_newlines=True)\noutput", "_____no_output_____" ], [ "output = subprocess.check_output(f'''/home/{my_env['USER']}/minizinc/build/minizinc --version''', shell=True, universal_newlines=True)\noutput", "_____no_output_____" ], [ "output = subprocess.check_output(f'''/home/{my_env['USER']}/z3/build/z3 --version''', shell=True, universal_newlines=True)\noutput", "_____no_output_____" ] ], [ [ "First generate the FlatZinc files using the MiniZinc tool. Make sure that a `smt2` folder is located inside `./minizinc/share/minizinc/`. Else, to enable OptiMathSAT's support for global constraints download the [smt2.tar.gz](http://optimathsat.disi.unitn.it/data/smt2.tar.gz) package and unpack it there using\n\n```zsh\ntar xf smt2.tar.gz $MINIZINC_PATH/share/minizinc/\n```\n\nIf next output shows a list of `.mzn` files, then this dependency is satified.", "_____no_output_____" ] ], [ [ "output = subprocess.check_output(f'''ls -la /home/{my_env['USER']}/minizinc/share/minizinc/smt2/''', shell=True, universal_newlines=True)\nprint(output)", "total 292\ndrwxr-xr-x 2 jovyan jovyan 4096 Jan 15 2018 .\ndrwxr-xr-x 11 jovyan jovyan 4096 Jul 11 12:34 ..\n-rw-r--r-- 1 jovyan jovyan 328 Nov 13 2017 alldifferent_except_0.mzn\n-rw-r--r-- 1 jovyan jovyan 382 Nov 13 2017 all_different_int.mzn\n-rw-r--r-- 1 jovyan jovyan 396 Nov 13 2017 all_different_set.mzn\n-rw-r--r-- 1 jovyan jovyan 270 Nov 13 2017 all_disjoint.mzn\n-rw-r--r-- 1 jovyan jovyan 150 Nov 14 2017 all_equal_int.mzn\n-rw-r--r-- 1 jovyan jovyan 164 Nov 13 2017 all_equal_set.mzn\n-rw-r--r-- 1 jovyan jovyan 351 Nov 13 2017 among.mzn\n-rw-r--r-- 1 jovyan jovyan 305 Nov 8 2017 arg_max_float.mzn\n-rw-r--r-- 1 jovyan jovyan 291 Nov 8 2017 arg_max_int.mzn\n-rw-r--r-- 1 jovyan jovyan 306 Nov 8 2017 arg_min_float.mzn\n-rw-r--r-- 1 jovyan jovyan 291 Nov 8 2017 arg_min_int.mzn\n-rw-r--r-- 1 jovyan jovyan 480 Nov 13 2017 at_least_int.mzn\n-rw-r--r-- 1 jovyan jovyan 506 Nov 14 2017 at_least_set.mzn\n-rw-r--r-- 1 jovyan jovyan 340 Nov 13 2017 at_most1.mzn\n-rw-r--r-- 1 jovyan jovyan 474 Nov 13 2017 at_most_int.mzn\n-rw-r--r-- 1 jovyan jovyan 502 Nov 13 2017 at_most_set.mzn\n-rw-r--r-- 1 jovyan jovyan 1162 Nov 8 2017 bin_packing_capa.mzn\n-rw-r--r-- 1 jovyan jovyan 1044 Nov 8 2017 bin_packing_load.mzn\n-rw-r--r-- 1 jovyan jovyan 883 Nov 14 2017 bin_packing.mzn\n-rw-r--r-- 1 jovyan jovyan 765 Nov 14 2017 comparison_rel_array.mzn\n-rw-r--r-- 1 jovyan jovyan 350 Nov 13 2017 count_eq.mzn\n-rw-r--r-- 1 jovyan jovyan 382 Nov 13 2017 count_geq.mzn\n-rw-r--r-- 1 jovyan jovyan 375 Nov 13 2017 count_gt.mzn\n-rw-r--r-- 1 jovyan jovyan 379 Nov 13 2017 count_leq.mzn\n-rw-r--r-- 1 jovyan jovyan 371 Nov 13 2017 count_lt.mzn\n-rw-r--r-- 1 jovyan jovyan 371 Nov 13 2017 count_neq.mzn\n-rw-r--r-- 1 jovyan jovyan 398 Nov 13 2017 decreasing_bool.mzn\n-rw-r--r-- 1 jovyan jovyan 404 Nov 13 2017 decreasing_float.mzn\n-rw-r--r-- 1 jovyan jovyan 393 Nov 13 2017 decreasing_int.mzn\n-rw-r--r-- 1 jovyan jovyan 408 Nov 14 2017 decreasing_set.mzn\n-rw-r--r-- 1 jovyan jovyan 1589 Nov 13 2017 diffn_k.mzn\n-rw-r--r-- 1 jovyan jovyan 853 Nov 14 2017 diffn.mzn\n-rw-r--r-- 1 jovyan jovyan 1731 Nov 13 2017 diffn_nonstrict_k.mzn\n-rw-r--r-- 1 jovyan jovyan 919 Nov 14 2017 
diffn_nonstrict.mzn\n-rw-r--r-- 1 jovyan jovyan 276 Nov 14 2017 disjoint.mzn\n-rw-r--r-- 1 jovyan jovyan 836 Nov 8 2017 disjunctive.mzn\n-rw-r--r-- 1 jovyan jovyan 748 Nov 8 2017 disjunctive_strict.mzn\n-rw-r--r-- 1 jovyan jovyan 696 Nov 14 2017 distribute.mzn\n-rw-r--r-- 1 jovyan jovyan 474 Nov 13 2017 exactly_int.mzn\n-rw-r--r-- 1 jovyan jovyan 502 Nov 13 2017 exactly_set.mzn\n-rw-r--r-- 1 jovyan jovyan 851 Nov 14 2017 global_cardinality_closed.mzn\n-rw-r--r-- 1 jovyan jovyan 396 Nov 8 2017 global_cardinality_fn.mzn\n-rw-r--r-- 1 jovyan jovyan 914 Nov 13 2017 global_cardinality_low_up_closed.mzn\n-rw-r--r-- 1 jovyan jovyan 795 Nov 13 2017 global_cardinality_low_up.mzn\n-rw-r--r-- 1 jovyan jovyan 717 Nov 14 2017 global_cardinality.mzn\n-rw-r--r-- 1 jovyan jovyan 398 Nov 13 2017 increasing_bool.mzn\n-rw-r--r-- 1 jovyan jovyan 403 Nov 13 2017 increasing_float.mzn\n-rw-r--r-- 1 jovyan jovyan 394 Nov 13 2017 increasing_int.mzn\n-rw-r--r-- 1 jovyan jovyan 408 Nov 13 2017 increasing_set.mzn\n-rw-r--r-- 1 jovyan jovyan 728 Nov 14 2017 int_set_channel.mzn\n-rw-r--r-- 1 jovyan jovyan 582 Nov 8 2017 inverse.mzn\n-rw-r--r-- 1 jovyan jovyan 827 Nov 8 2017 inverse_set.mzn\n-rw-r--r-- 1 jovyan jovyan 708 Nov 14 2017 link_set_to_booleans.mzn\n-rw-r--r-- 1 jovyan jovyan 375 Nov 11 2017 maximum_float.mzn\n-rw-r--r-- 1 jovyan jovyan 367 Nov 11 2017 maximum_int.mzn\n-rw-r--r-- 1 jovyan jovyan 422 Nov 13 2017 member_bool.mzn\n-rw-r--r-- 1 jovyan jovyan 431 Nov 13 2017 member_float.mzn\n-rw-r--r-- 1 jovyan jovyan 414 Nov 13 2017 member_int.mzn\n-rw-r--r-- 1 jovyan jovyan 442 Nov 13 2017 member_set.mzn\n-rw-r--r-- 1 jovyan jovyan 372 Nov 11 2017 minimum_float.mzn\n-rw-r--r-- 1 jovyan jovyan 367 Nov 11 2017 minimum_int.mzn\n-rw-r--r-- 1 jovyan jovyan 283 Nov 13 2017 nvalue.mzn\n-rw-r--r-- 1 jovyan jovyan 712 Nov 14 2017 range.mzn\n-rw-r--r-- 1 jovyan jovyan 1751 Nov 13 2017 redefinitions-2.0.2.mzn\n-rw-r--r-- 1 jovyan jovyan 1434 Nov 13 2017 redefinitions-2.0.mzn\n-rw-r--r-- 1 jovyan jovyan 678 Nov 15 2017 redefinitions-2.1.mzn\n-rw-r--r-- 1 jovyan jovyan 571 Nov 14 2017 roots.mzn\n-rw-r--r-- 1 jovyan jovyan 764 Nov 8 2017 sum_pred.mzn\n-rw-r--r-- 1 jovyan jovyan 445 Nov 15 2017 symmetric_all_different.mzn\n-rw-r--r-- 1 jovyan jovyan 280 Nov 15 2017 value_precede_int.mzn\n-rw-r--r-- 1 jovyan jovyan 294 Nov 15 2017 value_precede_set.mzn\n\n" ] ], [ [ "## Transform `dzn` to `fzn` Using a `mzn` Model", "_____no_output_____" ], [ "Then transform the desired `.dzn` file to `.fzn` using a `Mz.mzn` MiniZinc model.", "_____no_output_____" ], [ "First list all `dzn` files contained in the `dzn_path` that should get processed.", "_____no_output_____" ] ], [ [ "import os\n\ndzn_files = []\ndzn_path = f'''/home/{my_env['USER']}/data/dzn/'''\n\nfor filename in os.listdir(dzn_path):\n if filename.endswith(\".dzn\"):\n dzn_files.append(filename)\nlen(dzn_files)", "_____no_output_____" ] ], [ [ "#### Model $Mz_1$", "_____no_output_____" ] ], [ [ "import sys\n\nfzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz1-noAbs/'''\nminizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \\\n -Werror \\\n --compile --solver org.minizinc.mzn-fzn \\\n --search-dir /home/{my_env['USER']}/minizinc/share/minizinc/smt2/ \\\n /home/{my_env['USER']}/models/mzn/Mz1-noAbs.mzn '''\ntranslate_count = 0\nfor dzn in dzn_files:\n translate_count += 1\n minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \\\n + ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'\n print(f'''\\r({translate_count}/{len(dzn_files)}) 
Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')\n    sys.stdout.flush()\n    subprocess.check_output(minizinc_transform_cmd, shell=True, \n                            universal_newlines=True)", "(278/278) Translating /home/jovyan/data/dzn/R028.dzn to /home/jovyan/data/fzn/smt2/Mz1-noAbs/R028-dzn.fzn" ] ], [ [ "#### Model $Mz_2$", "_____no_output_____" ] ], [ [ "import sys\n\nfzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz2-noAbs/'''\nminizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \\\n    -Werror \\\n    --compile --solver org.minizinc.mzn-fzn \\\n    --search-dir /home/{my_env['USER']}/minizinc/share/minizinc/smt2/ \\\n    /home/{my_env['USER']}/models/mzn/Mz2-noAbs.mzn '''\ntranslate_count = 0\nfor dzn in dzn_files:\n    translate_count += 1\n    minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \\\n        + ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'\n    print(f'''\\r({translate_count}/{len(dzn_files)}) Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')\n    sys.stdout.flush()\n    subprocess.check_output(minizinc_transform_cmd, shell=True, \n                            universal_newlines=True)", "(278/278) Translating /home/jovyan/data/dzn/R028.dzn to /home/jovyan/data/fzn/smt2/Mz2-noAbs/R028-dzn.fzn" ] ], [ [ "## Translate `fzn` to `smt2`", "_____no_output_____", "The generated `.fzn` files can be used to generate `.smt2` files using the `fzn2smt2.py` script from this [project](https://github.com/PatrickTrentin88/fzn2omt).\n\n**NOTE**: Files `R001` (no cables) and `R002` (one one-sided cable) throw an error while translating.", "_____no_output_____", "#### $Mz_1$", "_____no_output_____" ] ], [ [ "import os\n\nfzn_files = []\nfzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz1-noAbs/'''\n\nfor filename in os.listdir(fzn_path):\n    if filename.endswith(\".fzn\"):\n        fzn_files.append(filename)\nlen(fzn_files)", "_____no_output_____" ], [ "smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs/'''\nfzn2smt2_base_cmd = f'''/home/{my_env['USER']}/fzn2omt/bin/fzn2z3.py'''\ntranslate_count = 0\nmy_env = os.environ.copy()\nmy_env['PATH'] = f'''/home/{my_env['USER']}/optimathsat/bin/:{my_env['PATH']}'''\nmy_env['PATH'] = f'''/home/{my_env['USER']}/z3/build/:{my_env['PATH']}'''\nfor fzn in fzn_files:\n    translate_count += 1\n    fzn2smt2_transform_cmd = f'''{fzn2smt2_base_cmd} {fzn_path}{fzn} --smt2 {smt2_path}{fzn.replace('.', '-')}.smt2'''\n    print(f'''\\r({translate_count}/{len(fzn_files)}) Translating {fzn_path + fzn} to {smt2_path + fzn.replace('.', '-')}.smt2''', end='')\n    try:\n        output = subprocess.check_output(fzn2smt2_transform_cmd,\n                                         shell=True,env=my_env, \n                                         universal_newlines=True)\n    except Exception as e:\n        output = str(e.output)\n        print(f'''\\r{output}''', end='')\n    sys.stdout.flush()", "(278/278) Translating /home/jovyan/data/fzn/smt2/Mz1-noAbs/R079-dzn.fzn to /home/jovyan/data/smt2/z3/Mz1-noAbs/R079-dzn-fzn.smt2\r" ] ], [ [ "#### $Mz_2$", "_____no_output_____" ] ], [ [ "import os\n\nfzn_files = []\nfzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz2-noAbs/'''\n\nfor filename in os.listdir(fzn_path):\n    if filename.endswith(\".fzn\"):\n        fzn_files.append(filename)\nlen(fzn_files)", "_____no_output_____" ], [ "smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs/'''\nfzn2smt2_base_cmd = f'''/home/{my_env['USER']}/fzn2omt/bin/fzn2z3.py'''\ntranslate_count = 0\nmy_env = os.environ.copy()\nmy_env['PATH'] = f'''/home/{my_env['USER']}/optimathsat/bin/:{my_env['PATH']}'''\nmy_env['PATH'] = 
f'''/home/{my_env['USER']}/z3/build/:{my_env['PATH']}'''\nfor fzn in fzn_files:\n translate_count += 1\n fzn2smt2_transform_cmd = f'''{fzn2smt2_base_cmd} {fzn_path}{fzn} --smt2 {smt2_path}{fzn.replace('.', '-')}.smt2'''\n print(f'''\\r({translate_count}/{len(fzn_files)}) Translating {fzn_path + fzn} to {smt2_path + fzn.replace('.', '-')}.smt2''', end='')\n try:\n output = subprocess.check_output(fzn2smt2_transform_cmd,\n shell=True,env=my_env, \n universal_newlines=True)\n except Exception as e:\n output = str(e.output)\n print(f'''\\r{output}''', end='')\n sys.stdout.flush()", "(278/278) Translating /home/jovyan/data/fzn/smt2/Mz2-noAbs/R079-dzn.fzn to /home/jovyan/data/smt2/z3/Mz2-noAbs/R079-dzn-fzn.smt2\r" ] ], [ [ "### Adjust `smt2` Files According to Chapter 5.2\n\n- Add lower and upper bounds for the decision variable `pfc`\n- Add number of cavities as comments for later solution extraction (workaround)", "_____no_output_____" ] ], [ [ "import os\nimport re\n\ndef adjust_smt2_file(smt2_path: str, file: str, write_path: str):\n\n with open(smt2_path+'/'+file, 'r+') as myfile:\n data = \"\".join(line for line in myfile)\n\n filename = os.path.splitext(file)[0]\n\n newFile = open(os.path.join(write_path, filename +'.smt2'),\"w+\")\n newFile.write(data)\n newFile.close()\n\n openFile = open(os.path.join(write_path, filename +'.smt2'))\n data = openFile.readlines()\n additionalLines = data[-5:]\n data = data[:-5]\n openFile.close()\n\n newFile = open(os.path.join(write_path, filename +'.smt2'),\"w+\")\n newFile.writelines([item for item in data])\n newFile.close()\n\n with open(os.path.join(write_path, filename +'.smt2'),\"r\") as myfile:\n data = \"\".join(line for line in myfile)\n newFile = open(os.path.join(write_path, filename +'.smt2'),\"w+\")\n matches = re.findall(r'\\(define-fun .\\d\\d \\(\\) Int (\\d+)\\)', data)\n try:\n cavity_count = int(matches[0])\n newFile.write(f''';; k={cavity_count}\\n''')\n newFile.write(f''';; Extract pfc from\\n''')\n for i in range(0,cavity_count):\n newFile.write(f''';; X_INTRODUCED_{str(i)}_\\n''')\n newFile.write(data)\n for i in range(1,cavity_count+1):\n lb = f'''(define-fun lbound{str(i)} () Bool (> X_INTRODUCED_{str(i-1)}_ 0))\\n'''\n ub = f'''(define-fun ubound{str(i)} () Bool (<= X_INTRODUCED_{str(i-1)}_ {str(cavity_count)}))\\n'''\n assertLb = f'''(assert lbound{str(i)})\\n'''\n assertUb = f'''(assert ubound{str(i)})\\n'''\n\n newFile.write(lb)\n newFile.write(ub)\n newFile.write(assertLb)\n newFile.write(assertUb)\n except:\n print(f'''\\nCheck {filename} for completeness - data missing?''')\n newFile.writelines([item for item in additionalLines])\n newFile.close()", "_____no_output_____" ] ], [ [ "#### $Mz_1$", "_____no_output_____" ] ], [ [ "import os\n\nsmt2_files = []\nsmt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs'''\n\nfor filename in os.listdir(smt2_path):\n if filename.endswith(\".smt2\"):\n smt2_files.append(filename)\nlen(smt2_files)", "_____no_output_____" ], [ "fix_count = 0\nfor smt2 in smt2_files:\n fix_count += 1\n print(f'''\\r{fix_count}/{len(smt2_files)} Fixing file {smt2}''', end='')\n adjust_smt2_file(smt2_path=smt2_path, file=smt2, write_path=f'''{smt2_path}''')\n sys.stdout.flush()", "49/278 Fixing file R002-dzn-fzn.smt2\nCheck R002-dzn-fzn for completeness - data missing?\n150/278 Fixing file R001-dzn-fzn.smt2\nCheck R001-dzn-fzn for completeness - data missing?\n278/278 Fixing file R166-dzn-fzn.smt2" ] ], [ [ "#### $Mz_2$", "_____no_output_____" ] ], [ [ "import os\n\nsmt2_files = []\nsmt2_path = 
f'''/home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs'''\n\nfor filename in os.listdir(smt2_path):\n    if filename.endswith(\".smt2\"):\n        smt2_files.append(filename)\nlen(smt2_files)", "_____no_output_____" ], [ "fix_count = 0\nfor smt2 in smt2_files:\n    fix_count += 1\n    print(f'''\\r{fix_count}/{len(smt2_files)} Fixing file {smt2}''', end='')\n    adjust_smt2_file(smt2_path=smt2_path, file=smt2, write_path=f'''{smt2_path}''')\n    sys.stdout.flush()", "49/278 Fixing file R002-dzn-fzn.smt2\nCheck R002-dzn-fzn for completeness - data missing?\n150/278 Fixing file R001-dzn-fzn.smt2\nCheck R001-dzn-fzn for completeness - data missing?\n278/278 Fixing file R166-dzn-fzn.smt2" ] ], [ [ "## Test Generated `smt2` Files Using `z3`\n\nThis should generate the `smt2` files without any error. If that was the case, then the `z3` prover can be called on a file by running\n\n\n```zsh\nz3 output/A001-dzn-smt2-fzn.smt2 \n```\n\nyielding something similar to\n\n```zsh\nz3 output/A001-dzn-smt2-fzn.smt2 \nsat\n(objectives\n (obj 41881)\n)\n(model \n (define-fun X_INTRODUCED_981_ () Bool\n false)\n (define-fun X_INTRODUCED_348_ () Bool\n false)\n \n .....\n```", "_____no_output_____", "#### Test with `smt2` from $Mz_1$", "_____no_output_____" ] ], [ [ "command = f'''/home/{my_env['USER']}/z3/build/z3 /home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs/A001-dzn-fzn.smt2'''\nprint(command)\ntry:\n    result = subprocess.check_output(command, shell=True, universal_newlines=True)\nexcept Exception as e:\n    print(e.output)\nprint(result)", "_____no_output_____" ] ], [ [ "#### Test with `smt2` from $Mz_2$", "_____no_output_____" ] ], [ [ "result = subprocess.check_output(\n    f'''/home/{my_env['USER']}/z3/build/z3 \\\n    /home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs/v3/A004-dzn-fzn_v3.smt2''',\n    shell=True, universal_newlines=True)\nprint(result)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06dc30517ba0574de75d4ec6b6c5cf59c911788
21,082
ipynb
Jupyter Notebook
cincinnati_salaries.ipynb
doedotdev/cincinnati-salaries
61b39b435cdddb33e3b177e8d512e60086f08ec7
[ "MIT" ]
null
null
null
cincinnati_salaries.ipynb
doedotdev/cincinnati-salaries
61b39b435cdddb33e3b177e8d512e60086f08ec7
[ "MIT" ]
null
null
null
cincinnati_salaries.ipynb
doedotdev/cincinnati-salaries
61b39b435cdddb33e3b177e8d512e60086f08ec7
[ "MIT" ]
null
null
null
39.852552
164
0.413718
[ [ [ "### Cincinnati Salaries\n- https://data.cincinnati-oh.gov/Efficient-Service-Delivery/City-of-Cincinnati-Employees-w-Salaries/wmj4-ygbf", "_____no_output_____" ] ], [ [ "! pip install sodapy\n! pip install pandas", "WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\nPlease see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\nTo avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\nRequirement already satisfied: sodapy in /opt/conda/lib/python3.7/site-packages (2.0.0)\nRequirement already satisfied: requests>=2.20.0 in /opt/conda/lib/python3.7/site-packages (from sodapy) (2.22.0)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests>=2.20.0->sodapy) (2019.11.28)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests>=2.20.0->sodapy) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests>=2.20.0->sodapy) (1.25.7)\nRequirement already satisfied: idna<2.9,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests>=2.20.0->sodapy) (2.8)\nWARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\nPlease see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\nTo avoid this problem you can invoke Python with '-m pip' instead of running pip directly.\nRequirement already satisfied: pandas in /opt/conda/lib/python3.7/site-packages (0.25.3)\nRequirement already satisfied: pytz>=2017.2 in /opt/conda/lib/python3.7/site-packages (from pandas) (2019.3)\nRequirement already satisfied: numpy>=1.13.3 in /opt/conda/lib/python3.7/site-packages (from pandas) (1.17.5)\nRequirement already satisfied: python-dateutil>=2.6.1 in /opt/conda/lib/python3.7/site-packages (from pandas) (2.8.1)\nRequirement already satisfied: six>=1.5 in /opt/conda/lib/python3.7/site-packages (from python-dateutil>=2.6.1->pandas) (1.14.0)\n" ], [ "import pandas as pd\nfrom sodapy import Socrata\n\n# Unauthenticated client only works with public data sets. Note 'None'\n# in place of application token, and no username or password:\nclient = Socrata(\"data.cincinnati-oh.gov\", None)\n\n# Example authenticated client (needed for non-public datasets):\n# client = Socrata(data.cincinnati-oh.gov,\n# MyAppToken,\n# userame=\"[email protected]\",\n# password=\"AFakePassword\")\n\n# First 2000 results, returned as JSON from API / converted to Python list of\n# dictionaries by sodapy.\nresults = client.get(\"wmj4-ygbf\", limit=10000)\n\n# Convert to pandas DataFrame\nresults_df = pd.DataFrame.from_records(results)", "WARNING:root:Requests made without an app_token will be subject to strict throttling limits.\n" ], [ "results_df", "_____no_output_____" ], [ "max(pd.to_numeric(results_df['annual_rt']))", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06dc5d6cc1f906660f7a39674877a240027e4b7
38,697
ipynb
Jupyter Notebook
colaboratory_introduction.ipynb
karlkirschner/2020_Scientific_Programming
e7830468194eb2ef7824bc46f6d9ee112c652e35
[ "MIT" ]
2
2020-03-30T12:24:57.000Z
2020-03-30T13:34:27.000Z
colaboratory_introduction.ipynb
karlkirschner/2020_Scientific_Programming
e7830468194eb2ef7824bc46f6d9ee112c652e35
[ "MIT" ]
null
null
null
colaboratory_introduction.ipynb
karlkirschner/2020_Scientific_Programming
e7830468194eb2ef7824bc46f6d9ee112c652e35
[ "MIT" ]
2
2020-04-18T10:23:47.000Z
2020-05-01T10:38:21.000Z
106.31044
27,989
0.866295
[ [ [ "<p><img alt=\"Colaboratory logo\" height=\"45px\" src=\"/img/colab_favicon.ico\" align=\"left\" hspace=\"10px\" vspace=\"0px\"></p>\n\n<h1>Welcome to Colaboratory!</h1>\n\n\nColaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.\n\nWith Colaboratory you can write and execute code, save and share your analyses, and access powerful computing resources, all for free from your browser.", "_____no_output_____" ] ], [ [ "#@title Introducing Colaboratory { display-mode: \"form\" }\n#@markdown This 3-minute video gives an overview of the key features of Colaboratory:\nfrom IPython.display import YouTubeVideo\nYouTubeVideo('inN8seMm7UI', width=600, height=400)", "_____no_output_____" ] ], [ [ "## Getting Started\n\nThe document you are reading is a [Jupyter notebook](https://jupyter.org/), hosted in Colaboratory. It is not a static page, but an interactive environment that lets you write and execute code in Python and other languages.\n\nFor example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:", "_____no_output_____" ] ], [ [ "seconds_in_a_day = 24 * 60 * 60\nseconds_in_a_day", "_____no_output_____" ] ], [ [ "To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut \"Command/Ctrl+Enter\".\n\nAll cells modify the same global state, so variables that you define by executing a cell can be used in other cells:", "_____no_output_____" ] ], [ [ "seconds_in_a_week = 7 * seconds_in_a_day\nseconds_in_a_week", "_____no_output_____" ] ], [ [ "For more information about working with Colaboratory notebooks, see [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb).\n", "_____no_output_____" ], [ "---\n# Cells\nA notebook is a list of cells. Cells contain either explanatory text or executable code and its output. Click a cell to select it.", "_____no_output_____" ], [ "## Code cells\nBelow is a **code cell**. Once the toolbar button indicates CONNECTED, click in the cell to select it and execute the contents in the following ways:\n\n* Click the **Play icon** in the left gutter of the cell;\n* Type **Cmd/Ctrl+Enter** to run the cell in place;\n* Type **Shift+Enter** to run the cell and move focus to the next cell (adding one if none exists); or\n* Type **Alt+Enter** to run the cell and insert a new code cell immediately below it.\n\nThere are additional options for running some or all cells in the **Runtime** menu.\n", "_____no_output_____" ] ], [ [ "a = 13\na", "_____no_output_____" ] ], [ [ "## Text cells\nThis is a **text cell**. You can **double-click** to edit this cell. Text cells\nuse markdown syntax. To learn more, see our [markdown\nguide](/notebooks/markdown_guide.ipynb).\n\nYou can also add math to text cells using [LaTeX](http://www.latex-project.org/)\nto be rendered by [MathJax](https://www.mathjax.org). Just place the statement\nwithin a pair of **\\$** signs. For example `$\\sqrt{3x-1}+(1+x)^2$` becomes\n$\\sqrt{3x-1}+(1+x)^2.$\n", "_____no_output_____" ], [ "## Adding and moving cells\nYou can add new cells by using the **+ CODE** and **+ TEXT** buttons that show when you hover between cells. These buttons are also in the toolbar above the notebook where they can be used to add a cell below the currently selected cell.\n\nYou can move a cell by selecting it and clicking **Cell Up** or **Cell Down** in the top toolbar. 
\n\nConsecutive cells can be selected by \"lasso selection\" by dragging from outside one cell and through the group. Non-adjacent cells can be selected concurrently by clicking one and then holding down Ctrl while clicking another. Similarly, using Shift instead of Ctrl will select all intermediate cells.", "_____no_output_____" ], [ "# Integration with Drive\n\nColaboratory is integrated with Google Drive. It allows you to share, comment, and collaborate on the same document with multiple people:\n\n* The **SHARE** button (top-right of the toolbar) allows you to share the notebook and control permissions set on it.\n\n* **File->Make a Copy** creates a copy of the notebook in Drive.\n\n* **File->Save** saves the File to Drive. **File->Save and checkpoint** pins the version so it doesn't get deleted from the revision history. \n\n* **File->Revision history** shows the notebook's revision history. ", "_____no_output_____" ], [ "## Commenting on a cell\nYou can comment on a Colaboratory notebook like you would on a Google Document. Comments are attached to cells, and are displayed next to the cell they refer to. If you have **comment-only** permissions, you will see a comment button on the top right of the cell when you hover over it.\n\nIf you have edit or comment permissions you can comment on a cell in one of three ways: \n\n1. Select a cell and click the comment button in the toolbar above the top-right corner of the cell.\n1. Right click a text cell and select **Add a comment** from the context menu.\n3. Use the shortcut **Ctrl+Shift+M** to add a comment to the currently selected cell. \n\nYou can resolve and reply to comments, and you can target comments to specific collaborators by typing *+[email address]* (e.g., `[email protected]`). Addressed collaborators will be emailed. \n\nThe Comment button in the top-right corner of the page shows all comments attached to the notebook.", "_____no_output_____" ], [ "## More Resources\n- [Guide to Markdown](/notebooks/markdown_guide.ipynb)\n- Colaboratory is built on top of [Jupyter Notebook](https://jupyter.org/).", "_____no_output_____" ], [ "---\n**Original Sources:**\n1. https://colab.research.google.com/notebooks/welcome.ipynb\n2. https://colab.research.google.com/notebooks/basic_features_overview.ipynb", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06dc5e169bd009c4a0b5aa25b6b46a80aec4fc2
281,396
ipynb
Jupyter Notebook
docs/nb/gnuplot_ok.ipynb
nilqed/jfricas.pip
38ced5e0fdc1d4b05e35a8c0127c145d8cc9dcbf
[ "BSD-2-Clause-FreeBSD" ]
1
2021-02-16T11:51:25.000Z
2021-02-16T11:51:25.000Z
docs/nb/gnuplot_ok.ipynb
nilqed/jfricas.pip
38ced5e0fdc1d4b05e35a8c0127c145d8cc9dcbf
[ "BSD-2-Clause-FreeBSD" ]
1
2021-03-04T10:43:41.000Z
2021-03-04T12:01:36.000Z
docs/nb/gnuplot_ok.ipynb
nilqed/jfricas.pip
38ced5e0fdc1d4b05e35a8c0127c145d8cc9dcbf
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
29.701921
179
0.433983
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06dccd9e8bc038ad2a255ffbf4c2b1fba36e429
69,342
ipynb
Jupyter Notebook
keras/text_classification/keras_pretrained_embedding.ipynb
sindhu819/machine-learning-1
f75478ec7ffe088864dc4f0760012dca612a60ac
[ "MIT" ]
null
null
null
keras/text_classification/keras_pretrained_embedding.ipynb
sindhu819/machine-learning-1
f75478ec7ffe088864dc4f0760012dca612a60ac
[ "MIT" ]
null
null
null
keras/text_classification/keras_pretrained_embedding.ipynb
sindhu819/machine-learning-1
f75478ec7ffe088864dc4f0760012dca612a60ac
[ "MIT" ]
1
2020-03-03T21:07:43.000Z
2020-03-03T21:07:43.000Z
38.141914
2,315
0.532462
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Leveraging-Pre-trained-Word-Embedding-for-Text-Classification\" data-toc-modified-id=\"Leveraging-Pre-trained-Word-Embedding-for-Text-Classification-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Leveraging Pre-trained Word Embedding for Text Classification</a></span><ul class=\"toc-item\"><li><span><a href=\"#Data-Preparation\" data-toc-modified-id=\"Data-Preparation-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Data Preparation</a></span></li><li><span><a href=\"#Glove\" data-toc-modified-id=\"Glove-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Glove</a></span></li><li><span><a href=\"#Model\" data-toc-modified-id=\"Model-1.3\"><span class=\"toc-item-num\">1.3&nbsp;&nbsp;</span>Model</a></span><ul class=\"toc-item\"><li><span><a href=\"#Model-with-Pretrained-Embedding\" data-toc-modified-id=\"Model-with-Pretrained-Embedding-1.3.1\"><span class=\"toc-item-num\">1.3.1&nbsp;&nbsp;</span>Model with Pretrained Embedding</a></span></li><li><span><a href=\"#Model-without-Pretrained-Embedding\" data-toc-modified-id=\"Model-without-Pretrained-Embedding-1.3.2\"><span class=\"toc-item-num\">1.3.2&nbsp;&nbsp;</span>Model without Pretrained Embedding</a></span></li></ul></li><li><span><a href=\"#Submission\" data-toc-modified-id=\"Submission-1.4\"><span class=\"toc-item-num\">1.4&nbsp;&nbsp;</span>Submission</a></span></li><li><span><a href=\"#Summary\" data-toc-modified-id=\"Summary-1.5\"><span class=\"toc-item-num\">1.5&nbsp;&nbsp;</span>Summary</a></span></li></ul></li><li><span><a href=\"#Reference\" data-toc-modified-id=\"Reference-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Reference</a></span></li></ul></div>", "_____no_output_____" ] ], [ [ "# code for loading the format for the notebook\nimport os\n\n# path : store the current path to convert back to it later\npath = os.getcwd()\nos.chdir(os.path.join('..', '..', 'notebook_format'))\n\nfrom formats import load_style\nload_style(plot_style=False)", "_____no_output_____" ], [ "os.chdir(path)\n\n# 1. magic for inline plot\n# 2. magic to print version\n# 3. magic so that the notebook will reload external python modules\n# 4. magic to enable retina (high resolution) plots\n# https://gist.github.com/minrk/3301035\n%matplotlib inline\n%load_ext watermark\n%load_ext autoreload\n%autoreload 2\n%config InlineBackend.figure_format='retina'\n\nimport os\nimport time\nimport numpy as np\nimport pandas as pd\nfrom typing import List, Tuple, Dict\nfrom sklearn.model_selection import train_test_split\nfrom keras import layers\nfrom keras.models import Model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils.np_utils import to_categorical\nfrom keras.preprocessing.sequence import pad_sequences\n\n# prevent scientific notations\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,keras", "Using TensorFlow backend.\n" ] ], [ [ "# Leveraging Pre-trained Word Embedding for Text Classification", "_____no_output_____" ], [ "There are two main ways to obtain word embeddings:\n\n- Learn it from scratch: We specify a neural network architecture and learn the word embeddings jointly with the main task at our hand (e.g. sentiment classification). i.e. 
we would start off with some random word embeddings, and they would get updated along with the rest of the network.\n- Transfer Learning: The whole idea behind transfer learning is to avoid reinventing the wheel as much as possible. It gives us the capability to transfer knowledge that was gained/learned in some other task and use it to improve the learning of another related task. In practice, one way to do this is, for the embedding part of the neural network architecture, to load some other embeddings that were trained on a different machine learning task than the one we are trying to solve and use them to bootstrap the process.\n\nOne area where transfer learning shines is when we have little training data available and using our data alone might not be enough to learn an appropriate task-specific embedding/features for our vocabulary. In this case, leveraging a word embedding that captures generic aspects of the language can prove to be beneficial from both a performance and a time perspective (i.e. we won't have to spend hours/days training a model from scratch to achieve a similar performance). Keep in mind that, as with all machine learning applications, everything is still all about trial and error. What makes an embedding good depends heavily on the task at hand: the word embedding for a movie review sentiment classification model may look very different from that of a legal document classification model, as the semantics of the corpus vary between these two tasks.", "_____no_output_____", "## Data Preparation", "_____no_output_____", "We'll use the movie review sentiment analysis dataset from [Kaggle](https://www.kaggle.com/c/word2vec-nlp-tutorial/overview) for this example. It's a binary classification problem with AUC as the ultimate evaluation metric. The next few code chunks perform the usual text preprocessing, build up the word vocabulary, and perform a train/test split.", "_____no_output_____" ] ], [ [ "data_dir = 'data'\nsubmission_dir = 'submission'", "_____no_output_____" ], [ "input_path = os.path.join(data_dir, 'word2vec-nlp-tutorial', 'labeledTrainData.tsv')\ndf = pd.read_csv(input_path, delimiter='\\t')\nprint(df.shape)\ndf.head()", "(25000, 3)\n" ], [ "raw_text = df['review'].iloc[0]\nraw_text", "_____no_output_____" ], [ "import re\n\ndef clean_str(string: str) -> str:\n    string = re.sub(r\"\\\\\", \"\", string)  # strip backslashes\n    string = re.sub(r\"\\'\", \"\", string)  # strip single quotes\n    string = re.sub(r\"\\\"\", \"\", string)  # strip double quotes\n    return string.strip().lower()", "_____no_output_____" ], [ "from bs4 import BeautifulSoup\n\ndef clean_text(df: pd.DataFrame,\n               text_col: str,\n               label_col: str) -> Tuple[List[str], List[int]]:\n    texts = []\n    labels = []\n    for raw_text, label in zip(df[text_col], df[label_col]): \n        text = BeautifulSoup(raw_text).get_text()\n        cleaned_text = clean_str(text)\n        texts.append(cleaned_text)\n        labels.append(label)\n\n    return texts, labels", "_____no_output_____" ], [ "text_col = 'review'\nlabel_col = 'sentiment'\ntexts, labels = clean_text(df, text_col, label_col)\nprint('sample text: ', texts[0])\nprint('corresponding label:', labels[0])", "sample text:  with all this stuff going down at the moment with mj ive started listening to his music, watching the odd documentary here and there, watched the wiz and watched moonwalker again. maybe i just want to get a certain insight into this guy who i thought was really cool in the eighties just to maybe make up my mind whether he is guilty or innocent. 
moonwalker is part biography, part feature film which i remember going to see at the cinema when it was originally released. some of it has subtle messages about mjs feeling towards the press and also the obvious message of drugs are bad mkay.visually impressive but of course this is all about michael jackson so unless you remotely like mj in anyway then you are going to hate this and find it boring. some may call mj an egotist for consenting to the making of this movie but mj and most of his fans would say that he made it for the fans which if true is really nice of him.the actual feature film bit when it finally starts is only on for 20 minutes or so excluding the smooth criminal sequence and joe pesci is convincing as a psychopathic all powerful drug lord. why he wants mj dead so bad is beyond me. because mj overheard his plans? nah, joe pescis character ranted that he wanted people to know it is he who is supplying drugs etc so i dunno, maybe he just hates mjs music.lots of cool things in this like mj turning into a car and a robot and the whole speed demon sequence. also, the director must have had the patience of a saint when it came to filming the kiddy bad sequence as usually directors hate working with one kid let alone a whole bunch of them performing a complex dance scene.bottom line, this movie is for people who like mj on one level or another (which i think is most people). if not, then stay away. it does try and give off a wholesome message and ironically mjs bestest buddy in this movie is a girl! michael jackson is truly one of the most talented people ever to grace this planet but is he guilty? well, with all the attention ive gave this subject....hmmm well i dont know because people can be different behind closed doors, i know this for a fact. he is either an extremely nice but stupid guy or one of the most sickest liars. i hope he is not the latter.\ncorresponding label: 1\n" ], [ "random_state = 1234\nval_split = 0.2\n\nlabels = to_categorical(labels)\ntexts_train, texts_val, y_train, y_val = train_test_split(\n texts, labels,\n test_size=val_split,\n random_state=random_state)\n\nprint('labels shape:', labels.shape)\nprint('train size: ', len(texts_train))\nprint('validation size: ', len(texts_val))", "labels shape: (25000, 2)\ntrain size: 20000\nvalidation size: 5000\n" ], [ "max_num_words = 20000\n\ntokenizer = Tokenizer(num_words=max_num_words, oov_token='<unk>')\ntokenizer.fit_on_texts(texts_train)\nprint('Found %s unique tokens.' % len(tokenizer.word_index))", "Found 74207 unique tokens.\n" ], [ "max_sequence_len = 1000\n\nsequences_train = tokenizer.texts_to_sequences(texts_train)\nx_train = pad_sequences(sequences_train, maxlen=max_sequence_len)\n\nsequences_val = tokenizer.texts_to_sequences(texts_val)\nx_val = pad_sequences(sequences_val, maxlen=max_sequence_len)\n\nsequences_train[0][:5]", "_____no_output_____" ] ], [ [ "## Glove", "_____no_output_____" ], [ "There are many different pretrained word embeddings online. The one we'll be using is from [Glove](https://nlp.stanford.edu/projects/glove/). 
Others include, but are not limited to, [FastText](https://fasttext.cc/docs/en/crawl-vectors.html) and [bpemb](https://github.com/bheinzerling/bpemb).\n\nIf we look at the project's wiki page, we can find many different pretrained embeddings available for us to experiment with.\n\n<img src=\"img/pretrained_weights.png\" width=\"100%\" height=\"100%\">", "_____no_output_____" ] ], [ [ "import requests\nfrom tqdm import tqdm\n\ndef download_glove(embedding_type: str='glove.6B.zip'):\n    \"\"\"\n    download GloVe word vector representations, this step may take a while\n    \n    Parameters\n    ----------\n    embedding_type : str, default 'glove.6B.zip'\n        Specifying different glove embeddings to download if not already there.\n        {'glove.6B.zip', 'glove.42B.300d.zip', 'glove.840B.300d.zip', 'glove.twitter.27B.zip'}\n        Be wary of the size, e.g. 'glove.6B.zip' is 822 MB zipped, 2GB unzipped\n    \"\"\"\n\n    base_url = 'http://nlp.stanford.edu/data/'\n    if not os.path.isfile(embedding_type):\n        url = base_url + embedding_type\n\n        # the following section is a pretty generic http get request for\n        # saving large files, provides progress bars for checking progress\n        response = requests.get(url, stream=True)\n        response.raise_for_status()\n\n        content_len = response.headers.get('Content-Length')\n        total = int(content_len) if content_len is not None else 0\n\n        with tqdm(unit='B', total=total) as pbar, open(embedding_type, 'wb') as f:\n            for chunk in response.iter_content(chunk_size=1024):\n                if chunk:\n                    pbar.update(len(chunk))\n                    f.write(chunk)\n\n        if response.headers.get('Content-Type') == 'application/zip':\n            from zipfile import ZipFile\n            with ZipFile(embedding_type, 'r') as f:\n                f.extractall(embedding_type.strip('.zip'))\n\n\ndownload_glove()", "_____no_output_____" ] ], [ [ "The way we'll leverage the pretrained embedding is to first read it in as a dictionary lookup, where the key is the word and the value is the corresponding word embedding. Then for each token in our vocabulary, we'll look up this dictionary to see if there's a pretrained embedding available: if there is, we'll use the pretrained embedding; if there isn't, we'll leave the embedding for this word in its original randomly initialized form.\n\nThe format of this particular pretrained embedding is that every line contains space-delimited values, where the first token is the word and the rest are its corresponding embedding values. e.g. 
the first line of the file looks like:\n\n```\nthe -0.038194 -0.24487 0.72812 -0.39961 0.083172 0.043953 -0.39141 0.3344 -0.57545 0.087459 0.28787 -0.06731 0.30906 -0.26384 -0.13231 -0.20757 0.33395 -0.33848 -0.31743 -0.48336 0.1464 -0.37304 0.34577 0.052041 0.44946 -0.46971 0.02628 -0.54155 -0.15518 -0.14107 -0.039722 0.28277 0.14393 0.23464 -0.31021 0.086173 0.20397 0.52624 0.17164 -0.082378 -0.71787 -0.41531 0.20335 -0.12763 0.41367 0.55187 0.57908 -0.33477 -0.36559 -0.54857 -0.062892 0.26584 0.30205 0.99775 -0.80481 -3.0243 0.01254 -0.36942 2.2167 0.72201 -0.24978 0.92136 0.034514 0.46745 1.1079 -0.19358 -0.074575 0.23353 -0.052062 -0.22044 0.057162 -0.15806 -0.30798 -0.41625 0.37972 0.15006 -0.53212 -0.2055 -1.2526 0.071624 0.70565 0.49744 -0.42063 0.26148 -1.538 -0.30223 -0.073438 -0.28312 0.37104 -0.25217 0.016215 -0.017099 -0.38984 0.87424 -0.72569 -0.51058 -0.52028 -0.1459 0.8278 0.27062\n```", "_____no_output_____" ] ], [ [ "def get_embedding_lookup(embedding_path) -> Dict[str, np.ndarray]:\n    embedding_lookup = {}\n    with open(embedding_path) as f:\n        for line in f:\n            values = line.split()\n            word = values[0]\n            coef = np.array(values[1:], dtype=np.float32)\n            embedding_lookup[word] = coef\n\n    return embedding_lookup\n\n\ndef get_pretrained_embedding(embedding_path: str,\n                             index2word: Dict[int, str],\n                             max_features: int) -> np.ndarray:\n    embedding_lookup = get_embedding_lookup(embedding_path)\n\n    pretrained_embedding = np.stack(list(embedding_lookup.values()))\n    embedding_dim = pretrained_embedding.shape[1]\n    embeddings = np.random.normal(pretrained_embedding.mean(),\n                                  pretrained_embedding.std(),\n                                  (max_features, embedding_dim)).astype(np.float32)\n    # we track how many tokens in our vocabulary exist in the pre-trained embedding,\n    # i.e. how many tokens have a pre-trained embedding in this particular file\n    n_found = 0\n    \n    # the loop starts from 1 because keras' Tokenizer reserves 0 for the padding index\n    for i in range(1, max_features):\n        word = index2word[i]\n        embedding_vector = embedding_lookup.get(word)\n        if embedding_vector is not None:\n            embeddings[i] = embedding_vector\n            n_found += 1\n\n    print('number of words found:', n_found)\n    return embeddings", "_____no_output_____" ], [ "glove_path = os.path.join('glove.6B', 'glove.6B.100d.txt')\nmax_features = max_num_words + 1\n\npretrained_embedding = get_pretrained_embedding(glove_path, tokenizer.index_word, max_features)\npretrained_embedding.shape", "number of words found: 19654\n" ] ], [ [ "## Model", "_____no_output_____" ], [ "To train our text classifier, we specify a 1D convolutional network. Our embedding layer can either be initialized randomly or loaded from a pre-trained embedding. Note that for the pre-trained embedding case, apart from loading the weights, we also \"freeze\" the embedding layer, i.e. we set its trainable attribute to False. This idea is often used in transfer learning: when parts of a model are pre-trained (in our case, only our Embedding layer) and parts of it are randomly initialized, the pre-trained part should ideally not be trained together with the randomly initialized part. The rationale is that the large gradient updates triggered by the randomly initialized layers would be very disruptive to those pre-trained weights.\n\nOnce we have trained the randomly initialized weights for a few iterations, we can then un-freeze the layers that were loaded with pre-trained weights and update the weights of the entire network. 
The [keras documentation](https://keras.io/applications/#fine-tune-inceptionv3-on-a-new-set-of-classes) also provides an example of how to do this, although the example is for image models, the same idea can also be applied here, and can be something that's worth experimenting.", "_____no_output_____" ] ], [ [ "def simple_text_cnn(max_sequence_len: int,\n max_features: int,\n num_classes: int,\n optimizer: str='adam',\n metrics: List[str]=['acc'],\n pretrained_embedding: np.ndarray=None) -> Model:\n\n sequence_input = layers.Input(shape=(max_sequence_len,), dtype='int32')\n if pretrained_embedding is None:\n embedded_sequences = layers.Embedding(max_features, 100,\n name='embedding')(sequence_input)\n else:\n embedded_sequences = layers.Embedding(max_features, pretrained_embedding.shape[1],\n weights=[pretrained_embedding],\n name='embedding',\n trainable=False)(sequence_input)\n\n conv1 = layers.Conv1D(128, 5, activation='relu')(embedded_sequences)\n pool1 = layers.MaxPooling1D(5)(conv1)\n conv2 = layers.Conv1D(128, 5, activation='relu')(pool1)\n pool2 = layers.MaxPooling1D(5)(conv2)\n conv3 = layers.Conv1D(128, 5, activation='relu')(pool2)\n pool3 = layers.MaxPooling1D(35)(conv3)\n flatten = layers.Flatten()(pool3)\n dense = layers.Dense(128, activation='relu')(flatten)\n preds = layers.Dense(num_classes, activation='softmax')(dense)\n\n model = Model(sequence_input, preds)\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=metrics)\n return model", "_____no_output_____" ] ], [ [ "### Model with Pretrained Embedding", "_____no_output_____" ] ], [ [ "num_classes = 2\nmodel1 = simple_text_cnn(max_sequence_len, max_features, num_classes,\n pretrained_embedding=pretrained_embedding)\nmodel1.summary()", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:203: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. 
Please use tf.compat.v1.variables_initializer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3576: The name tf.log is deprecated. Please use tf.math.log instead.\n\nModel: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_1 (InputLayer) (None, 1000) 0 \n_________________________________________________________________\nembedding (Embedding) (None, 1000, 100) 2000100 \n_________________________________________________________________\nconv1d_1 (Conv1D) (None, 996, 128) 64128 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 199, 128) 0 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 195, 128) 82048 \n_________________________________________________________________\nmax_pooling1d_2 (MaxPooling1 (None, 39, 128) 0 \n_________________________________________________________________\nconv1d_3 (Conv1D) (None, 35, 128) 82048 \n_________________________________________________________________\nmax_pooling1d_3 (MaxPooling1 (None, 1, 128) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_2 (Dense) (None, 2) 258 \n=================================================================\nTotal params: 2,245,094\nTrainable params: 244,994\nNon-trainable params: 2,000,100\n_________________________________________________________________\n" ] ], [ [ "We can confirm whether our embedding layer is trainable by looping through each layer and checking the trainable attribute.", "_____no_output_____" ] ], [ [ "df_model_layers = pd.DataFrame(\n [(layer.name, layer.trainable, layer.count_params()) for layer in model1.layers],\n columns=['layer', 'trainable', 'n_params']\n)\ndf_model_layers", "_____no_output_____" ], [ "# time : 70\n# test performance : auc 0.93212\nstart = time.time()\nhistory1 = model1.fit(x_train, y_train,\n validation_data=(x_val, y_val),\n batch_size=128,\n epochs=8)\nend = time.time()\nelapse1 = end - start\nelapse1", "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. 
Please use tf.compat.v1.assign instead.\n\nTrain on 20000 samples, validate on 5000 samples\nEpoch 1/8\n20000/20000 [==============================] - 12s 604us/step - loss: 0.5854 - acc: 0.6748 - val_loss: 0.4772 - val_acc: 0.7808\nEpoch 2/8\n20000/20000 [==============================] - 8s 416us/step - loss: 0.4001 - acc: 0.8186 - val_loss: 0.3766 - val_acc: 0.8352\nEpoch 3/8\n20000/20000 [==============================] - 8s 414us/step - loss: 0.3428 - acc: 0.8507 - val_loss: 0.4276 - val_acc: 0.7966\nEpoch 4/8\n20000/20000 [==============================] - 8s 415us/step - loss: 0.2790 - acc: 0.8842 - val_loss: 0.3433 - val_acc: 0.8594\nEpoch 5/8\n20000/20000 [==============================] - 8s 415us/step - loss: 0.2469 - acc: 0.8987 - val_loss: 0.4015 - val_acc: 0.8310\nEpoch 6/8\n20000/20000 [==============================] - 8s 420us/step - loss: 0.1782 - acc: 0.9289 - val_loss: 0.4670 - val_acc: 0.8296\nEpoch 7/8\n20000/20000 [==============================] - 8s 419us/step - loss: 0.1017 - acc: 0.9643 - val_loss: 0.5965 - val_acc: 0.8146\nEpoch 8/8\n20000/20000 [==============================] - 8s 418us/step - loss: 0.0680 - acc: 0.9758 - val_loss: 0.6876 - val_acc: 0.8332\n" ] ], [ [ "### Model without Pretrained Embedding", "_____no_output_____" ] ], [ [ "num_classes = 2\nmodel2 = simple_text_cnn(max_sequence_len, max_features, num_classes)\nmodel2.summary()", "Model: \"model_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_2 (InputLayer) (None, 1000) 0 \n_________________________________________________________________\nembedding (Embedding) (None, 1000, 100) 2000100 \n_________________________________________________________________\nconv1d_4 (Conv1D) (None, 996, 128) 64128 \n_________________________________________________________________\nmax_pooling1d_4 (MaxPooling1 (None, 199, 128) 0 \n_________________________________________________________________\nconv1d_5 (Conv1D) (None, 195, 128) 82048 \n_________________________________________________________________\nmax_pooling1d_5 (MaxPooling1 (None, 39, 128) 0 \n_________________________________________________________________\nconv1d_6 (Conv1D) (None, 35, 128) 82048 \n_________________________________________________________________\nmax_pooling1d_6 (MaxPooling1 (None, 1, 128) 0 \n_________________________________________________________________\nflatten_2 (Flatten) (None, 128) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 128) 16512 \n_________________________________________________________________\ndense_4 (Dense) (None, 2) 258 \n=================================================================\nTotal params: 2,245,094\nTrainable params: 2,245,094\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "# time : 86 secs\n# test performance : auc 0.92310\nstart = time.time()\nhistory1 = model2.fit(x_train, y_train,\n validation_data=(x_val, y_val),\n batch_size=128,\n epochs=8)\nend = time.time()\nelapse1 = end - start\nelapse1", "Train on 20000 samples, validate on 5000 samples\nEpoch 1/8\n20000/20000 [==============================] - 11s 570us/step - loss: 0.5010 - acc: 0.7065 - val_loss: 0.3016 - val_acc: 0.8730\nEpoch 2/8\n20000/20000 [==============================] - 11s 542us/step - loss: 0.2024 - acc: 0.9243 - val_loss: 0.2816 - val_acc: 0.8824\nEpoch 3/8\n20000/20000 
[==============================] - 11s 538us/step - loss: 0.0806 - acc: 0.9734 - val_loss: 0.3552 - val_acc: 0.8812\nEpoch 4/8\n20000/20000 [==============================] - 11s 535us/step - loss: 0.0272 - acc: 0.9917 - val_loss: 0.4671 - val_acc: 0.8836\nEpoch 5/8\n20000/20000 [==============================] - 11s 543us/step - loss: 0.0088 - acc: 0.9973 - val_loss: 0.6534 - val_acc: 0.8788\nEpoch 6/8\n20000/20000 [==============================] - 11s 542us/step - loss: 0.0090 - acc: 0.9973 - val_loss: 0.7522 - val_acc: 0.8740\nEpoch 7/8\n20000/20000 [==============================] - 11s 542us/step - loss: 0.0104 - acc: 0.9967 - val_loss: 1.0453 - val_acc: 0.8480\nEpoch 8/8\n20000/20000 [==============================] - 11s 543us/step - loss: 0.0205 - acc: 0.9924 - val_loss: 0.6930 - val_acc: 0.8712\n" ] ], [ [ "## Submission", "_____no_output_____" ], [ "For the submission section, we read in and preprocess the test data provided by the competition, then generate the predicted probability column for both the model that uses pretrained embedding and one that doesn't to compare their performance.", "_____no_output_____" ] ], [ [ "input_path = os.path.join(data_dir, 'word2vec-nlp-tutorial', 'testData.tsv')\ndf_test = pd.read_csv(input_path, delimiter='\\t')\nprint(df_test.shape)\ndf_test.head()", "(25000, 2)\n" ], [ "def clean_text_without_label(df: pd.DataFrame, text_col: str) -> List[str]:\n texts = []\n for raw_text in df[text_col]:\n text = BeautifulSoup(raw_text).get_text()\n cleaned_text = clean_str(text)\n texts.append(cleaned_text)\n\n return texts", "_____no_output_____" ], [ "texts_test = clean_text_without_label(df_test, text_col)\nsequences_test = tokenizer.texts_to_sequences(texts_test)\nx_test = pad_sequences(sequences_test, maxlen=max_sequence_len)\nlen(x_test)", "_____no_output_____" ], [ "def create_submission(ids, predictions, ids_col, label_col, submission_path) -> pd.DataFrame:\n df_submission = pd.DataFrame({\n ids_col: ids,\n label_col: predictions\n }, columns=[ids_col, label_col])\n\n if submission_path is not None:\n # create the directory if need be, e.g. 
if the submission_path = submission/submission.csv\n    # we'll create the submission directory first if it doesn't exist\n    directory = os.path.split(submission_path)[0]\n    if directory not in ('', '.') and not os.path.isdir(directory):\n        os.makedirs(directory, exist_ok=True)\n\n    df_submission.to_csv(submission_path, index=False, header=True)\n\n    return df_submission", "_____no_output_____" ], [ "ids_col = 'id'\nlabel_col = 'sentiment'\nids = df_test[ids_col]\n\nmodels = {\n    'pretrained_embedding': model1,\n    'without_pretrained_embedding': model2\n}\n\nfor model_name, model in models.items():\n    print('generating submission for: ', model_name)\n    submission_path = os.path.join(submission_dir, '{}_submission.csv'.format(model_name))\n    predictions = model.predict(x_test, verbose=1)[:, 1]\n    df_submission = create_submission(ids, predictions, ids_col, label_col, submission_path)\n\n# sanity check to make sure the size and the output of the submission make sense\nprint(df_submission.shape)\ndf_submission.head()", "generating submission for:  pretrained_embedding\n25000/25000 [==============================] - 6s 228us/step\ngenerating submission for:  without_pretrained_embedding\n25000/25000 [==============================] - 6s 222us/step\n(25000, 2)\n" ] ], [ [ "## Summary", "_____no_output_____" ], [ "In this article, we took a look at how to leverage pre-trained word embeddings for our text classification task. There are also various Kaggle Kernels [here](https://www.kaggle.com/sudalairajkumar/a-look-at-different-embeddings) and [here](https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge) that experiment with different pre-trained embeddings, or even an ensemble of models each with a different pre-trained embedding, on various text classification tasks to see if they give us an edge.", "_____no_output_____" ], [ "# Reference", "_____no_output_____" ], [ "- [Blog: Text Classification, Part I - Convolutional Networks](https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/)\n- [Blog: Using pre-trained word embeddings in a Keras model](https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html)\n- [Jupyter Notebook - Deep Learning with Python - Using Word Embeddings](https://nbviewer.jupyter.org/github/fchollet/deep-learning-with-python-notebooks/blob/master/6.1-using-word-embeddings.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d06ddae3254138f1956012319eba73195332ba02
21,763
ipynb
Jupyter Notebook
_solved/pandas_04_time_series_data.ipynb
jorisvandenbossche/FLAMES-python-data-wrangling
24a6dbe8637264f010c47affd3a8dcbe2b493e00
[ "BSD-3-Clause" ]
1
2022-03-02T17:41:46.000Z
2022-03-02T17:41:46.000Z
_solved/pandas_04_time_series_data.ipynb
jorisvandenbossche/FLAMES-python-data-wrangling
24a6dbe8637264f010c47affd3a8dcbe2b493e00
[ "BSD-3-Clause" ]
10
2020-11-09T09:21:01.000Z
2021-10-18T06:03:19.000Z
_solved/pandas_04_time_series_data.ipynb
jorisvandenbossche/ICES-python-data
63864947657f37cb26cb4e2dcd67ff106dffe9cd
[ "BSD-3-Clause" ]
null
null
null
21.027053
307
0.521435
[ [ [ "<p><font size=\"6\"><b>04 - Pandas: Working with time series data</b></font></p>\n\n> *© 2021, Joris Van den Bossche and Stijn Van Hoey (<mailto:[email protected]>, <mailto:[email protected]>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*\n\n---", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.use('ggplot')", "_____no_output_____" ] ], [ [ "# Introduction: `datetime` module", "_____no_output_____" ], [ "Standard Python contains the `datetime` module to handle date and time data:", "_____no_output_____" ] ], [ [ "import datetime", "_____no_output_____" ], [ "dt = datetime.datetime(year=2016, month=12, day=19, hour=13, minute=30)\ndt", "_____no_output_____" ], [ "print(dt) # .day,...", "_____no_output_____" ], [ "print(dt.strftime(\"%d %B %Y\"))", "_____no_output_____" ] ], [ [ "# Dates and times in pandas", "_____no_output_____" ], [ "## The ``Timestamp`` object", "_____no_output_____" ], [ "Pandas has its own date and time objects, which are compatible with the standard `datetime` objects, but provide some more functionality to work with. \n\nThe `Timestamp` object can also be constructed from a string:", "_____no_output_____" ] ], [ [ "ts = pd.Timestamp('2016-12-19')\nts", "_____no_output_____" ] ], [ [ "Like with `datetime.datetime` objects, there are several useful attributes available on the `Timestamp`. For example, we can get the month (experiment with tab completion!):", "_____no_output_____" ] ], [ [ "ts.month", "_____no_output_____" ] ], [ [ "There is also a `Timedelta` type, which can e.g. be used to add intervals of time:", "_____no_output_____" ] ], [ [ "ts + pd.Timedelta('5 days')", "_____no_output_____" ] ], [ [ "## Parsing datetime strings", "_____no_output_____" ], [ "![](http://imgs.xkcd.com/comics/iso_8601.png)", "_____no_output_____" ], [ "Unfortunately, when working with real world data, you encounter many different `datetime` formats. Most of the time when you have to deal with them, they come in text format, e.g. from a `CSV` file. To work with those data in Pandas, we first have to *parse* the strings to actual `Timestamp` objects.", "_____no_output_____" ], [ "<div class=\"alert alert-info\">\n<b>REMEMBER</b>: <br><br>\n\nTo convert string formatted dates to Timestamp objects: use the `pandas.to_datetime` function\n\n</div>", "_____no_output_____" ] ], [ [ "pd.to_datetime(\"2016-12-09\")", "_____no_output_____" ], [ "pd.to_datetime(\"09/12/2016\")", "_____no_output_____" ], [ "pd.to_datetime(\"09/12/2016\", dayfirst=True)", "_____no_output_____" ], [ "pd.to_datetime(\"09/12/2016\", format=\"%d/%m/%Y\")", "_____no_output_____" ] ], [ [ "A detailed overview of how to specify the `format` string, see the table in the python documentation: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior", "_____no_output_____" ], [ "## `Timestamp` data in a Series or DataFrame column", "_____no_output_____" ] ], [ [ "s = pd.Series(['2016-12-09 10:00:00', '2016-12-09 11:00:00', '2016-12-09 12:00:00'])", "_____no_output_____" ], [ "s", "_____no_output_____" ] ], [ [ "The `to_datetime` function can also be used to convert a full series of strings:", "_____no_output_____" ] ], [ [ "ts = pd.to_datetime(s)", "_____no_output_____" ], [ "ts", "_____no_output_____" ] ], [ [ "Notice the data type of this series has changed: the `datetime64[ns]` dtype. 
This indicates that we have a series of actual datetime values.", "_____no_output_____" ], [ "The same attributes as on single `Timestamp`s are also available on a Series with datetime data, using the **`.dt`** accessor:", "_____no_output_____" ] ], [ [ "ts.dt.hour", "_____no_output_____" ], [ "ts.dt.dayofweek", "_____no_output_____" ] ], [ [ "To quickly construct some regular time series data, the [``pd.date_range``](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html) function comes in handy:", "_____no_output_____" ] ], [ [ "pd.Series(pd.date_range(start=\"2016-01-01\", periods=10, freq='3H'))", "_____no_output_____" ] ], [ [ "# Time series data: `Timestamp` in the index", "_____no_output_____" ], [ "## River discharge example data", "_____no_output_____" ], [ "For the following demonstration of the time series functionality, we use a sample of discharge data of the Maarkebeek (Flanders) with 3-hour averaged values, derived from the [Waterinfo website](https://www.waterinfo.be/).", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"data/vmm_flowdata.csv\")", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ] ], [ [ "We already know how to parse a date column with Pandas:", "_____no_output_____" ] ], [ [ "data['Time'] = pd.to_datetime(data['Time'])", "_____no_output_____" ] ], [ [ "With `set_index('Time')`, we set the column with datetime values as the index, which works for both `Series` and `DataFrame`.", "_____no_output_____" ] ], [ [ "data = data.set_index(\"Time\")", "_____no_output_____" ], [ "data", "_____no_output_____" ] ], [ [ "The steps above are provided as built-in functionality of `read_csv`:", "_____no_output_____" ] ], [ [ "data = pd.read_csv(\"data/vmm_flowdata.csv\", index_col=0, parse_dates=True)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-info\">\n<b>REMEMBER</b>: <br><br>\n\n`pd.read_csv` provides a lot of built-in functionality to support these kinds of transformations when reading in a file! Check the help of the read_csv function...\n\n</div>", "_____no_output_____" ], [ "## The DatetimeIndex", "_____no_output_____" ], [ "When we ensure the DataFrame has a `DatetimeIndex`, time-series related functionality becomes available:", "_____no_output_____" ] ], [ [ "data.index", "_____no_output_____" ] ], [ [ "Similar to a Series with datetime data, there are some attributes of the timestamp values available:", "_____no_output_____" ] ], [ [ "data.index.day", "_____no_output_____" ], [ "data.index.dayofyear", "_____no_output_____" ], [ "data.index.year", "_____no_output_____" ] ], [ [ "The `plot` method will also adapt its labels (when you zoom in, you can see the different levels of detail of the datetime labels):", "_____no_output_____" ] ], [ [ "%matplotlib widget", "_____no_output_____" ], [ "data.plot()", "_____no_output_____" ], [ "# switching back to static inline plots (the default)\n%matplotlib inline", "_____no_output_____" ] ], [ [ "We have too much data to sensibly plot on one figure. 
Let's see how we can easily select part of the data or aggregate the data to other time resolutions in the next sections.", "_____no_output_____" ], [ "## Selecting data from a time series", "_____no_output_____" ], [ "We can use label-based indexing on a time series as expected:", "_____no_output_____" ] ], [ [ "data[pd.Timestamp(\"2012-01-01 09:00\"):pd.Timestamp(\"2012-01-01 19:00\")]", "_____no_output_____" ] ], [ [ "But, for convenience, indexing a time series also works with strings:", "_____no_output_____" ] ], [ [ "data[\"2012-01-01 09:00\":\"2012-01-01 19:00\"]", "_____no_output_____" ] ], [ [ "A nice feature is **\"partial string\" indexing**, where we can do implicit slicing by providing a partial datetime string.\n\nE.g. all data of 2013:", "_____no_output_____" ] ], [ [ "data['2013':]", "_____no_output_____" ] ], [ [ "Or all data of January up to March 2012:", "_____no_output_____" ] ], [ [ "data['2012-01':'2012-03']", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>select all data starting from 2012</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "data['2012':]", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>select all data in January for all different years</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "data[data.index.month == 1]", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>select all data in April, May and June for all different years</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "data[data.index.month.isin([4, 5, 6])]", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>select all 'daytime' data (between 8h and 20h) for all days</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "data[(data.index.hour > 8) & (data.index.hour < 20)]", "_____no_output_____" ] ], [ [ "## The power of pandas: `resample`", "_____no_output_____" ], [ "A very powerful method is **`resample`: converting the frequency of the time series** (e.g. from hourly to daily data).\n\nThe time series has a frequency of 3 hours. 
I want to change this to daily:", "_____no_output_____" ] ], [ [ "data.resample('D').mean().head()", "_____no_output_____" ] ], [ [ "Other mathematical methods can also be specified:", "_____no_output_____" ] ], [ [ "data.resample('D').max().head()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-info\">\n<b>REMEMBER</b>: <br><br>\n\nThe string to specify the new time frequency: http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases <br>\n\nThese strings can also be combined with numbers, e.g. `'10D'`...\n\n</div>", "_____no_output_____" ] ], [ [ "data.resample('M').mean().plot() # 10D", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>Plot the monthly standard deviation of the columns</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "data.resample('M').std().plot() # 'A'", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>Plot the monthly mean and median values for the years 2011-2012 for 'L06_347'<br><br></li>\n</ul>\n\n__Note__ Did you know about <a href=\"https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.agg.html\"><code>agg</code></a> to derive multiple statistics at the same time?\n\n</div>", "_____no_output_____" ] ], [ [ "subset = data['2011':'2012']['L06_347']\nsubset.resample('M').agg(['mean', 'median']).plot()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n\n<b>EXERCISE</b>:\n\n <ul>\n  <li>Plot the monthly minimum and maximum daily average value of the 'LS06_348' column</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "daily = data['LS06_348'].resample('D').mean() # daily averages calculated", "_____no_output_____" ], [ "daily.resample('M').agg(['min', 'max']).plot() # monthly minimum and maximum values of these daily averages", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>EXERCISE</b>:\n\n <ul>\n  <li>Make a bar plot of the mean of the stations in the year 2013</li>\n</ul>\n\n</div>", "_____no_output_____" ] ], [ [ "data['2013':'2013'].mean().plot(kind='barh')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d06de29d62a20cae30e23682982b3385af733dfe
4,659
ipynb
Jupyter Notebook
platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
1
2022-01-25T17:24:13.000Z
2022-01-25T17:24:13.000Z
platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
null
null
null
platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb
fcivardi/spark-nlp-workshop
aedb1f5d93577c81bc3dd0da5e46e02586941541
[ "Apache-2.0" ]
null
null
null
19.493724
91
0.446018
[ [ [ "# Importing Spark NLP, Spark NLP for Healthcare and Spark OCR", "_____no_output_____" ] ], [ [ "import sparknlp", "_____no_output_____" ], [ "import sparknlp_jsl", "_____no_output_____" ], [ "import sparkocr", "_____no_output_____" ], [ "sparknlp_jsl.version()", "_____no_output_____" ], [ "sparknlp.version()", "_____no_output_____" ], [ "sparkocr.version()", "_____no_output_____" ] ], [ [ "# Retrieving your license", "_____no_output_____" ] ], [ [ "import os, json\n\nwith open('/license.json', 'r') as f:\n license_keys = json.load(f)\n\n# Defining license key-value pairs as local variables\nlocals().update(license_keys)\n\n# Adding license key-value pairs to environment variables\nos.environ.update(license_keys)", "_____no_output_____" ] ], [ [ "# Add a DNS entry for your Sagemaker instance", "_____no_output_____" ] ], [ [ "!echo \"127.0.0.1 $HOSTNAME\" >> /etc/hosts", "_____no_output_____" ] ], [ [ "# Start your session", "_____no_output_____" ] ], [ [ " spark = sparkocr.start(secret=os.environ['SPARK_OCR_SECRET'], nlp_secret=['SECRET'])", "Spark version: 3.0.2\nSpark NLP version: 3.3.1\nSpark OCR version: 3.8.0\n\n" ] ], [ [ "# Check everything is good and have fun!", "_____no_output_____" ] ], [ [ "spark", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06e1544e16cb52bb2495c691ebd3ab10ab7eb3b
17,953
ipynb
Jupyter Notebook
notebooks/DKDataBlock.ipynb
Bleyddyn/malpi
9315f19366bd56da12c6dc7a84d830bbec530753
[ "MIT" ]
5
2017-03-27T22:15:54.000Z
2022-01-19T23:46:46.000Z
notebooks/DKDataBlock.ipynb
Bleyddyn/malpi
9315f19366bd56da12c6dc7a84d830bbec530753
[ "MIT" ]
10
2017-01-19T19:22:06.000Z
2022-02-27T21:29:50.000Z
notebooks/DKDataBlock.ipynb
Bleyddyn/malpi
9315f19366bd56da12c6dc7a84d830bbec530753
[ "MIT" ]
null
null
null
31.331588
128
0.485713
[ [ [ "Demonstrating how to get DonkeyCar Tub files into a PyTorch/fastai DataBlock", "_____no_output_____" ] ], [ [ "from fastai.data.all import *\nfrom fastai.vision.all import *\nfrom fastai.data.transforms import ColReader, Normalize, RandomSplitter\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F", "_____no_output_____" ], [ "from donkeycar.parts.tub_v2 import Tub\nimport pandas as pd\nfrom pathlib import Path", "_____no_output_____" ], [ "from malpi.dk.train import preprocessFileList, get_data, get_learner, get_autoencoder, train_autoencoder", "_____no_output_____" ], [ "def learn_resnet():\n learn2 = cnn_learner(dls, resnet18, loss_func=MSELossFlat(), metrics=[rmse], cbs=ActivationStats(with_hist=True))\n learn2.fine_tune(5)\n \n learn2.recorder.plot_loss()\n learn2.show_results(figsize=(20,10))", "_____no_output_____" ] ], [ [ "The below code is modified from: https://github.com/cmasenas/fastai_navigation_training/blob/master/fastai_train.ipynb.\n\nTODO: Figure out how to have multiple output heads", "_____no_output_____" ] ], [ [ "def test_one_transform(name, inputs, df_all, batch_tfms, item_tfms, epochs, lr):\n dls = get_data(inputs, df_all=df_all, batch_tfms=batch_tfms, item_tfms=item_tfms)\n callbacks = [CSVLogger(f\"Transform_{name}.csv\", append=True)]\n learn = get_learner(dls)\n #learn.no_logging() #Try this to block logging when doing many training test runs\n learn.fit_one_cycle(epochs, lr, cbs=callbacks)\n #learn.recorder.plot_loss()\n #learn.show_results(figsize=(20,10))", "_____no_output_____" ], [ "# Train multipel times using a list of Transforms, one at a time.\n# Compare mean/stdev of best validation loss (or rmse?) for each Transform\ndf_all = get_dataframe(\"track1_warehouse.txt\")\ntransforms = [None]\ntransforms.extend( [*aug_transforms(do_flip=False, size=128)] )\nfor tfm in transforms:\n name = \"None\" if tfm is None else str(tfm.__class__.__name__)\n print( f\"Transform: {name}\" )\n for i in range(5):\n print( f\" Run {i+1}\" )\n test_one_transform(name, \"track1_warehouse.txt\", df_all, None, 5, 3e-3)", "_____no_output_____" ], [ "def visualize_learner( learn ):\n #dls=nav.dataloaders(df, bs=512)\n preds, tgt = learn.get_preds(dl=[dls.one_batch()])\n\n plt.title(\"Target vs Predicted Steering\", fontsize=18, y=1.0)\n plt.xlabel(\"Target\", fontsize=14, labelpad=15)\n plt.ylabel(\"Predicted\", fontsize=14, labelpad=15)\n plt.plot(tgt.T[0], preds.T[0],'bo')\n plt.plot([-1,1],[-1,1],'r', linewidth = 4)\n plt.show()\n\n plt.title(\"Target vs Predicted Throttle\", fontsize=18, y=1.02)\n plt.xlabel(\"Target\", fontsize=14, labelpad=15)\n plt.ylabel(\"Predicted\", fontsize=14, labelpad=15)\n plt.plot(tgt.T[1], preds.T[1],'bo')\n plt.plot([0,1],[0,1],'r', linewidth = 4)\n plt.show()", "_____no_output_____" ], [ "learn.export()", "_____no_output_____" ], [ "df_all = get_dataframe(\"track1_warehouse.txt\")\ndls = get_data(\"track1_warehouse.txt\", df_all=df_all, batch_tfms=None)", "_____no_output_____" ], [ "learn = get_learner(dls)\nlearn.fit_one_cycle(15, 3e-3)", "_____no_output_____" ], [ "visualize_learner(learn)", "_____no_output_____" ], [ "learn.export('models/track1_v2.pkl')", "_____no_output_____" ], [ "def clear_pyplot_memory():\n plt.clf()\n plt.cla()\n plt.close()\n\ndf_all = get_dataframe(\"track1_warehouse.txt\")\n\ntransforms=[None,\n RandomResizedCrop(128,p=1.0,min_scale=0.5,ratio=(0.9,1.1)),\n RandomErasing(sh=0.2, max_count=6,p=1.0),\n Brightness(max_lighting=0.4, p=1.0),\n Contrast(max_lighting=0.4, p=1.0),\n 
Saturation(max_lighting=0.4, p=1.0)]\n#dls = get_data(None, df_all, item_tfms=item_tfms, batch_tfms=batch_tfms)\n\nfor tfm in transforms:\n name = \"None\" if tfm is None else str(tfm.__class__.__name__)\n if name == \"RandomResizedCrop\":\n item_tfms = tfm\n batch_tfms = None\n else:\n item_tfms = None\n batch_tfms = tfm\n \n dls = get_data(\"track1_warehouse.txt\",\n df_all=df_all,\n item_tfms=item_tfms, batch_tfms=batch_tfms)\n\n dls.show_batch(unique=True, show=True)\n plt.savefig( f'Transform_{name}.png' )\n#clear_pyplot_memory()", "_____no_output_____" ], [ "learn, dls = train_autoencoder( \"tracks_all.txt\", 5, 3e-3, name=\"ae_test1\", verbose=False )", "_____no_output_____" ], [ "learn.recorder.plot_loss()\nlearn.show_results(figsize=(20,10))\n#plt.savefig(name + '.png')", "_____no_output_____" ], [ "idx = 0", "_____no_output_____" ], [ "idx += 1\nim1 = dls.one_batch()[0]\nim1_out = learn.model.forward(im1)\nshow_image(im1[idx])\nshow_image(im1_out[idx])", "_____no_output_____" ], [ "from fastai.metrics import rmse", "_____no_output_____" ], [ "from typing import List, Callable, Union, Any, TypeVar, Tuple\nTensor = TypeVar('torch.tensor')\n\nfrom abc import abstractmethod\n\nclass BaseVAE(nn.Module):\n\n def __init__(self) -> None:\n super(BaseVAE, self).__init__()\n\n def encode(self, input: Tensor) -> List[Tensor]:\n raise NotImplementedError\n\n def decode(self, input: Tensor) -> Any:\n raise NotImplementedError\n\n def sample(self, batch_size:int, current_device: int, **kwargs) -> Tensor:\n raise NotImplementedError\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n raise NotImplementedError\n\n @abstractmethod\n def forward(self, *inputs: Tensor) -> Tensor:\n pass\n\n @abstractmethod\n def loss_function(self, *inputs: Any, **kwargs) -> Tensor:\n pass", "_____no_output_____" ], [ "class VanillaVAE(BaseVAE):\n\n\n def __init__(self,\n in_channels: int,\n latent_dim: int,\n hidden_dims: List = None,\n **kwargs) -> None:\n super(VanillaVAE, self).__init__()\n\n self.latent_dim = latent_dim\n self.kld_weight = 0.00025 # TODO calculate based on: #al_img.shape[0]/ self.num_train_imgs\n modules = []\n if hidden_dims is None:\n hidden_dims = [32, 64, 128, 256, 512]\n\n # Build Encoder\n for h_dim in hidden_dims:\n modules.append(\n nn.Sequential(\n nn.Conv2d(in_channels, out_channels=h_dim,\n kernel_size= 3, stride= 2, padding = 1),\n nn.BatchNorm2d(h_dim),\n nn.LeakyReLU())\n )\n in_channels = h_dim\n\n self.encoder = nn.Sequential(*modules)\n self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)\n self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)\n\n\n # Build Decoder\n modules = []\n\n self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)\n\n hidden_dims.reverse()\n\n for i in range(len(hidden_dims) - 1):\n modules.append(\n nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[i],\n hidden_dims[i + 1],\n kernel_size=3,\n stride = 2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[i + 1]),\n nn.LeakyReLU())\n )\n\n\n\n self.decoder = nn.Sequential(*modules)\n\n self.final_layer = nn.Sequential(\n nn.ConvTranspose2d(hidden_dims[-1],\n hidden_dims[-1],\n kernel_size=3,\n stride=2,\n padding=1,\n output_padding=1),\n nn.BatchNorm2d(hidden_dims[-1]),\n nn.LeakyReLU(),\n nn.Conv2d(hidden_dims[-1], out_channels= 3,\n kernel_size= 3, padding= 1),\n nn.Tanh())\n\n def encode(self, input: Tensor) -> List[Tensor]:\n \"\"\"\n Encodes the input by passing through the encoder network\n and returns the latent codes.\n :param input: (Tensor) Input tensor to encoder [N 
x C x H x W]\n :return: (Tensor) List of latent codes\n \"\"\"\n result = self.encoder(input)\n result = torch.flatten(result, start_dim=1)\n\n # Split the result into mu and var components\n # of the latent Gaussian distribution\n mu = self.fc_mu(result)\n log_var = self.fc_var(result)\n\n return [mu, log_var]\n\n def decode(self, z: Tensor) -> Tensor:\n \"\"\"\n Maps the given latent codes\n onto the image space.\n :param z: (Tensor) [B x D]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result\n\n def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:\n \"\"\"\n Reparameterization trick to sample from N(mu, var) from\n N(0,1).\n :param mu: (Tensor) Mean of the latent Gaussian [B x D]\n :param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]\n :return: (Tensor) [B x D]\n \"\"\"\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return eps * std + mu\n\n def forward(self, input: Tensor, **kwargs) -> List[Tensor]:\n mu, log_var = self.encode(input)\n z = self.reparameterize(mu, log_var)\n return [self.decode(z), input, mu, log_var]\n\n def loss_function(self,\n *args,\n **kwargs) -> dict:\n \"\"\"\n Computes the VAE loss function.\n KL(N(\\mu, \\sigma), N(0, 1)) = \\log \\frac{1}{\\sigma} + \\frac{\\sigma^2 + \\mu^2}{2} - \\frac{1}{2}\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n #print( f\"loss_function: {len(args[0])} {type(args[0][0])} {args[1].shape}\" )\n recons = args[0][0]\n input = args[1]\n mu = args[0][2]\n log_var = args[0][3]\n\n kld_weight = self.kld_weight # kwargs['M_N'] # Account for the minibatch samples from the dataset\n recons_loss =F.mse_loss(recons, input)\n\n\n kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)\n\n loss = recons_loss + kld_weight * kld_loss\n return loss\n #return {'loss': loss, 'Reconstruction_Loss':recons_loss.detach(), 'KLD':-kld_loss.detach()}\n\n def sample(self,\n num_samples:int,\n current_device: int, **kwargs) -> Tensor:\n \"\"\"\n Samples from the latent space and return the corresponding\n image space map.\n :param num_samples: (Int) Number of samples\n :param current_device: (Int) Device to run the model\n :return: (Tensor)\n \"\"\"\n z = torch.randn(num_samples,\n self.latent_dim)\n\n z = z.to(current_device)\n\n samples = self.decode(z)\n return samples\n\n def generate(self, x: Tensor, **kwargs) -> Tensor:\n \"\"\"\n Given an input image x, returns the reconstructed image\n :param x: (Tensor) [B x C x H x W]\n :return: (Tensor) [B x C x H x W]\n \"\"\"\n\n return self.forward(x)[0]\n", "_____no_output_____" ], [ "input_file=\"track1_warehouse.txt\"\nitem_tfms = [Resize(64,method=\"squish\")]\ndls = get_data(input_file, item_tfms=item_tfms, verbose=False, autoencoder=True)", "_____no_output_____" ], [ "vae = VanillaVAE(3, 64)\nlearn = Learner(dls, vae, loss_func=vae.loss_function)", "_____no_output_____" ], [ "learn.fit_one_cycle(5, 3e-3)", "_____no_output_____" ], [ "vae", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06e197cea1954b88b79c2aafcca4037d3002a28
8,107
ipynb
Jupyter Notebook
section_robot_orig/ideal_robot1.ipynb
greenpepper123/LNPR_BOOK_CODES
69e9165036fb6912c7ed99da092b06f0656612c3
[ "MIT" ]
null
null
null
section_robot_orig/ideal_robot1.ipynb
greenpepper123/LNPR_BOOK_CODES
69e9165036fb6912c7ed99da092b06f0656612c3
[ "MIT" ]
null
null
null
section_robot_orig/ideal_robot1.ipynb
greenpepper123/LNPR_BOOK_CODES
69e9165036fb6912c7ed99da092b06f0656612c3
[ "MIT" ]
null
null
null
88.119565
5,872
0.827556
[ [ [ "import matplotlib.pyplot as plt ### fig:class_world1", "_____no_output_____" ], [ "class World: \n def __init__(self):\n self.objects = [] # ここにロボットなどのオブジェクトを登録\n \n def append(self,obj): # オブジェクトを登録するための関数\n self.objects.append(obj)\n \n def draw(self):\n fig = plt.figure(figsize=(8,8)) # 8x8 inchの図を準備\n ax = fig.add_subplot(111) # サブプロットを準備\n ax.set_aspect('equal') # 縦横比を座標の値と一致させる\n ax.set_xlim(-5,5) # X軸を-5m x 5mの範囲で描画\n ax.set_ylim(-5,5) # Y軸も同様に\n ax.set_xlabel(\"X\",fontsize=20) # X軸にラベルを表示\n ax.set_ylabel(\"Y\",fontsize=20) # 同じくY軸に\n \n for obj in self.objects: obj.draw(ax) # appendした物体を次々に描画\n \n plt.show()", "_____no_output_____" ], [ "world = World() ### fig:class_world3\nworld.draw()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d06e1a7b9d623727f484d79d75e5ebd020f62ab6
8,525
ipynb
Jupyter Notebook
视频课件/Redis 的高级用法.ipynb
kingname/SourceCodeofMongoRedis
9cf64a084362991f1860a6c52abdbefba59c0790
[ "MIT" ]
183
2019-02-27T08:13:17.000Z
2022-03-21T04:23:26.000Z
视频课件/Redis 的高级用法.ipynb
kingname/SourceCodeofMongoRedis
9cf64a084362991f1860a6c52abdbefba59c0790
[ "MIT" ]
10
2020-02-17T07:52:45.000Z
2021-08-19T06:17:43.000Z
视频课件/Redis 的高级用法.ipynb
kingname/SourceCodeofMongoRedis
9cf64a084362991f1860a6c52abdbefba59c0790
[ "MIT" ]
104
2019-02-22T13:59:35.000Z
2022-03-01T16:44:47.000Z
20.249406
132
0.473431
[ [ [ "# Redis列表实现一次pop 弹出多条数据\n\n![](https://kingname-1257411235.cos.ap-chengdu.myqcloud.com/2019-03-03-16-52-34.png)\n", "_____no_output_____" ] ], [ [ "# 连接 Redis\n\nimport redis\nclient = redis.Redis(host='122.51.39.219', port=6379, password='leftright123')\n\n# 注意:\n# 这个 Redis 环境仅作为练习之用,每小时会清空一次,请勿存放重要数据。", "_____no_output_____" ], [ "# 准备数据\n\nclient.lpush('test_batch_pop', *list(range(10000)))", "_____no_output_____" ], [ "# 一条一条读取,非常耗时\nimport time\n\n\nstart = time.time()\nwhile True:\n data = client.lpop('test_batch_pop')\n if not data:\n break\nend = time.time()\n\ndelta = end - start\nprint(f'循环读取10000条数据,使用 lpop 耗时:{delta}')", "循环读取10000条数据,使用 lpop 耗时:112.04084920883179\n" ] ], [ [ "## 为什么使用`lpop`读取10000条数据这么慢?\n\n因为`lpop`每次只弹出1条数据,每次弹出数据都要连接 Redis 。大量时间浪费在了网络传输上面。\n\n## 如何实现批量弹出多条数据,并在同一次网络请求中返回?\n\n先使用 `lrange` 获取数据,再使用`ltrim`删除被获取的数据。", "_____no_output_____" ] ], [ [ "# 复习一下 lrange 的用法\n\ndatas = client.lrange('test_batch_pop', 0, 9) # 读取前10条数据\ndatas", "_____no_output_____" ], [ "# 学习一下 ltrim 的用法\n\nclient.ltrim('test_batch_pop', 10, -1) # 删除前10条数据", "_____no_output_____" ], [ "# 验证一下数据是否被成功删除\n\nlength = client.llen('test_batch_pop')\nprint(f'现在列表里面还剩{length}条数据')\ndatas = client.lrange('test_batch_pop', 0, 9) # 读取前10条数据\ndatas", "现在列表里面还剩9990条数据\n" ], [ "# 一种看起来正确的做法\n\ndef batch_pop_fake(key, n):\n datas = client.lrange(key, 0, n - 1)\n client.ltrim(key, n, -1)\n return datas\n\nbatch_pop_fake('test_batch_pop', 10)", "_____no_output_____" ], [ "client.lrange('test_batch_pop', 0, 9)", "_____no_output_____" ] ], [ [ "## 这种写法用什么问题\n\n在多个进程同时使用 batch_pop_fake 函数的时候,由于执行 lrange 与 ltrim 是在两条语句中,因此实际上会分成2个网络请求。那么当 A 进程\n刚刚执行完lrange,还没有来得及执行 ltrim 时,B 进程刚好过来执行 lrange,那么 AB 两个进程就会获得相同的数据。\n\n等 B 进程获取完成数据以后,A 进程的 ltrim 刚刚抵达,此时Redis 会删除前 n 条数据,然后 B 进程的 ltrim 也到了,再删除前 n 条数据。那么最终导致的结果就是,AB 两个进程同时拿到前 n 条数据,但是却有2n 条数据被删除。", "_____no_output_____" ], [ "## 使用 pipeline 打包多个命令到一个请求中\n\npipeline 的使用方法如下:\n\n```python\nimport redis\n\nclient = redis.Redis()\npipe = client.pipeline()\npipe.lrange('key', 0, n - 1)\npipe.ltrim('key', n, -1)\nresult = pipe.execute()\n```\n\npipe.execute()返回一个列表,这个列表每一项按顺序对应每一个命令的执行结果。在上面的例子中,result 是一个有两项的列表,第一项对应 lrange 的返回结果,第二项为 True,表示 ltrim 执行成功。", "_____no_output_____" ] ], [ [ "# 真正可用的批量弹出数据函数\n\ndef batch_pop_real(key, n):\n pipe = client.pipeline()\n pipe.lrange(key, 0, n - 1)\n pipe.ltrim(key, n, -1)\n result = pipe.execute()\n return result[0]", "_____no_output_____" ], [ "# 清空列表并重新添加10000条数据\nclient.delete('test_batch_pop')\nclient.lpush('test_batch_pop', *list(range(10000)))", "_____no_output_____" ], [ "start = time.time()\nwhile True:\n datas = batch_pop_real('test_batch_pop', 1000)\n if not datas:\n break\n for data in datas:\n pass\nend = time.time()\nprint(f'批量弹出10000条数据,耗时:{end - start}')", "批量弹出10000条数据,耗时:0.18534111976623535\n" ], [ "client.llen('test_batch_pop')", "_____no_output_____" ] ], [ [ "![读者交流QQ群](https://kingname-1257411235.cos.ap-chengdu.myqcloud.com/2019-02-16-09-59-56.png)\n![](https://kingname-1257411235.cos.ap-chengdu.myqcloud.com/640.gif)\n![](https://kingname-1257411235.cos.ap-chengdu.myqcloud.com/2019-03-03-20-47-47.png)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
d06e256628f9e4acd756b280dca783f73269318b
9,926
ipynb
Jupyter Notebook
penjualan.ipynb
avifmuhamadtaufiq/analisa-penjualan
4e9eeabf7ec640d276800732cbffda7c3d840850
[ "MIT" ]
null
null
null
penjualan.ipynb
avifmuhamadtaufiq/analisa-penjualan
4e9eeabf7ec640d276800732cbffda7c3d840850
[ "MIT" ]
null
null
null
penjualan.ipynb
avifmuhamadtaufiq/analisa-penjualan
4e9eeabf7ec640d276800732cbffda7c3d840850
[ "MIT" ]
null
null
null
24.092233
75
0.357445
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_excel('penjualan.xlsx', usecols=[1, 2, 3], skiprows=[0])", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "df.loc[0, 'JUMLAH'] = 47000", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "rekap_per_bulan = df.groupby(df.TGL.dt.month).sum()", "_____no_output_____" ], [ "rekap_per_bulan", "_____no_output_____" ], [ "df['JUMLAH'].sum()", "_____no_output_____" ], [ "rekap_per_bulan.sum()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06e54c0beafca6583f713bea2dc1fd41e489c02
894,979
ipynb
Jupyter Notebook
AlgoritmosClustering/ExperimentosClusters.ipynb
diegostaPy/UcomSeminario
0221060251c30f7b1caa4073d80ab1580bf6ab8f
[ "Apache-2.0" ]
3
2021-02-09T18:04:58.000Z
2021-03-19T01:56:56.000Z
Clase11-Clustering&GA/Agrupamiento/.ipynb_checkpoints/ExperimentosClusters-checkpoint.ipynb
diegostaPy/cursoIA
c18b68452f65e301a310c2e7d392558c8e266986
[ "Apache-2.0" ]
null
null
null
Clase11-Clustering&GA/Agrupamiento/.ipynb_checkpoints/ExperimentosClusters-checkpoint.ipynb
diegostaPy/cursoIA
c18b68452f65e301a310c2e7d392558c8e266986
[ "Apache-2.0" ]
1
2020-11-04T21:41:25.000Z
2020-11-04T21:41:25.000Z
272.19556
436,584
0.89765
[ [ [ "# importamos las librerías necesarias\n%matplotlib inline\nimport random\nimport tsfresh\nimport os\nimport math\nfrom scipy import stats\nfrom scipy.spatial.distance import pdist\nfrom math import sqrt, log, floor\nfrom fastdtw import fastdtw\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom statistics import mean\nfrom scipy.spatial.distance import euclidean\nimport scipy.cluster.hierarchy as hac\nfrom scipy.cluster.hierarchy import fcluster\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN\nfrom sklearn.manifold import TSNE\nfrom sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score, silhouette_score, silhouette_samples\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.spatial import distance\n\nsns.set(style='white')\n# \"fix\" the randomness for reproducibility\nrandom.seed(42)", "_____no_output_____" ], [ "!pip install tsfresh", "Collecting tsfresh\n Downloading tsfresh-0.17.0-py2.py3-none-any.whl (91 kB)\nRequirement already satisfied: statsmodels>=0.9.0 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (0.12.1)\nRequirement already satisfied: patsy>=0.4.1 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (0.5.1)\nRequirement already satisfied: scipy>=1.2.0 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (1.4.1)\nRequirement already satisfied: pandas>=0.25.0 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (1.0.5)\nCollecting dask[dataframe]>=2.9.0\n Downloading dask-2.30.0-py3-none-any.whl (848 kB)\nRequirement already satisfied: requests>=2.9.1 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (2.23.0)\nRequirement already satisfied: numpy>=1.15.1 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (1.18.5)\nCollecting tqdm>=4.10.0\n Downloading tqdm-4.51.0-py2.py3-none-any.whl (70 kB)\nCollecting distributed>=2.11.0\n Downloading distributed-2.30.1-py3-none-any.whl (656 kB)\nRequirement already satisfied: scikit-learn>=0.19.2 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from tsfresh) (0.22.1)\nRequirement already satisfied: six in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from patsy>=0.4.1->tsfresh) (1.15.0)\nRequirement already satisfied: python-dateutil>=2.6.1 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from pandas>=0.25.0->tsfresh) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from pandas>=0.25.0->tsfresh) (2020.1)\nRequirement already satisfied: pyyaml in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from dask[dataframe]>=2.9.0->tsfresh) (5.3.1)\nCollecting toolz>=0.8.2; extra == \"dataframe\"\n Downloading toolz-0.11.1-py3-none-any.whl (55 kB)\nCollecting fsspec>=0.6.0; extra == \"dataframe\"\n Downloading fsspec-0.8.4-py3-none-any.whl (91 kB)\nCollecting partd>=0.3.10; extra == \"dataframe\"\n Downloading partd-1.1.0-py3-none-any.whl (19 kB)\nRequirement already satisfied: chardet<4,>=3.0.2 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from requests>=2.9.1->tsfresh) (3.0.4)\nRequirement already satisfied: 
urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from requests>=2.9.1->tsfresh) (1.25.9)\nRequirement already satisfied: idna<3,>=2.5 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from requests>=2.9.1->tsfresh) (2.9)\nRequirement already satisfied: certifi>=2017.4.17 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from requests>=2.9.1->tsfresh) (2020.6.20)\nCollecting zict>=0.1.3\n Downloading zict-2.0.0-py3-none-any.whl (10 kB)\nCollecting cloudpickle>=1.5.0\n Downloading cloudpickle-1.6.0-py3-none-any.whl (23 kB)\nCollecting psutil>=5.0\n Downloading psutil-5.7.3-cp37-cp37m-win_amd64.whl (243 kB)\nRequirement already satisfied: setuptools in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from distributed>=2.11.0->tsfresh) (47.3.0.post20200616)\nCollecting sortedcontainers!=2.0.0,!=2.0.1\n Downloading sortedcontainers-2.2.2-py2.py3-none-any.whl (29 kB)\nCollecting msgpack>=0.6.0\n Downloading msgpack-1.0.0-cp37-cp37m-win_amd64.whl (72 kB)\nRequirement already satisfied: tornado>=5; python_version < \"3.8\" in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from distributed>=2.11.0->tsfresh) (6.0.4)\nCollecting tblib>=1.6.0\n Downloading tblib-1.7.0-py2.py3-none-any.whl (12 kB)\nRequirement already satisfied: click>=6.6 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from distributed>=2.11.0->tsfresh) (7.1.2)\nRequirement already satisfied: joblib>=0.11 in c:\\users\\yoda\\anaconda3\\envs\\tensorflow2\\lib\\site-packages (from scikit-learn>=0.19.2->tsfresh) (0.15.1)\nCollecting locket\n Downloading locket-0.2.0.tar.gz (3.5 kB)\nCollecting heapdict\n Downloading HeapDict-1.0.1-py3-none-any.whl (3.9 kB)\nBuilding wheels for collected packages: locket\n Building wheel for locket (setup.py): started\n Building wheel for locket (setup.py): finished with status 'done'\n Created wheel for locket: filename=locket-0.2.0-py3-none-any.whl size=4045 sha256=db38e5a4aa8529fa1f790ebe2bcc94b05ffeeccb4159a26e2933fbd080a63a4a\n Stored in directory: c:\\users\\yoda\\appdata\\local\\pip\\cache\\wheels\\7d\\21\\ce\\56f01c644a11bde5d09ecae16d9b5e9d7e988187624fd28fec\nSuccessfully built locket\nInstalling collected packages: toolz, fsspec, locket, partd, dask, tqdm, heapdict, zict, cloudpickle, psutil, sortedcontainers, msgpack, tblib, distributed, tsfresh\nSuccessfully installed cloudpickle-1.6.0 dask-2.30.0 distributed-2.30.1 fsspec-0.8.4 heapdict-1.0.1 locket-0.2.0 msgpack-1.0.0 partd-1.1.0 psutil-5.7.3 sortedcontainers-2.2.2 tblib-1.7.0 toolz-0.11.1 tqdm-4.51.0 tsfresh-0.17.0 zict-2.0.0\n" ] ], [ [ "### Dataset", "_____no_output_____" ], [ "Los datos son series temporales (casos semanales de Dengue) de distintos distritos de Paraguay", "_____no_output_____" ] ], [ [ "path = \"./data/Notificaciones/\"\nfilename_read = os.path.join(path,\"normalizado.csv\")\nnotificaciones = pd.read_csv(filename_read,delimiter=\",\",engine='python')\nnotificaciones.shape", "_____no_output_____" ], [ "listaMunicp = notificaciones['distrito_nombre'].tolist()\nlistaMunicp = list(dict.fromkeys(listaMunicp))\nprint('Son ', len(listaMunicp), ' distritos')\nlistaMunicp.sort()\nprint(listaMunicp)", "Son 217 distritos\n['1RO DE MARZO', '25 DE DICIEMBRE', '3 DE FEBRERO', 'ABAI', 'ACAHAY', 'ALBERDI', 'ALTO VERA', 'ALTOS', 'ANTEQUERA', 'AREGUA', 'ARROYOS Y ESTEROS', 'ASUNCION', 'ATYRA', 'AYOLAS', 'AZOTEY', 'BAHIA NEGRA', 'BELEN', 'BELLA VISTA', 'BENJAMIN ACEVAL', 'BORJA', 
'BUENA VISTA', 'CAACUPE', 'CAAGUAZU', 'CAAZAPA', 'CABALLERO ALVAREZ', 'CAMBYRETA', 'CAPIATA', 'CAPIIBARY', 'CAPITAN BADO', 'CAPITAN MEZA', 'CAPITAN MIRANDA', 'CARAGUATAY', 'CARAPEGUA', 'CARAYAO', 'CARLOS ANTONIO LOPEZ', 'CARMELO PERALTA', 'CARMEN DEL PARANA', 'CECILIO BAEZ', 'CERRITO', 'CHACO', 'CHORE', 'COLONIA FRAM', 'COLONIA INDEPENDENCIA', 'CONCEPCION', 'CORONEL BOGADO', 'CORONEL MARTINEZ', 'CORONEL OVIEDO', 'CORPUS CHRISTI', 'CURUGUATY', 'DESMOCHADOS', 'DR BOTRELL', 'DR. JUAN MANUEL FRUTOS', 'EDELIRA', 'EMBOSCADA', 'ENCARNACION', 'ESCOBAR', 'EUGENIO A GARAY', 'EUSEBIO AYALA', 'FASSARDI', 'FELIX PEREZ CARDOZO', 'FERNANDO DE LA MORA', 'FILADELFIA', 'FUERTE OLIMPO', 'GENERAL AQUINO', 'GENERAL ARTIGAS', 'GENERAL BERNARDINO CABALLERO', 'GENERAL BRUGUEZ', 'GENERAL DELGADO', 'GENERAL DIAZ', 'GENERAL MORINIGO', 'GENERAL RESQUIN', 'GUARAMBARE', 'GUAYAIBI', 'GUAZUCUA', 'HERNANDARIAS', 'HOHENAU', 'HORQUETA', 'HUMAITA', 'ISLA PUCU', 'ISLA UMBU', 'ITA', 'ITACURUBI DE LA CORDILLERA', 'ITACURUBI DEL ROSARIO', 'ITAKYRY', 'ITANARA', 'ITAPE', 'ITAPUA POTY', 'ITAUGUA', 'ITURBE', 'J A SALDIVAR', 'JESUS', 'JOSE DOMINGO OCAMPOS', 'JUAN DE MENA', 'JUAN E. OLEARY', 'JUAN EULOGIO ESTIGARRIBIA', 'JUAN LEON MALLORQUIN', 'KATUETE', 'LA PALOMA', 'LA PASTORA', 'LA VICTORIA', 'LAMBARE', 'LAURELES', 'LEANDRO OVIEDO', 'LIMA', 'LIMOY PUEBLO', 'LIMPIO', 'LOMA GRANDE', 'LOMA PLATA', 'LORETO', 'LUQUE', 'MACIEL', 'MARIANO ROQUE ALONSO', 'MAURICIO JOSE TROCHE', 'MBARACAYU', 'MBOCAYATY', 'MBOCAYATY DEL YHAGUY', 'MCAL. ESTIGARRIBIA', 'MCAL. FRANCISCO SOLANO LOPEZ', 'MINGA GUAZU', 'MINGA PORA', 'MOISES BERTONI', 'NANAWA', 'NARANJAL', 'NATALICIO TALAVERA', 'NATALIO', 'NUEVA ALBORADA', 'NUEVA COLOMBIA', 'NUEVA ESPERANZA', 'NUEVA GERMANIA', 'NUEVA ITALIA', 'NUEVA LONDRES', 'OBLIGADO', 'PARAGUARI', 'PASO YOBAI', 'PEDRO JUAN CABALLERO', 'PILAR', 'PIRAPO', 'PIRAYU', 'PIRIBEBUY', 'POZO COLORADO', 'PUERTO FALCON', 'PUERTO PINASCO', 'QUIINDY', 'R I 3 CORRALES', 'RAUL ARSENIO OVIEDO', 'REPATRIACION', 'ROQUE GONZALEZ DE SANTA CRUZ', 'SALTO DEL GUAIRA', 'SAN ALBERTO', 'SAN ANTONIO', 'SAN BERNARDINO', 'SAN CARLOS', 'SAN COSME Y DAMIAN', 'SAN ESTANISLAO', 'SAN IGNACIO', 'SAN JOAQUIN', 'SAN JOSE DE LOS ARROYOS', 'SAN JOSE OBRERO', 'SAN JUAN BAUTISTA', 'SAN JUAN DEL PARANA', 'SAN JUAN NEPOMUCENO', 'SAN LAZARO', 'SAN LORENZO', 'SAN MIGUEL', 'SAN PATRICIO', 'SAN PEDRO', 'SAN PEDRO DEL PARANA', 'SAN PEDRO DEL YCUAMANDIYU', 'SAN RAFAEL DEL PARANA', 'SAN ROQUE GONZALEZ DE SANTACRUZ', 'SAN SALVADOR', 'SANTA ELENA', 'SANTA MARIA', 'SANTA RITA', 'SANTA ROSA', 'SANTA ROSA DEL AGUARAY', 'SANTA ROSA DEL MBUTUY', 'SANTA ROSA DEL MONDAY', 'SANTIAGO', 'SAPUCAI', 'SIMON BOLIVAR', 'TACUARAS', 'TACUATI', 'TAVAI', 'TAVAPY', 'TEBICUARY', 'TEBICUARYMI', 'TEMBIAPORA', 'TOBATI', 'TOMAS ROMERO PEREIRA', 'TRINIDAD', 'UNION', 'VALENZUELA', 'VAQUERIA', 'VILLA DEL ROSARIO', 'VILLA ELISA', 'VILLA HAYES', 'VILLA OLIVA', 'VILLALBIN', 'VILLARRICA', 'VILLETA', 'YAGUARON', 'YATAITY', 'YATAITY DEL NORTE', 'YATYTAY', 'YBY YAU', 'YBYRAROVANA', 'YBYTYMI', 'YEGROS', 'YGATIMI', 'YGUAZU', 'YHU', 'YPACARAI', 'YPANE', 'YPEJHU', 'YUTY', 'ZANJA PYTA']\n" ] ], [ [ "A continuación tomamos las series temporales que leímos y vemos como quedan", "_____no_output_____" ] ], [ [ "timeSeries = pd.DataFrame()\nfor muni in listaMunicp:\n municipio=notificaciones['distrito_nombre']==muni\n notif_x_municp=notificaciones[municipio]\n notif_x_municp = notif_x_municp.reset_index(drop=True)\n notif_x_municp = notif_x_municp['incidencia']\n notif_x_municp = notif_x_municp.replace('nan', 
np.nan).fillna(0.000001)\n    notif_x_municp = notif_x_municp.replace([np.inf, -np.inf], np.nan).fillna(0.000001)\n    timeSeries = timeSeries.append(notif_x_municp)\n    ax = sns.tsplot(ax=None, data=notif_x_municp.values, err_style=\"unit_traces\")\nplt.show()", "/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/seaborn/timeseries.py:183: UserWarning: The `tsplot` function is deprecated and will be removed in a future release. Please update your code to use the new `lineplot` function.\n  warnings.warn(msg, UserWarning)\n" ], [ "#timeseries shape\nn=217", "_____no_output_____" ], [ "timeSeries.shape", "_____no_output_____" ], [ "timeSeries.describe()", "_____no_output_____" ] ], [ [ "### Cluster analysis (Clustering)", "_____no_output_____" ], [ "Clustering is an important process within machine learning. It plays a fundamental role in allowing machine-learning algorithms to train on and properly get to know the data they work with. Its main purpose is to group sets of unlabeled objects in order to build subsets of the data known as clusters. Within a graph, each cluster is formed by a collection of objects or data points that are similar to one another for the purposes of the analysis, but that have distinguishing elements with respect to other objects in the dataset, which may form an independent cluster.", "_____no_output_____" ], [ "![imagen.png](attachment:imagen.png)", "_____no_output_____" ], [ "Although the data are not necessarily that easy to group", "_____no_output_____" ], [ "![imagen.png](attachment:imagen.png)", "_____no_output_____" ], [ "### Similarity metrics", "_____no_output_____" ], [ " To measure how similar (or dissimilar) individuals are, there is an enormous number of similarity and dissimilarity (divergence) indices. They all have different properties and uses, and we must be aware of them to apply them correctly to the case at hand.\nMost of these indices are either distance-based indicators (treating individuals as vectors in the space of the variables), where a high distance between two individuals indicates a high degree of dissimilarity between them; indicators based on correlation coefficients; or indicators based on tables recording whether or not a series of attributes is present. \nBelow we show the functions for: \n* Euclidean distance\n* Root mean squared error\n* Fast Dynamic Time Warping\n* Pearson correlation and\n* Spearman correlation.\n\nMany other metrics exist, and which one to use depends on the nature of each problem. For example, *Fast Dynamic Time Warping* is a similarity measure designed specifically for time series.\n", "_____no_output_____" ] ], [ [ "#Euclidean\ndef euclidean(x, y):\n    r=np.linalg.norm(x-y)\n    if math.isnan(r):\n        r=1\n    #print(r)\n    return r", "_____no_output_____" ], [ "#RMSE\ndef rmse(x, y):\n    r=sqrt(mean_squared_error(x,y))\n    if math.isnan(r):\n        r=1\n    #print(r)\n    return r", "_____no_output_____" ], [ "#Fast Dynamic time warping\ndef fast_DTW(x, y):\n    r, _ = fastdtw(x, y, dist=euclidean)\n    if math.isnan(r):\n        r=1\n    #print(r)\n    return r", "_____no_output_____" ], [ "#Correlation\ndef corr(x, y):\n    r=np.dot(x-mean(x),y-mean(y))/((np.linalg.norm(x-mean(x)))*(np.linalg.norm(y-mean(y))))\n    if math.isnan(r):\n        r=0\n    #print(r)\n    return 1 - r", "_____no_output_____" ], [ "#Spearman\ndef scorr(x, y):\n    r = stats.spearmanr(x, y)[0]\n    if math.isnan(r):\n        r=0\n    #print(r)\n    return 1 - r", "_____no_output_____" ], [ "# compute distances using LCSS\n\n# function for LCSS computation\n# based on implementation from\n# https://rosettacode.org/wiki/Longest_common_subsequence\ndef lcs(a, b): \n    lengths = [[0 for j in range(len(b)+1)] for i in range(len(a)+1)]\n    # row 0 and column 0 are initialized to 0 already\n    for i, x in enumerate(a):\n        for j, y in enumerate(b):\n            if x == y:\n                lengths[i+1][j+1] = lengths[i][j] + 1\n            else:\n                lengths[i+1][j+1] = max(lengths[i+1][j], lengths[i][j+1])\n    x, y = len(a), len(b)\n    result = lengths[x][y]\n    return result\n\ndef discretise(x):\n    return int(x * 10)\n\ndef multidim_lcs(a, b):\n    a = a.applymap(discretise)\n    b = b.applymap(discretise)\n    rows, dims = a.shape\n    lcss = [lcs(a[i+2], b[i+2]) for i in range(dims)]\n    return 1 - sum(lcss) / (rows * dims)", "_____no_output_____" ], [ "#Distances for kmeans\n#Euclidean\neuclidean_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        euclidean_dist[i,j] = euclidean(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten())\n#RMSE\nrmse_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        rmse_dist[i,j] = rmse(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten())\n#Corr\ncorr_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        corr_dist[i,j] = corr(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten())\n#scorr\nscorr_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        scorr_dist[i,j] = scorr(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten())\n#DTW\ndtw_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        dtw_dist[i,j] = fast_DTW(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten())", "_____no_output_____" ] ], [ [ "### Determining the number of clusters to form", "_____no_output_____" ], [ "Most clustering techniques need the number of clusters to form as *input*; to choose it, we run trials with different numbers of clusters and keep the one that gives the lowest overall error. To measure that error we use the **Silhouette score**.", "_____no_output_____" ], [ "The **Silhouette score** can be used to study the separation distance between the resulting clusters, especially when there is no prior knowledge of the true group of each object, which is the most common situation in real applications.", "_____no_output_____" ], [ "The Silhouette score $s(i)$ is computed as:\n\\begin{equation}\ns(i)=\\dfrac{b(i)-a(i)}{\\max(b(i),a(i))}\n\\end{equation}", "_____no_output_____" ], [ "Let us define $a(i)$ as the mean distance from point $(i)$ to all the other points of the cluster it was assigned to ($A$). We can interpret $a(i)$ as how well the point is assigned to the cluster: the smaller the value, the better the assignment.\nSimilarly, let us define $b(i)$ as the mean distance from point $(i)$ to the points of its nearest neighboring cluster ($B$). Cluster ($B$) is the cluster that point $(i)$ is not assigned to, but whose distance is the closest among all the other clusters. $s(i)$ lies in the range [-1,1].", "_____no_output_____" ] ], [ [ "from yellowbrick.cluster import KElbowVisualizer\nmodel = AgglomerativeClustering()\nvisualizer = KElbowVisualizer(model, k=(3,20),metric='distortion', timings=False)\n\nvisualizer.fit(rmse_dist)        # Fit the data to the visualizer\nvisualizer.show()        # Finalize and render the figure", "/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.\n  warnings.warn(message, FutureWarning)\n/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix\n  return linkage(y, method='ward', metric='euclidean')\n" ] ], [ [ "So we will form 9 groups", "_____no_output_____" ] ], [ [ "k=9", "_____no_output_____" ] ], [ [ "## Clustering techniques", "_____no_output_____" ], [ "### K-means", "_____no_output_____" ], [ " The goal of this algorithm is to find “K” groups (clusters) in the raw data. 
The algorithm works iteratively to assign each “point” (each row of our input set forms a coordinate) to one of the “K” groups based on its features. Points are grouped by the similarity of their features (the columns). As a result of running the algorithm we obtain:\n\n* The “centroids” of each group, which are “coordinates” for each of the K sets and are used to label new samples.\n* Labels for the training dataset, each label belonging to one of the K groups formed.\n\nThe groups are defined “organically”: their positions are adjusted in each iteration of the process until the algorithm converges. Once the centroids have been found, we must analyze them to see which characteristics are unique to each group compared with the other groups. ", "_____no_output_____" ], [ "![imagen.png](attachment:imagen.png)", "_____no_output_____" ], [ "In the figure above we see how the data are grouped according to the *centroid*, which is represented by a star. The algorithm initializes the centroids randomly and adjusts them in each iteration; the points closest to a *centroid* are the ones that belong to the same group. ", "_____no_output_____" ], [ "### Hierarchical clustering", "_____no_output_____" ], [ "![imagen.png](attachment:imagen.png)", "_____no_output_____" ], [ "The hierarchical clustering algorithm groups the data based on the distance between each pair of points, seeking to make the data inside a cluster as similar to each other as possible.\n\nIn a graphical representation, the elements end up nested in tree-shaped hierarchies. ", "_____no_output_____" ], [ "### DBScan", "_____no_output_____" ], [ "Density-based spatial clustering of applications with noise (DBSCAN) is a data-clustering algorithm. It is a density-based clustering algorithm because it finds a number of clusters starting from an estimate of the density distribution of the corresponding nodes. DBSCAN is one of the most widely used and cited clustering algorithms in the scientific literature.", "_____no_output_____" ], [ "![imagen.png](attachment:imagen.png)", "_____no_output_____" ], [ "The points marked in red are core points. The yellow points are density-reachable from red and density-connected to red, and belong to the same cluster. The blue point is a noise point that is neither a core point nor density-reachable.", "_____no_output_____" ] ], [ [ "#Experiments\nprint('Silhouette coefficient')\n#HAC + euclidean\nZ = hac.linkage(timeSeries, method='complete', metric=euclidean)\nclusters = fcluster(Z, k, criterion='maxclust')\nprint(\"HAC + euclidean distance: \",silhouette_score(euclidean_dist, clusters))\n#HAC + rmse\nZ = hac.linkage(timeSeries, method='complete', metric=rmse)\nclusters = fcluster(Z, k, criterion='maxclust')\nprint(\"HAC + rmse distance: \",silhouette_score( rmse_dist, clusters))\n#HAC + corr\nZ = hac.linkage(timeSeries, method='complete', metric=corr)\nclusters = fcluster(Z, k, criterion='maxclust')\nprint(\"HAC + corr distance: \",silhouette_score( corr_dist, clusters))\n#HAC + scorr\nZ = hac.linkage(timeSeries, method='complete', metric=scorr)\nclusters = fcluster(Z, k, criterion='maxclust')\nprint(\"HAC + scorr distance: \",silhouette_score( scorr_dist, clusters))\n#HAC + LCSS\n#Z = hac.linkage(timeSeries, method='complete', metric=multidim_lcs)\n#clusters = fcluster(Z, k, criterion='maxclust')\n#print(\"HAC + LCSS distance: \",silhouette_score( timeSeries, clusters, metric=multidim_lcs))\n#HAC + DTW\nZ = hac.linkage(timeSeries, method='complete', metric=fast_DTW)\nclusters = fcluster(Z, k, criterion='maxclust')\nprint(\"HAC + DTW distance: \",silhouette_score( dtw_dist, clusters))", "Silhouette coefficient\nHAC + euclidean distance:  0.7454524446944198\nHAC + rmse distance:  0.7454524446944243\nHAC + corr distance:  0.015722011745326624\nHAC + scorr distance:  0.4504165962932875\nHAC + DTW distance:  0.6792829923251535\n" ], [ "km_euc = KMeans(n_clusters=k).fit_predict(euclidean_dist)\nsilhouette_avg=silhouette_score( euclidean_dist, km_euc)\nprint(\"KM + euclidian distance: \",silhouette_score( euclidean_dist, km_euc))\nkm_rmse = KMeans(n_clusters=k).fit_predict(rmse_dist)\nprint(\"KM + rmse distance: \",silhouette_score( rmse_dist, km_rmse))\nkm_corr = KMeans(n_clusters=k).fit_predict(corr_dist)\nprint(\"KM + corr distance: \",silhouette_score( corr_dist, km_corr))\nkm_scorr = KMeans(n_clusters=k).fit_predict(scorr_dist)\nprint(\"KM + scorr distance: \",silhouette_score( scorr_dist, km_scorr))\nkm_dtw = KMeans(n_clusters=k).fit_predict(dtw_dist)\nprint(\"KM + dtw distance: \",silhouette_score( dtw_dist, clusters))", "KM + euclidian distance:  0.5661172150227499\nKM + rmse distance:  0.605311731093136\nKM + corr distance:  0.17182825003639043\nKM + scorr distance:  0.4764498624401292\nKM + dtw distance:  0.6792829923251535\n" ], [ "#Experiments DBSCAN\nDB_euc = DBSCAN(eps=3, min_samples=2).fit_predict(euclidean_dist)\nsilhouette_avg=silhouette_score( euclidean_dist, DB_euc)\nprint(\"DBSCAN + euclidian distance: \",silhouette_score( euclidean_dist, DB_euc))\nDB_rmse = DBSCAN(eps=12, min_samples=10).fit_predict(rmse_dist)\n#print(\"DBSCAN + rmse distance: \",silhouette_score( rmse_dist, DB_rmse))\nprint(\"DBSCAN + rmse distance: \",0.00000000)\nDB_corr = DBSCAN(eps=3, min_samples=2).fit_predict(corr_dist)\nprint(\"DBSCAN + corr distance: \",silhouette_score( corr_dist, DB_corr))\nDB_scorr = DBSCAN(eps=3, min_samples=2).fit_predict(scorr_dist)\nprint(\"DBSCAN + scorr distance: \",silhouette_score( scorr_dist, DB_scorr))\nDB_dtw = DBSCAN(eps=3, min_samples=2).fit_predict(dtw_dist)\nprint(\"KM + dtw distance: \",silhouette_score( dtw_dist, DB_dtw))", "DBSCAN + euclidian distance:  0.8141967832004429\nDBSCAN + rmse distance:  0.0\nDBSCAN + corr distance:  0.4543067216391177\nDBSCAN + scorr distance:  0.005463947798855316\nKM + dtw distance:  0.5203731423414103\n" ] ], [ [ "## Property-based clustering", "_____no_output_____" ], [ "Another approach to clustering is to extract certain properties from our data and do the grouping based on them; the procedure is the same as if we were working with our actual data.", "_____no_output_____" ] ], [ [ "from tsfresh import extract_features\n\n#features extraction\nextracted_features = extract_features(timeSeries, column_id=\"indice\")", "Feature Extraction: 100%|██████████| 100/100 [02:27<00:00,  1.47s/it]\n" ], [ "extracted_features.shape", "_____no_output_____" ], [ "list(extracted_features.columns.values)", "_____no_output_____" ], [ "n=217\nfeatures = pd.DataFrame()\nMean=[]\nVar=[]\naCF1=[]\nPeak=[]\nEntropy=[]\nCpoints=[]\nfor muni in listaMunicp:\n    municipio=notificaciones['distrito_nombre']==muni\n    notif_x_municp=notificaciones[municipio]\n    notif_x_municp = notif_x_municp.reset_index(drop=True)\n    notif_x_municp = notif_x_municp['incidencia']\n    notif_x_municp = notif_x_municp.replace('nan', np.nan).fillna(0.000001)\n    notif_x_municp = notif_x_municp.replace([np.inf, -np.inf], np.nan).fillna(0.000001)\n    #Features\n    mean=tsfresh.feature_extraction.feature_calculators.mean(notif_x_municp)\n    var=tsfresh.feature_extraction.feature_calculators.variance(notif_x_municp)\n    ACF1=tsfresh.feature_extraction.feature_calculators.autocorrelation(notif_x_municp,1)\n    peak=tsfresh.feature_extraction.feature_calculators.number_peaks(notif_x_municp,20)\n    entropy=tsfresh.feature_extraction.feature_calculators.sample_entropy(notif_x_municp)\n    cpoints=tsfresh.feature_extraction.feature_calculators.number_crossing_m(notif_x_municp,5)\n    Mean.append(mean)\n    Var.append(var)\n    aCF1.append(ACF1)\n    Peak.append(peak)\n    Entropy.append(entropy)\n    Cpoints.append(cpoints)", "_____no_output_____" ], [ "data_tuples = list(zip(Mean,Var,aCF1,Peak,Entropy,Cpoints))\nfeatures = pd.DataFrame(data_tuples, columns =['Mean', 'Var', 'ACF1', 'Peak','Entropy','Cpoints']) \n# print the data \nfeatures", "_____no_output_____" ], [ "features.iloc[1]", "_____no_output_____" ], [ "#Distances for kmeans\n#Euclidean\nf_euclidean_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(1,n):\n        #print(\"j\",j)\n        f_euclidean_dist[i,j] = euclidean(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())\n#RMSE\nf_rmse_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        f_rmse_dist[i,j] = rmse(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())\n#Corr\n#print(features.iloc[i].values.flatten())\n#print(features.iloc[j].values.flatten())\nprint('-------------------------------')\nf_corr_dist = np.zeros((n,n))\n#for i in range(0,n):\n    # print(\"i\",i)\n    # for j in range(0,n):\n        # print(\"j\",j)\n        # print(features.iloc[i].values.flatten())\n        # print(features.iloc[j].values.flatten())\n        # f_corr_dist[i,j] = corr(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())\n#scorr\nf_scorr_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        f_scorr_dist[i,j] = scorr(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())\n#DTW\nf_dtw_dist = np.zeros((n,n))\nfor i in range(0,n):\n    #print(\"i\",i)\n    for j in range(0,n):\n        # print(\"j\",j)\n        f_dtw_dist[i,j] = fast_DTW(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())", "-------------------------------\n" ], [ "from 
yellowbrick.cluster import KElbowVisualizer\nmodel = AgglomerativeClustering()\nvisualizer = KElbowVisualizer(model, k=(3,50),metric='distortion', timings=False)\n\nvisualizer.fit(f_scorr_dist)        # Fit the data to the visualizer\nvisualizer.show()        # Finalize and render the figure", "/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix\n  return linkage(y, method='ward', metric='euclidean')\n" ], [ "k=9", "_____no_output_____" ], [ "km_euc = KMeans(n_clusters=k).fit_predict(f_euclidean_dist)\nsilhouette_avg=silhouette_score( f_euclidean_dist, km_euc)\nprint(\"KM + euclidian distance: \",silhouette_score( f_euclidean_dist, km_euc))\nkm_rmse = KMeans(n_clusters=k).fit_predict(f_rmse_dist)\nprint(\"KM + rmse distance: \",silhouette_score( f_rmse_dist, km_rmse))\n#km_corr = KMeans(n_clusters=k).fit_predict(f_corr_dist)\n#print(\"KM + corr distance: \",silhouette_score( f_corr_dist, km_corr))\n#print(\"KM + corr distance: 
\",silhouette_score( f_corr_dist, 0.0))\nkm_scorr = KMeans(n_clusters=k).fit_predict(f_scorr_dist)\nprint(\"KM + scorr distance: \",silhouette_score( f_scorr_dist, km_scorr))\nkm_dtw = KMeans(n_clusters=k).fit_predict(f_dtw_dist)\nprint(\"KM + dtw distance: \",silhouette_score( f_dtw_dist, clusters))", "KM + euclidian distance: 0.6559998122476823\nKM + rmse distance: 0.6613509624273096\nKM + scorr distance: 0.9953915258435644\nKM + dtw distance: -0.29584107123155795\n" ], [ "#Experimentos HAC\nHAC_euc = AgglomerativeClustering(n_clusters=k).fit_predict(f_euclidean_dist)\nsilhouette_avg=silhouette_score( f_euclidean_dist, HAC_euc)\nprint(\"HAC + euclidian distance: \",silhouette_score( f_euclidean_dist, HAC_euc))\nHAC_rmse = AgglomerativeClustering(n_clusters=k).fit_predict(f_rmse_dist)\nprint(\"HAC + rmse distance: \",silhouette_score( f_rmse_dist, HAC_rmse))\n#HAC_corr = AgglomerativeClustering(n_clusters=k).fit_predict(f_corr_dist)\n#print(\"HAC + corr distance: \",silhouette_score( f_corr_dist,HAC_corr))\nprint(\"HAC + corr distance: \",0.0)\nHAC_scorr = AgglomerativeClustering(n_clusters=k).fit_predict(f_scorr_dist)\nprint(\"HAC + scorr distance: \",silhouette_score( f_scorr_dist, HAC_scorr))\nHAC_dtw = AgglomerativeClustering(n_clusters=k).fit_predict(f_dtw_dist)\nprint(\"HAC + dtw distance: \",silhouette_score( f_dtw_dist, HAC_dtw))", "HAC + euclidian distance: 0.6385211824022649\nHAC + rmse distance: 0.6385439064812587\nHAC + corr distance: 0.0\nHAC + scorr distance: 0.3133639497304035\nHAC + dtw distance: 0.584638909649381\n" ], [ "#Experimentos DBSCAN\nDB_euc = DBSCAN(eps=3, min_samples=2).fit_predict(f_euclidean_dist)\nsilhouette_avg=silhouette_score( f_euclidean_dist, DB_euc)\nprint(\"DBSCAN + euclidian distance: \",silhouette_score( f_euclidean_dist, DB_euc))\nDB_rmse = DBSCAN(eps=12, min_samples=10).fit_predict(f_rmse_dist)\n#print(\"DBSCAN + rmse distance: \",silhouette_score( f_rmse_dist, DB_rmse))\n#print(\"DBSCAN + rmse distance: \",0.00000000)\n#DB_corr = DBSCAN(eps=3, min_samples=2).fit_predict(f_corr_dist)\n#print(\"DBSCAN + corr distance: \",silhouette_score( f_corr_dist, DB_corr))\nprint(\"DBSCAN + corr distance: \",0.0)\nDB_scorr = DBSCAN(eps=3, min_samples=2).fit_predict(f_scorr_dist)\nprint(\"DBSCAN + scorr distance: \",silhouette_score( f_scorr_dist, DB_scorr))\nDB_dtw = DBSCAN(eps=3, min_samples=2).fit_predict(f_dtw_dist)\nprint(\"KM + dtw distance: \",silhouette_score( f_dtw_dist, DB_dtw))", "DBSCAN + euclidian distance: 0.7327015254414699\nDBSCAN + corr distance: 0.0\nDBSCAN + scorr distance: 0.982667657643341\nKM + dtw distance: 0.6447434480812199\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06e54fecf2d28e94dfab1e2550fe1cb6897ea82
4,668
ipynb
Jupyter Notebook
scripts_nbs/demo2.ipynb
ashishlal/facerecog
c22d7de6debbee9048af5d86387a750e16eb7945
[ "Apache-2.0" ]
null
null
null
scripts_nbs/demo2.ipynb
ashishlal/facerecog
c22d7de6debbee9048af5d86387a750e16eb7945
[ "Apache-2.0" ]
null
null
null
scripts_nbs/demo2.ipynb
ashishlal/facerecog
c22d7de6debbee9048af5d86387a750e16eb7945
[ "Apache-2.0" ]
null
null
null
25.933333
93
0.560197
[ [ [ "import face_recognition\nimport cv2\nimport numpy as np", "_____no_output_____" ], [ "\ndef img_encoding(imagePath):\n knownEncodings = []\n knownNames = []\n\n image = cv2.imread(imagePath)\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # detect the (x, y)-coordinates of the bounding boxes\n # corresponding to each face in the input image\n boxes = face_recognition.face_locations(rgb,model='cnn')\n\n # compute the facial embedding for the face\n encodings = face_recognition.face_encodings(rgb, boxes)\n\n # loop over the encodings\n for encoding in encodings:\n # add each encoding + name to our set of known names and\n # encodings\n knownEncodings.append(encoding)\n # knownNames.append(name)\n return knownEncodings\n\ndef findCosineSimilarity(source_representation, test_representation):\n a = np.matmul(np.transpose(source_representation), test_representation)\n b = np.sum(np.multiply(source_representation, source_representation))\n c = np.sum(np.multiply(test_representation, test_representation))\n return 1 - (a / (np.sqrt(b) * np.sqrt(c)))\n\ndef findEuclideanDistance(source_representation, test_representation):\n euclidean_distance = source_representation - test_representation\n euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))\n euclidean_distance = np.sqrt(euclidean_distance)\n return euclidean_distance", "_____no_output_____" ], [ "database = {}", "_____no_output_____" ], [ "%%time\ndatabase['ashish_lal'] = img_encoding('../data/ashish_lal/00000000.jpg')[0]\ndatabase['alan_grant'] = img_encoding('../data/alan_grant/00000000.jpg')[0]\ndatabase['ivan_mihalj'] = img_encoding('../data/ivan_mihalj/00000000.jpg')[0]", "CPU times: user 512 ms, sys: 88 ms, total: 600 ms\nWall time: 576 ms\n" ], [ "%%time\nenc1 = img_encoding('../data/ashish_lal/00000001.jpg')[0]", "CPU times: user 304 ms, sys: 20 ms, total: 324 ms\nWall time: 306 ms\n" ], [ "%%time\nprint(findEuclideanDistance(enc1, database['ashish_lal']))\nprint(findEuclideanDistance(enc1, database['ivan_mihalj']))\nprint(findEuclideanDistance(enc1, database['alan_grant']))", "0.36230510846504294\n0.7108792102036539\n0.799418771947807\nCPU times: user 0 ns, sys: 0 ns, total: 0 ns\nWall time: 480 µs\n" ], [ "print(findCosineSimilarity(enc1, database['ashish_lal']))\nprint(findCosineSimilarity(enc1, database['ivan_mihalj']))\nprint(findCosineSimilarity(enc1, database['alan_grant']))", "0.039248688142542454\n0.13675468840499727\n0.16238160133150237\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d06e659031bef03ac0b1040021909dcdeae4fe20
43,911
ipynb
Jupyter Notebook
Welcome_To_Colaboratory.ipynb
user9990/Synthetic-data-gen
102e21663242508089b39f601d3cbc933b94b79b
[ "MIT" ]
null
null
null
Welcome_To_Colaboratory.ipynb
user9990/Synthetic-data-gen
102e21663242508089b39f601d3cbc933b94b79b
[ "MIT" ]
null
null
null
Welcome_To_Colaboratory.ipynb
user9990/Synthetic-data-gen
102e21663242508089b39f601d3cbc933b94b79b
[ "MIT" ]
null
null
null
146.37
31,886
0.851768
[ [ [ "<a href=\"https://colab.research.google.com/github/user9990/Synthetic-data-gen/blob/master/Welcome_To_Colaboratory.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "<p><img alt=\"Colaboratory logo\" height=\"45px\" src=\"/img/colab_favicon.ico\" align=\"left\" hspace=\"10px\" vspace=\"0px\"></p>\n\n<h1>What is Colaboratory?</h1>\n\nColaboratory, or \"Colab\" for short, allows you to write and execute Python in your browser, with \n- Zero configuration required\n- Free access to GPUs\n- Easy sharing\n\nWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below!", "_____no_output_____" ], [ "## **Getting started**\n\nThe document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.\n\nFor example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:", "_____no_output_____" ] ], [ [ "seconds_in_a_day = 24 * 60 * 60\nseconds_in_a_day", "_____no_output_____" ] ], [ [ "To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut \"Command/Ctrl+Enter\". To edit the code, just click the cell and start editing.\n\nVariables that you define in one cell can later be used in other cells:", "_____no_output_____" ] ], [ [ "seconds_in_a_week = 7 * seconds_in_a_day\nseconds_in_a_week", "_____no_output_____" ] ], [ [ "Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.com#create=true).\n\nColab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org).", "_____no_output_____" ], [ "## Data science\n\nWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom matplotlib import pyplot as plt\n\nys = 200 + np.random.randn(100)\nx = [x for x in range(len(ys))]\n\nplt.plot(x, ys, '-')\nplt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)\n\nplt.title(\"Sample Visualization\")\nplt.show()", "_____no_output_____" ] ], [ [ "You can import your own data into Colab notebooks from your Google Drive account, including from spreadsheets, as well as from Github and many other sources. 
To learn more about importing data, and how Colab can be used for data science, see the links below under [Working with Data](#working-with-data).", "_____no_output_____" ], [ "## Machine learning\n\nWith Colab you can import an image dataset, train an image classifier on it, and evaluate the model, all in just [a few lines of code](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb). Colab notebooks execute code on Google's cloud servers, meaning you can leverage the power of Google hardware, including [GPUs and TPUs](#using-accelerated-hardware), regardless of the power of your machine. All you need is a browser.", "_____no_output_____" ], [ "Colab is used extensively in the machine learning community with applications including:\n- Getting started with TensorFlow\n- Developing and training neural networks\n- Experimenting with TPUs\n- Disseminating AI research\n- Creating tutorials\n\nTo see sample Colab notebooks that demonstrate machine learning applications, see the [machine learning examples](#machine-learning-examples) below.", "_____no_output_____" ], [ "## More Resources\n\n### Working with Notebooks in Colab\n- [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)\n- [Guide to Markdown](/notebooks/markdown_guide.ipynb)\n- [Importing libraries and installing dependencies](/notebooks/snippets/importing_libraries.ipynb)\n- [Saving and loading notebooks in GitHub](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)\n- [Interactive forms](/notebooks/forms.ipynb)\n- [Interactive widgets](/notebooks/widgets.ipynb)\n- <img src=\"/img/new.png\" height=\"20px\" align=\"left\" hspace=\"4px\" alt=\"New\"></img>\n [TensorFlow 2 in Colab](/notebooks/tensorflow_version.ipynb)\n\n<a name=\"working-with-data\"></a>\n### Working with Data\n- [Loading data: Drive, Sheets, and Google Cloud Storage](/notebooks/io.ipynb) \n- [Charts: visualizing data](/notebooks/charts.ipynb)\n- [Getting started with BigQuery](/notebooks/bigquery.ipynb)\n\n### Machine Learning Crash Course\nThese are a few of the notebooks from Google's online Machine Learning course. 
See the [full course website](https://developers.google.com/machine-learning/crash-course/) for more.\n- [Intro to Pandas](/notebooks/mlcc/intro_to_pandas.ipynb)\n- [Tensorflow concepts](/notebooks/mlcc/tensorflow_programming_concepts.ipynb)\n- [First steps with TensorFlow](/notebooks/mlcc/first_steps_with_tensor_flow.ipynb)\n- [Intro to neural nets](/notebooks/mlcc/intro_to_neural_nets.ipynb)\n- [Intro to sparse data and embeddings](/notebooks/mlcc/intro_to_sparse_data_and_embeddings.ipynb)\n\n<a name=\"using-accelerated-hardware\"></a>\n### Using Accelerated Hardware\n- [TensorFlow with GPUs](/notebooks/gpu.ipynb)\n- [TensorFlow with TPUs](/notebooks/tpu.ipynb)", "_____no_output_____" ], [ "<a name=\"machine-learning-examples\"></a>\n\n## Machine Learning Examples\n\nTo see end-to-end examples of the interactive machine learning analyses that Colaboratory makes possible, check out these tutorials using models from [TensorFlow Hub](https://tfhub.dev).\n\nA few featured examples:\n\n- [Retraining an Image Classifier](https://tensorflow.org/hub/tutorials/tf2_image_retraining): Build a Keras model on top of a pre-trained image classifier to distinguish flowers.\n- [Text Classification](https://tensorflow.org/hub/tutorials/tf2_text_classification): Classify IMDB movie reviews as either *positive* or *negative*.\n- [Style Transfer](https://tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization): Use deep learning to transfer style between images.\n- [Multilingual Universal Sentence Encoder Q&A](https://tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa): Use a machine learning model to answer questions from the SQuAD dataset.\n- [Video Interpolation](https://tensorflow.org/hub/tutorials/tweening_conv3d): Predict what happened in a video between the first and the last frame.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06e693628e608f6f30bd7618fb342893a26fe8d
6,893
ipynb
Jupyter Notebook
Practices/_Keys/KEY_Practice09_Conditionals.ipynb
ssorbetto/curriculum-notebooks
467ba55859b081a6f9aa8af4ed69f8255fecb817
[ "CC-BY-4.0" ]
null
null
null
Practices/_Keys/KEY_Practice09_Conditionals.ipynb
ssorbetto/curriculum-notebooks
467ba55859b081a6f9aa8af4ed69f8255fecb817
[ "CC-BY-4.0" ]
null
null
null
Practices/_Keys/KEY_Practice09_Conditionals.ipynb
ssorbetto/curriculum-notebooks
467ba55859b081a6f9aa8af4ed69f8255fecb817
[ "CC-BY-4.0" ]
null
null
null
29.207627
339
0.584216
[ [ [ "# Practice with conditionals\n\nBefore we practice conditionals, let's review:\n\nTo execute a command when a condition is true, use `if`:\n\n```\nif [condition]:\n [command]\n```\n\nTo execute a command when a condition is true, and execute something else otherwise, use `if/else`:\n```\nif [condition]:\n [command 1]\nelse:\n [command 2]\n```\n\nTo execute a command when one condition is true, a different command if a second condition is true, and execute something else otherwise, use `if/elif/else`:\n```\nif [condition 1]:\n [command 1]\nelif [condition 2]:\n [command 2]\nelse:\n [command 3]\n```\n\nRemember that commands in an `elif` will only run if the first condition is false AND the second condition is true.", "_____no_output_____" ], [ "Let's say we are making a smoothie. In order to make a big enough smoothie, we want at least 4 cups of ingredients.", "_____no_output_____" ] ], [ [ "strawberries = 1\nbananas = 0.5\nmilk = 1\n\n# create a variable ingredients that equals the sum of all our ingredients\ningredients = strawberries + bananas + milk\n\n# write an if statement that prints out \"We have enough ingredients!\" if we have at least 4 cups of ingredients\nif ingredients >= 4:\n print(\"We have enough ingredients!\")", "_____no_output_____" ] ], [ [ "The code above will let us know if we have enough ingredients for our smoothie. But, if we don't have enough ingredients, the code won't print anything. Our code would be more informative if it also told us when we didn't have enough ingredients. Next, let's write code that also lets us know when we _don't_ have enough ingredients.", "_____no_output_____" ] ], [ [ "# write code that prints \"We have enough ingredients\" if we have at least 4 cups of ingredients\n# and also prints \"We don't have enough ingredients\" if we have less than 4 cups of ingredients\nif ingredients >=4:\n print(\"We have enough ingredients!\")\nelse:\n print(\"We do not have enough ingredients.\")", "We do not have enough ingredients.\n" ] ], [ [ "It might also be useful to know if we have exactly 4 cups of ingredients. Add to the code above so that it lets us know when we have more than enough ingredients, exactly enough ingredients, or not enough ingredients.", "_____no_output_____" ] ], [ [ "# write code that prints informative messages when we have more than 4 cups of ingredients,\n# exactly 4 cups of ingredients, or less than 4 cups of ingredients\nif ingredients > 4:\n print(\"we have more than enough ingredients\")\nelif ingredients is 4:\n print(\"we have exactly enough ingredients\")\nelse:\n print(\"we do not have enough ingredients\")", "we have exactly enough ingredients\n" ] ], [ [ "**Challenge**: Suppose our blender can only fit up to 6 cups inside. 
Add to the above code so that it also warns us when we have too many ingredients.", "_____no_output_____" ] ], [ [ "# write an if/elif/else style statement that does the following:\n# prints a message when we have exactly 4 cups of ingredients saying we have exactly the right amount of ingredients\n# prints a message when we have less than 4 cups of ingredients say we do not have enough \n# prints a message when we have 4-6 cups of ingredients saying we have more than enough\n# prints a message otherwise that says we have too many ingredients \nif ingredients is 4:\n print(\"we have exactly enough ingredients\")\nelif ingredients < 4:\n print(\"we do not have enough ingredients\")\nelif ingredients > 4 and ingredients < 6:\n print(\"we have more than enough ingredients\")\nelse:\n print(\"We have too many ingredients\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06e69fa0d2af1674a1bf7443f1197804ff85494
4,993
ipynb
Jupyter Notebook
examples/glyphs/table_server.ipynb
gitter-badger/bokeh
5481346de1642a4e6710d32b70262fd6c2674360
[ "BSD-3-Clause" ]
null
null
null
examples/glyphs/table_server.ipynb
gitter-badger/bokeh
5481346de1642a4e6710d32b70262fd6c2674360
[ "BSD-3-Clause" ]
null
null
null
examples/glyphs/table_server.ipynb
gitter-badger/bokeh
5481346de1642a4e6710d32b70262fd6c2674360
[ "BSD-3-Clause" ]
null
null
null
35.664286
124
0.490687
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06e6c7196a4ad8f2ed62de5d8aaff60b1238adb
165,468
ipynb
Jupyter Notebook
content/03/02f_chains-Copy1.ipynb
schemesmith/ledatascifi-2021
f3a081c794655a2f560f8b72edd827ed5c90ada3
[ "MIT" ]
null
null
null
content/03/02f_chains-Copy1.ipynb
schemesmith/ledatascifi-2021
f3a081c794655a2f560f8b72edd827ed5c90ada3
[ "MIT" ]
null
null
null
content/03/02f_chains-Copy1.ipynb
schemesmith/ledatascifi-2021
f3a081c794655a2f560f8b72edd827ed5c90ada3
[ "MIT" ]
null
null
null
428.673575
28,976
0.935685
[ [ [ "# Using \"method chains\" to create more readable code", "_____no_output_____" ], [ "### Game of Thrones example - slicing, group stats, and plotting\n\nI didn't find an off the shelf dataset to run our seminal analysis from last week, but I found [an analysis](https://www.kaggle.com/dhanushkishore/impact-of-game-of-thrones-on-us-baby-names) that explored if Game of Thrones prompted parents to start naming their children differently. The following is inspired by that, but uses pandas to acquire and wrangle our data in a \"Tidyverse\"-style (how R would do it) flow.\n", "_____no_output_____" ] ], [ [ "#TO USE datadotworld PACKAGE:\n#1. create account at data.world, then run the next two lines:\n#2. in terminal/powershell: pip install datadotworld[pandas]\n#\n# IF THIS DOESN'T WORK BC YOU GET AN ERROR ABOUT \"CCHARDET\", RUN:\n# conda install -c conda-forge cchardet\n# THEN RERUN: pip install datadotworld[pandas] \n#\n#3. in terminal/powershell: dw configure \n#3a. copy in API token from data.world (get from settings > advanced)\n\nimport datadotworld as dw\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nbaby_names = dw.load_dataset('nkrishnaswami/us-ssa-baby-names-national')\nbaby_names = baby_names.dataframes['names_ranks_counts']", "_____no_output_____" ] ], [ [ "#### Version 1\n1. save a slice of the dataset with the names we want (using `.loc`)\n2. sometimes a name is used by boys and girls in the same year, so combine the counts so that we have one observation per name per year\n3. save the dataset and then call a plot function\n", "_____no_output_____" ] ], [ [ "# restrict by name and only keep years after 2000\nsomenames = baby_names.loc[( # formating inside this () is just to make it clearer to a reader\n ( # condition 1: one of these names, | means \"or\"\n (baby_names['name'] == \"Sansa\") | (baby_names['name'] == \"Daenerys\") | \n (baby_names['name'] == \"Brienne\") | (baby_names['name'] == \"Cersei\") | (baby_names['name'] == \"Tyrion\") \n ) # end condition 1\n & # & means \"and\"\n ( # condition 2: these years\n baby_names['year'] >= 2000) # end condition 2\n )]\n\n# if a name is used by F and M in a given year, combine the count variable\n# Q: why is there a \"reset_index\"? \n# A: groupby automatically turns the groups (here name and year) into the index\n# reset_index makes the index simple integers 0, 1, 2 and also\n# turns the the grouping variables back into normal columns\n# A2: instead of reset_index, you can include `as_index=False` inside groupby!\n# (I just learned that myself!)\nsomenames_agg = somenames.groupby(['name','year'])['count'].sum().reset_index().sort_values(['name','year'])\n\n# plot\nsns.lineplot(data=somenames_agg, hue='name',x='year',y='count')\nplt.axvline(2011, 0,160,color='red') # add a line for when the show debuted\n", "_____no_output_____" ] ], [ [ "#### Version 2 - `query` > `loc`, for readability\nSame as V1, but step 1 uses `.query` to slice inside of `.loc`\n1. save a slice of the dataset with the names we want (using `.query`)\n2. sometimes a name is used by boys and girls in the same year, so combine the counts so that we have one observation per name per year\n3. save the dataset and then call a plot function", "_____no_output_____" ] ], [ [ "# use query instead to slice, and the rest is the same\nsomenames = baby_names.query('name in [\"Sansa\",\"Daenerys\",\"Brienne\",\"Cersei\",\"Tyrion\"] & \\\n year >= 2000') # this is one string with ' as the string start/end symbol. 
Inside, I can use \n # normal quote marks for strings. Also, I can break it into multiple lines with \\\nsomenames_agg = somenames.groupby(['name','year'])['count'].sum().reset_index().sort_values(['name','year'])\nsns.lineplot(data=somenames_agg, hue='name',x='year',y='count')\nplt.axvline(2011, 0,160,color='red') # add a line for when the show debuted\n", "_____no_output_____" ] ], [ [ "#### Version 3 - Method chaining!\nMethod chaining: Call the object (`baby_names`) and then keep calling one method on it after another. \n- Python will call the methods from left to right. \n- There is no need to store the intermediate dataset (like `somenames` and `somenames_agg` above!)\n - --> Easier to read and write without \"temp\" objects all over the place\n - You can always save the dataset at an intermediate step if you need to\n\nSo, the first two steps are the same, just the methods will be chained. And then, a bonus trick to plot\nwithout saving.\n1. Slice with `.query` to GoT-related names\n2. Combine M and F gender counts if a name is used by both in the same year\n3. Plot without saving: \"Pipe\" in the plotting function \n\nThe code below produces a plot identical to V1 and V2, **but it is unreadable. Don't try - I'm about to make this readable!** Just _one more_ iteration... \n", "_____no_output_____" ] ], [ [ "baby_names.query('name in [\"Sansa\",\"Daenerys\",\"Brienne\",\"Cersei\",\"Tyrion\"] & year >= 2000').groupby(['name','year'])['count'].sum().reset_index().pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')\nplt.axvline(2011, 0,160,color='red') # add a line for when the show debuted", "_____no_output_____" ] ], [ [ "To make this readable, we write a parentheses over multiple lines\n```\n(\n and python knows to execute the code inside as one line\n)\n```\n\nAnd as a result, we can write a long series of methods that is comprehensible, and if we want we can even comment on each line:", "_____no_output_____" ] ], [ [ "(baby_names\n .query('name in [\"Sansa\",\"Daenerys\",\"Brienne\",\"Cersei\",\"Tyrion\"] & \\\n year >= 2000')\n .groupby(['name','year'])['count'].sum() # for each name-year, combine M and F counts\n .reset_index() # give us the column names back as they were (makes the plot call easy)\n .pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')\n) \nplt.axvline(2011, 0,160,color='red') # add a line for when the show debuted\nplt.title(\"WOW THAT WAS EASY TO WRITE AND SHARE\")", "_____no_output_____" ] ], [ [ "**WOW. That's nice code!**\n\n\nAlso: **Naming your baby Daenerys after the hero...**\n\n...is a bad break. 
", "_____no_output_____" ] ], [ [ "(baby_names\n .query('name in [\"Khaleesi\",\"Ramsay\",\"Lyanna\",\"Ellaria\",\"Meera\"] & \\\n year >= 2000')\n .groupby(['name','year'])['count'].sum() # for each name-year, combine M and F counts\n .reset_index() # give use the column names back as they were (makes the plot call easy)\n .pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')\n) \nplt.axvline(2011, 0,160,color='red') # add a line for when the show debuted\nplt.title(\"PEOPLE NAMED THEIR KID KHALEESI\")\n", "_____no_output_____" ] ], [ [ "**BUT IT COULD BE WORSE**", "_____no_output_____" ] ], [ [ "(baby_names\n .query('name in [\"Krymson\"] & year >= 1950')\n .groupby(['name','year'])['count'].sum() # for each name-year, combine M and F counts\n .reset_index() # give use the column names back as they were (makes the plot call easy)\n .pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')\n) \nplt.title(\"Alabama, wow...Krymson, really?\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06e6f3abf353b5d7da18f7a2f30264667c09706
183,894
ipynb
Jupyter Notebook
notebooks/random_patient_ids.ipynb
c42f/HumanReadableIDs.jl
b4d88cd1ae63bebd75c86d32a3a3920931f614b8
[ "MIT" ]
null
null
null
notebooks/random_patient_ids.ipynb
c42f/HumanReadableIDs.jl
b4d88cd1ae63bebd75c86d32a3a3920931f614b8
[ "MIT" ]
null
null
null
notebooks/random_patient_ids.ipynb
c42f/HumanReadableIDs.jl
b4d88cd1ae63bebd75c86d32a3a3920931f614b8
[ "MIT" ]
null
null
null
135.715129
40,717
0.678712
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d06e70e2a97a386dc5d3122571807f85b94d3610
25,047
ipynb
Jupyter Notebook
_notebooks/2021-01-26-Bookrating.ipynb
rajivreddy219/ai-projects
3e680eddc37c390ceb0b8d12b40e8bad2f441134
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-01-26-Bookrating.ipynb
rajivreddy219/ai-projects
3e680eddc37c390ceb0b8d12b40e8bad2f441134
[ "Apache-2.0" ]
null
null
null
_notebooks/2021-01-26-Bookrating.ipynb
rajivreddy219/ai-projects
3e680eddc37c390ceb0b8d12b40e8bad2f441134
[ "Apache-2.0" ]
null
null
null
28.922633
408
0.461852
[ [ [ "# \"Bookrating (Collaborative-Filtering)\"\n> \"Prediction of tangible books to read using collaborative filtering\"\n\n- toc: false\n- branch: master\n- badges: true\n- comments: true\n- categories: [jupyter, pytorch, pytorch-lightning]\n- hide: false\n- search_exclude: true", "_____no_output_____" ] ], [ [ "%%capture\n!pip install -U fastai", "_____no_output_____" ], [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "from fastai.collab import *\nimport pandas as pd\nimport torch.nn as nn", "_____no_output_____" ], [ "pathr = '/content/drive/MyDrive/my-datasets/collaborative-filtering/BX-Book-Ratings.csv'\npathb = '/content/drive/MyDrive/my-datasets/collaborative-filtering/BX-Books.csv'\npathu = '/content/drive/MyDrive/my-datasets/collaborative-filtering/BX-Users.csv'", "_____no_output_____" ], [ "dfr = pd.read_csv(pathr, sep=';', error_bad_lines=False, encoding='latin-1')\ndfb = pd.read_csv(pathb, sep=';', error_bad_lines=False, encoding='latin-1')\ndfu = pd.read_csv(pathu, sep=';', error_bad_lines=False, encoding='latin-1')", "b'Skipping line 6452: expected 8 fields, saw 9\\nSkipping line 43667: expected 8 fields, saw 10\\nSkipping line 51751: expected 8 fields, saw 9\\n'\nb'Skipping line 92038: expected 8 fields, saw 9\\nSkipping line 104319: expected 8 fields, saw 9\\nSkipping line 121768: expected 8 fields, saw 9\\n'\nb'Skipping line 144058: expected 8 fields, saw 9\\nSkipping line 150789: expected 8 fields, saw 9\\nSkipping line 157128: expected 8 fields, saw 9\\nSkipping line 180189: expected 8 fields, saw 9\\nSkipping line 185738: expected 8 fields, saw 9\\n'\nb'Skipping line 209388: expected 8 fields, saw 9\\nSkipping line 220626: expected 8 fields, saw 9\\nSkipping line 227933: expected 8 fields, saw 11\\nSkipping line 228957: expected 8 fields, saw 10\\nSkipping line 245933: expected 8 fields, saw 9\\nSkipping line 251296: expected 8 fields, saw 9\\nSkipping line 259941: expected 8 fields, saw 9\\nSkipping line 261529: expected 8 fields, saw 9\\n'\n/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py:2718: DtypeWarning: Columns (3) have mixed types.Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n" ], [ "dfb = dfb[['ISBN','Book-Title','Book-Author','Year-Of-Publication','Publisher']]", "_____no_output_____" ], [ "dfr.head()", "_____no_output_____" ], [ "dfb.head()", "_____no_output_____" ], [ "df = dfr.merge(dfb)\ndf.head()", "_____no_output_____" ], [ "dls = CollabDataLoaders.from_df(df, item_name='Book-Title', bs=64)\ndls.show_batch()", "_____no_output_____" ], [ "learn = collab_learner(dls, y_range=(0,5.5), n_factors=50)", "_____no_output_____" ], [ "learn.fit_one_cycle(5, 2e-3, wd=0.1)", "_____no_output_____" ], [ "def recommend(book):\n movie_factors = learn.model.i_weight.weight\n idx = dls.classes['Book-Title'].o2i[book]\n dist = nn.CosineSimilarity(dim=1)(movie_factors, movie_factors[idx][None])\n indices = dist.argsort(descending=True)[1:6]\n return dls.classes['Book-Title'][indices]", "_____no_output_____" ], [ "res = recommend('Harry Potter and the Prisoner of Azkaban (Book 3)')\nfor i in res:\n print(i)", "Harry Potter and the Goblet of Fire (Book 4)\nHarry Potter and the Chamber of Secrets (Book 2)\nThe X-Planes: X-1 to X-45: 3rd Edition\nSanctuary: Finding Moments of Refuge in the Presence of God\nHarry Potter 
and the Sorcerer's Stone (Book 1)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06e7383289e829b7de3d8245e6cf0eac15a95e8
39,599
ipynb
Jupyter Notebook
synth_grads.ipynb
tarcey/Synthetic-Gradients-in-TF
c57bff306b8098dba5811c65625fca624c1834d7
[ "CC0-1.0" ]
1
2020-12-22T19:16:16.000Z
2020-12-22T19:16:16.000Z
synth_grads.ipynb
tarcey/Synthetic-Gradients-in-TF
c57bff306b8098dba5811c65625fca624c1834d7
[ "CC0-1.0" ]
null
null
null
synth_grads.ipynb
tarcey/Synthetic-Gradients-in-TF
c57bff306b8098dba5811c65625fca624c1834d7
[ "CC0-1.0" ]
null
null
null
43.230349
162
0.467739
[ [ [ "Text classification with attention and synthetic gradients.\n\n\n\nImports and set-up:", "_____no_output_____" ] ], [ [ "%tensorflow_version 2.x\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport subprocess\nfrom sklearn.model_selection import train_test_split\nimport gensim\nimport re\nimport sys\nimport time\n\n# TODO: actually implement distribution in the training loop\nstrategy = tf.distribute.get_strategy()\n\nuse_mixed_precision = False\ntf.config.run_functions_eagerly(False)\ntf.get_logger().setLevel('ERROR')\n\nis_tpu = None\ntry:\n tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n is_tpu = True\nexcept ValueError:\n is_tpu = False\n\nif is_tpu:\n print('TPU available.')\n tf.config.experimental_connect_to_cluster(tpu)\n tf.tpu.experimental.initialize_tpu_system(tpu)\n strategy = tf.distribute.TPUStrategy(tpu)\n if use_mixed_precision:\n policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')\n tf.keras.mixed_precision.experimental.set_policy(policy)\nelse:\n print('No TPU available.')\n result = subprocess.run(\n ['nvidia-smi', '-L'],\n stdout=subprocess.PIPE).stdout.decode(\"utf-8\").strip()\n if \"has failed\" in result:\n print(\"No GPU available.\")\n else:\n print(result)\n strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(\n tf.distribute.experimental.CollectiveCommunication.NCCL)\n if use_mixed_precision:\n policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')\n tf.keras.mixed_precision.experimental.set_policy(policy)", "_____no_output_____" ] ], [ [ "Downloading the data", "_____no_output_____" ] ], [ [ "# Download the Sentiment140 dataset\n!mkdir -p data\n!wget -nc https://nyc3.digitaloceanspaces.com/ml-files-distro/v1/sentiment-analysis-is-bad/data/training.1600000.processed.noemoticon.csv.zip -P data\n!unzip -n -d data data/training.1600000.processed.noemoticon.csv.zip", "_____no_output_____" ] ], [ [ "Loading and splitting the data", "_____no_output_____" ] ], [ [ "sen140 = pd.read_csv(\n \"data/training.1600000.processed.noemoticon.csv\", encoding='latin-1',\n names=[\"target\", \"ids\", \"date\", \"flag\", \"user\", \"text\"])\nsen140.head()\nsen140 = sen140.sample(frac=1).reset_index(drop=True)\nsen140 = sen140[['text', 'target']]\nfeatures, targets = sen140.iloc[:, 0].values, sen140.iloc[:, 1].values\n\nprint(\"A random tweet\\t:\", features[0])\n\n# split between train and test sets\nx_train, x_test, y_train, y_test = train_test_split(features,\n targets,\n test_size=0.33)\ny_train = y_train.astype(\"float32\") / 4.0\ny_test = y_test.astype(\"float32\") / 4.0\nx_train = np.expand_dims(x_train, -1)\nx_test = np.expand_dims(x_test, -1)", "_____no_output_____" ] ], [ [ "Preprocessing data", "_____no_output_____" ] ], [ [ "def process_tweet(x):\n x = x.strip()\n x = x.lower()\n x = re.sub(r\"[^a-zA-Z0-9üöäÜÖÄß\\.,!\\?\\-%\\$€\\/ ]+'\", ' ', x)\n x = re.sub('([\\.,!\\?\\-%\\$€\\/])', r' \\1 ', x)\n x = re.sub('\\s{2,}', ' ', x)\n x = x.split()\n x.append(\"[&END&]\")\n length = len(x)\n return x\n\n\ntweets_train = []\ntweets_test = []\nfor tweet in x_train:\n tweets_train.append(process_tweet(tweet[0]))\nfor tweet in x_test:\n tweets_test.append(process_tweet(tweet[0]))\n\n\n# Building the initial vocab with all words from the training set\ndef add_or_update_word(_vocab, word):\n entry = None\n if word in _vocab:\n entry = _vocab[word]\n entry = (entry[0], entry[1] + 1)\n else:\n entry = (len(_vocab), 1)\n _vocab[word] = entry\n\n\nvocab_pre = {}\n# \"[&END&]\" is for padding, 
\"[&UNK&]\" for unknown words\nadd_or_update_word(vocab_pre, \"[&END&]\")\nadd_or_update_word(vocab_pre, \"[&UNK&]\")\nfor tweet in tweets_train:\n for word in tweet:\n add_or_update_word(vocab_pre, word)\n\n# limiting the vocabulary to only include words that appear at least 3 times\n# in the training data set. Reduces vocab size to about 1/6th.\n# This is to make it harder for the model to overfit by focusing on words that\n# may only appear in the training data, and also to generally make it learn to\n# handle unknown words (more robust)\nkeys = vocab_pre.keys()\nvocab = {}\nvocab[\"[&END&]\"] = 0\nvocab[\"[&UNK&]\"] = 1\nfor key in keys:\n freq = vocab_pre[key][1]\n index = vocab_pre[key][0]\n if freq >= 3 and index > 1:\n vocab[key] = len(vocab)\n\n\n# Replace words that have been removed from the vocabulary with \"[&UNK&]\" in\n# both the training and testing data\ndef filter_unknown(_in, _vocab):\n for tweet in _in:\n for i in range(len(tweet)):\n if not tweet[i] in _vocab:\n tweet[i] = \"[&UNK&]\"\n\n\nfilter_unknown(tweets_train, vocab)\nfilter_unknown(tweets_test, vocab)", "_____no_output_____" ] ], [ [ "Using gensim word2vec to get a good word embedding.", "_____no_output_____" ] ], [ [ "# train the embedding\nembedding_dims = 128\nembedding = gensim.models.Word2Vec(tweets_train,\n size=embedding_dims, min_count=0)", "_____no_output_____" ], [ "def tokenize(_in, _vocab):\n _out = []\n for i in range(len(_in)):\n tweet = _in[i]\n wordlist = []\n for word in tweet:\n wordlist.append(_vocab[word].index)\n _out.append(wordlist)\n return _out\n\n\ntokens_train = tokenize(tweets_train, embedding.wv.vocab)\ntokens_test = tokenize(tweets_test, embedding.wv.vocab)", "_____no_output_____" ] ], [ [ "Creating modules and defining the model.", "_____no_output_____" ] ], [ [ "class SequenceCollapseAttention(tf.Module):\n '''\n Collapses a sequence of arbitrary length into num_out_entries entries from \n the sequence according to dot-product attention. So, a variable length \n sequence is reduced to a sequence of a fixed, known length.\n '''\n\n def __init__(self,\n num_out_entries,\n initializer=tf.keras.initializers.HeNormal,\n name=None):\n super().__init__(name=name)\n self.is_built = False\n self.num_out_entries = num_out_entries\n self.initializer = initializer()\n\n def __call__(self, keys, query):\n if not self.is_built:\n self.weights = tf.Variable(\n self.initializer([query.shape[-1], self.num_out_entries]),\n trainable=True)\n self.biases = tf.Variable(tf.zeros([self.num_out_entries]),\n trainable=True)\n self.is_built = True\n\n scores = tf.linalg.matmul(query, self.weights) + self.biases\n scores = tf.transpose(scores, perm=(0, 2, 1))\n scores = tf.nn.softmax(scores)\n output = tf.linalg.matmul(scores, keys)\n return output\n\n\nclass WordEmbedding(tf.Module):\n '''\n Creates a word-embedding module from a provided embedding matrix.\n '''\n\n def __init__(self, embedding_matrix, trainable=False, name=None):\n super().__init__(name=name)\n self.embedding = tf.Variable(embedding_matrix, trainable=trainable)\n\n def __call__(self, x):\n return tf.nn.embedding_lookup(self.embedding, x)\n\n\ntestvar = None\n\n\nclass PositionalEncoding1D(tf.Module):\n '''\n Positional encoding as in the Attention Is All You Need paper. 
I hope.\n\n For experimentation, the weight by which the positional information is mixed\n into the input vectors is learned.\n '''\n\n def __init__(self, axis=-2, base=1000, name=None):\n super().__init__(name=name)\n self.axis = axis\n self.base = base\n self.encoding_weight = tf.Variable([2.0], trainable=True)\n testvar = self.encoding_weight\n\n def __call__(self, x):\n sequence_length = tf.shape(x)[self.axis]\n d = tf.shape(x)[-1]\n T = tf.shape(x)[self.axis]\n pos_enc = tf.range(0, d / 2, delta=1, dtype=tf.float32)\n pos_enc = (-2.0 / tf.cast(d, dtype=tf.float32)) * pos_enc\n base = tf.cast(tf.fill(tf.shape(pos_enc), self.base), dtype=tf.float32)\n pos_enc = tf.math.pow(base, pos_enc)\n pos_enc = tf.expand_dims(pos_enc, axis=0)\n pos_enc = tf.tile(pos_enc, [T, 1])\n t = tf.expand_dims(tf.range(1, T+1, delta=1, dtype=tf.float32), axis=-1)\n pos_enc = tf.math.multiply(pos_enc, t)\n pos_enc_sin = tf.expand_dims(tf.math.sin(pos_enc), axis=-1)\n pos_enc_cos = tf.expand_dims(tf.math.cos(pos_enc), axis=-1)\n pos_enc = tf.concat((pos_enc_sin, pos_enc_cos), axis=-1)\n pos_enc = tf.reshape(pos_enc, [T, d])\n return x + (pos_enc * self.encoding_weight)\n\n\nclass MLP_Block(tf.Module):\n '''\n With batch normalization before the activations.\n A regular old multilayer perceptron, hidden shapes are defined by the\n \"shapes\" argument.\n '''\n\n def __init__(self,\n shapes,\n initializer=tf.keras.initializers.HeNormal,\n name=None,\n activation=tf.nn.swish,\n trainable_batch_norms=False):\n super().__init__(name=name)\n self.is_built = False\n self.shapes = shapes\n self.initializer = initializer()\n self.weights = [None] * len(shapes)\n self.biases = [None] * len(shapes)\n self.bnorms = [None] * len(shapes)\n self.activation = activation\n self.trainable_batch_norms = trainable_batch_norms\n\n def _build(self, x):\n for n in range(0, len(self.shapes)):\n in_shape = x.shape[-1] if n == 0 else self.shapes[n - 1]\n factor = 1 if self.activation != tf.nn.crelu or n == 0 else 2\n self.weights[n] = tf.Variable(\n self.initializer([in_shape * factor, self.shapes[n]]),\n trainable=True)\n self.biases[n] = tf.Variable(tf.zeros([self.shapes[n]]),\n trainable=True)\n self.bnorms[n] = tf.keras.layers.BatchNormalization(\n trainable=self.trainable_batch_norms)\n self.is_built = True\n\n def __call__(self, x, training=False):\n if not self.is_built:\n self._build(x)\n\n h = x\n for n in range(len(self.shapes)):\n h = tf.linalg.matmul(h, self.weights[n]) + self.biases[n]\n h = self.bnorms[n](h, training=training)\n h = self.activation(h)\n\n return h\n\n\nclass SyntheticGradient(tf.Module):\n '''\n An implementation of synthetic gradients. When added to a model, this\n module will intercept incoming gradients and replace them by learned,\n synthetic ones.\n\n If you encounter NANs, try setting the sg_output_scale parameter to a lower\n value, or increase the number of initial_epochs or epochs.\n\n When the model using this module does not learn, the generator might be too\n simple, the sg_output_scale might be too low, the learning rate of the\n generator might be too large or too low, or the number of epochs might be\n too large or too low.\n\n If the number of initial epochs is too large, the generator can get stuck\n in a local minimum and fail to learn.\n\n The relative_generator_hidden_shapes list defines the shapes of the hidden\n layers of the generator as a multiple of its input dimension. 
For an affine\n transormation, pass an empty list.\n '''\n\n def __init__(self,\n initializer=tf.keras.initializers.GlorotUniform,\n activation=tf.nn.tanh,\n relative_generator_hidden_shapes=[6, ],\n learning_rate=0.01,\n epochs=1,\n initial_epochs=16,\n sg_output_scale=1,\n name=None):\n super().__init__(name=name)\n self.is_built = False\n self.initializer = initializer\n self.activation = activation\n self.relative_generator_hidden_shapes = relative_generator_hidden_shapes\n self.initial_epochs = initial_epochs\n self.epochs = epochs\n self.sg_output_scale = sg_output_scale\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n def build(self, xy, dy):\n '''\n Builds the gradient generator on its first run, and trains on the first\n incoming batch of gradients for a number of epochs to avoid bad results\n (including NANs) in the first few batches where the generator still\n outputs bad approximations. To further reduce NANs due to bad gradients,\n a fixed scaler for the outputs of the generator is computed based on the\n first batch.\n '''\n if self.is_built:\n return\n\n if len(self.relative_generator_hidden_shapes) > 0:\n generator_shape = [\n xy.shape[-1] * mult\n for mult in\n self.relative_generator_hidden_shapes]\n self.generator_hidden = MLP_Block(\n generator_shape,\n activation=self.activation,\n initializer=self.initializer,\n trainable_batch_norms=False)\n else:\n self.generator_hidden = tf.identity\n\n self.generator_out = MLP_Block(\n [dy.shape[-1]],\n activation=tf.identity,\n initializer=self.initializer,\n trainable_batch_norms=False)\n\n # calculate a static scaler for the generated gradients to avoid\n # overflows due to too large gradients\n self.generator_out_scale = 1.0\n x = self.generate_gradient(xy) / self.sg_output_scale\n mag_y = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(dy), axis=-1))\n mag_x = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(x), axis=-1))\n mag_scale = tf.math.reduce_mean(mag_y / mag_x,\n axis=tf.range(0, tf.rank(dy) - 1))\n self.generator_out_scale = tf.Variable(mag_scale, trainable=False)\n\n # train for a number of epochs on the first run, by default 16, to avoid\n # bad results in the beginning of training.\n for i in range(self.initial_epochs):\n self.train_generator(xy, dy)\n\n self.is_built = True\n\n def generate_gradient(self, x):\n '''\n Just an MLP, or an affine transformation if the hidden shape in the \n constructor is set to be empty.\n '''\n x = self.generator_hidden(x)\n out = self.generator_out(x)\n out = out * self.generator_out_scale\n return out * self.sg_output_scale\n\n def train_generator(self, x, target):\n '''\n Gradient descend for the gradient generator. This is called every time a\n gradient comes in, although in theory (especially with deeper gradient\n generators) once the gradients are modeled sufficiently, it could be OK\n to stop training on incoming gradients, thus fully decoupling the lower\n parts of the network from the upper parts relative to this SG module.\n '''\n with tf.GradientTape() as tape:\n l2_loss = target - self.generate_gradient(x)\n l2_loss = tf.math.reduce_sum(tf.math.square(l2_loss), axis=-1)\n # l2_loss = tf.math.sqrt(l2_dist)\n grads = tape.gradient(l2_loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(grads, self.trainable_variables))\n\n @tf.custom_gradient\n def sg(self, x, y):\n '''\n In the forward pass it is essentially a no-op (identity). 
In the\n backwards pass it replaces the incoming gradient by a synthetic one.\n '''\n x = tf.identity(x)\n\n def grad(dy):\n # concat x and the label to be inputs for the generator:\n xy = self.concat_x_and_y(x, y)\n\n if not self.is_built:\n self.build(xy, dy)\n\n # train the generator on the incoming gradient:\n for i in range(self.epochs):\n self.train_generator(xy, dy)\n\n # return the gradient. The second return value is the gradient for y\n # which should be zero since we only need y (labels) to generate the\n # synthetic gradients\n dy = self.generate_gradient(xy)\n return dy, tf.zeros(tf.shape(y))\n\n return x, grad\n\n def __call__(self, x, y):\n return self.sg(x, y)\n\n def concat_x_and_y(self, x, y):\n '''\n Probably an overly complex yet incomplete solution to a rather small\n inconvenience.\n Inconvenience: The gradient generators take the output of the last \n module AND the target/labels of the network as inputs. But those two \n tensors can be of different shapes. The obvious solution would be to \n manually reshape the targets so they can be concatenated with the \n outputs of the past state. But because i wanted this SG module to be as \n \"plug-and-play\" as possible, i tried to attempt automatic reshaping.\n\n Should work for 1d->1d, and 1d-sequence -> 1d, possibly 1d seq->seq,\n unsure about the rest.\n '''\n # insert as many dims before the last dim of y to give it the same rank\n # as x\n amount = tf.math.maximum(tf.rank(x) - tf.rank(y), 0)\n new_shape = tf.concat((tf.shape(y)[:-1],\n tf.tile([1], [amount]),\n [tf.shape(y)[-1]]), axis=-1)\n y = tf.reshape(y, new_shape)\n\n # tile the added dims such that x and y can be concatenated\n # In order to tile only the added dims, i need to set the dimensions \n # with a length of 1 (except the last) to the length of the \n # corresponding dimensions in x, while setting the rest to 1.\n # This is waiting to break.\n mask = tf.cast(tf.math.less_equal(tf.shape(y),\n tf.constant([1])), dtype=tf.int32)\n # ignore the last dim\n mask = tf.concat([mask[:-1], tf.constant([0])], axis=-1)\n\n zeros_to_ones = tf.math.subtract(\n tf.ones(tf.shape(mask), dtype=tf.int32),\n mask)\n # has ones where there is a one in the shape, now the 1s are set to the\n # length in x\n mask = tf.math.multiply(mask, tf.shape(x))\n # add ones to all other dimensions to preserve their shape\n mask = tf.math.add(zeros_to_ones, mask)\n # tile\n y = tf.tile(y, mask)\n return tf.concat((x, y), axis=-1)\n\n\nclass FlattenL2D(tf.Module):\n \"Flattens the last two dimensions only\"\n\n def __init__(self, name=None):\n super().__init__(name=name)\n\n def __call__(self, x):\n new_shape = tf.concat(\n (tf.shape(x)[:-2], [(tf.shape(x)[-1]) * (tf.shape(x)[-2])]),\n axis=-1)\n return tf.reshape(x, new_shape)\n\n\ninitializer = tf.keras.initializers.HeNormal\n\n\nclass SentimentAnalysisWithAttention(tf.Module):\n def __init__(self, name=None):\n super().__init__(name=name)\n\n # Structure and the idea behind it:\n # 1: The input sequence is embedded and is positionally encoded.\n # 2.1: An MLP block ('query') computes scores for the following\n # attention layer for each entry in the sequence. 
Ie, it decides\n # which words are worth a closer look.\n # 2.2: An attention layer selects n positionally encoded word\n # embeddings from the input sequence based on the learned queries.\n # 3: The result is flattened into a tensor of known shape and a number\n # of dense layers compute the final classification.\n\n self.embedding = WordEmbedding(embedding.wv.vectors)\n self.batch_norm = tf.keras.layers.BatchNormalization(trainable=True)\n self.pos_enc = PositionalEncoding1D()\n self.query = MLP_Block([256, 128], initializer=initializer)\n self.attention = SequenceCollapseAttention(num_out_entries=9,\n initializer=initializer)\n self.flatten = FlattenL2D()\n self.dense = MLP_Block([512, 256, 128, 64],\n initializer=initializer,\n trainable_batch_norms=True)\n self.denseout = MLP_Block([1],\n initializer=initializer,\n activation=tf.nn.sigmoid,\n trainable_batch_norms=True)\n\n # Synthetic gradient modules for the various layers.\n self.sg_query = SyntheticGradient(relative_generator_hidden_shapes=[9])\n self.sg_attention = SyntheticGradient()\n self.sg_dense = SyntheticGradient()\n\n def __call__(self, x, y=tf.constant([]), training=False):\n x = self.embedding(x)\n x = self.pos_enc(x)\n x = self.batch_norm(x, training=training)\n q = self.query(x, training=training)\n # q = self.sg_query(q, y) # SG\n x = self.attention(x, q)\n x = self.flatten(x)\n x = self.sg_attention(x, y) # SG\n x = self.dense(x, training=training)\n x = self.sg_dense(x, y) # SG\n output = self.denseout(x, training=training)\n return output\n\n\nmodel = SentimentAnalysisWithAttention()", "_____no_output_____" ], [ "class BatchGenerator(tf.keras.utils.Sequence):\n '''\n Creates batches from the given data, specifically it pads the sequences\n per batch only as much as necessary to make every sequence within a batch \n be of the same length.\n '''\n\n def __init__(self, inputs, labels, padding, batch_size):\n self.batch_size = batch_size\n self.labels = labels\n self.inputs = inputs\n self.padding = padding\n # self.on_epoch_end()\n\n def __len__(self):\n return int(np.floor(len(self.inputs) / self.batch_size))\n\n def __getitem__(self, index):\n max_length = 0\n start_index = index * self.batch_size\n end_index = start_index + self.batch_size\n for i in range(start_index, end_index):\n l = len(self.inputs[i])\n if l > max_length:\n max_length = l\n\n out_x = np.empty([self.batch_size, max_length], dtype='int32')\n out_y = np.empty([self.batch_size, 1], dtype='float32')\n for i in range(self.batch_size):\n out_y[i] = self.labels[start_index + i]\n tweet = self.inputs[start_index + i]\n l = len(tweet)\n l = min(l, max_length)\n for j in range(0, l):\n out_x[i][j] = tweet[j]\n for j in range(l, max_length):\n out_x[i][j] = self.padding\n return out_x, out_y", "_____no_output_____" ] ], [ [ "Training the model", "_____no_output_____" ] ], [ [ "def train_validation_loop(model_caller, data_generator, epochs, metrics=[]):\n batch_time = -1\n for epoch in range(epochs):\n start_e = time.time()\n start_p = time.time()\n num_batches = len(data_generator)\n predictions = [None] * num_batches\n for b in range(num_batches):\n start_b = time.time()\n\n x_batch, y_batch = data_generator[b]\n predictions[b] = model_caller(x_batch, y_batch, metrics=metrics)\n\n # progress output\n elapsed_t = time.time() - start_b\n if batch_time != -1:\n batch_time = 0.05 * elapsed_t + 0.95 * batch_time\n else:\n batch_time = elapsed_t\n if int(time.time() - start_p) >= 1 or b == (num_batches - 1):\n start_p = time.time()\n eta = int((num_batches - 
b) * batch_time)\n ela = int(time.time() - start_e)\n out_string = \"\\rEpoch %d/%d,\\tbatch %d/%d,\\telapsed: %d/%ds\" % (\n (epoch + 1), epochs, b + 1, num_batches, ela, ela + eta)\n for metric in metrics:\n out_string += \"\\t %s: %f\" % (metric.name,\n float(metric.result()))\n out_length = len(out_string)\n sys.stdout.write(out_string)\n sys.stdout.flush()\n for metric in metrics:\n metric.reset_states()\n sys.stdout.write(\"\\n\")\n return np.concatenate(predictions)\n\n\ndef trainer(model, loss, optimizer):\n @tf.function(experimental_relax_shapes=True)\n def training_step(x_batch,\n y_batch,\n model=model,\n loss=loss,\n optimizer=optimizer,\n metrics=[]):\n with tf.GradientTape() as tape:\n predictions = model(x_batch, y_batch, training=True)\n losses = loss(y_batch, predictions)\n grads = tape.gradient(losses, model.trainable_variables)\n\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n for metric in metrics:\n metric.update_state(y_batch, predictions)\n return predictions\n\n return training_step\n\n\nloss = tf.keras.losses.BinaryCrossentropy(from_logits=True)\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)\nmetrics = (tf.keras.metrics.BinaryCrossentropy(from_logits=True),\n tf.keras.metrics.BinaryAccuracy())\nbatch_size = 512\nepochs = 4\n\npadding = embedding.wv.vocab[\"[&END&]\"].index\ntraining_generator = BatchGenerator(tokens_train,\n y_train,\n padding,\n batch_size=batch_size)\n\ntrain_validation_loop(trainer(model, loss, optimizer),\n training_generator,\n epochs,\n metrics)", "_____no_output_____" ] ], [ [ "Testing it on validation data", "_____no_output_____" ] ], [ [ "def validator(model):\n @tf.function(experimental_relax_shapes=True)\n def validation_step(x_batch, y_batch, model=model, metrics=[]):\n predictions = model(x_batch, training=False)\n for metric in metrics:\n metric.update_state(y_batch, predictions)\n return predictions\n\n return validation_step\n\n\ntesting_generator = BatchGenerator(tokens_test,\n y_test,\n padding,\n batch_size=batch_size)\n\npredictions = train_validation_loop(validator(model),\n testing_generator,\n 1,\n metrics)", "_____no_output_____" ] ], [ [ "Get some example results from the the test data.", "_____no_output_____" ] ], [ [ "most_evil_tweet=None\nmost_evil_evilness=1\nmost_cool_tweet=None\nmost_cool_coolness=1\nmost_angelic_tweet=None\nmost_angelic_angelicness=0\ny_pred = np.concatenate(predictions)\nfor i in range(0,len(y_pred)):\n judgement = y_pred[i]\n polarity = abs(judgement-0.5)*2\n\n if judgement>=most_angelic_angelicness:\n most_angelic_angelicness = judgement\n most_angelic_tweet = x_test[i]\n if judgement<=most_evil_evilness:\n most_evil_evilness = judgement\n most_evil_tweet = x_test[i]\n if polarity<=most_cool_coolness:\n most_cool_coolness = polarity\n most_cool_tweet = x_test[i]\n\n\nprint(\"The evilest tweet known to humankind:\\n\\t\", most_evil_tweet)\nprint(\"Evilness: \", 1.0-most_evil_evilness)\nprint(\"\\n\")\nprint(\"The most angelic tweet any mortal has ever laid eyes upon:\\n\\t\",\n most_angelic_tweet)\nprint(\"Angelicness: \", most_angelic_angelicness)\nprint(\"\\n\")\nprint(\"This tweet is too cool for you, don't read:\\n\\t\", most_cool_tweet)\nprint(\"Coolness: \", 1.0-most_cool_coolness)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06e75a112ab21cef1fe5f7a8c877186fcc8ca32
46,212
ipynb
Jupyter Notebook
Fig.3.11_par_methods_overview/Par_method_comparision_overview.ipynb
pat-schmitt/supplementary_material_master_thesis
caf8b7f341f2275c1efb912ac1f13c2fd31d6eaf
[ "BSD-3-Clause" ]
null
null
null
Fig.3.11_par_methods_overview/Par_method_comparision_overview.ipynb
pat-schmitt/supplementary_material_master_thesis
caf8b7f341f2275c1efb912ac1f13c2fd31d6eaf
[ "BSD-3-Clause" ]
null
null
null
Fig.3.11_par_methods_overview/Par_method_comparision_overview.ipynb
pat-schmitt/supplementary_material_master_thesis
caf8b7f341f2275c1efb912ac1f13c2fd31d6eaf
[ "BSD-3-Clause" ]
null
null
null
43.108209
122
0.46568
[ [ [ "# Imports", "_____no_output_____" ] ], [ [ "import numpy as np\nimport xarray as xr\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.patches import Polygon\nfrom matplotlib import colors as mat_colors\nimport mpl_toolkits.axisartist as axisartist\nfrom mpl_toolkits.axes_grid1 import Size, Divider", "_____no_output_____" ] ], [ [ "# Define Functions", "_____no_output_____" ], [ "## Performance measurements", "_____no_output_____" ] ], [ [ "def BIAS(a1, a2):\n return (a1 - a2).mean().item()\n\n\ndef RMSE(a1, a2):\n return np.sqrt(((a1 - a2)**2).mean()).item()\n\n\ndef DIFF(a1, a2):\n return np.max(np.abs(a1 - a2)).item()", "_____no_output_____" ] ], [ [ "## help function for heatmap axis", "_____no_output_____" ] ], [ [ "def setup_axes(fig, rect):\n ax = axisartist.Subplot(fig, rect)\n fig.add_subplot(ax)\n\n return ax", "_____no_output_____" ] ], [ [ "## heatmap", "_____no_output_____" ] ], [ [ "def heatmap(datasets, # first_dataset, second_dataset,\n opti_var,\n annotation=None,\n annotation_x_position=0,\n annotation_y_position=1,\n fig=None, ax=None,\n cmap='vlag',\n cmap_levels=None,\n grid_color='grey',\n grid_linewidth=1.5,\n presentation=False,\n labels_pad=-360,\n xlim=None, # here use it do define max Diff sfc_h\n nr_of_iterations=None):\n if not ax:\n ax = plt.gca()\n \n if not fig:\n fig = plt.gcf()\n\n if all(dataset is None for dataset in datasets):\n raise ValueError('All datasets are None!')\n\n # define variables for plotting\n guess_opti_var = []\n first_guess_diff = []\n true_opti_var = []\n BIAS_opti_var = []\n RMSE_opti_var = []\n DIFF_opti_var = []\n fct_opti_var = []\n times = []\n maxiters = []\n BIAS_sfc = []\n RMSE_sfc = []\n DIFF_sfc = []\n BIAS_w = []\n RMSE_w = []\n DIFF_w = []\n BIAS_fg = []\n RMSE_fg = []\n DIFF_fg = []\n BIAS_sfc_fg = []\n RMSE_sfc_fg = []\n DIFF_sfc_fg = []\n array_length = 0\n check_first_guess = None\n check_true_opti_var = None\n\n # create data and label variables\n for dataset in datasets:\n # check if the current dataset contains data or if the data was not available\n if dataset is None:\n guess_opti_var.append(None)\n first_guess_diff.append(None)\n true_opti_var.append(None)\n BIAS_opti_var.append(None)\n RMSE_opti_var.append(None)\n DIFF_opti_var.append(None)\n fct_opti_var.append(None)\n times.append(None)\n maxiters.append(None)\n BIAS_sfc.append(None)\n RMSE_sfc.append(None)\n DIFF_sfc.append(None)\n BIAS_w.append(None)\n RMSE_w.append(None)\n DIFF_w.append(None)\n elif type(dataset) != xr.core.dataset.Dataset: # if no minimisation possible\n guess_opti_var.append('no_minimisation')\n first_guess_diff.append(None)\n true_opti_var.append(None)\n BIAS_opti_var.append(None)\n RMSE_opti_var.append(None)\n DIFF_opti_var.append(None)\n fct_opti_var.append(None)\n times.append(None)\n maxiters.append(None)\n BIAS_sfc.append(None)\n RMSE_sfc.append(None)\n DIFF_sfc.append(None)\n BIAS_w.append(None)\n RMSE_w.append(None)\n DIFF_w.append(None)\n else:\n # find index corresponding to max time\n max_index = len(dataset['computing_time'].values) - 1\n\n if nr_of_iterations is not None:\n max_index = nr_of_iterations - 1\n elif xlim is not None:\n # calculate all max diff surface_h\n all_DIFF_sfc_h = np.array(\n [DIFF(dataset.true_surface_h.data,\n dataset.surface_h.data[i-1])\n for i in dataset.coords['nr_of_iteration'].data])\n # only consider as many points until max DIFF is smaller xlim\n if all_DIFF_sfc_h[-1] < xlim:\n 
max_index = np.argmax(all_DIFF_sfc_h < xlim)\n\n if opti_var == 'bed_h':\n guess_opti_var.append((dataset.guessed_bed_h[max_index] - dataset.true_bed_h).values)\n first_guess_diff.append((dataset.first_guessed_bed_h - dataset.true_bed_h).values)\n true_opti_var.append(dataset.true_bed_h.values)\n BIAS_opti_var.append(BIAS(dataset.guessed_bed_h[max_index], dataset.true_bed_h))\n RMSE_opti_var.append(RMSE(dataset.guessed_bed_h[max_index], dataset.true_bed_h))\n DIFF_opti_var.append(DIFF(dataset.guessed_bed_h[max_index], dataset.true_bed_h))\n if check_first_guess is None:\n BIAS_fg = BIAS(dataset.first_guessed_bed_h, dataset.true_bed_h)\n RMSE_fg = RMSE(dataset.first_guessed_bed_h, dataset.true_bed_h)\n DIFF_fg = DIFF(dataset.first_guessed_bed_h, dataset.true_bed_h)\n elif opti_var == 'bed_shape':\n guess_opti_var.append((dataset.guessed_bed_shape[-1] - dataset.true_bed_shape).values)\n first_guess_diff.append((dataset.first_guessed_bed_shape - dataset.true_bed_shape).values)\n true_opti_var.append(dataset.true_bed_shape.values)\n BIAS_opti_var.append(BIAS(dataset.guessed_bed_shape[max_index], dataset.true_bed_shape))\n RMSE_opti_var.append(RMSE(dataset.guessed_bed_shape[max_index], dataset.true_bed_shape))\n DIFF_opti_var.append(DIFF(dataset.guessed_bed_shape[max_index], dataset.true_bed_shape))\n if check_first_guess is None:\n BIAS_fg = BIAS(dataset.first_guessed_bed_shape, dataset.true_bed_shape)\n RMSE_fg = RMSE(dataset.first_guessed_bed_shape, dataset.true_bed_shape)\n DIFF_fg = DIFF(dataset.first_guessed_bed_shape, dataset.true_bed_shape)\n elif opti_var == 'w0':\n guess_opti_var.append((dataset.guessed_w0[-1] - dataset.true_w0).values)\n first_guess_diff.append((dataset.first_guessed_w0 - dataset.true_w0).values)\n true_opti_var.append(dataset.true_w0.values)\n BIAS_opti_var.append(BIAS(dataset.guessed_w0[max_index], dataset.true_w0))\n RMSE_opti_var.append(RMSE(dataset.guessed_w0[max_index], dataset.true_w0))\n DIFF_opti_var.append(DIFF(dataset.guessed_w0[max_index], dataset.true_w0))\n if check_first_guess is None:\n BIAS_fg = BIAS(dataset.first_guessed_w0, dataset.true_w0)\n RMSE_fg = RMSE(dataset.first_guessed_w0, dataset.true_w0)\n DIFF_fg = DIFF(dataset.first_guessed_w0, dataset.true_w0)\n else:\n raise ValueError('Unknown opti var!')\n \n fct_opti_var.append(dataset.function_calls[max_index].values)\n times.append(dataset.computing_time[max_index].values)\n maxiters.append(dataset.attrs['maxiter_reached'])\n BIAS_sfc.append(BIAS(dataset.surface_h[max_index], dataset.true_surface_h))\n RMSE_sfc.append(RMSE(dataset.surface_h[max_index], dataset.true_surface_h))\n DIFF_sfc.append(DIFF(dataset.surface_h[max_index], dataset.true_surface_h))\n BIAS_w.append(BIAS(dataset.widths[max_index], dataset.true_widths))\n RMSE_w.append(RMSE(dataset.widths[max_index], dataset.true_widths))\n DIFF_w.append(DIFF(dataset.widths[max_index], dataset.true_widths))\n\n # determine array length for empty lines\n if array_length == 0:\n array_length = dataset.points_with_ice[-1].values + 1\n # check that the arrays have the same number of points with ice\n elif array_length != dataset.points_with_ice[-1].values + 1:\n raise ValueError('Not the same lentgth of points with ice!!!')\n\n # check if all experiments start with the same true values and first guess\n # in the first round save values\n if check_first_guess is None:\n check_first_guess = first_guess_diff[-1]\n check_true_opti_var = true_opti_var[-1]\n \n # not implemented yet\n BIAS_sfc_fg = BIAS(dataset.first_guess_surface_h, 
dataset.true_surface_h)\n RMSE_sfc_fg = RMSE(dataset.first_guess_surface_h, dataset.true_surface_h)\n DIFF_sfc_fg = DIFF(dataset.first_guess_surface_h, dataset.true_surface_h)\n BIAS_w_fg = BIAS(dataset.first_guess_widths, dataset.true_widths)\n RMSE_w_fg = RMSE(dataset.first_guess_widths, dataset.true_widths)\n DIFF_w_fg = DIFF(dataset.first_guess_widths, dataset.true_widths)\n\n # after first round compare all values to first ones to make sure comparing the same start conditions\n else:\n if np.sum(check_true_opti_var - true_opti_var[-1]) != 0:\n raise ValueError('Not the same true control variable!!!')\n if np.sum(check_first_guess - first_guess_diff[-1]) != 0:\n raise ValueError('Not the same first guess!!!')\n\n # create variables for ploting (data and y label)\n data = []\n y_labels = []\n\n # first add heading\n data.append(np.empty((array_length)) * np.nan)\n if not presentation:\n if opti_var == 'bed_h':\n y_labels.append(r' RMSE_b, DIFF_b, RMSE_s, DIFF_s, fct, $T_{cpu}$')\n elif opti_var in ['bed_shape', 'w0']:\n y_labels.append(r' RMSE_Ps, DIFF_Ps, RMSE_w, DIFF_w, fct, $T_{cpu}$')\n else:\n raise ValueError('Unknown opti_var !')\n y_label_variable_format = '{:7.2f}, {: 7.2f}, {:7.2f}, {:7.2f}'\n else:\n if opti_var == 'bed_h':\n y_labels.append(' DIFF_b, fct, t')\n elif opti_var in ['bed_shape', 'w0']:\n y_labels.append(' DIFF DIFF_w fct time')\n else:\n raise ValueError('Unknown opti_var !')\n y_label_variable_format = '{: 6.2f}' #', {:6.2f}'\n\n if not presentation:\n # add first guess\n data.append(check_first_guess)\n if opti_var == 'bed_h':\n y_labels.append(('fg:' + y_label_variable_format).format(RMSE_fg, DIFF_fg,\n RMSE_sfc_fg, DIFF_sfc_fg))\n elif opti_var in ['bed_shape', 'w0']:\n y_labels.append(('fg:' + y_label_variable_format).format(RMSE_fg, DIFF_fg,\n RMSE_w_fg, DIFF_w_fg))\n else:\n raise ValueError('Unknown opti_var !')\n else:\n # add first guess\n data.append(check_first_guess)\n if opti_var == 'bed_h':\n y_labels.append(('fg:' + y_label_variable_format).format(DIFF_fg))\n elif opti_var in ['bed_shape', 'w0']:\n y_labels.append(('fg:' + y_label_variable_format).format(DIFF_fg, DIFF_w_fg))\n else:\n raise ValueError('Unknown opti_var !')\n \n # add two format placeholders for fct_calls and time\n y_label_variable_format += ', {:4d}, {:4.0f}s'\n\n # add all other data with empty line for None datasets\n for i, guess in enumerate(guess_opti_var):\n if guess is None:\n data.append(np.empty((array_length)) * np.nan)\n if i < 9:\n y_labels.append((' ' + chr(65+i) + ': NO DATAFILE FOUND'))\n else:\n y_labels.append((' ' + chr(65+i) + ': NO DATAFILE FOUND'))\n elif type(guess) is str:\n data.append(np.empty((array_length)) * np.nan)\n if i < 9:\n y_labels.append((' ' + chr(65+i) + ': NO Minimisation Possible'))\n else:\n y_labels.append((' ' + chr(65+i) + ': NO Minimisation Possible'))\n else:\n data.append(guess)\n if i < 9:\n y_label_text = (' ' + chr(65+i) + ':' + y_label_variable_format)\n else:\n y_label_text = (' ' + chr(65+i) + ':' + y_label_variable_format)\n \n if maxiters[i] == 'yes':\n y_label_text += '+'\n\n if opti_var == 'bed_h':\n if not presentation:\n y_labels.append(y_label_text.format(RMSE_opti_var[i],\n DIFF_opti_var[i],\n RMSE_sfc[i],\n DIFF_sfc[i],\n fct_opti_var[i],\n times[i]))\n else:\n y_labels.append(y_label_text.format(DIFF_opti_var[i],\n fct_opti_var[i],\n times[i]))\n elif opti_var in ['bed_shape', 'w0']:\n if not presentation:\n y_labels.append(y_label_text.format(RMSE_opti_var[i],\n DIFF_opti_var[i],\n RMSE_w[i],\n DIFF_w[i],\n 
fct_opti_var[i],\n times[i]))\n else:\n y_labels.append(y_label_text.format(DIFF_opti_var[i],\n DIFF_w[i],\n fct_opti_var[i],\n times[i]))\n else:\n raise ValueError('Unknown opti_var !')\n\n # make data a numpy array\n data = np.array(data)\n\n # choose colormap\n if not cmap_levels:\n color_nr = 100\n if opti_var == 'bed_h':\n cmap_limit = np.max(np.abs(check_first_guess))\n #cmap_limit = np.max(np.array([np.abs(np.floor(np.nanmin(np.array(data)))),\n # np.abs(np.ceil(np.nanmax(np.array(data))))]))\n elif opti_var in ['bed_shape', 'w0']:\n cmap_limit = np.max(np.abs(check_first_guess))\n #cmap_limit = np.max(np.array([np.abs(np.floor(np.nanmin(np.array(data)) * 10)),\n # np.abs(np.ceil(np.nanmax(np.array(data)) * 10))])) / 10\n else:\n raise ValueError('Unknown opti var!!')\n #if (np.min(data) < 0) & (np.max(data) > 0):\n cmap_levels = np.linspace(-cmap_limit, cmap_limit, color_nr, endpoint=True)\n #elif (np.min(data) < 0) & (np.max(data) =< 0):\n # cmap_levels = np.linspace(-cmap_limit, 0, color_nr, endpoint=True)\n #elif (np.min(data) >= 0) & (np.max(data) > 0)\n else:\n color_nr = len(cmap_levels) - 1\n \n rel_color_steps = np.arange(color_nr)/color_nr\n if cmap == 'rainbow':\n colors = cm.rainbow(rel_color_steps)\n elif cmap == 'vlag':\n colors = sns.color_palette('vlag', color_nr)\n elif cmap == 'icefire':\n colors = sns.color_palette('icefire', color_nr)\n elif cmap == 'Spectral':\n colors = sns.color_palette('Spectral_r', color_nr)\n \n cmap = LinearSegmentedColormap.from_list('custom', colors, N=color_nr)\n cmap.set_bad(color='white')\n norm = mat_colors.BoundaryNorm(cmap_levels, cmap.N)\n\n # plot heatmap\n im = plt.imshow(data, aspect='auto', interpolation='none', cmap=cmap, norm=norm, alpha=1.)\n \n # Turn spines and ticks off and create white frame.\n for key, spine in ax.axis.items():\n spine.major_ticks.set_visible(False)\n spine.minor_ticks.set_visible(False)\n spine.line.set_visible(False)\n # spine.line.set_color(grid_color)\n # spine.line.set_linewidth(0) #grid_linewidth)\n \n # set y ticks\n ax.set_yticks(np.arange(data.shape[0]))\n \n ax.set_yticklabels(y_labels)\n #for tick in ax.get_yticklabels():\n # tick.set_fontname(\"Arial\")\n\n # align yticklabels left\n ax.axis[\"left\"].major_ticklabels.set_ha(\"left\")\n \n # set pad to put labels over heatmap\n ax.axis[\"left\"].major_ticklabels.set_pad(labels_pad)\n\n # set y minor grid\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", axis='y', color=grid_color, linestyle='-', linewidth=grid_linewidth)\n \n # set x ticklabels off\n ax.set_xticklabels([])\n \n # create colorbar\n cax = ax.inset_axes([1.01, 0.1, 0.03, 0.8]) \n #cax = fig.add_axes([0.5, 0, 0.01, 1])\n cbar = fig.colorbar(im, cax=cax, boundaries=cmap_levels, spacing='proportional',)\n cbar.set_ticks([np.min(cmap_levels),0,np.max(cmap_levels)])\n if opti_var == 'bed_h':\n cbar.set_ticklabels(['{:d}'.format(int(-cmap_limit)), '0' ,'{:d}'.format(int(cmap_limit))])\n elif opti_var == 'bed_shape':\n cbar.set_ticklabels(['{:.1f}'.format(-cmap_limit), '0' ,'{:.1f}'.format(cmap_limit)])\n elif opti_var == 'w0':\n cbar.set_ticklabels(['{:d}'.format(int(-cmap_limit)), '0' ,'{:d}'.format(int(cmap_limit))])\n else:\n raise ValueError('Unknown opti var!!')\n #cbar.ax.set_ylabel(cbarlabel,)\n \n # set title\n #ax.set_title(title)\n \n if annotation is not None:\n # include text\n ax.text(annotation_x_position, annotation_y_position, \n annotation,\n horizontalalignment='left',\n verticalalignment='center',\n transform=ax.transAxes)\n \n 
return im", "_____no_output_____" ] ], [ [ "## legend plot", "_____no_output_____" ] ], [ [ "def add_legend2(ax,\n title,\n fontsize,\n lw,\n ms,\n labels):\n \n ax.plot([],\n [],\n '-',\n lw=lw,\n ms=ms,\n c='none',\n label=labels[0])\n \n # plot for first gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c=color_1,\n label=labels[1])\n \n # plot for second gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c=color_2,\n zorder=5,\n label=labels[2])\n \n # plot for third gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c=color_3,\n zorder=5,\n label=labels[3])\n # plot for fourth gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c=color_4,\n zorder=5,\n label=labels[4])\n\n \n l = ax.legend(loc='center', fontsize=fontsize, title=title)\n plt.setp(l.get_title(), multialignment='center')\n ax.axis('off')", "_____no_output_____" ], [ "def add_legend(ax,\n #title,\n fontsize,\n lw,\n ms,\n labels):\n \n ax.plot([],\n [],\n '-',\n lw=lw,\n ms=ms,\n c='none',\n label=labels[0])\n \n # plot for first gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c='none',\n label=labels[1])\n \n # plot for second gradient scaling\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c='none',\n zorder=5,\n label=labels[2])\n\n ax.plot([],\n [],\n '.-',\n lw=lw,\n ms=ms,\n c='none',\n zorder=5,\n label=labels[3])\n\n \n leg = ax.legend(loc='center',\n fontsize=fontsize,\n #title=title,\n handlelength=0,\n handletextpad=0,\n fancybox=True)\n for item in leg.legendHandles:\n item.set_visible(False)\n ax.axis('off')", "_____no_output_____" ] ], [ [ "## performance plot", "_____no_output_____" ] ], [ [ "def performance_plot(ax,\n datasets,\n fig=None,\n # 'bed_h RMSE', 'bed_h Diff', 'bed_h Bias',\n # 'bed_shape RMSE', 'bed_shape Diff', 'bed_shape Bias',\n # 'w0 RMSE', 'w0 Diff', 'w0 Bias',\n # 'sfc_h RMSE', 'sfc_h Diff', 'sfc_h Bias',\n # 'widths RMSE', 'widths Diff', 'widths Bias'\n performance_measurement='bed_h RMSE',\n xlim=5,\n y_label='',\n annotation=None,\n annotation_x_position=-0.2,\n annotation_y_position=1,\n lw=2,\n fontsize=25,\n ms=10,\n nr_of_iterations=None,\n ax_xlim=None\n ):\n if not fig:\n fig = plt.gcf()\n\n measure = performance_measurement\n all_x = []\n all_y = []\n\n for dataset in datasets:\n if dataset is not None:\n max_index = len(dataset['computing_time'].values) - 1\n if nr_of_iterations is not None:\n max_index = nr_of_iterations - 1\n elif xlim is not None:\n # calculate all max diff surface_h\n all_DIFF_sfc_h = np.array(\n [DIFF(dataset.true_surface_h.data,\n dataset.surface_h.data[i-1])\n for i in dataset.coords['nr_of_iteration'].data])\n # only consider as many points until max DIFF is smaller xlim\n if all_DIFF_sfc_h[-1] < xlim:\n max_index = np.argmax(all_DIFF_sfc_h < xlim)\n\n # include time 0 for first guess\n tmp_x = [0]\n\n # add first guess values \n if measure == 'bed_h RMSE':\n tmp_y = [RMSE(dataset['first_guessed_bed_h'], dataset['true_bed_h'])]\n elif measure == 'bed_h Diff':\n tmp_y = [DIFF(dataset['first_guessed_bed_h'], dataset['true_bed_h'])]\n elif measure == 'bed_h Bias':\n tmp_y = [BIAS(dataset['first_guessed_bed_h'], dataset['true_bed_h'])]\n\n elif measure == 'bed_shape RMSE':\n tmp_y = [RMSE(dataset['first_guessed_bed_shape'], dataset['true_bed_shape'])]\n elif measure == 'bed_shape Diff':\n tmp_y = [DIFF(dataset['first_guessed_bed_shape'], dataset['true_bed_shape'])]\n elif measure == 'bed_shape Bias':\n tmp_y = [BIAS(dataset['first_guessed_bed_shape'], dataset['true_bed_shape'])]\n\n 
elif measure == 'w0 RMSE':\n tmp_y = [RMSE(dataset['first_guessed_w0'], dataset['true_w0'])]\n elif measure == 'w0 Diff':\n tmp_y = [DIFF(dataset['first_guessed_w0'], dataset['true_w0'])]\n elif measure == 'w0 Bias':\n tmp_y = [BIAS(dataset['first_guessed_w0'], dataset['true_w0'])]\n\n elif measure == 'sfc_h RMSE':\n tmp_y = [RMSE(dataset['first_guess_surface_h'], dataset['true_surface_h'])]\n elif measure == 'sfc_h Diff':\n tmp_y = [DIFF(dataset['first_guess_surface_h'], dataset['true_surface_h'])]\n elif measure == 'sfc_h Bias':\n tmp_y = [BIAS(dataset['first_guess_surface_h'], dataset['true_surface_h'])]\n\n elif measure == 'widths RMSE':\n tmp_y = [RMSE(dataset['first_guess_widths'], dataset['true_widths'])]\n elif measure == 'widths Diff':\n tmp_y = [DIFF(dataset['first_guess_widths'], dataset['true_widths'])]\n elif measure == 'widths Bias':\n tmp_y = [BIAS(dataset['first_guess_widths'], dataset['true_widths'])]\n\n else:\n raise ValueError('Unknown performance measurement!')\n\n for i in dataset.coords['nr_of_iteration'].values[:max_index + 1] - 1:\n tmp_x.append(dataset['computing_time'][i])\n if measure == 'bed_h RMSE':\n tmp_y.append(RMSE(dataset['guessed_bed_h'][i], dataset['true_bed_h']))\n elif measure == 'bed_h Diff':\n tmp_y.append(DIFF(dataset['guessed_bed_h'][i], dataset['true_bed_h']))\n elif measure == 'bed_h Bias':\n tmp_y.append(BIAS(dataset['guessed_bed_h'][i], dataset['true_bed_h']))\n\n elif measure == 'bed_shape RMSE':\n tmp_y.append(RMSE(dataset['guessed_bed_shape'][i], dataset['true_bed_shape']))\n elif measure == 'bed_shape Diff':\n tmp_y.append(DIFF(dataset['guessed_bed_shape'][i], dataset['true_bed_shape']))\n elif measure == 'bed_shape Bias':\n tmp_y.append(BIAS(dataset['guessed_bed_shape'][i], dataset['true_bed_shape']))\n\n elif measure == 'w0 RMSE':\n tmp_y.append(RMSE(dataset['guessed_w0'][i], dataset['true_w0']))\n elif measure == 'w0 Diff':\n tmp_y.append(DIFF(dataset['guessed_w0'][i], dataset['true_w0']))\n elif measure == 'w0 Bias':\n tmp_y.append(BIAS(dataset['guessed_w0'][i], dataset['true_w0']))\n\n elif measure == 'sfc_h RMSE':\n tmp_y.append(RMSE(dataset['surface_h'][i], dataset['true_surface_h']))\n elif measure == 'sfc_h Diff':\n tmp_y.append(DIFF(dataset['surface_h'][i], dataset['true_surface_h']))\n elif measure == 'sfc_h Bias':\n tmp_y.append(BIAS(dataset['surface_h'][i], dataset['true_surface_h']))\n\n elif measure == 'widths RMSE':\n tmp_y.append(RMSE(dataset['widths'][i], dataset['true_widths']))\n elif measure == 'widths Diff':\n tmp_y.append(DIFF(dataset['widths'][i], dataset['true_widths']))\n elif measure == 'widths Bias':\n tmp_y.append(BIAS(dataset['widths'][i], dataset['true_widths']))\n\n else:\n raise ValueError('Unknown performance measurement!')\n else:\n tmp_x = []\n tmp_y = []\n\n all_x.append(tmp_x)\n all_y.append(tmp_y)\n\n colors = [color_1, color_2, color_3, color_4]\n for i, (x, y) in enumerate(zip(all_x, all_y)):\n ax.plot(x, y,\n '.-',\n lw=lw,\n ms=ms,\n c=colors[i])\n\n #ax.legend((),(),title=measure, loc='best')\n #ax.axvline(60, alpha=0.5, c='gray', ls='--')\n # ax.axvline(20, alpha=0.5, c='gray', ls='--')\n #if xlim is not None:\n # ax.set_xlim(xlim)\n ax.tick_params(axis='both', colors=axis_color, width=lw)\n ax.spines['bottom'].set_color(axis_color)\n ax.spines['bottom'].set_linewidth(lw)\n ax.spines['left'].set_color(axis_color)\n ax.spines['left'].set_linewidth(lw)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n \n ax.set_xlabel(r'$T_{cpu}$', fontsize=fontsize, c=axis_color)\n 
ax.set_ylabel(y_label, fontsize=fontsize, c=axis_color)\n \n if ax_xlim is not None:\n ax.set_xlim(ax_xlim)\n \n if annotation is not None:\n ax.text(annotation_x_position, annotation_y_position,\n annotation,\n horizontalalignment='left',\n verticalalignment='center',\n transform = ax.transAxes)", "_____no_output_____" ] ], [ [ "# Define Colors", "_____no_output_____" ] ], [ [ "colors = sns.color_palette(\"colorblind\")\ncolors", "_____no_output_____" ], [ "axis_color = list(colors[7]) + [1.]\ncolor_1 = list(colors[3]) + [1.]\ncolor_2 = list(colors[0]) + [1.]\ncolor_3 = list(colors[4]) + [1.]\ncolor_4 = list(colors[2]) + [1.]\nglacier_color = list(colors[9]) + [.5]", "_____no_output_____" ] ], [ [ "# Import Data", "_____no_output_____" ] ], [ [ "input_folder = 'plot_data/'\nfilename_scale_1 = 'par_clif_cons_ret_bed_h_and_bed_shape_at_once_scal1reg11.nc'\nfilename_scale_1e4 = 'par_clif_cons_ret_bed_h_and_bed_shape_at_once_scal1e4reg11.nc'\nfilename_separated = 'par_clif_cons_ret_bed_h_and_bed_shape_separatedreg11.nc'\nfilename_calculated = 'par_clif_cons_ret_bed_h_and_bed_shape_calculatedreg11.nc'\n\ndatasets = []\n\nfor filename in [filename_scale_1, filename_separated, filename_scale_1e4, filename_calculated]:\n with xr.open_dataset(input_folder + filename) as ds:\n datasets.append(ds)", "_____no_output_____" ] ], [ [ "# Create figure with performance", "_____no_output_____" ] ], [ [ "#dataset,\nnr_of_iterations = None\nfacecolor = 'white'\nlabels_pad = -700\ncmap = 'Spectral'\nfontsize = 25\nlw=2\nms=10\nannotation_x_position_spatial = -0.15\nannotation_y_position_spatial = 0.9\nannotation_x_position_performance = -0.14\nannotation_y_position_performance = 1.05\n#index_start_first_profil_row = 0\n#index_end_first_profil_row = 6\n#index_start_second_profil_row = 65\n#index_end_second_profil_row = 71\nsave_file = True\nfilename = 'par_methods_overview.pdf'\n\n#plt.rcParams['font.family'] = 'monospace'\nmpl.rcParams.update({'font.size': fontsize})\n\nfig = plt.figure(figsize=(1,1), facecolor='white')\n\n# define grid\ntotal_width = 10\n# define fixed size of spatial subplot\nspatial_height = 2.5\nspatial_y_separation = 0.5\n# define fixed size for performance plot\nperformance_height = 2.5\nperformance_width = 8\nperformance_separation_y = 1\nseparation_y_performance_spatial = 0.5\n# define fixed size for legend\nlegend_height = 3.5\nseparation_x_legend_spatial = 0.5\n\n# fixed size in inch\n# along x axis x-index for locator\nhoriz = [Size.Fixed(total_width), # 0\n ]\n # y-index for locator\nvert = [Size.Fixed(performance_height), # 0 performance row 2\n Size.Fixed(separation_y_performance_spatial),\n Size.Fixed(performance_height), # 2 performance row 1\n Size.Fixed(separation_y_performance_spatial),\n Size.Fixed(spatial_height), # 4 spatial row 2\n Size.Fixed(spatial_y_separation), \n Size.Fixed(spatial_height), # 6 spatial row 1\n Size.Fixed(separation_x_legend_spatial), \n Size.Fixed(legend_height), # 8 legend\n ]\n\n# define indices for subplots for easier changes later\n# spatial heatmap\nspatial_nx = 0\nspatial_nx1 = 1\nspatial_ny_row_1 = 6\nspatial_ny_row_2 = 4\nspatial_annotation = ['(a)', '(b)']\n# performance\nperformance_nx = 0\nperformance_ny_row_1 = 2\nperformance_ny_row_2 = 0\n# legend\nlegend_nx = 0\nlegend_ny = 8\n\n# Position of the grid in the figure\nrect = (0., 0., 1., 1.) 
\n\n# divide the axes rectangle into a grid whose size is specified by horiz * vert\ndivider = Divider(fig, rect, horiz, vert, aspect=False)\n\nwith plt.rc_context({'font.family': 'monospace'}):\n ax = setup_axes(fig, 111)\n im = heatmap(datasets,\n opti_var='bed_h',\n annotation=spatial_annotation[0],\n annotation_x_position=annotation_x_position_spatial,\n annotation_y_position=annotation_y_position_spatial,\n fig=fig,\n ax=ax,\n cmap=cmap,\n grid_color=facecolor,\n presentation=False,\n labels_pad=labels_pad,\n xlim=5,\n nr_of_iterations=nr_of_iterations)\n ax.set_axes_locator(divider.new_locator(nx=spatial_nx,\n nx1=spatial_nx1,\n ny=spatial_ny_row_1))\n \n ax = setup_axes(fig, 111)\n im = heatmap(datasets,\n opti_var='bed_shape',\n annotation=spatial_annotation[1],\n annotation_x_position=annotation_x_position_spatial,\n annotation_y_position=annotation_y_position_spatial,\n fig=fig,\n ax=ax,\n cmap='vlag',\n grid_color=facecolor,\n presentation=False,\n labels_pad=labels_pad,\n xlim=5,\n nr_of_iterations=nr_of_iterations)\n ax.set_axes_locator(divider.new_locator(nx=spatial_nx,\n nx1=spatial_nx1,\n ny=spatial_ny_row_2))\n# add performance plot bed_h RMSE\nax = fig.subplots()\nperformance_plot(ax,\n datasets,\n fig=None,\n # 'bed_h RMSE', 'bed_h Diff', 'bed_h Bias',\n # 'bed_shape RMSE', 'bed_shape Diff', 'bed_shape Bias',\n # 'w0 RMSE', 'w0 Diff', 'w0 Bias',\n # 'sfc_h RMSE', 'sfc_h Diff', 'sfc_h Bias',\n # 'widths RMSE', 'widths Diff', 'widths Bias'\n performance_measurement='bed_h RMSE',\n xlim=5,\n y_label='RMSE_b',\n annotation='(c)',\n annotation_x_position=annotation_x_position_performance,\n annotation_y_position=annotation_y_position_performance,\n lw=lw,\n fontsize=fontsize,\n ms=ms,\n nr_of_iterations=nr_of_iterations,\n ax_xlim=[0, 400]\n )\nax.set_axes_locator(divider.new_locator(nx=performance_nx,\n ny=performance_ny_row_1))\n\n# add performance plot bed_shape RMSE\nax = fig.subplots()\nperformance_plot(ax,\n datasets,\n fig=None,\n # 'bed_h RMSE', 'bed_h Diff', 'bed_h Bias',\n # 'bed_shape RMSE', 'bed_shape Diff', 'bed_shape Bias',\n # 'w0 RMSE', 'w0 Diff', 'w0 Bias',\n # 'sfc_h RMSE', 'sfc_h Diff', 'sfc_h Bias',\n # 'widths RMSE', 'widths Diff', 'widths Bias'\n performance_measurement='bed_shape RMSE',\n xlim=5,\n y_label='RMSE_Ps',\n annotation='(d)',\n annotation_x_position=annotation_x_position_performance,\n annotation_y_position=annotation_y_position_performance,\n lw=lw,\n fontsize=fontsize,\n ms=ms,\n nr_of_iterations=nr_of_iterations,\n ax_xlim=[0, 350]\n )\nax.set_axes_locator(divider.new_locator(nx=performance_nx,\n ny=performance_ny_row_2))\n\n# add legend\nax = fig.subplots()\nadd_legend2(ax=ax,\n title=(r'$\bf{cliff}$ with $\bf{constant}$ width and $\bf{parabolic}$ shape,' +\n '\n' +\n r'$\bf{retreating}$ from an $\bf{initial~ glacier~ surface}$,' +\n '\n' + \n r'regularisation parameters $\lambda_0$ = 1 and $\lambda_1$ = 100'),\n fontsize=fontsize,\n lw=lw,\n ms=ms,\n labels=['fg: first guess',\n \"and A: 'explicit' without scaling\",\n \"and B: 'iterative'\",\n \"and C: 'explicit' with scaling of 1e-4\",\n \"and D: 'implicit' with no limits\"])\nax.set_axes_locator(divider.new_locator(nx=legend_nx,\n ny=legend_ny))\n\nif save_file:\n fig.savefig(filename, format='pdf', bbox_inches='tight', dpi=300);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06e7638508623de7eed506ae7d1b030d81d3d5e
10,105
ipynb
Jupyter Notebook
crypto-examples/Insert in Database.ipynb
zzhengnan/pycon-concurrency-tutorial-2020
42db4ef5a7ee15ca2e9d841850b1bacb21e9917a
[ "MIT" ]
107
2020-05-03T23:39:13.000Z
2022-03-27T13:18:00.000Z
crypto-examples/Insert in Database.ipynb
zzhengnan/pycon-concurrency-tutorial-2020
42db4ef5a7ee15ca2e9d841850b1bacb21e9917a
[ "MIT" ]
2
2021-05-13T13:38:44.000Z
2021-09-21T07:51:00.000Z
crypto-examples/Insert in Database.ipynb
zzhengnan/pycon-concurrency-tutorial-2020
42db4ef5a7ee15ca2e9d841850b1bacb21e9917a
[ "MIT" ]
48
2020-05-09T05:56:55.000Z
2022-03-27T04:33:55.000Z
19.284351
325
0.454033
[ [ [ "import sqlite3\nfrom pathlib import Path\nimport pandas as pd", "/Users/santiagobasulto/Library/Caches/pypoetry/virtualenvs/pycon-concurrency-tutorial-2020-a5tTVfGc-py3.8/lib/python3.8/site-packages/pandas/compat/__init__.py:117: UserWarning: Could not import the lzma module. Your installed Python is incomplete. Attempting to use lzma compression will result in a RuntimeError.\n warnings.warn(msg)\n" ], [ "SCHEMA = '''\n\ndrop table if exists price;\ncreate table price (\n id integer primary key autoincrement,\n exchange text not null,\n symbol text not null,\n open DECIMAL(10, 4),\n high DECIMAL(10, 4),\n low DECIMAL(10, 4),\n close DECIMAL(10, 4),\n volume DECIMAL(10, 4),\n day DATE not null\n);\n'''", "_____no_output_____" ] ], [ [ "### Prune DB", "_____no_output_____" ] ], [ [ "conn = sqlite3.connect('prices.db')", "_____no_output_____" ], [ "conn.executescript(SCHEMA)", "_____no_output_____" ], [ "conn.commit()", "_____no_output_____" ], [ "conn.close()", "_____no_output_____" ] ], [ [ "### Insert Data", "_____no_output_____" ] ], [ [ "BASE_PATH = Path('crypto_data/')", "_____no_output_____" ], [ "files = list(BASE_PATH.glob('*.csv'))", "_____no_output_____" ], [ "INSERT_STATEMENT = \"\"\"\nINSERT INTO price (\n exchange, symbol, open, high, low, close, volume, day\n) VALUES (?, ?, ?, ?, ?, ?, ?, ?);\n\"\"\"", "_____no_output_____" ], [ "conn = sqlite3.connect('prices.db')", "_____no_output_____" ], [ "for file in files:\n exchange, symbol = file.name[:-4].split('_')\n df = pd.read_csv(str(file))\n df['exchange'] = exchange\n df['symbol'] = symbol\n \n values = df[['exchange', 'symbol', 'OpenPrice', 'HighPrice', 'LowPrice', 'ClosePrice', 'Volume', 'DateTime']].values\n conn.executemany(INSERT_STATEMENT, values)\n conn.commit()", "_____no_output_____" ], [ "conn.close()", "_____no_output_____" ] ], [ [ "### Final Test", "_____no_output_____" ] ], [ [ "conn = sqlite3.connect('prices.db')", "_____no_output_____" ], [ "cursor = conn.cursor()", "_____no_output_____" ], [ "cursor.execute('SELECT COUNT(*) FROM price;')", "_____no_output_____" ], [ "cursor.fetchone()", "_____no_output_____" ], [ "cursor.execute('SELECT * FROM price LIMIT 5;')", "_____no_output_____" ], [ "cursor.fetchall()", "_____no_output_____" ], [ "conn.close()", "_____no_output_____" ] ], [ [ "### Exchanges", "_____no_output_____" ] ], [ [ "conn = sqlite3.connect('prices.db')", "_____no_output_____" ], [ "cursor = conn.cursor()", "_____no_output_____" ], [ "cursor.execute('SELECT DISTINCT exchange FROM price;')", "_____no_output_____" ], [ "cursor.fetchall()", "_____no_output_____" ], [ "cursor.execute('SELECT DISTINCT symbol FROM price;')", "_____no_output_____" ], [ "cursor.fetchall()", "_____no_output_____" ] ], [ [ "### Filtered query:", "_____no_output_____" ] ], [ [ "cursor.execute('SELECT * FROM price WHERE symbol = \"btc\" AND exchange = \"bitfinex\" AND day = \"2019-07-20\";')", "_____no_output_____" ], [ "cursor.fetchall()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d06e850dc138e0067559b4755800a2f5445ced7a
946
ipynb
Jupyter Notebook
src/Untitled.ipynb
mcditoos/krotov
6a70cc791fa21186997ad2ca5a72f6d30574e7a0
[ "BSD-3-Clause" ]
null
null
null
src/Untitled.ipynb
mcditoos/krotov
6a70cc791fa21186997ad2ca5a72f6d30574e7a0
[ "BSD-3-Clause" ]
null
null
null
src/Untitled.ipynb
mcditoos/krotov
6a70cc791fa21186997ad2ca5a72f6d30574e7a0
[ "BSD-3-Clause" ]
1
2021-11-26T17:01:29.000Z
2021-11-26T17:01:29.000Z
16.892857
36
0.491543
[ [ [ "import numpy as np\nnp.random.rand(15,15).shape", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d06e8f8184af64b7a1d55fc6c320f91757017707
542,628
ipynb
Jupyter Notebook
DFDC/Training Xception Classifier .ipynb
nizamphoenix/kaggle
a9c993d0441a6d9260d605a630f95d938e6329db
[ "MIT" ]
null
null
null
DFDC/Training Xception Classifier .ipynb
nizamphoenix/kaggle
a9c993d0441a6d9260d605a630f95d938e6329db
[ "MIT" ]
null
null
null
DFDC/Training Xception Classifier .ipynb
nizamphoenix/kaggle
a9c993d0441a6d9260d605a630f95d938e6329db
[ "MIT" ]
null
null
null
542,628
542,628
0.932901
[ [ [ "# import sys\n# #sys.path.insert(0,'../input/dlibpkg/dlib-19.19.0/')\n# sys.path.insert(0,'../input/imutils/imutils-0.5.3/')", "_____no_output_____" ], [ "# !pip install dlib", "_____no_output_____" ], [ "# import dlib\n# from scipy.spatial import distance as dist\n# from imutils.video import FileVideoStream\n# from imutils.video import VideoStream\n# from imutils import face_utils\n# import numpy as np\n# import imutils\n# import time\n# import cv2", "_____no_output_____" ], [ "# def eye_aspect_ratio(eye):\n# # compute the euclidean distances between the two sets of vertical eye landmarks (x, y)-coordinates\n# A = dist.euclidean(eye[1], eye[5])\n# B = dist.euclidean(eye[2], eye[4])\n# # compute the euclidean distance between the horizontal-eye landmark (x, y)-coordinates\n# C = dist.euclidean(eye[0], eye[3])\n# ear = (A + B) / (2.0 * C)\n# return ear\n \n# # define two constants, one for the eye aspect ratio to indicate\n# # blink and then a second constant for the number of consecutive\n# # frames the eye must be below the threshold\n# EYE_AR_THRESH = 0.3\n# EYE_AR_CONSEC_FRAMES = 3\n# # initialize the frame counters and the total number of blinks\n# COUNTER = 0\n# TOTAL = 0\n# print(\"[INFO] loading facial landmark predictor...\")\n# detector = dlib.get_frontal_face_detector()\n# dlib_path = '../input/face-det/shape_predictor_68_face_landmarks.dat'\n# predictor = dlib.shape_predictor(dlib_path)", "_____no_output_____" ], [ "# # loop over frames from the video stream and grab the indexes of the facial landmarks for the left and right eye, respectively\n# (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n# (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n# # start the video stream thread\n# print(\"[INFO] starting video stream thread..................\")\n# video_path = '../input/deepfake-detection-challenge/train_sample_videos/dkzvdrzcnr.mp4'\n# vs = FileVideoStream(video_path).start()\n# fileStream = True\n# time.sleep(1.0)\n\n# while True:\n# if fileStream and not vs.more():\n# break\n# frame = vs.read()\n# print(frame.shape)\n# frame = imutils.resize(frame, width=450)\n# print(\"after\")\n# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n# # detect faces in the grayscale frame\n# rects = detector(gray, 0)\n# print(rects)\n# # loop over the face detections\n# for rect in rects:\n# # determine the facial landmarks for the face region, then convert the facial landmark (x, y)-coordinates to a NumPy array\n# shape = predictor(gray, rect)\n# shape = face_utils.shape_to_np(shape)\n# # extract the left and right eye coordinates, then use the\n# # coordinates to compute the eye aspect ratio for both eyes\n# leftEye = shape[lStart:lEnd]\n# rightEye = shape[rStart:rEnd]\n# leftEAR = eye_aspect_ratio(leftEye)\n# rightEAR = eye_aspect_ratio(rightEye)\n# # average the eye aspect ratio together for both eyes\n# ear = (leftEAR + rightEAR) / 2.0\n# print(\"ear\")\n# # compute the convex hull for the left and right eye, then\n# # visualize each of the eyes\n# leftEyeHull = cv2.convexHull(leftEye)\n# rightEyeHull = cv2.convexHull(rightEye)\n# cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n# cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n\n# # check to see if the eye aspect ratio is below the blink\n# # threshold, and if so, increment the blink frame counter\n# if ear < EYE_AR_THRESH:\n# COUNTER += 1\n\n# # otherwise, the eye aspect ratio is not below the blink\n# # threshold\n# else:\n# # if the eyes were closed for a sufficient number 
of\n# # then increment the total number of blinks\n# if COUNTER >= EYE_AR_CONSEC_FRAMES:\n# TOTAL += 1\n\n# # reset the eye frame counter\n# COUNTER = 0", "_____no_output_____" ], [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nfrom tqdm import tqdm,trange\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ] ], [ [ "# Setup Data", "_____no_output_____" ] ], [ [ "#----------Training data set\ndf_train0 = pd.read_json('../input/deepfake/metadata0.json')\ndf_train1 = pd.read_json('../input/deepfake/metadata1.json')\ndf_train2 = pd.read_json('../input/deepfake/metadata2.json')\ndf_train3 = pd.read_json('../input/deepfake/metadata3.json')\ndf_train4 = pd.read_json('../input/deepfake/metadata4.json')\ndf_train5 = pd.read_json('../input/deepfake/metadata5.json')\ndf_train6 = pd.read_json('../input/deepfake/metadata6.json')\ndf_train7 = pd.read_json('../input/deepfake/metadata7.json')\ndf_train8 = pd.read_json('../input/deepfake/metadata8.json')\ndf_train9 = pd.read_json('../input/deepfake/metadata9.json')\ndf_train10 = pd.read_json('../input/deepfake/metadata10.json')\ndf_train11 = pd.read_json('../input/deepfake/metadata11.json')\ndf_train12 = pd.read_json('../input/deepfake/metadata12.json')\ndf_train13 = pd.read_json('../input/deepfake/metadata13.json')\ndf_train14 = pd.read_json('../input/deepfake/metadata14.json')\ndf_train15 = pd.read_json('../input/deepfake/metadata15.json')\ndf_train16 = pd.read_json('../input/deepfake/metadata16.json')\ndf_train17 = pd.read_json('../input/deepfake/metadata17.json')\ndf_train18 = pd.read_json('../input/deepfake/metadata18.json')\ndf_train19 = pd.read_json('../input/deepfake/metadata19.json')\ndf_train20 = pd.read_json('../input/deepfake/metadata20.json')\ndf_train21 = pd.read_json('../input/deepfake/metadata21.json')\ndf_train22 = pd.read_json('../input/deepfake/metadata22.json')\ndf_train23 = pd.read_json('../input/deepfake/metadata23.json')\ndf_train24 = pd.read_json('../input/deepfake/metadata24.json')\ndf_train25 = pd.read_json('../input/deepfake/metadata25.json')\ndf_train26 = pd.read_json('../input/deepfake/metadata26.json')\ndf_train27 = pd.read_json('../input/deepfake/metadata27.json')\ndf_train28 = pd.read_json('../input/deepfake/metadata28.json')\ndf_train29 = pd.read_json('../input/deepfake/metadata29.json')\ndf_train30 = pd.read_json('../input/deepfake/metadata30.json')\ndf_train31 = pd.read_json('../input/deepfake/metadata31.json')\ndf_train32 = pd.read_json('../input/deepfake/metadata32.json')\ndf_train33 = pd.read_json('../input/deepfake/metadata33.json')\ndf_train34 = pd.read_json('../input/deepfake/metadata34.json')\ndf_train35 = pd.read_json('../input/deepfake/metadata35.json')\ndf_train36 = pd.read_json('../input/deepfake/metadata36.json')\ndf_train37 = pd.read_json('../input/deepfake/metadata37.json')\ndf_train38 = pd.read_json('../input/deepfake/metadata38.json')\ndf_train39 = pd.read_json('../input/deepfake/metadata39.json')\ndf_train40 = pd.read_json('../input/deepfake/metadata40.json')\ndf_train41 = pd.read_json('../input/deepfake/metadata41.json')\ndf_train42 = pd.read_json('../input/deepfake/metadata42.json')\ndf_train43 = pd.read_json('../input/deepfake/metadata43.json')\ndf_train44 = pd.read_json('../input/deepfake/metadata44.json')\ndf_train45 = 
pd.read_json('../input/deepfake/metadata45.json')\ndf_train46 = pd.read_json('../input/deepfake/metadata46.json')\ndf_train = [df_train0 ,df_train1, df_train2, df_train3, df_train4,\n df_train5, df_train6, df_train7, df_train8, df_train9,df_train10,\n df_train11, df_train12, df_train13, df_train14, df_train15,df_train16, \n df_train17, df_train18, df_train19, df_train20, df_train21, df_train22, \n df_train23, df_train24, df_train25, df_train26, df_train27, df_train28, \n df_train29, df_train30, df_train31, df_train32, df_train33, df_train34,\n df_train35, df_train36, df_train37, df_train38, df_train39,\n df_train40, df_train41, df_train42, df_train43, df_train44, df_train45,\n df_train46]\ntrain_nums = [\"%.2d\" % i for i in range(len(df_train)+1)]\n#--------------Validation data set\ndf_val1 = pd.read_json('../input/deepfake/metadata47.json')\ndf_val2 = pd.read_json('../input/deepfake/metadata48.json')\ndf_val3 = pd.read_json('../input/deepfake/metadata49.json')\ndf_val = [df_val1, df_val2, df_val3]\nval_nums =['47', '48', '49']", "_____no_output_____" ], [ "# def get_all_paths(df_list,suffixes_list):\n# LABELS = {'REAL':0,'FAKE':1}\n# paths = []\n# labels = []\n# for df,suffix in tqdm(zip(df_list,suffixes_list),total=len(df_list)):\n# images_names = list(df.columns.values)\n# for img_name in images_names:\n# try:\n# paths.append(get_path(img_name,suffix))\n# labels.append(LABELS[df[img_name]['label']])\n# except Exception as err:\n# #print(err)\n# pass\n# return paths,labels", "_____no_output_____" ], [ "def get_orig_fakes(df):\n orig_fakes = {}\n temp = df.T.groupby(['original',df.T.index,]).count()\n for orig,fake in (list(temp.index)):\n fakes = []\n try:#if key exists\n fakes = orig_fakes[orig]\n fakes.append(fake)\n except KeyError as e:\n fakes.append(fake)\n finally:\n orig_fakes[orig] = fakes\n return orig_fakes\n\ndef get_path(img_name,suffix):\n path = '../input/deepfake/DeepFake'+suffix+'/DeepFake'+suffix+'/' + img_name.replace(\".mp4\",\"\")+ '.jpg'\n if not os.path.exists(path):\n raise Exception\n return path\n\ndef get_all_paths(df_list,suffixes_list):\n paths = []\n labels = []\n count = 0\n for df in tqdm(df_list,total=len(df_list)):\n orig_fakes = get_orig_fakes(df)\n for suffix in suffixes_list:\n try:\n for orig,fakes in orig_fakes.items():\n paths.append(get_path(orig,suffix))\n labels.append(0)#processing REAL image\n for img_name in fakes:\n paths.append(get_path(img_name,suffix))\n labels.append(1)#processing FAKES image\n except Exception as err:\n count+=1\n pass\n print(\"Exceptions:\",count)\n return paths,labels", "_____no_output_____" ], [ "%%time\nval_img_paths, val_img_labels = get_all_paths(df_val,val_nums)\ntrain_img_paths, train_img_labels = get_all_paths(df_train,train_nums)\nlen(train_img_paths),len(val_img_paths)", "100%|██████████| 3/3 [00:00<00:00, 24.20it/s]\n 2%|▏ | 1/47 [00:00<00:05, 7.74it/s]" ], [ "#NOT IDEMPOTENT\nval_img_labels = val_img_labels[:500]\nval_img_paths = val_img_paths[:500]\nlen(val_img_paths)", "_____no_output_____" ] ], [ [ "# Dataset", "_____no_output_____" ] ], [ [ "def read_img(path):\n return cv2.cvtColor(cv2.imread(path),cv2.COLOR_BGR2RGB)\nimport random\ndef shuffle(X,y):\n new = []\n for m,n in zip(X,y):\n new.append([m,n])\n random.shuffle(new)\n X,y = [],[]\n for path,label in new:\n X.append(path)\n y.append(label)\n return X,y", "_____no_output_____" ] ], [ [ "## FAKE-->1 REAL-->0", "_____no_output_____" ] ], [ [ "def get_data(train_paths, train_y, val_paths, val_y):\n train_X=[]\n for img in tqdm(train_paths):\n 
train_X.append(read_img(img))\n val_X=[]\n for img in tqdm(val_paths):\n val_X.append(read_img(img))\n train_X, train_y = shuffle(train_X,train_y)\n val_X, val_y = shuffle(val_X,val_y)\n return train_X, val_X, train_y, val_y", "_____no_output_____" ], [ "'''\ndef get_random_sampling(paths, y, val_paths, val_y):\n real=[]\n fake=[]\n for path,label in zip(paths,y):\n if label==0: \n real.append(path)\n else: \n fake.append(path)\n # fake=random.sample(fake,len(real))\n paths,y=[],[]\n for x in real:\n paths.append(x)\n y.append(0)\n for x in fake:\n paths.append(x)\n y.append(1)\n\n real=[]\n fake=[]\n for m,n in zip(val_paths,val_y):\n if n==0:\n real.append(m)\n else:\n fake.append(m)\n # fake=random.sample(fake,len(real))\n val_paths,val_y=[],[]\n for x in real:\n val_paths.append(x)\n val_y.append(0)\n for x in fake:\n val_paths.append(x)\n val_y.append(1)\n \n #training dataset\n X=[]\n for img in tqdm(paths):\n X.append(read_img(img))\n #validation dataset \n val_X=[]\n for img in tqdm(val_paths):\n val_X.append(read_img(img))\n\n # Balance with ffhq dataset\n ffhq = os.listdir('../input/ffhq-face-data-set/thumbnails128x128')\n X_ = []#used for train and val \n for file in tqdm(ffhq):\n im = read_img(f'../input/ffhq-face-data-set/thumbnails128x128/{file}')\n im = cv2.resize(im, (150,150))\n X_.append(im)\n random.shuffle(X_)\n #Appending REAL images from FFHQ dataset\n for i in range(64773 - 12130):\n X.append(X_[i])\n y.append(0)\n del X_[0:64773 - 12130]\n \n for i in range(6108 - 1258):\n val_X.append(X_[i])\n val_y.append(0)\n X, y = shuffle(X,y)\n val_X, val_y = shuffle(val_X,val_y)\n\n return X, val_X, y, val_y\n '''", "_____no_output_____" ], [ "from torch.utils.data import Dataset, DataLoader\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\nclass ImageDataset(Dataset):\n def __init__(self, X, y, training=True, transform=None):\n self.X = X\n self.y = y\n self.transform = transform\n self.training = training\n\n def __len__(self):\n return len(self.X)\n \n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n img = self.X[idx]\n\n if self.transform is not None:\n res = self.transform(image=img)\n img = res['image']\n \n img = np.rollaxis(img, 2, 0)\n # img = np.array(img).astype(np.float32) / 255.\n\n labels = self.y[idx]\n labels = np.array(labels).astype(np.float32)\n return [img, labels]", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "!pip install pytorchcv --quiet", "_____no_output_____" ], [ "from pytorchcv.model_provider import get_model\nmodel = get_model(\"xception\", pretrained=True)\n# model = get_model(\"resnet18\", pretrained=True)\nmodel = nn.Sequential(*list(model.children())[:-1]) # Remove original output layer\nmodel[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))\n# model[0].final_pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))", "_____no_output_____" ], [ "class Head(torch.nn.Module):\n def __init__(self, in_f, out_f):\n super().__init__()\n\n self.f = nn.Flatten()\n self.l = nn.Linear(in_f, 512)\n self.d = nn.Dropout(0.30)\n self.o = nn.Linear(512, out_f)\n self.b1 = nn.BatchNorm1d(in_f)\n self.b2 = nn.BatchNorm1d(512)\n self.r = nn.ReLU()\n\n def forward(self, x):\n x = self.f(x)\n x = self.b1(x)\n x = self.d(x)\n\n x = self.l(x)\n x = self.r(x)\n x = self.b2(x)\n x = self.d(x)\n\n out = self.o(x)\n return out", "_____no_output_____" ], [ "class FCN(torch.nn.Module):\n def __init__(self, base, in_f):\n super().__init__()\n self.base = base\n self.h1 = Head(in_f, 1)\n \n def 
forward(self, x):\n x = self.base(x)\n return self.h1(x)\n\nmodel = FCN(model, 2048)\nPATH = './model1.pth'\nmodel.load_state_dict(torch.load(PATH))\nmodel.eval()", "_____no_output_____" ] ], [ [ "# Train Functions", "_____no_output_____" ] ], [ [ "def calculate_loss(preds, targets):\n return F.binary_cross_entropy(F.sigmoid(preds), targets)\n\ndef train_model(epoch, optimizer, scheduler=None, history=None):\n model.train()\n total_loss = 0\n \n t = tqdm(train_loader)\n for i, (img_batch, y_batch) in enumerate(t):\n img_batch = img_batch.cuda().float()\n y_batch = y_batch.cuda().float()\n\n optimizer.zero_grad()#to avoid accumulating gradients\n preds_batch = model(img_batch)\n loss = calculate_loss(preds_batch, y_batch)\n \n total_loss += loss.item()#accumulate a float so the autograd graph is not kept alive\n t.set_description(f'Epoch {epoch+1}/{n_epochs}, LR: %6f, Loss: %.4f'%(optimizer.state_dict()['param_groups'][0]['lr'],total_loss/(i+1)))\n\n if history is not None:\n history.loc[epoch + i / len(train_loader), 'train_loss'] = loss.data.cpu().numpy()\n history.loc[epoch + i / len(train_loader), 'lr'] = optimizer.state_dict()['param_groups'][0]['lr']\n\n loss.backward()#computing gradients\n optimizer.step()#updating parameters\n \n if scheduler is not None:\n scheduler.step()", "_____no_output_____" ], [ "def evaluate_model(epoch, scheduler=None, history=None):\n model.eval()\n total_loss = 0.0\n pred = []\n target = []\n with torch.no_grad():\n for img_batch, y_batch in val_loader:\n img_batch = img_batch.cuda().float()\n y_batch = y_batch.cuda().float()\n preds_batch = model(img_batch)\n loss = calculate_loss(preds_batch, y_batch)\n total_loss += loss\n pred += [*map(F.sigmoid,preds_batch)]#extend across batches instead of overwriting\n target += [*map(lambda i: i.data.cpu(),y_batch)]\n \n\n pred = [p.data.cpu().numpy() for p in pred]\n pred2 = pred\n pred = [np.round(p) for p in pred]\n pred = np.array(pred)\n #calculating balanced accuracy (macro-averaged recall)\n acc = sklearn.metrics.recall_score(target, pred, average='macro')\n target = [i.item() for i in target]\n pred2 = np.array(pred2).clip(0.1, 0.9)\n #calculating log-loss after clipping \n log_loss = sklearn.metrics.log_loss(target, pred2)\n\n total_loss /= len(val_loader)\n \n if history is not None:\n history.loc[epoch, 'dev_loss'] = total_loss.cpu().numpy()\n \n if scheduler is not None:\n scheduler.step(total_loss)\n\n print(f'Dev loss: %.4f, Acc: %.6f, log_loss: %.6f'%(total_loss,acc,log_loss))\n \n return total_loss", "_____no_output_____" ] ], [ [ "# Dataloaders", "_____no_output_____" ] ], [ [ "X, val_X, y, val_y = get_data(train_img_paths, train_img_labels,val_img_paths, val_img_labels)\n\nprint('There are '+str(y.count(1))+' fake train samples')\nprint('There are '+str(y.count(0))+' real train samples')\nprint('There are '+str(val_y.count(1))+' fake val samples')\nprint('There are '+str(val_y.count(0))+' real val samples')", "100%|██████████| 2286/2286 [00:02<00:00, 882.95it/s]\n100%|██████████| 149/149 [00:00<00:00, 881.23it/s]" ], [ "import albumentations\nfrom albumentations.augmentations.transforms import ShiftScaleRotate, HorizontalFlip, Normalize, RandomBrightnessContrast, MotionBlur, Blur, GaussNoise, JpegCompression\ntrain_transform = albumentations.Compose([\n ShiftScaleRotate(p=0.3, scale_limit=0.25, border_mode=1, rotate_limit=25),\n HorizontalFlip(p=0.2),\n RandomBrightnessContrast(p=0.3, brightness_limit=0.25, contrast_limit=0.5),\n MotionBlur(p=.2),\n GaussNoise(p=.2),\n JpegCompression(p=.2, quality_lower=50),\n Normalize()\n])\nval_transform = albumentations.Compose([Normalize()])\n\ntrain_dataset = ImageDataset(X, y, transform=train_transform)\nval_dataset = 
ImageDataset(val_X, val_y, transform=val_transform)", "_____no_output_____" ], [ "nrow, ncol = 5, 6\nfig, axes = plt.subplots(nrow, ncol, figsize=(20, 8))\naxes = axes.flatten()\nfor i, ax in enumerate(axes):\n image, label = train_dataset[i]\n image = np.rollaxis(image, 0, 3)\n image = image*std + mean\n image = np.clip(image, 0., 1.)\n ax.imshow(image)\n ax.set_title(f'label: {label}')", "_____no_output_____" ] ], [ [ "# Train", "_____no_output_____" ] ], [ [ "import gc\n\nhistory = pd.DataFrame()\nhistory2 = pd.DataFrame()\n\ntorch.cuda.empty_cache()\ngc.collect()\n\nbest = 1e10\nn_epochs = 20\nbatch_size = 64#BATCH SIZE CHANGED\n\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\nval_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False, num_workers=0)\n\nmodel = model.cuda()\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=0.001)\n\nscheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, mode='min', factor=0.7, verbose=True, min_lr=1e-5)\n\nfor epoch in range(n_epochs):\n torch.cuda.empty_cache()\n gc.collect()\n train_model(epoch, optimizer, scheduler=None, history=history)\n loss = evaluate_model(epoch, scheduler=scheduler, history=history2)\n if loss < best:\n best = loss\n print(f'Saving best model...')\n torch.save(model.state_dict(), f'model2.pth')", "Epoch 1/20, LR: 0.001000, Loss: 0.7097: 100%|██████████| 36/36 [00:13<00:00, 2.73it/s]\n" ], [ "history2.plot()", "_____no_output_____" ], [ "import torch\nw = torch.rand(5)\nw.requires_grad_()\nprint(w) \ns = w.sum() \nprint(s)\ns.backward()\nprint(w.grad) # tensor([1., 1., 1., 1., 1.])\ns.backward()\nprint(w.grad) # tensor([2., 2., 2., 2., 2.])\ns.backward()\nprint(w.grad) # tensor([3., 3., 3., 3., 3.])\ns.backward()\nprint(w.grad) # tensor([4., 4., 4., 4., 4.])", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06e9563c4f31fba517106bb49b0018b21ae7a7e
1,375
ipynb
Jupyter Notebook
net/quantization_playground.ipynb
marbleton/FPGA_MNIST
4b4a30e0adca35de9adcad7b3fec08c516260790
[ "MIT" ]
7
2019-11-13T12:24:36.000Z
2021-03-31T02:39:35.000Z
net/quantization_playground.ipynb
marbleton/FPGA_MNIST
4b4a30e0adca35de9adcad7b3fec08c516260790
[ "MIT" ]
29
2019-12-17T22:06:04.000Z
2022-03-12T00:20:45.000Z
net/quantization_playground.ipynb
marbleton/FPGA_MNIST
4b4a30e0adca35de9adcad7b3fec08c516260790
[ "MIT" ]
4
2019-10-20T15:12:52.000Z
2020-10-13T13:36:37.000Z
18.835616
47
0.476364
[ [ [ "import numpy as np\n\n", "_____no_output_____" ], [ "# Input activation bits and fractions\nia_b = np.array([8, 8, 8, 8])\nia_f = np.array([6, 5, 5, 5])\n\n# Weights bits and fractions\nw_b = np.array([4, 4, 4, 4])\nw_f = np.array([4, 4, 4, 4])\n\n# Output activation bits and fractions\noa_b = np.array([8, 8, 8, 8])\noa_f = np.array([5, 5, 5, 5])\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d06e96d9a72035484adea7ad5ce2d08fff3c06d8
34,149
ipynb
Jupyter Notebook
4. Convolutional Neural Networks/Face_Recognition_v3a.ipynb
taktak-hi/Deep-Learning-Specialization
c6f7b0891dbdded6a542cb6f35d03c3b9ec1fdd0
[ "MIT" ]
null
null
null
4. Convolutional Neural Networks/Face_Recognition_v3a.ipynb
taktak-hi/Deep-Learning-Specialization
c6f7b0891dbdded6a542cb6f35d03c3b9ec1fdd0
[ "MIT" ]
null
null
null
4. Convolutional Neural Networks/Face_Recognition_v3a.ipynb
taktak-hi/Deep-Learning-Specialization
c6f7b0891dbdded6a542cb6f35d03c3b9ec1fdd0
[ "MIT" ]
null
null
null
41.746944
563
0.596796
[ [ [ "# Face Recognition\n\nIn this assignment, you will build a face recognition system. Many of the ideas presented here are from [FaceNet](https://arxiv.org/pdf/1503.03832.pdf). In lecture, we also talked about [DeepFace](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf). \n\nFace recognition problems commonly fall into two categories: \n\n- **Face Verification** - \"is this the claimed person?\". For example, at some airports, you can pass through customs by letting a system scan your passport and then verifying that you (the person carrying the passport) are the correct person. A mobile phone that unlocks using your face is also using face verification. This is a 1:1 matching problem. \n- **Face Recognition** - \"who is this person?\". For example, the video lecture showed a [face recognition video](https://www.youtube.com/watch?v=wr4rx0Spihs) of Baidu employees entering the office without needing to otherwise identify themselves. This is a 1:K matching problem. \n\nFaceNet learns a neural network that encodes a face image into a vector of 128 numbers. By comparing two such vectors, you can then determine if two pictures are of the same person.\n \n**In this assignment, you will:**\n- Implement the triplet loss function\n- Use a pretrained model to map face images into 128-dimensional encodings\n- Use these encodings to perform face verification and face recognition\n\n#### Channels-first notation\n\n* In this exercise, we will be using a pre-trained model which represents ConvNet activations using a **\"channels first\"** convention, as opposed to the \"channels last\" convention used in lecture and previous programming assignments. \n* In other words, a batch of images will be of shape $(m, n_C, n_H, n_W)$ instead of $(m, n_H, n_W, n_C)$. \n* Both of these conventions have a reasonable amount of traction among open-source implementations; there isn't a uniform standard yet within the deep learning community. ", "_____no_output_____" ], [ "## <font color='darkblue'>Updates</font>\n\n#### If you were working on the notebook before this update...\n* The current notebook is version \"3a\".\n* You can find your original work saved in the notebook with the previous version name (\"v3\") \n* To view the file directory, go to the menu \"File->Open\", and this will open a new tab that shows the file directory.\n\n#### List of updates\n* `triplet_loss`: Additional Hints added.\n* `verify`: Hints added.\n* `who_is_it`: corrected hints given in the comments.\n* Spelling and formatting updates for easier reading.\n", "_____no_output_____" ], [ "#### Load packages\nLet's load the required packages. 
", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.layers.merge import Concatenate\nfrom keras.layers.core import Lambda, Flatten, Dense\nfrom keras.initializers import glorot_uniform\nfrom keras.engine.topology import Layer\nfrom keras import backend as K\nK.set_image_data_format('channels_first')\nimport cv2\nimport os\nimport numpy as np\nfrom numpy import genfromtxt\nimport pandas as pd\nimport tensorflow as tf\nfrom fr_utils import *\nfrom inception_blocks_v2 import *\n\n%matplotlib inline\n%load_ext autoreload\n%autoreload 2\n\nnp.set_printoptions(threshold=np.nan)", "Using TensorFlow backend.\n" ] ], [ [ "## 0 - Naive Face Verification\n\nIn Face Verification, you're given two images and you have to determine if they are of the same person. The simplest way to do this is to compare the two images pixel-by-pixel. If the distance between the raw images are less than a chosen threshold, it may be the same person! \n\n<img src=\"images/pixel_comparison.png\" style=\"width:380px;height:150px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u></center></caption>", "_____no_output_____" ], [ "* Of course, this algorithm performs really poorly, since the pixel values change dramatically due to variations in lighting, orientation of the person's face, even minor changes in head position, and so on. \n* You'll see that rather than using the raw image, you can learn an encoding, $f(img)$. \n* By using an encoding for each image, an element-wise comparison produces a more accurate judgement as to whether two pictures are of the same person.", "_____no_output_____" ], [ "## 1 - Encoding face images into a 128-dimensional vector \n\n### 1.1 - Using a ConvNet to compute encodings\n\nThe FaceNet model takes a lot of data and a long time to train. So following common practice in applied deep learning, let's load weights that someone else has already trained. The network architecture follows the Inception model from [Szegedy *et al.*](https://arxiv.org/abs/1409.4842). We have provided an inception network implementation. You can look in the file `inception_blocks_v2.py` to see how it is implemented (do so by going to \"File->Open...\" at the top of the Jupyter notebook. This opens the file directory that contains the '.py' file). ", "_____no_output_____" ], [ "The key things you need to know are:\n\n- This network uses 96x96 dimensional RGB images as its input. Specifically, inputs a face image (or batch of $m$ face images) as a tensor of shape $(m, n_C, n_H, n_W) = (m, 3, 96, 96)$ \n- It outputs a matrix of shape $(m, 128)$ that encodes each input face image into a 128-dimensional vector\n\nRun the cell below to create the model for face images.", "_____no_output_____" ] ], [ [ "FRmodel = faceRecoModel(input_shape=(3, 96, 96))", "_____no_output_____" ], [ "print(\"Total Params:\", FRmodel.count_params())", "Total Params: 3743280\n" ] ], [ [ "** Expected Output **\n<table>\n<center>\nTotal Params: 3743280\n</center>\n</table>\n", "_____no_output_____" ], [ "By using a 128-neuron fully connected layer as its last layer, the model ensures that the output is an encoding vector of size 128. 
You then use the encodings to compare two face images as follows:\n\n<img src=\"images/distance_kiank.png\" style=\"width:680px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 2**: <br> </u> <font color='purple'> By computing the distance between two encodings and thresholding, you can determine if the two pictures represent the same person</center></caption>\n\nSo, an encoding is a good one if: \n- The encodings of two images of the same person are quite similar to each other. \n- The encodings of two images of different persons are very different.\n\nThe triplet loss function formalizes this, and tries to \"push\" the encodings of two images of the same person (Anchor and Positive) closer together, while \"pulling\" the encodings of two images of different persons (Anchor, Negative) further apart. \n\n<img src=\"images/triplet_comparison.png\" style=\"width:280px;height:150px;\">\n<br>\n<caption><center> <u> <font color='purple'> **Figure 3**: <br> </u> <font color='purple'> In the next part, we will call the pictures from left to right: Anchor (A), Positive (P), Negative (N) </center></caption>", "_____no_output_____" ], [ "\n\n### 1.2 - The Triplet Loss\n\nFor an image $x$, we denote its encoding $f(x)$, where $f$ is the function computed by the neural network.\n\n<img src=\"images/f_x.png\" style=\"width:380px;height:150px;\">\n\n<!--\nWe will also add a normalization step at the end of our model so that $\\mid \\mid f(x) \\mid \\mid_2 = 1$ (means the vector of encoding should be of norm 1).\n!-->\n\nTraining will use triplets of images $(A, P, N)$: \n\n- A is an \"Anchor\" image--a picture of a person. \n- P is a \"Positive\" image--a picture of the same person as the Anchor image.\n- N is a \"Negative\" image--a picture of a different person than the Anchor image.\n\nThese triplets are picked from our training dataset. We will write $(A^{(i)}, P^{(i)}, N^{(i)})$ to denote the $i$-th training example. \n\nYou'd like to make sure that an image $A^{(i)}$ of an individual is closer to the Positive $P^{(i)}$ than to the Negative image $N^{(i)}$ by at least a margin $\\alpha$:\n\n$$\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2 + \\alpha < \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$$\n\nYou would thus like to minimize the following \"triplet cost\":\n\n$$\\mathcal{J} = \\sum^{m}_{i=1} \\large[ \\small \\underbrace{\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2}_\\text{(1)} - \\underbrace{\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2}_\\text{(2)} + \\alpha \\large ] \\small_+ \\tag{3}$$\n\nHere, we are using the notation \"$[z]_+$\" to denote $max(z,0)$. \n\nNotes:\n- The term (1) is the squared distance between the anchor \"A\" and the positive \"P\" for a given triplet; you want this to be small. \n- The term (2) is the squared distance between the anchor \"A\" and the negative \"N\" for a given triplet, you want this to be relatively large. It has a minus sign preceding it because minimizing the negative of the term is the same as maximizing that term.\n- $\\alpha$ is called the margin. It is a hyperparameter that you pick manually. We will use $\\alpha = 0.2$. \n\nMost implementations also rescale the encoding vectors to have an L2 norm equal to one (i.e., $\\mid \\mid f(img)\\mid \\mid_2$=1); you won't have to worry about that in this assignment.\n\n**Exercise**: Implement the triplet loss as defined by formula (3). Here are the 4 steps:\n1. 
Compute the distance between the encodings of \"anchor\" and \"positive\": $\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2$\n2. Compute the distance between the encodings of \"anchor\" and \"negative\": $\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$\n3. Compute the formula per training example: $ \\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2 - \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2 + \\alpha$\n4. Compute the full formula by taking the max with zero and summing over the training examples:\n$$\\mathcal{J} = \\sum^{m}_{i=1} \\large[ \\small \\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2 - \\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2+ \\alpha \\large ] \\small_+ \\tag{3}$$\n\n#### Hints\n* Useful functions: `tf.reduce_sum()`, `tf.square()`, `tf.subtract()`, `tf.add()`, `tf.maximum()`.\n* For steps 1 and 2, you will sum over the entries of $\\mid \\mid f(A^{(i)}) - f(P^{(i)}) \\mid \\mid_2^2$ and $\\mid \\mid f(A^{(i)}) - f(N^{(i)}) \\mid \\mid_2^2$. \n* For step 4 you will sum over the training examples.\n\n#### Additional Hints\n* Recall that the square of the L2 norm is the sum of the squared differences: $||x - y||_{2}^{2} = \\sum_{i=1}^{N}(x_{i} - y_{i})^{2}$\n* Note that the `anchor`, `positive` and `negative` encodings are of shape `(m,128)`, where m is the number of training examples and 128 is the number of elements used to encode a single example.\n* For steps 1 and 2, you will maintain the number of `m` training examples and sum along the 128 values of each encoding. \n[tf.reduce_sum](https://www.tensorflow.org/api_docs/python/tf/math/reduce_sum) has an `axis` parameter. This chooses along which axis the sums are applied. \n* Note that one way to choose the last axis in a tensor is to use negative indexing (`axis=-1`).\n* In step 4, when summing over training examples, the result will be a single scalar value.\n* For `tf.reduce_sum` to sum across all axes, keep the default value `axis=None`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: triplet_loss\n\ndef triplet_loss(y_true, y_pred, alpha = 0.2):\n \"\"\"\n Implementation of the triplet loss as defined by formula (3)\n \n Arguments:\n y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.\n y_pred -- python list containing three objects:\n anchor -- the encodings for the anchor images, of shape (None, 128)\n positive -- the encodings for the positive images, of shape (None, 128)\n negative -- the encodings for the negative images, of shape (None, 128)\n \n Returns:\n loss -- real number, value of the loss\n \"\"\"\n \n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\n \n ### START CODE HERE ### (≈ 4 lines)\n # Step 1: Compute the (encoding) distance between the anchor and the positive\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis = -1)\n # Step 2: Compute the (encoding) distance between the anchor and the negative\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis = -1)\n # Step 3: subtract the two previous distances and add alpha.\n basic_loss = pos_dist - neg_dist + alpha\n # Step 4: Take the maximum of basic_loss and 0.0. 
Sum over the training examples.\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0))\n ### END CODE HERE ###\n \n return loss", "_____no_output_____" ], [ "with tf.Session() as test:\n tf.set_random_seed(1)\n y_true = (None, None, None)\n y_pred = (tf.random_normal([3, 128], mean=6, stddev=0.1, seed = 1),\n tf.random_normal([3, 128], mean=1, stddev=1, seed = 1),\n tf.random_normal([3, 128], mean=3, stddev=4, seed = 1))\n loss = triplet_loss(y_true, y_pred)\n \n print(\"loss = \" + str(loss.eval()))", "loss = 528.143\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **loss**\n </td>\n <td>\n 528.143\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "## 2 - Loading the pre-trained model\n\nFaceNet is trained by minimizing the triplet loss. But since training requires a lot of data and a lot of computation, we won't train it from scratch here. Instead, we load a previously trained model. Load a model using the following cell; this might take a couple of minutes to run. ", "_____no_output_____" ] ], [ [ "FRmodel.compile(optimizer = 'adam', loss = triplet_loss, metrics = ['accuracy'])\nload_weights_from_FaceNet(FRmodel)", "_____no_output_____" ] ], [ [ "Here are some examples of distances between the encodings between three individuals:\n\n<img src=\"images/distance_matrix.png\" style=\"width:380px;height:200px;\">\n<br>\n<caption><center> <u> <font color='purple'> **Figure 4**:</u> <br> <font color='purple'> Example of distance outputs between three individuals' encodings</center></caption>\n\nLet's now use this model to perform face verification and face recognition! ", "_____no_output_____" ], [ "## 3 - Applying the model", "_____no_output_____" ], [ "You are building a system for an office building where the building manager would like to offer facial recognition to allow the employees to enter the building.\n\nYou'd like to build a **Face verification** system that gives access to the list of people who live or work there. To get admitted, each person has to swipe an ID card (identification card) to identify themselves at the entrance. The face recognition system then checks that they are who they claim to be.", "_____no_output_____" ], [ "### 3.1 - Face Verification\n\nLet's build a database containing one encoding vector for each person who is allowed to enter the office. To generate the encoding we use `img_to_encoding(image_path, model)`, which runs the forward propagation of the model on the specified image. \n\nRun the following code to build the database (represented as a python dictionary). 
This database maps each person's name to a 128-dimensional encoding of their face.", "_____no_output_____" ] ], [ [ "database = {}\ndatabase[\"danielle\"] = img_to_encoding(\"images/danielle.png\", FRmodel)\ndatabase[\"younes\"] = img_to_encoding(\"images/younes.jpg\", FRmodel)\ndatabase[\"tian\"] = img_to_encoding(\"images/tian.jpg\", FRmodel)\ndatabase[\"andrew\"] = img_to_encoding(\"images/andrew.jpg\", FRmodel)\ndatabase[\"kian\"] = img_to_encoding(\"images/kian.jpg\", FRmodel)\ndatabase[\"dan\"] = img_to_encoding(\"images/dan.jpg\", FRmodel)\ndatabase[\"sebastiano\"] = img_to_encoding(\"images/sebastiano.jpg\", FRmodel)\ndatabase[\"bertrand\"] = img_to_encoding(\"images/bertrand.jpg\", FRmodel)\ndatabase[\"kevin\"] = img_to_encoding(\"images/kevin.jpg\", FRmodel)\ndatabase[\"felix\"] = img_to_encoding(\"images/felix.jpg\", FRmodel)\ndatabase[\"benoit\"] = img_to_encoding(\"images/benoit.jpg\", FRmodel)\ndatabase[\"arnaud\"] = img_to_encoding(\"images/arnaud.jpg\", FRmodel)", "_____no_output_____" ] ], [ [ "Now, when someone shows up at your front door and swipes their ID card (thus giving you their name), you can look up their encoding in the database, and use it to check if the person standing at the front door matches the name on the ID.\n\n**Exercise**: Implement the `verify()` function, which checks if the front-door camera picture (`image_path`) is actually the person called \"identity\". You will have to go through the following steps:\n1. Compute the encoding of the image from `image_path`.\n2. Compute the distance between this encoding and the encoding of the identity image stored in the database.\n3. Open the door if the distance is less than 0.7, else do not open it.\n\n\n* As presented above, you should use the L2 distance [np.linalg.norm](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html). \n* (Note: In this implementation, compare the L2 distance, not the square of the L2 distance, to the threshold 0.7.) \n\n#### Hints\n* `identity` is a string that is also a key in the `database` dictionary.\n* `img_to_encoding` has two parameters: the `image_path` and `model`.
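\n\nTo get a concrete feel for the distance check, here is a tiny standalone example with made-up 3-D vectors standing in for the 128-D encodings (illustrative values only):\n\n```python\nimport numpy as np\n\nenc_a = np.array([0.1, 0.2, 0.3])\nenc_b = np.array([0.1, 0.2, 0.9])\ndist = np.linalg.norm(enc_a - enc_b)  # L2 distance = 0.6\nprint(dist < 0.7)                     # True -> the door would open\n```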
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: verify\n\ndef verify(image_path, identity, database, model):\n \"\"\"\n Function that verifies if the person on the \"image_path\" image is \"identity\".\n \n Arguments:\n image_path -- path to an image\n identity -- string, name of the person whose identity you'd like to verify. Has to be an employee who works in the office.\n database -- Python dictionary mapping allowed people's names (strings) to their encodings (vectors).\n model -- your Inception model instance in Keras\n \n Returns:\n dist -- distance between the image_path and the image of \"identity\" in the database.\n door_open -- True, if the door should open. False otherwise.\n \"\"\"\n \n ### START CODE HERE ###\n \n # Step 1: Compute the encoding for the image. Use img_to_encoding() (see example above). (≈ 1 line)\n encoding = img_to_encoding(image_path, model)\n \n # Step 2: Compute distance with identity's image (≈ 1 line)\n dist = np.linalg.norm(encoding - database[identity])\n \n # Step 3: Open the door if dist < 0.7, else don't open (≈ 3 lines)\n if dist < 0.7:\n print(\"It's \" + str(identity) + \", welcome in!\")\n door_open = True\n else:\n print(\"It's not \" + str(identity) + \", please go away\")\n door_open = False\n \n ### END CODE HERE ###\n \n return dist, door_open", "_____no_output_____" ] ], [ [ "Younes is trying to enter the office and the camera takes a picture of him (\"images/camera_0.jpg\"). Let's run your verification algorithm on this picture:\n\n<img src=\"images/camera_0.jpg\" style=\"width:100px;height:100px;\">", "_____no_output_____" ] ], [ [ "verify(\"images/camera_0.jpg\", \"younes\", database, FRmodel)", "It's younes, welcome in!\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **It's younes, welcome in!**\n </td>\n <td>\n (0.65939283, True)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "Benoit, who does not work in the office, stole Kian's ID card and tried to enter the office. The camera took a picture of Benoit (\"images/camera_2.jpg\"). Let's run the verification algorithm to check if Benoit can enter.\n<img src=\"images/camera_2.jpg\" style=\"width:100px;height:100px;\">", "_____no_output_____" ] ], [ [ "verify(\"images/camera_2.jpg\", \"kian\", database, FRmodel)", "It's not kian, please go away\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **It's not kian, please go away**\n </td>\n <td>\n (0.86224014, False)\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 3.2 - Face Recognition\n\nYour face verification system is mostly working well. But since Kian's ID card was stolen, when he came back to the office the next day he couldn't get in! \n\nTo solve this, you'd like to change your face verification system to a face recognition system. This way, no one has to carry an ID card anymore. An authorized person can just walk up to the building, and the door will unlock for them! \n\nYou'll implement a face recognition system that takes as input an image, and figures out if it is one of the authorized persons (and if so, who). Unlike the previous face verification system, we will no longer get a person's name as one of the inputs. \n\n**Exercise**: Implement `who_is_it()`. You will have to go through the following steps:\n1. Compute the target encoding of the image from `image_path`.\n2. Find the encoding from the database that has the smallest distance to the target encoding. \n - Initialize the `min_dist` variable to a large enough number (100). It will help you keep track of what is the closest encoding to the input's encoding.\n - Loop over the database dictionary's names and encodings. To loop, use `for (name, db_enc) in database.items()`.\n - Compute the L2 distance between the target \"encoding\" and the current \"encoding\" from the database.\n - If this distance is less than the min_dist, then set `min_dist` to `dist`, and `identity` to `name`.
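\n\nAs a toy illustration of this nearest-encoding search (made-up 3-D vectors, not real encodings), the pattern looks like:\n\n```python\nimport numpy as np\n\ntoy_db = {\"alice\": np.array([0.0, 0.0, 1.0]),\n          \"bob\": np.array([1.0, 0.0, 0.0])}\nquery = np.array([0.9, 0.1, 0.0])\n\nmin_dist, identity = 100, None\nfor name, db_enc in toy_db.items():\n    dist = np.linalg.norm(query - db_enc)\n    if dist < min_dist:\n        min_dist, identity = dist, name\n\nprint(identity)  # bob\n```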
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: who_is_it\n\ndef who_is_it(image_path, database, model):\n \"\"\"\n Implements face recognition for the office by identifying the person in the image_path image.\n \n Arguments:\n image_path -- path to an image\n database -- database containing image encodings along with the name of the person on the image\n model -- your Inception model instance in Keras\n \n Returns:\n min_dist -- the minimum distance between image_path encoding and the encodings from the database\n identity -- string, the name prediction for the person on image_path\n \"\"\"\n \n ### START CODE HERE ### \n \n ## Step 1: Compute the target \"encoding\" for the image. Use img_to_encoding() (see example above). ## (≈ 1 line)\n encoding = img_to_encoding(image_path, model)\n \n ## Step 2: Find the closest encoding ##\n \n # Initialize \"min_dist\" to a large value, say 100 (≈1 line)\n min_dist = 100\n \n # Loop over the database dictionary's names and encodings.\n for (name, db_enc) in database.items():\n \n # Compute L2 distance between the target \"encoding\" and the current db_enc from the database. (≈ 1 line)\n dist = np.linalg.norm(encoding - db_enc)\n\n # If this distance is less than the min_dist, then set min_dist to dist, and identity to name. (≈ 3 lines)\n if dist < min_dist:\n min_dist = dist\n identity = name\n\n ### END CODE HERE ###\n \n if min_dist > 0.7:\n print(\"Not in the database.\")\n else:\n print(\"it's \" + str(identity) + \", the distance is \" + str(min_dist))\n \n return min_dist, identity", "_____no_output_____" ] ], [ [ "Younes is at the front door and the camera takes a picture of him (\"images/camera_0.jpg\"). Let's see if your who_is_it() algorithm identifies Younes. ", "_____no_output_____" ] ], [ [ "who_is_it(\"images/camera_0.jpg\", database, FRmodel)", "it's younes, the distance is 0.659393\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td>\n **it's younes, the distance is 0.659393**\n </td>\n <td>\n (0.65939283, 'younes')\n </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "You can change \"`camera_0.jpg`\" (picture of Younes) to \"`camera_1.jpg`\" (picture of Bertrand) and see the result.", "_____no_output_____" ], [ "#### Congratulations!\n\n* Your face recognition system is working well! It only lets in authorized persons, and people don't need to carry an ID card around anymore! \n* You've now seen how a state-of-the-art face recognition system works.\n\n#### Ways to improve your facial recognition model\nAlthough we won't implement it here, here are some ways to further improve the algorithm:\n- Put more images of each person (under different lighting conditions, taken on different days, etc.) into the database. Then, given a new image, compare the new face to multiple pictures of the person. This would increase accuracy.\n- Crop the images to just contain the face, and less of the \"border\" region around the face. This preprocessing removes some of the irrelevant pixels around the face, and also makes the algorithm more robust.\n", "_____no_output_____" ], [ "## Key points to remember\n- Face verification solves an easier 1:1 matching problem; face recognition addresses a harder 1:K matching problem. 
\n- The triplet loss is an effective loss function for training a neural network to learn an encoding of a face image.\n- The same encoding can be used for verification and recognition. Measuring distances between two images' encodings allows you to determine whether they are pictures of the same person. ", "_____no_output_____" ], [ "Congrats on finishing this assignment! \n", "_____no_output_____" ], [ "### References:\n\n- Florian Schroff, Dmitry Kalenichenko, James Philbin (2015). [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/pdf/1503.03832.pdf)\n- Yaniv Taigman, Ming Yang, Marc'Aurelio Ranzato, Lior Wolf (2014). [DeepFace: Closing the gap to human-level performance in face verification](https://research.fb.com/wp-content/uploads/2016/11/deepface-closing-the-gap-to-human-level-performance-in-face-verification.pdf) \n- The pretrained model we use is inspired by Victor Sy Wang's implementation and was loaded using his code: https://github.com/iwantooxxoox/Keras-OpenFace.\n- Our implementation also took a lot of inspiration from the official FaceNet github repository: https://github.com/davidsandberg/facenet \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06e9eb36b0902ca8b2ee29c9680978acd7f1acd
5,751
ipynb
Jupyter Notebook
a01_PySpark/e01_Resources/PySpark-and-MLlib/learning_spark_MLlib.ipynb
mindis/Big_Data_Analysis
b4aec3a0e285de5cac02ad7390712635a73a24db
[ "Apache-2.0" ]
null
null
null
a01_PySpark/e01_Resources/PySpark-and-MLlib/learning_spark_MLlib.ipynb
mindis/Big_Data_Analysis
b4aec3a0e285de5cac02ad7390712635a73a24db
[ "Apache-2.0" ]
null
null
null
a01_PySpark/e01_Resources/PySpark-and-MLlib/learning_spark_MLlib.ipynb
mindis/Big_Data_Analysis
b4aec3a0e285de5cac02ad7390712635a73a24db
[ "Apache-2.0" ]
1
2021-06-22T10:18:14.000Z
2021-06-22T10:18:14.000Z
25.446903
355
0.550165
[ [ [ "from pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setMaster('local').setAppName('EmailSpam')\nsc = SparkContext(conf=conf)", "_____no_output_____" ], [ "from pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib.feature import HashingTF\nfrom pyspark.mllib.classification import LogisticRegressionWithSGD\n\nspam = sc.textFile('spam.txt')\nham = sc.textFile('ham.txt')", "_____no_output_____" ], [ "tf = HashingTF(numFeatures = 10000)\nspamFeatures = spam.map(lambda email: tf.transform(email.split(' ')))\nhamFeatures = ham.map(lambda email: tf.transform(email.split(' ')))", "_____no_output_____" ], [ "# Create LabeledPoint datasets for positive (spam) and negative (ham) examples.\n\npositiveExamples = spamFeatures.map(lambda features: LabeledPoint(1, features))\nnegativeExamples = hamFeatures.map(lambda features: LabeledPoint(0, features))\n\ntrainingData = positiveExamples.union(negativeExamples)\ntrainingData.cache()", "_____no_output_____" ], [ "trainingData.take(2)", "_____no_output_____" ] ], [ [ "### Logistic Regression", "_____no_output_____" ] ], [ [ "model = LogisticRegressionWithSGD.train(trainingData)", "_____no_output_____" ], [ "posTest = tf.transform('O M G GET cheap stuff by sending money to ...'.split(' '))\nnegTest = tf.transform('Hi Dad, I started studying Spark the other ...'.split(' '))\nprint('Prediction for positive test example: %g' % model.predict(posTest))\nprint('Prediction for negative test example: %g' % model.predict(negTest))", "Prediction for positive test example: 0\nPrediction for negative test example: 0\n" ] ], [ [ "### Creating vectors", "_____no_output_____" ] ], [ [ "from numpy import array\nfrom pyspark.mllib.linalg import Vectors\n\ndenseVec1 = array([1.0, 2.0, 3.0])\ndenseVec2 = Vectors.dense([1.0, 2.0, 3.0])\n\nsparseVec1 = Vectors.sparse(4, {0:1.0, 2:2.0})\nsparseVec2 = Vectors.sparse(4, [0, 2], [1.0, 2.0])", "_____no_output_____" ] ], [ [ "### Using HashingTF", "_____no_output_____" ] ], [ [ "sentence = 'hello hello world'\nwords = sentence.split()\ntf = HashingTF(10000)\ntf.transform(words)", "_____no_output_____" ], [ "# wholeTextFiles yields (filename, content) pairs; Python 3 removed tuple-unpacking lambdas\nrdd = sc.wholeTextFiles(\"data\").map(lambda name_text: name_text[1].split())\ntfVectors = tf.transform(rdd)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06eb1277c9cefa2f641c8054220915744d2a172
21,093
ipynb
Jupyter Notebook
docs/articles/deeplearningdat.ipynb
DeepInsider/playground-data
366ab37fe31024dd3b803586fb52eef4dbe1221e
[ "Apache-2.0" ]
12
2018-12-31T08:47:21.000Z
2021-02-07T16:25:26.000Z
docs/articles/deeplearningdat.ipynb
DeepInsider/playground-data
366ab37fe31024dd3b803586fb52eef4dbe1221e
[ "Apache-2.0" ]
null
null
null
docs/articles/deeplearningdat.ipynb
DeepInsider/playground-data
366ab37fe31024dd3b803586fb52eef4dbe1221e
[ "Apache-2.0" ]
5
2019-06-30T14:24:44.000Z
2021-06-24T15:00:58.000Z
24.583916
255
0.414593
[ [ [ "<a href=\"https://colab.research.google.com/github/DeepInsider/playground-data/blob/master/docs/articles/deeplearningdat.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "##### Copyright 2019 Digital Advantage - Deep Insider.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Notebook for the series \"Introduction to Machine Learning & Deep Learning (Data Structures Edition)\"", "_____no_output_____" ], [ "<table valign=\"middle\">\n  <td>\n    <a target=\"_blank\" href=\"https://deepinsider.jp/tutor/deeplearningdat\"> <img src=\"https://re.deepinsider.jp/img/ml-logo/manabu.svg\"/>Read the article on Deep Insider</a>\n  </td>\n  <td>\n    <a target=\"_blank\" href=\"https://colab.research.google.com/github/DeepInsider/playground-data/blob/master/docs/articles/deeplearningdat.ipynb\"> <img src=\"https://re.deepinsider.jp/img/ml-logo/gcolab.svg\" />Run in Google Colab</a>\n  </td>\n  <td>\n    <a target=\"_blank\" href=\"https://github.com/DeepInsider/playground-data/blob/master/docs/articles/deeplearningdat.ipynb\"> <img src=\"https://re.deepinsider.jp/img/ml-logo/github.svg\" />View the source code on GitHub</a>\n  </td>\n</table>", "_____no_output_____" ], [ "Note: Run the cells in order from the top. Some cells reuse results from earlier cells, so some code will raise errors unless everything above has been run first. \n To run all the code at once, click [Runtime]-[Run all] on the menu bar.", "_____no_output_____" ], [ "Note: This notebook is written to also run on \"Python 2\", but we generally recommend using \"Python 3\".\n To use Python 3, select [Runtime]-[Change runtime type] from the menu bar, set the [Runtime type] field to \"Python 3\" in the [Notebook settings] dialog that appears, and click the [Save] button at its bottom right.", "_____no_output_____" ] ], [ [ "# Compatibility with Python version 2\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nprint(sys.version_info.major) # 3 # version (major)\nprint(sys.version_info.minor) # 6 # version (minor)", "_____no_output_____" ] ], [ [ "## Data structures in the Python language", "_____no_output_____" ], [ "### Representing a \"single\" piece of data in Python", "_____no_output_____" ], [ "#### Listing 1-1: Code representing a \"single\" piece of data", "_____no_output_____" ] ], [ [ "height = 177.2\n\nprint(height) # prints 177.2", "_____no_output_____" ] ], [ [ "#### Listing 1-2: Writing only the variable name to output the object's evaluation result", "_____no_output_____" ] ], [ [ "height # prints 177.2", "_____no_output_____" ] ], [ [ "#### Listing 1-3: The difference between outputting an object's evaluation result and the print() function's output", "_____no_output_____" ] ], [ [ "import numpy as np\narray2d = np.array([ [ 165.5, 58.4 ],\n [ 177.2, 67.8 ],\n [ 183.2, 83.7 ] ])\n\nprint(array2d) # [[165.5 58.4]\n # [177.2 67.8]\n # [183.2 83.7]]\n\narray2d # array([[165.5, 58.4],\n # [177.2, 67.8],\n # [183.2, 83.7]])", "_____no_output_____" ] ], [ [ "#### Listing 2-1: Code representing data as multiple \"single\" values", "_____no_output_____" ] ], [ [ "hana_height = 165.5\ntaro_height = 177.2\njiro_height = 183.2\n\nhana_height, taro_height, jiro_height # (165.5, 177.2, 183.2)", "_____no_output_____" ] ], [ [ "#### Listing 2-2: Code representing \"multiple (1-dimensional)\" data", "_____no_output_____" ] ], [ [ "heights = [ 165.5, 177.2, 183.2 ]\n\nheights # [165.5, 177.2, 183.2]", "_____no_output_____" ] ], [ [ "### Representing \"multiple (2-dimensional)\" data in Python", "_____no_output_____" ], [ "#### Listing 3: Code representing \"multiple (2-dimensional)\" data", "_____no_output_____" ] ], [ [ "people = [ [ 165.5, 58.4 ],\n [ 177.2, 67.8 ],\n [ 183.2, 83.7 ] ]\n\npeople # [[165.5, 58.4], [177.2, 67.8], [183.2, 83.7]]", "_____no_output_____" ] ], [ [ "### Representing \"multiple (multi-dimensional)\" data in Python", "_____no_output_____" ], [ "#### Listing 4: Code representing \"multiple (3-dimensional)\" data", "_____no_output_____" ] ], [ [ "list3d = [\n [ [ 165.5, 58.4 ], [ 177.2, 67.8 ], [ 183.2, 83.7 ] ],\n [ [ 155.5, 48.4 ], [ 167.2, 57.8 ], [ 173.2, 73.7 ] ],\n [ [ 145.5, 38.4 ], [ 157.2, 47.8 ], [ 163.2, 63.7 ] ]\n]\n\nlist3d # [[[165.5, 58.4], [177.2, 67.8], [183.2, 83.7]],\n # [[155.5, 48.4], [167.2, 57.8], [173.2, 73.7]],\n # [[145.5, 38.4], [157.2, 47.8], [163.2, 63.7]]]", "_____no_output_____" ] ], [ [ "## Data structures in AI programs (basics)", "_____no_output_____" ], [ "### Installing NumPy", "_____no_output_____" ], [ "#### Listing 5-1: Shell command to install the `numpy` package", "_____no_output_____" ] ], [ [ "!pip install numpy", "_____no_output_____" ] ], [ [ "### Importing the numpy module", "_____no_output_____" ], [ "#### Listing 5-2: Example code importing the `numpy` module", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "### Creating NumPy's \"multidimensional array\" data-type objects", "_____no_output_____" ], [ "#### Listing 5-3: Example code creating a multidimensional array with the `array` function (using values)", "_____no_output_____" ] ], [ [ "array2d = np.array([ [ 165.5, 58.4 ],\n [ 177.2, 67.8 ],\n [ 183.2, 83.7 ] ])\n\narray2d # array([[165.5, 58.4],\n # [177.2, 67.8],\n # [183.2, 83.7]])", "_____no_output_____" ] ], [ [ "#### Listing 5-4: Example code creating a multidimensional array with the `array` function (using a variable)", "_____no_output_____" ] ], [ [ "array3d = np.array(list3d)\n\narray3d # array([[[165.5, 58.4],\n # [177.2, 67.8],\n # [183.2, 83.7]],\n # \n # [[155.5, 48.4],\n # [167.2, 57.8],\n # [173.2, 73.7]],\n # \n # [[145.5, 38.4],\n # [157.2, 47.8],\n # [163.2, 63.7]]])", "_____no_output_____" ] ], [ [ "#### Listing 5-5: Example code converting to a multidimensional list with the `ndarray` class's `tolist()` method", "_____no_output_____" ] ], [ [ "tolist3d = array3d.tolist()\n\ntolist3d # [[[165.5, 58.4], [177.2, 67.8], [183.2, 83.7]],\n # [[155.5, 48.4], [167.2, 57.8], [173.2, 73.7]],\n # [[145.5, 38.4], [157.2, 47.8], [163.2, 63.7]]]", "_____no_output_____" ] ], [ [ "## Data structures in AI programs (advanced)", "_____no_output_____" ], [ "### Installing Pandas", "_____no_output_____" ], [ "#### Listing 6: Shell command to install the `pandas` package", "_____no_output_____" ] ], [ [ "!pip install pandas", "_____no_output_____" ] ], [ [ "#### Figure 7-1: Example displaying NumPy data as a table with Pandas", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.DataFrame(array2d, columns=['身長', '体重']) # column labels: '身長' = height, '体重' = weight\ndf", "_____no_output_____" ] ], [ [ "## Calculating with data in AI programs", "_____no_output_____" ], [ "### Why AI and deep learning use mathematics", "_____no_output_____" ], [ "#### Listing 7-1: Example code calculating the average height of three people (using individual values)", "_____no_output_____" ] ], [ [ "# hana_height, taro_height, jiro_height = 165.5, 177.2, 183.2 # already declared in Listing 2-1 of Lesson 1\n\naverage_height = (\n hana_height + \n taro_height + \n jiro_height \n) / 3\n\nprint(average_height) # 175.29999999999998", "_____no_output_____" ] ], [ [ "#### Listing 7-2: Example code calculating the average of the three people's heights (using a multidimensional array)", "_____no_output_____" ] ], [ [ "import numpy as np\n\narray1d = np.array([ 165.5, 177.2, 183.2 ])\n\naverage_height = np.average(array1d)\n\naverage_height # 175.29999999999998", "_____no_output_____" ] ], [ [ "### Calculations with NumPy", "_____no_output_____" ], [ "#### Listing 8-1: Example code displaying various properties of a 3-row, 2-column matrix", "_____no_output_____" ] ], [ [ "array2d = np.array([ [ 165.5, 58.4 ],\n [ 177.2, 67.8 ],\n [ 183.2, 83.7 ] ])\n\nprint(array2d.shape) # (3, 2)\nprint(array2d.ndim) # 2\nprint(array2d.size) # 6", "_____no_output_____" ] ], [ [ "#### Listing 8-2: Matrix calculation with NumPy", "_____no_output_____" ] ], [ [ "diet = np.array([ [ 1.0, 0.0 ],\n [ 0.0, 0.9 ] ])\n\nlose_weights = diet @ array2d.T\n# For Python 3.5 and later. On earlier versions such as Python 2, use the matmul function below instead\n#lose_weights = np.matmul(diet, array2d.T)\n\nprint(lose_weights.T) # [[165.5 52.56]\n # [177.2 61.02]\n # [183.2 75.33]]", "_____no_output_____" ] ], [ [ "#### Listing 8-3: Calculating the average of all elements (not split by height/weight)", "_____no_output_____" ] ], [ [ "averages = np.average(array2d)\n\naverages # 122.63333333333334", "_____no_output_____" ] ], [ [ "#### Listing 8-4: Calculating separate averages for height and weight", "_____no_output_____" ] ], [ [ "averages = np.average(array2d, axis=0)\n\naverages # array([175.3 , 69.96666667])", "_____no_output_____" ] ], [ [ "#### Listing 8-5: Calculating per-group height/weight averages on 3-dimensional array data", "_____no_output_____" ] ], [ [ "array3d = np.array(\n [ [ [ 165.5, 58.4 ], [ 177.2, 67.8 ], [ 183.2, 83.7 ] ],\n [ [ 155.5, 48.4 ], [ 167.2, 57.8 ], [ 173.2, 73.7 ] ],\n [ [ 145.5, 38.4 ], [ 157.2, 47.8 ], [ 163.2, 63.7 ] ] ]\n)\n\navr3d = np.average(array3d, axis=1)\n\nprint(avr3d) # [[175.3 69.96666667]\n # [165.3 59.96666667]\n # [155.3 49.96666667]]", "_____no_output_____" ] ], [ [ "## Well done! This completes the data structures lesson.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d06ebcfeaa96064c6c99ecf3633df19903bda3e8
98,353
ipynb
Jupyter Notebook
site/en/tutorials/structured_data/time_series.ipynb
jcjveraa/docs-2
b5c78b0e9ab52094321153991d33534383d99610
[ "Apache-2.0" ]
2
2020-10-27T23:43:37.000Z
2021-11-09T10:29:16.000Z
site/en/tutorials/structured_data/time_series.ipynb
jcjveraa/docs-2
b5c78b0e9ab52094321153991d33534383d99610
[ "Apache-2.0" ]
1
2021-02-23T20:17:39.000Z
2021-02-23T20:17:39.000Z
site/en/tutorials/structured_data/time_series.ipynb
jcjveraa/docs-2
b5c78b0e9ab52094321153991d33534383d99610
[ "Apache-2.0" ]
null
null
null
33.868113
408
0.535459
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Time series forecasting", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/structured_data/time_series\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/structured_data/time_series.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/structured_data/time_series.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "This tutorial is an introduction to time series forecasting using TensorFlow. It builds a few different styles of models including Convolutional and Recurrent Neural Networks (CNNs and RNNs).\n\nThis is covered in two main parts, with subsections: \n\n* Forecast for a single timestep:\n * A single feature.\n * All features.\n* Forecast multiple steps:\n * Single-shot: Make the predictions all at once.\n * Autoregressive: Make one prediction at a time and feed the output back to the model.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import os\nimport datetime\n\nimport IPython\nimport IPython.display\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tensorflow as tf\n\nmpl.rcParams['figure.figsize'] = (8, 6)\nmpl.rcParams['axes.grid'] = False", "_____no_output_____" ] ], [ [ "## The weather dataset\nThis tutorial uses a <a href=\"https://www.bgc-jena.mpg.de/wetter/\" class=\"external\">weather time series dataset</a> recorded by the <a href=\"https://www.bgc-jena.mpg.de\" class=\"external\">Max Planck Institute for Biogeochemistry</a>.\n\nThis dataset contains 14 different features such as air temperature, atmospheric pressure, and humidity. These were collected every 10 minutes, beginning in 2003. For efficiency, you will use only the data collected between 2009 and 2016. 
This section of the dataset was prepared by François Chollet for his book [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).", "_____no_output_____" ] ], [ [ "zip_path = tf.keras.utils.get_file(\n    origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',\n    fname='jena_climate_2009_2016.csv.zip',\n    extract=True)\ncsv_path, _ = os.path.splitext(zip_path)", "_____no_output_____" ] ], [ [ "This tutorial will just deal with **hourly predictions**, so start by sub-sampling the data from 10-minute intervals to 1-hour intervals:", "_____no_output_____" ] ], [ [ "df = pd.read_csv(csv_path)\n# slice [start:stop:step], starting from index 5 take every 6th record.\ndf = df[5::6]\n\ndate_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S')", "_____no_output_____" ] ], [ [ "Let's take a glance at the data. Here are the first few rows:", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "Here is the evolution of a few features over time. ", "_____no_output_____" ] ], [ [ "plot_cols = ['T (degC)', 'p (mbar)', 'rho (g/m**3)']\nplot_features = df[plot_cols]\nplot_features.index = date_time\n_ = plot_features.plot(subplots=True)\n\nplot_features = df[plot_cols][:480]\nplot_features.index = date_time[:480]\n_ = plot_features.plot(subplots=True)", "_____no_output_____" ] ], [ [ "### Inspect and cleanup", "_____no_output_____" ], [ "Next, look at the statistics of the dataset:", "_____no_output_____" ] ], [ [ "df.describe().transpose()", "_____no_output_____" ] ], [ [ "#### Wind velocity", "_____no_output_____" ], [ "One thing that should stand out is the `min` value of the wind velocity columns, `wv (m/s)` and `max. wv (m/s)`. This `-9999` is likely erroneous. There's a separate wind direction column, so the velocity should be `>=0`. Replace it with zeros:\n", "_____no_output_____" ] ], [ [ "wv = df['wv (m/s)']\nbad_wv = wv == -9999.0\nwv[bad_wv] = 0.0\n\nmax_wv = df['max. wv (m/s)']\nbad_max_wv = max_wv == -9999.0\nmax_wv[bad_max_wv] = 0.0\n\n# The above inplace edits are reflected in the DataFrame\ndf['wv (m/s)'].min()", "_____no_output_____" ] ], [ [ "### Feature engineering\n\nBefore diving in to build a model, it's important to understand your data, and be sure that you're passing the model appropriately formatted data.", "_____no_output_____" ], [ "#### Wind\nThe last column of the data, `wd (deg)`, gives the wind direction in units of degrees. Angles do not make good model inputs: 360° and 0° should be close to each other and wrap around smoothly. Direction shouldn't matter if the wind is not blowing. \n\nRight now the distribution of wind data looks like this:", "_____no_output_____" ] ], [ [ "plt.hist2d(df['wd (deg)'], df['wv (m/s)'], bins=(50, 50), vmax=400)\nplt.colorbar()\nplt.xlabel('Wind Direction [deg]')\nplt.ylabel('Wind Velocity [m/s]')", "_____no_output_____" ] ], [ [ "But this will be easier for the model to interpret if you convert the wind direction and velocity columns to a wind **vector**:", "_____no_output_____" ] ], [ [ "wv = df.pop('wv (m/s)')\nmax_wv = df.pop('max. 
wv (m/s)')\n\n# Convert to radians.\nwd_rad = df.pop('wd (deg)')*np.pi / 180\n\n# Calculate the wind x and y components.\ndf['Wx'] = wv*np.cos(wd_rad)\ndf['Wy'] = wv*np.sin(wd_rad)\n\n# Calculate the max wind x and y components.\ndf['max Wx'] = max_wv*np.cos(wd_rad)\ndf['max Wy'] = max_wv*np.sin(wd_rad)", "_____no_output_____" ] ], [ [ "The distribution of wind vectors is much simpler for the model to correctly interpret.", "_____no_output_____" ] ], [ [ "plt.hist2d(df['Wx'], df['Wy'], bins=(50, 50), vmax=400)\nplt.colorbar()\nplt.xlabel('Wind X [m/s]')\nplt.ylabel('Wind Y [m/s]')\nax = plt.gca()\nax.axis('tight')", "_____no_output_____" ] ], [ [ "#### Time", "_____no_output_____" ], [ "Similarly the `Date Time` column is very useful, but not in this string form. Start by converting it to seconds:", "_____no_output_____" ] ], [ [ "timestamp_s = date_time.map(datetime.datetime.timestamp)", "_____no_output_____" ] ], [ [ "Similar to the wind direction the time in seconds is not a useful model input. Being weather data it has clear daily and yearly periodicity. There are many ways you could deal with periodicity.\n\nA simple approach to convert it to a usable signal is to use `sin` and `cos` to convert the time to clear \"Time of day\" and \"Time of year\" signals:", "_____no_output_____" ] ], [ [ "day = 24*60*60\nyear = (365.2425)*day\n\ndf['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))\ndf['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))\ndf['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))\ndf['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))", "_____no_output_____" ], [ "plt.plot(np.array(df['Day sin'])[:25])\nplt.plot(np.array(df['Day cos'])[:25])\nplt.xlabel('Time [h]')\nplt.title('Time of day signal')", "_____no_output_____" ] ], [ [ "This gives the model access to the most important frequency features. In this case you knew ahead of time which frequencies were important. \n\nIf you didn't know, you can determine which frequencies are important using an `fft`. To check our assumptions, here is the `tf.signal.rfft` of the temperature over time. Note the obvious peaks at frequencies near `1/year` and `1/day`: ", "_____no_output_____" ] ], [ [ "fft = tf.signal.rfft(df['T (degC)'])\nf_per_dataset = np.arange(0, len(fft))\n\nn_samples_h = len(df['T (degC)'])\nhours_per_year = 24*365.2524\nyears_per_dataset = n_samples_h/(hours_per_year)\n\nf_per_year = f_per_dataset/years_per_dataset\nplt.step(f_per_year, np.abs(fft))\nplt.xscale('log')\nplt.ylim(0, 400000)\nplt.xlim([0.1, max(plt.xlim())])\nplt.xticks([1, 365.2524], labels=['1/Year', '1/day'])\n_ = plt.xlabel('Frequency (log scale)')", "_____no_output_____" ] ], [ [ "### Split the data", "_____no_output_____" ], [ "We'll use a `(70%, 20%, 10%)` split for the training, validation, and test sets. Note the data is **not** being randomly shuffled before splitting. This is for two reasons.\n\n1. It ensures that chopping the data into windows of consecutive samples is still possible.\n2. It ensures that the validation/test results are more realistic, being evaluated on data collected after the model was trained.", "_____no_output_____" ] ], [ [ "column_indices = {name: i for i, name in enumerate(df.columns)}\n\nn = len(df)\ntrain_df = df[0:int(n*0.7)]\nval_df = df[int(n*0.7):int(n*0.9)]\ntest_df = df[int(n*0.9):]\n\nnum_features = df.shape[1]", "_____no_output_____" ] ], [ [ "### Normalize the data\n\nIt is important to scale features before training a neural network. Normalization is a common way of doing this scaling. 
Subtract the mean and divide by the standard deviation of each feature.", "_____no_output_____" ], [ "The mean and standard deviation should only be computed using the training data so that the models have no access to the values in the validation and test sets.\n\nIt's also arguable that the model shouldn't have access to future values in the training set when training, and that this normalization should be done using moving averages. That's not the focus of this tutorial, and the validation and test sets ensure that you get (somewhat) honest metrics. So in the interest of simplicity this tutorial uses a simple average.", "_____no_output_____" ] ], [ [ "train_mean = train_df.mean()\ntrain_std = train_df.std()\n\ntrain_df = (train_df - train_mean) / train_std\nval_df = (val_df - train_mean) / train_std\ntest_df = (test_df - train_mean) / train_std", "_____no_output_____" ] ], [ [ "Now peek at the distribution of the features. Some features do have long tails, but there are no obvious errors like the `-9999` wind velocity value.", "_____no_output_____" ] ], [ [ "df_std = (df - train_mean) / train_std\ndf_std = df_std.melt(var_name='Column', value_name='Normalized')\nplt.figure(figsize=(12, 6))\nax = sns.violinplot(x='Column', y='Normalized', data=df_std)\n_ = ax.set_xticklabels(df.keys(), rotation=90)", "_____no_output_____" ] ], [ [ "## Data windowing\n\nThe models in this tutorial will make a set of predictions based on a window of consecutive samples from the data. \n\nThe main features of the input windows are:\n\n* The width (number of time steps) of the input and label windows.\n* The time offset between them.\n* Which features are used as inputs, labels, or both. \n\nThis tutorial builds a variety of models (including Linear, DNN, CNN and RNN models), and uses them for both:\n\n* *Single-output* and *multi-output* predictions.\n* *Single-time-step* and *multi-time-step* predictions.\n\nThis section focuses on implementing the data windowing so that it can be reused for all of those models.\n", "_____no_output_____" ], [ "Depending on the task and type of model, you may want to generate a variety of data windows. Here are some examples:\n\n1. To make a single prediction 24h into the future, given 24h of history, you might define a window like this:\n\n  ![One prediction 24h into the future.](images/raw_window_24h.png)\n\n2. A model that makes a prediction 1h into the future, given 6h of history, would need a window like this:\n\n  ![One prediction 1h into the future.](images/raw_window_1h.png)", "_____no_output_____" ], [ "The rest of this section defines a `WindowGenerator` class. This class can:\n\n1. Handle the indexes and offsets as shown in the diagrams above.\n2. Split windows of features into `(features, labels)` pairs.\n3. Plot the content of the resulting windows.\n4. Efficiently generate batches of these windows from the training, evaluation, and test data, using `tf.data.Dataset`s.", "_____no_output_____" ], [ "### 1. Indexes and offsets\n\nStart by creating the `WindowGenerator` class. The `__init__` method includes all the necessary logic for the input and label indices.\n\nIt also takes the train, eval, and test dataframes as input. 
These will be converted to `tf.data.Dataset`s of windows later.", "_____no_output_____" ] ], [ [ "class WindowGenerator():\n def __init__(self, input_width, label_width, shift,\n train_df=train_df, val_df=val_df, test_df=test_df,\n label_columns=None):\n # Store the raw data.\n self.train_df = train_df\n self.val_df = val_df\n self.test_df = test_df\n\n # Work out the label column indices.\n self.label_columns = label_columns\n if label_columns is not None:\n self.label_columns_indices = {name: i for i, name in\n enumerate(label_columns)}\n self.column_indices = {name: i for i, name in\n enumerate(train_df.columns)}\n\n # Work out the window parameters.\n self.input_width = input_width\n self.label_width = label_width\n self.shift = shift\n\n self.total_window_size = input_width + shift\n\n self.input_slice = slice(0, input_width)\n self.input_indices = np.arange(self.total_window_size)[self.input_slice]\n\n self.label_start = self.total_window_size - self.label_width\n self.labels_slice = slice(self.label_start, None)\n self.label_indices = np.arange(self.total_window_size)[self.labels_slice]\n\n def __repr__(self):\n return '\\n'.join([\n f'Total window size: {self.total_window_size}',\n f'Input indices: {self.input_indices}',\n f'Label indices: {self.label_indices}',\n f'Label column name(s): {self.label_columns}'])", "_____no_output_____" ] ], [ [ "Here is code to create the 2 windows shown in the diagrams at the start of this section:", "_____no_output_____" ] ], [ [ "w1 = WindowGenerator(input_width=24, label_width=1, shift=24,\n label_columns=['T (degC)'])\nw1", "_____no_output_____" ], [ "w2 = WindowGenerator(input_width=6, label_width=1, shift=1,\n label_columns=['T (degC)'])\nw2", "_____no_output_____" ] ], [ [ "### 2. Split\nGiven a list of consecutive inputs, the `split_window` method will convert them to a window of inputs and a window of labels.\n\nThe example `w2`, above, will be split like this:\n\n![The initial window is all consecutive samples; this splits it into (inputs, labels) pairs](images/split_window.png)\n\nThis diagram doesn't show the `features` axis of the data, but this `split_window` function also handles the `label_columns` so it can be used for both the single-output and multi-output examples.", "_____no_output_____" ] ], [ [ "def split_window(self, features):\n inputs = features[:, self.input_slice, :]\n labels = features[:, self.labels_slice, :]\n if self.label_columns is not None:\n labels = tf.stack(\n [labels[:, :, self.column_indices[name]] for name in self.label_columns],\n axis=-1)\n\n # Slicing doesn't preserve static shape information, so set the shapes\n # manually. 
This way the `tf.data.Datasets` are easier to inspect.\n inputs.set_shape([None, self.input_width, None])\n labels.set_shape([None, self.label_width, None])\n\n return inputs, labels\n\nWindowGenerator.split_window = split_window", "_____no_output_____" ] ], [ [ "Try it out:", "_____no_output_____" ] ], [ [ "# Stack three slices, the length of the total window:\nexample_window = tf.stack([np.array(train_df[:w2.total_window_size]),\n np.array(train_df[100:100+w2.total_window_size]),\n np.array(train_df[200:200+w2.total_window_size])])\n\n\nexample_inputs, example_labels = w2.split_window(example_window)\n\nprint('All shapes are: (batch, time, features)')\nprint(f'Window shape: {example_window.shape}')\nprint(f'Inputs shape: {example_inputs.shape}')\nprint(f'labels shape: {example_labels.shape}')", "_____no_output_____" ] ], [ [ "Typically data in TensorFlow is packed into arrays where the outermost index is across examples (the \"batch\" dimension). The middle indices are the \"time\" or \"space\" (width, height) dimension(s). The innermost indices are the features.\n\nThe code above took a batch of 3, 7-timestep windows, with 19 features at each time step. It split them into a batch of 6-timestep, 19 feature inputs, and a 1-timestep 1-feature label. The label only has one feature because the `WindowGenerator` was initialized with `label_columns=['T (degC)']`. Initially this tutorial will build models that predict single output labels.", "_____no_output_____" ], [ "### 3. Plot\n\nHere is a plot method that allows a simple visualization of the split window:", "_____no_output_____" ] ], [ [ "w2.example = example_inputs, example_labels", "_____no_output_____" ], [ "def plot(self, model=None, plot_col='T (degC)', max_subplots=3):\n inputs, labels = self.example\n plt.figure(figsize=(12, 8))\n plot_col_index = self.column_indices[plot_col]\n max_n = min(max_subplots, len(inputs))\n for n in range(max_n):\n plt.subplot(3, 1, n+1)\n plt.ylabel(f'{plot_col} [normed]')\n plt.plot(self.input_indices, inputs[n, :, plot_col_index],\n label='Inputs', marker='.', zorder=-10)\n\n if self.label_columns:\n label_col_index = self.label_columns_indices.get(plot_col, None)\n else:\n label_col_index = plot_col_index\n\n if label_col_index is None:\n continue\n\n plt.scatter(self.label_indices, labels[n, :, label_col_index],\n edgecolors='k', label='Labels', c='#2ca02c', s=64)\n if model is not None:\n predictions = model(inputs)\n plt.scatter(self.label_indices, predictions[n, :, label_col_index],\n marker='X', edgecolors='k', label='Predictions',\n c='#ff7f0e', s=64)\n\n if n == 0:\n plt.legend()\n\n plt.xlabel('Time [h]')\n\nWindowGenerator.plot = plot", "_____no_output_____" ] ], [ [ "This plot aligns inputs, labels, and (later) predictions based on the time that the item refers to:", "_____no_output_____" ] ], [ [ "w2.plot()", "_____no_output_____" ] ], [ [ "You can plot the other columns, but the example window `w2` configuration only has labels for the `T (degC)` column.", "_____no_output_____" ] ], [ [ "w2.plot(plot_col='p (mbar)')", "_____no_output_____" ] ], [ [ "### 4. 
Create `tf.data.Dataset`s", "_____no_output_____" ], [ "Finally this `make_dataset` method will take a time series `DataFrame` and convert it to a `tf.data.Dataset` of `(input_window, label_window)` pairs using the `preprocessing.timeseries_dataset_from_array` function.", "_____no_output_____" ] ], [ [ "def make_dataset(self, data):\n data = np.array(data, dtype=np.float32)\n ds = tf.keras.preprocessing.timeseries_dataset_from_array(\n data=data,\n targets=None,\n sequence_length=self.total_window_size,\n sequence_stride=1,\n shuffle=True,\n batch_size=32,)\n\n ds = ds.map(self.split_window)\n\n return ds\n\nWindowGenerator.make_dataset = make_dataset", "_____no_output_____" ] ], [ [ "The `WindowGenerator` object holds training, validation and test data. Add properties for accessing them as `tf.data.Datasets` using the above `make_dataset` method. Also add a standard example batch for easy access and plotting:", "_____no_output_____" ] ], [ [ "@property\ndef train(self):\n return self.make_dataset(self.train_df)\n\n@property\ndef val(self):\n return self.make_dataset(self.val_df)\n\n@property\ndef test(self):\n return self.make_dataset(self.test_df)\n\n@property\ndef example(self):\n \"\"\"Get and cache an example batch of `inputs, labels` for plotting.\"\"\"\n result = getattr(self, '_example', None)\n if result is None:\n # No example batch was found, so get one from the `.train` dataset\n result = next(iter(self.train))\n # And cache it for next time\n self._example = result\n return result\n\nWindowGenerator.train = train\nWindowGenerator.val = val\nWindowGenerator.test = test\nWindowGenerator.example = example", "_____no_output_____" ] ], [ [ "Now the `WindowGenerator` object gives you access to the `tf.data.Dataset` objects, so you can easily iterate over the data.\n\nThe `Dataset.element_spec` property tells you the structure, `dtypes` and shapes of the dataset elements.", "_____no_output_____" ] ], [ [ "# Each element is an (inputs, label) pair\nw2.train.element_spec", "_____no_output_____" ] ], [ [ "Iterating over a `Dataset` yields concrete batches:", "_____no_output_____" ] ], [ [ "for example_inputs, example_labels in w2.train.take(1):\n print(f'Inputs shape (batch, time, features): {example_inputs.shape}')\n print(f'Labels shape (batch, time, features): {example_labels.shape}')", "_____no_output_____" ] ], [ [ "## Single step models\n\nThe simplest model you can build on this sort of data is one that predicts a single feature's value, 1 timestep (1h) in the future based only on the current conditions.\n\nSo start by building models to predict the `T (degC)` value 1h into the future.\n\n![Predict the next time step](images/narrow_window.png)\n\nConfigure a `WindowGenerator` object to produce these single-step `(input, label)` pairs:", "_____no_output_____" ] ], [ [ "single_step_window = WindowGenerator(\n input_width=1, label_width=1, shift=1,\n label_columns=['T (degC)'])\nsingle_step_window", "_____no_output_____" ] ], [ [ "The `window` object creates `tf.data.Datasets` from the training, validation, and test sets, allowing you to easily iterate over batches of data.\n", "_____no_output_____" ] ], [ [ "for example_inputs, example_labels in single_step_window.train.take(1):\n print(f'Inputs shape (batch, time, features): {example_inputs.shape}')\n print(f'Labels shape (batch, time, features): {example_labels.shape}')", "_____no_output_____" ] ], [ [ "### Baseline\n\nBefore building a trainable model it would be good to have a performance baseline as a point for comparison with 
the later, more complicated models.\n\nThe first task is to predict the temperature 1h into the future, given the current values of all features. The current values include the current temperature. \n\nSo start with a model that just returns the current temperature as the prediction, predicting \"No change\". This is a reasonable baseline since temperature changes slowly. Of course, this baseline will work less well if you make a prediction further in the future.\n\n![Send the input to the output](images/baseline.png)", "_____no_output_____" ] ], [ [ "class Baseline(tf.keras.Model):\n def __init__(self, label_index=None):\n super().__init__()\n self.label_index = label_index\n\n def call(self, inputs):\n if self.label_index is None:\n return inputs\n result = inputs[:, :, self.label_index]\n return result[:, :, tf.newaxis]", "_____no_output_____" ] ], [ [ "Instantiate and evaluate this model:", "_____no_output_____" ] ], [ [ "baseline = Baseline(label_index=column_indices['T (degC)'])\n\nbaseline.compile(loss=tf.losses.MeanSquaredError(),\n metrics=[tf.metrics.MeanAbsoluteError()])\n\nval_performance = {}\nperformance = {}\nval_performance['Baseline'] = baseline.evaluate(single_step_window.val)\nperformance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "That printed some performance metrics, but those don't give you a feeling for how well the model is doing.\n\nThe `WindowGenerator` has a plot method, but the plots won't be very interesting with only a single sample. So, create a wider `WindowGenerator` that generates windows of 24h of consecutive inputs and labels at a time. \n\nThe `wide_window` doesn't change the way the model operates. The model still makes predictions 1h into the future based on a single input time step. Here the `time` axis acts like the `batch` axis: Each prediction is made independently with no interaction between time steps.", "_____no_output_____" ] ], [ [ "wide_window = WindowGenerator(\n input_width=24, label_width=24, shift=1,\n label_columns=['T (degC)'])\n\nwide_window", "_____no_output_____" ] ], [ [ "This expanded window can be passed directly to the same `baseline` model without any code changes. This is possible because the inputs and labels have the same number of timesteps, and the baseline just forwards the input to the output:\n\n  ![One prediction 1h into the future, every hour.](images/last_window.png)", "_____no_output_____" ] ], [ [ "print('Input shape:', wide_window.example[0].shape)\nprint('Output shape:', baseline(wide_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Plotting the baseline model's predictions, you can see that it is simply the labels, shifted right by 1h.", "_____no_output_____" ] ], [ [ "wide_window.plot(baseline)", "_____no_output_____" ] ], [ [ "In the above plots of three examples, the single-step model is run over the course of 24h. This deserves some explanation:\n\n* The blue \"Inputs\" line shows the input temperature at each time step. The model receives all features; this plot only shows the temperature.\n* The green \"Labels\" dots show the target prediction value. These dots are shown at the prediction time, not the input time. That is why the range of labels is shifted 1 step relative to the inputs.\n* The orange \"Predictions\" crosses are the model's predictions for each output time step. If the model were predicting perfectly, the predictions would land directly on the \"labels\".
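\n\nAs a quick sanity check (an aside, not part of the original tutorial), you can recompute an error of this kind by hand on the cached example batch:\n\n```python\ninputs, labels = wide_window.example\nmae = tf.reduce_mean(tf.abs(baseline(inputs) - labels))\nprint(mae)  # mean absolute error on one training batch; roughly the same ballpark as the metrics above\n```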
", "_____no_output_____" ], [ "### Linear model\n\nThe simplest **trainable** model you can apply to this task is to insert a linear transformation between the input and output. In this case the output from a time step only depends on that step:\n\n![A single step prediction](images/narrow_window.png)\n\nA `layers.Dense` with no `activation` set is a linear model. The layer only transforms the last axis of the data from `(batch, time, inputs)` to `(batch, time, units)`; it is applied independently to every item across the `batch` and `time` axes.", "_____no_output_____" ] ], [ [ "linear = tf.keras.Sequential([\n tf.keras.layers.Dense(units=1)\n])", "_____no_output_____" ], [ "print('Input shape:', single_step_window.example[0].shape)\nprint('Output shape:', linear(single_step_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "This tutorial trains many models, so package the training procedure into a function:", "_____no_output_____" ] ], [ [ "MAX_EPOCHS = 20\n\ndef compile_and_fit(model, window, patience=2):\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',\n patience=patience,\n mode='min')\n\n model.compile(loss=tf.losses.MeanSquaredError(),\n optimizer=tf.optimizers.Adam(),\n metrics=[tf.metrics.MeanAbsoluteError()])\n\n history = model.fit(window.train, epochs=MAX_EPOCHS,\n validation_data=window.val,\n callbacks=[early_stopping])\n return history", "_____no_output_____" ] ], [ [ "Train the model and evaluate its performance:", "_____no_output_____" ] ], [ [ "history = compile_and_fit(linear, single_step_window)\n\nval_performance['Linear'] = linear.evaluate(single_step_window.val)\nperformance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "Like the `baseline` model, the linear model can be called on batches of wide windows. Used this way, the model makes a set of independent predictions on consecutive time steps. The `time` axis acts like another `batch` axis. There are no interactions between the predictions at each time step.\n\n![A single step prediction](images/wide_window.png)", "_____no_output_____" ] ], [ [ "print('Input shape:', wide_window.example[0].shape)\nprint('Output shape:', linear(wide_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Here is the plot of its example predictions on the `wide_window`. Note how in many cases the prediction is clearly better than just returning the input temperature, but in a few cases it's worse:", "_____no_output_____" ] ], [ [ "wide_window.plot(linear)", "_____no_output_____" ] ], [ [ "One advantage of linear models is that they're relatively simple to interpret.\nYou can pull out the layer's weights, and see the weight assigned to each input:", "_____no_output_____" ] ], [ [ "plt.bar(x = range(len(train_df.columns)),\n height=linear.layers[0].kernel[:,0].numpy())\naxis = plt.gca()\naxis.set_xticks(range(len(train_df.columns)))\n_ = axis.set_xticklabels(train_df.columns, rotation=90)", "_____no_output_____" ] ], [ [ "Sometimes the model doesn't even place the most weight on the input `T (degC)`. This is one of the risks of random initialization.
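\n\nIf you want this weight plot to be reproducible across runs, one option (an aside, not from the original tutorial) is to fix the random seeds before building and training the model:\n\n```python\ntf.random.set_seed(0)  # fixes TF/Keras weight initialization\nnp.random.seed(0)      # fixes NumPy-based randomness\n```\n\nEven then, some GPU ops remain nondeterministic, so treat this as reducing, not eliminating, run-to-run variance.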
", "_____no_output_____" ], [ "### Dense\n\nBefore applying models that actually operate on multiple time-steps, it's worth checking the performance of deeper, more powerful, single input step models.\n\nHere's a model similar to the `linear` model, except it stacks several a few `Dense` layers between the input and the output: ", "_____no_output_____" ] ], [ [ "dense = tf.keras.Sequential([\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=1)\n])\n\nhistory = compile_and_fit(dense, single_step_window)\n\nval_performance['Dense'] = dense.evaluate(single_step_window.val)\nperformance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "### Multi-step dense\n\nA single-time-step model has no context for the current values of its inputs. It can't see how the input features are changing over time. To address this issue the model needs access to multiple time steps when making predictions:\n\n![Three time steps are used for each prediction.](images/conv_window.png)\n", "_____no_output_____" ], [ "The `baseline`, `linear` and `dense` models handled each time step independently. Here the model will take multiple time steps as input to produce a single output.\n\nCreate a `WindowGenerator` that will produce batches of the 3h of inputs and, 1h of labels:", "_____no_output_____" ], [ "Note that the `Window`'s `shift` parameter is relative to the end of the two windows.\n", "_____no_output_____" ] ], [ [ "CONV_WIDTH = 3\nconv_window = WindowGenerator(\n input_width=CONV_WIDTH,\n label_width=1,\n shift=1,\n label_columns=['T (degC)'])\n\nconv_window", "_____no_output_____" ], [ "conv_window.plot()\nplt.title(\"Given 3h as input, predict 1h into the future.\")", "_____no_output_____" ] ], [ [ "You could train a `dense` model on a multiple-input-step window by adding a `layers.Flatten` as the first layer of the model:", "_____no_output_____" ] ], [ [ "multi_step_dense = tf.keras.Sequential([\n # Shape: (time, features) => (time*features)\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(units=32, activation='relu'),\n tf.keras.layers.Dense(units=32, activation='relu'),\n tf.keras.layers.Dense(units=1),\n # Add back the time dimension.\n # Shape: (outputs) => (1, outputs)\n tf.keras.layers.Reshape([1, -1]),\n])", "_____no_output_____" ], [ "print('Input shape:', conv_window.example[0].shape)\nprint('Output shape:', multi_step_dense(conv_window.example[0]).shape)", "_____no_output_____" ], [ "history = compile_and_fit(multi_step_dense, conv_window)\n\nIPython.display.clear_output()\nval_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val)\nperformance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0)", "_____no_output_____" ], [ "conv_window.plot(multi_step_dense)", "_____no_output_____" ] ], [ [ "The main down-side of this approach is that the resulting model can only be executed on input windows of exactly this shape. 
", "_____no_output_____" ] ], [ [ "print('Input shape:', wide_window.example[0].shape)\ntry:\n print('Output shape:', multi_step_dense(wide_window.example[0]).shape)\nexcept Exception as e:\n print(f'\\n{type(e).__name__}:{e}')", "_____no_output_____" ] ], [ [ "The convolutional models in the next section fix this problem.", "_____no_output_____" ], [ "### Convolution neural network\n \nA convolution layer (`layers.Conv1D`) also takes multiple time steps as input to each prediction.", "_____no_output_____" ], [ "Below is the **same** model as `multi_step_dense`, re-written with a convolution. \n\nNote the changes:\n* The `layers.Flatten` and the first `layers.Dense` are replaced by a `layers.Conv1D`.\n* The `layers.Reshape` is no longer necessary since the convolution keeps the time axis in its output.", "_____no_output_____" ] ], [ [ "conv_model = tf.keras.Sequential([\n tf.keras.layers.Conv1D(filters=32,\n kernel_size=(CONV_WIDTH,),\n activation='relu'),\n tf.keras.layers.Dense(units=32, activation='relu'),\n tf.keras.layers.Dense(units=1),\n])", "_____no_output_____" ] ], [ [ "Run it on an example batch to see that the model produces outputs with the expected shape:", "_____no_output_____" ] ], [ [ "print(\"Conv model on `conv_window`\")\nprint('Input shape:', conv_window.example[0].shape)\nprint('Output shape:', conv_model(conv_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Train and evaluate it on the ` conv_window` and it should give performance similar to the `multi_step_dense` model.", "_____no_output_____" ] ], [ [ "history = compile_and_fit(conv_model, conv_window)\n\nIPython.display.clear_output()\nval_performance['Conv'] = conv_model.evaluate(conv_window.val)\nperformance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "The difference between this `conv_model` and the `multi_step_dense` model is that the `conv_model` can be run on inputs of any length. The convolutional layer is applied to a sliding window of inputs:\n\n![Executing a convolutional model on a sequence](images/wide_conv_window.png)\n\nIf you run it on wider input, it produces wider output:", "_____no_output_____" ] ], [ [ "print(\"Wide window\")\nprint('Input shape:', wide_window.example[0].shape)\nprint('Labels shape:', wide_window.example[1].shape)\nprint('Output shape:', conv_model(wide_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Note that the output is shorter than the input. To make training or plotting work, you need the labels, and prediction to have the same length. So build a `WindowGenerator` to produce wide windows with a few extra input time steps so the label and prediction lengths match: ", "_____no_output_____" ] ], [ [ "LABEL_WIDTH = 24\nINPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)\nwide_conv_window = WindowGenerator(\n input_width=INPUT_WIDTH,\n label_width=LABEL_WIDTH,\n shift=1,\n label_columns=['T (degC)'])\n\nwide_conv_window", "_____no_output_____" ], [ "print(\"Wide conv window\")\nprint('Input shape:', wide_conv_window.example[0].shape)\nprint('Labels shape:', wide_conv_window.example[1].shape)\nprint('Output shape:', conv_model(wide_conv_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Now you can plot the model's predictions on a wider window. Note the 3 input time steps before the first prediction. 
Every prediction here is based on the 3 preceding timesteps:", "_____no_output_____" ] ], [ [ "wide_conv_window.plot(conv_model)", "_____no_output_____" ] ], [ [ "### Recurrent neural network\n\nA Recurrent Neural Network (RNN) is a type of neural network well-suited to time series data. RNNs process a time series step-by-step, maintaining an internal state from time-step to time-step.\n\nFor more details, read the [text generation tutorial](https://www.tensorflow.org/tutorials/text/text_generation) or the [RNN guide](https://www.tensorflow.org/guide/keras/rnn). \n\nIn this tutorial, you will use an RNN layer called Long Short Term Memory ([LSTM](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/LSTM)).", "_____no_output_____" ], [ "An important constructor argument for all keras RNN layers is the `return_sequences` argument. This setting can configure the layer in one of two ways.\n\n1. If `False`, the default, the layer only returns the output of the final timestep, giving the model time to warm up its internal state before making a single prediction: \n\n![An lstm warming up and making a single prediction](images/lstm_1_window.png)\n\n2. If `True` the layer returns an output for each input. This is useful for:\n * Stacking RNN layers. \n * Training a model on multiple timesteps simultaneously.\n\n![An lstm making a prediction after every timestep](images/lstm_many_window.png)", "_____no_output_____" ] ], [ [ "lstm_model = tf.keras.models.Sequential([\n # Shape [batch, time, features] => [batch, time, lstm_units]\n tf.keras.layers.LSTM(32, return_sequences=True),\n # Shape => [batch, time, features]\n tf.keras.layers.Dense(units=1)\n])", "_____no_output_____" ] ], [ [ "With `return_sequences=True` the model can be trained on 24h of data at a time.\n\nNote: This will give a pessimistic view of the model's performance. 
On the first timestep the model has no access to previous steps, and so can't do any better than the simple `linear` and `dense` models shown earlier.", "_____no_output_____" ] ], [ [ "print('Input shape:', wide_window.example[0].shape)\nprint('Output shape:', lstm_model(wide_window.example[0]).shape)", "_____no_output_____" ], [ "history = compile_and_fit(lstm_model, wide_window)\n\nIPython.display.clear_output()\nval_performance['LSTM'] = lstm_model.evaluate(wide_window.val)\nperformance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)", "_____no_output_____" ], [ "wide_window.plot(lstm_model)", "_____no_output_____" ] ], [ [ "### Performance", "_____no_output_____" ], [ "With this dataset, each of the models typically does slightly better than the one before it.", "_____no_output_____" ] ], [ [ "x = np.arange(len(performance))\nwidth = 0.3\nmetric_name = 'mean_absolute_error'\nmetric_index = lstm_model.metrics_names.index('mean_absolute_error')\nval_mae = [v[metric_index] for v in val_performance.values()]\ntest_mae = [v[metric_index] for v in performance.values()]\n\nplt.ylabel('mean_absolute_error [T (degC), normalized]')\nplt.bar(x - 0.17, val_mae, width, label='Validation')\nplt.bar(x + 0.17, test_mae, width, label='Test')\nplt.xticks(ticks=x, labels=performance.keys(),\n rotation=45)\n_ = plt.legend()", "_____no_output_____" ], [ "for name, value in performance.items():\n print(f'{name:12s}: {value[1]:0.4f}')", "_____no_output_____" ] ], [ [ "### Multi-output models\n\nThe models so far all predicted a single output feature, `T (degC)`, for a single time step.\n\nAll of these models can be converted to predict multiple features just by changing the number of units in the output layer and adjusting the training windows to include all features in the `labels`.\n", "_____no_output_____" ] ], [ [ "single_step_window = WindowGenerator(\n # `WindowGenerator` returns all features as labels if you \n # don't set the `label_columns` argument.\n input_width=1, label_width=1, shift=1)\n\nwide_window = WindowGenerator(\n input_width=24, label_width=24, shift=1)\n\nfor example_inputs, example_labels in wide_window.train.take(1):\n print(f'Inputs shape (batch, time, features): {example_inputs.shape}')\n print(f'Labels shape (batch, time, features): {example_labels.shape}')", "_____no_output_____" ] ], [ [ "Note above that the `features` axis of the labels now has the same depth as the inputs, instead of 1.", "_____no_output_____" ], [ "#### Baseline\n\nThe same baseline model can be used here, but this time repeating all features instead of selecting a specific `label_index`.", "_____no_output_____" ] ], [ [ "baseline = Baseline()\nbaseline.compile(loss=tf.losses.MeanSquaredError(),\n metrics=[tf.metrics.MeanAbsoluteError()])", "_____no_output_____" ], [ "val_performance = {}\nperformance = {}\nval_performance['Baseline'] = baseline.evaluate(wide_window.val)\nperformance['Baseline'] = baseline.evaluate(wide_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "#### Dense", "_____no_output_____" ] ], [ [ "dense = tf.keras.Sequential([\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=64, activation='relu'),\n tf.keras.layers.Dense(units=num_features)\n])", "_____no_output_____" ], [ "history = compile_and_fit(dense, single_step_window)\n\nIPython.display.clear_output()\nval_performance['Dense'] = dense.evaluate(single_step_window.val)\nperformance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)", "_____no_output_____" ] ], [ [ "#### 
RNN\n", "_____no_output_____" ] ], [ [ "%%time\nwide_window = WindowGenerator(\n input_width=24, label_width=24, shift=1)\n\nlstm_model = tf.keras.models.Sequential([\n # Shape [batch, time, features] => [batch, time, lstm_units]\n tf.keras.layers.LSTM(32, return_sequences=True),\n # Shape => [batch, time, features]\n tf.keras.layers.Dense(units=num_features)\n])\n\nhistory = compile_and_fit(lstm_model, wide_window)\n\nIPython.display.clear_output()\nval_performance['LSTM'] = lstm_model.evaluate(wide_window.val)\nperformance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)\n\nprint()", "_____no_output_____" ] ], [ [ "<a id=\"residual\"></a>\n\n#### Advanced: Residual connections\n\nThe `Baseline` model from earlier took advantage of the fact that the sequence doesn't change drastically from time step to time step. Every model trained in this tutorial so far was randomly initialized, and then had to learn that the output is a small change from the previous time step.\n\nWhile you can get around this issue with careful initialization, it's simpler to build this into the model structure.\n\nIt's common in time series analysis to build models that, instead of predicting the next value, predict how the value will change in the next timestep.\nSimilarly, \"Residual networks\" or \"ResNets\" in deep learning refer to architectures where each layer adds to the model's accumulating result.\n\nThat is how you take advantage of the knowledge that the change should be small.\n\n![A model with a residual connection](images/residual.png)\n\nEssentially this initializes the model to match the `Baseline`. For this task it helps models converge faster, with slightly better performance.", "_____no_output_____" ], [ "This approach can be used in conjunction with any model discussed in this tutorial. \n\nHere it is being applied to the LSTM model; note the use of the `tf.initializers.zeros` to ensure that the initial predicted changes are small, and don't overpower the residual connection. 
There are no symmetry-breaking concerns for the gradients here, since the `zeros` are only used on the last layer.", "_____no_output_____" ] ], [ [ "class ResidualWrapper(tf.keras.Model):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def call(self, inputs, *args, **kwargs):\n delta = self.model(inputs, *args, **kwargs)\n\n # The prediction for each timestep is the input\n # from the previous time step plus the delta\n # calculated by the model.\n return inputs + delta", "_____no_output_____" ], [ "%%time\nresidual_lstm = ResidualWrapper(\n tf.keras.Sequential([\n tf.keras.layers.LSTM(32, return_sequences=True),\n tf.keras.layers.Dense(\n num_features,\n # The predicted deltas should start small\n # So initialize the output layer with zeros\n kernel_initializer=tf.initializers.zeros)\n]))\n\nhistory = compile_and_fit(residual_lstm, wide_window)\n\nIPython.display.clear_output()\nval_performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.val)\nperformance['Residual LSTM'] = residual_lstm.evaluate(wide_window.test, verbose=0)\nprint()", "_____no_output_____" ] ], [ [ "#### Performance", "_____no_output_____" ], [ "Here is the overall performance for these multi-output models.", "_____no_output_____" ] ], [ [ "x = np.arange(len(performance))\nwidth = 0.3\n\nmetric_name = 'mean_absolute_error'\nmetric_index = lstm_model.metrics_names.index('mean_absolute_error')\nval_mae = [v[metric_index] for v in val_performance.values()]\ntest_mae = [v[metric_index] for v in performance.values()]\n\nplt.bar(x - 0.17, val_mae, width, label='Validation')\nplt.bar(x + 0.17, test_mae, width, label='Test')\nplt.xticks(ticks=x, labels=performance.keys(),\n rotation=45)\nplt.ylabel('MAE (average over all outputs)')\n_ = plt.legend()", "_____no_output_____" ], [ "for name, value in performance.items():\n print(f'{name:15s}: {value[1]:0.4f}')", "_____no_output_____" ] ], [ [ "The above performances are averaged across all model outputs.", "_____no_output_____" ], [ "## Multi-step models\n\nBoth the single-output and multiple-output models in the previous sections made **single time step predictions**, 1h into the future.\n\nThis section looks at how to expand these models to make **multiple time step predictions**.\n\nIn a multi-step prediction, the model needs to learn to predict a range of future values. Thus, unlike a single step model, where only a single future point is predicted, a multi-step model predicts a sequence of the future values.\n\nThere are two rough approaches to this:\n\n1. Single shot predictions where the entire time series is predicted at once.\n2. Autoregressive predictions where the model only makes single step predictions and its output is fed back as its input.\n\nIn this section all the models will predict **all the features across all output time steps**.\n", "_____no_output_____" ], [ "For the multi-step model, the training data again consists of hourly samples. 
However, here, the models will learn to predict 24h of the future, given 24h of the past.\n\nHere is a `Window` object that generates these slices from the dataset:", "_____no_output_____" ] ], [ [ "OUT_STEPS = 24\nmulti_window = WindowGenerator(input_width=24,\n label_width=OUT_STEPS,\n shift=OUT_STEPS)\n\nmulti_window.plot()\nmulti_window", "_____no_output_____" ] ], [ [ "### Baselines", "_____no_output_____" ], [ "A simple baseline for this task is to repeat the last input time step for the required number of output timesteps:\n\n![Repeat the last input, for each output step](images/multistep_last.png)", "_____no_output_____" ] ], [ [ "class MultiStepLastBaseline(tf.keras.Model):\n def call(self, inputs):\n return tf.tile(inputs[:, -1:, :], [1, OUT_STEPS, 1])\n\nlast_baseline = MultiStepLastBaseline()\nlast_baseline.compile(loss=tf.losses.MeanSquaredError(),\n metrics=[tf.metrics.MeanAbsoluteError()])\n\nmulti_val_performance = {}\nmulti_performance = {}\n\nmulti_val_performance['Last'] = last_baseline.evaluate(multi_window.val)\nmulti_performance['Last'] = last_baseline.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(last_baseline)", "_____no_output_____" ] ], [ [ "Since this task is to predict 24h given 24h, another simple approach is to repeat the previous day, assuming tomorrow will be similar:\n\n![Repeat the previous day](images/multistep_repeat.png)", "_____no_output_____" ] ], [ [ "class RepeatBaseline(tf.keras.Model):\n def call(self, inputs):\n return inputs\n\nrepeat_baseline = RepeatBaseline()\nrepeat_baseline.compile(loss=tf.losses.MeanSquaredError(),\n metrics=[tf.metrics.MeanAbsoluteError()])\n\nmulti_val_performance['Repeat'] = repeat_baseline.evaluate(multi_window.val)\nmulti_performance['Repeat'] = repeat_baseline.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(repeat_baseline)", "_____no_output_____" ] ], [ [ "### Single-shot models\n\nOne high-level approach to this problem is to use a \"single-shot\" model, where the model makes the entire sequence prediction in a single step.\n\nThis can be implemented efficiently as a `layers.Dense` with `OUT_STEPS*features` output units. The model just needs to reshape that output to the required `(OUT_STEPS, features)`.", "_____no_output_____" ], [ "#### Linear\n\nA simple linear model based on the last input time step does better than either baseline, but is underpowered. The model needs to predict `OUT_STEPS` time steps from a single input time step with a linear projection. 
It can only capture a low-dimensional slice of the behavior, likely based mainly on the time of day and time of year.\n\n![Predict all timesteps from the last time-step](images/multistep_dense.png)", "_____no_output_____" ] ], [ [ "multi_linear_model = tf.keras.Sequential([\n # Take the last time-step.\n # Shape [batch, time, features] => [batch, 1, features]\n tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),\n # Shape => [batch, 1, out_steps*features]\n tf.keras.layers.Dense(OUT_STEPS*num_features,\n kernel_initializer=tf.initializers.zeros),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape([OUT_STEPS, num_features])\n])\n\nhistory = compile_and_fit(multi_linear_model, multi_window)\n\nIPython.display.clear_output()\nmulti_val_performance['Linear'] = multi_linear_model.evaluate(multi_window.val)\nmulti_performance['Linear'] = multi_linear_model.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(multi_linear_model)", "_____no_output_____" ] ], [ [ "#### Dense\n\nAdding a `layers.Dense` between the input and output gives the linear model more power, but is still only based on a single input timestep.", "_____no_output_____" ] ], [ [ "multi_dense_model = tf.keras.Sequential([\n # Take the last time step.\n # Shape [batch, time, features] => [batch, 1, features]\n tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),\n # Shape => [batch, 1, dense_units]\n tf.keras.layers.Dense(512, activation='relu'),\n # Shape => [batch, out_steps*features]\n tf.keras.layers.Dense(OUT_STEPS*num_features,\n kernel_initializer=tf.initializers.zeros),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape([OUT_STEPS, num_features])\n])\n\nhistory = compile_and_fit(multi_dense_model, multi_window)\n\nIPython.display.clear_output()\nmulti_val_performance['Dense'] = multi_dense_model.evaluate(multi_window.val)\nmulti_performance['Dense'] = multi_dense_model.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(multi_dense_model)", "_____no_output_____" ] ], [ [ "#### CNN", "_____no_output_____" ], [ "A convolutional model makes predictions based on a fixed-width history, which may lead to better performance than the dense model since it can see how things are changing over time:\n\n![A convolutional model sees how things change over time](images/multistep_conv.png)", "_____no_output_____" ] ], [ [ "CONV_WIDTH = 3\nmulti_conv_model = tf.keras.Sequential([\n # Shape [batch, time, features] => [batch, CONV_WIDTH, features]\n tf.keras.layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :]),\n # Shape => [batch, 1, conv_units]\n tf.keras.layers.Conv1D(256, activation='relu', kernel_size=(CONV_WIDTH)),\n # Shape => [batch, 1, out_steps*features]\n tf.keras.layers.Dense(OUT_STEPS*num_features,\n kernel_initializer=tf.initializers.zeros),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape([OUT_STEPS, num_features])\n])\n\nhistory = compile_and_fit(multi_conv_model, multi_window)\n\nIPython.display.clear_output()\n\nmulti_val_performance['Conv'] = multi_conv_model.evaluate(multi_window.val)\nmulti_performance['Conv'] = multi_conv_model.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(multi_conv_model)", "_____no_output_____" ] ], [ [ "#### RNN", "_____no_output_____" ], [ "A recurrent model can learn to use a long history of inputs, if it's relevant to the predictions the model is making. 
Here the model will accumulate internal state for 24h, before making a single prediction for the next 24h.\n\nIn this single-shot format, the LSTM only needs to produce an output at the last time step, so set `return_sequences=False`.\n\n![The lstm accumulates state over the input window, and makes a single prediction for the next 24h](images/multistep_lstm.png)\n", "_____no_output_____" ] ], [ [ "multi_lstm_model = tf.keras.Sequential([\n # Shape [batch, time, features] => [batch, lstm_units]\n # Adding more `lstm_units` just overfits more quickly.\n tf.keras.layers.LSTM(32, return_sequences=False),\n # Shape => [batch, out_steps*features]\n tf.keras.layers.Dense(OUT_STEPS*num_features,\n kernel_initializer=tf.initializers.zeros),\n # Shape => [batch, out_steps, features]\n tf.keras.layers.Reshape([OUT_STEPS, num_features])\n])\n\nhistory = compile_and_fit(multi_lstm_model, multi_window)\n\nIPython.display.clear_output()\n\nmulti_val_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.val)\nmulti_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(multi_lstm_model)", "_____no_output_____" ] ], [ [ "### Advanced: Autoregressive model\n\nThe above models all predict the entire output sequence in a single step.\n\nIn some cases it may be helpful for the model to decompose this prediction into individual time steps. Then each model's output can be fed back into itself at each step and predictions can be made conditioned on the previous one, like in the classic [Generating Sequences With Recurrent Neural Networks](https://arxiv.org/abs/1308.0850).\n\nOne clear advantage to this style of model is that it can be set up to produce output with a varying length.\n\nYou could take any of the single-step multi-output models trained in the first half of this tutorial and run it in an autoregressive feedback loop, but here you'll focus on building a model that's been explicitly trained to do that.\n\n![Feedback a model's output to its input](images/multistep_autoregressive.png)\n", "_____no_output_____" ], [ "#### RNN\n\nThis tutorial only builds an autoregressive RNN model, but this pattern could be applied to any model that was designed to output a single timestep.\n\nThe model will have the same basic form as the single-step `LSTM` models: An `LSTM` followed by a `layers.Dense` that converts the `LSTM` outputs to model predictions.\n\nA `layers.LSTM` is a `layers.LSTMCell` wrapped in the higher level `layers.RNN` that manages the state and sequence results for you (See [Keras RNNs](https://www.tensorflow.org/guide/keras/rnn) for details).\n\nIn this case the model has to manually manage the inputs for each step so it uses `layers.LSTMCell` directly for the lower level, single time step interface.", "_____no_output_____" ] ], [ [ "class FeedBack(tf.keras.Model):\n def __init__(self, units, out_steps):\n super().__init__()\n self.out_steps = out_steps\n self.units = units\n self.lstm_cell = tf.keras.layers.LSTMCell(units)\n # Also wrap the LSTMCell in an RNN to simplify the `warmup` method.\n self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True)\n self.dense = tf.keras.layers.Dense(num_features)", "_____no_output_____" ], [ "feedback_model = FeedBack(units=32, out_steps=OUT_STEPS)", "_____no_output_____" ] ], [ [ "The first method this model needs is a `warmup` method to initialize its internal state based on the inputs. Once trained, this state will capture the relevant parts of the input history. 
This is equivalent to the single-step `LSTM` model from earlier:", "_____no_output_____" ] ], [ [ "def warmup(self, inputs):\n # inputs.shape => (batch, time, features)\n # x.shape => (batch, lstm_units)\n x, *state = self.lstm_rnn(inputs)\n\n # prediction.shape => (batch, features)\n prediction = self.dense(x)\n return prediction, state\n\nFeedBack.warmup = warmup", "_____no_output_____" ] ], [ [ "This method returns a single time-step prediction, and the internal state of the LSTM:", "_____no_output_____" ] ], [ [ "prediction, state = feedback_model.warmup(multi_window.example[0])\nprediction.shape", "_____no_output_____" ] ], [ [ "With the `RNN`'s state and an initial prediction, you can now continue iterating the model, feeding the prediction at each step back in as the input.\n\nThe simplest approach to collecting the output predictions is to use a python list, and `tf.stack` after the loop.", "_____no_output_____" ], [ "Note: Stacking a python list like this only works with eager-execution, using `Model.compile(..., run_eagerly=True)` for training, or with a fixed length output. For a dynamic output length you would need to use a `tf.TensorArray` instead of a python list, and `tf.range` instead of the python `range`.", "_____no_output_____" ] ], [ [ "def call(self, inputs, training=None):\n # Collect the predictions in a python list, to be stacked after the loop.\n predictions = []\n # Initialize the lstm state\n prediction, state = self.warmup(inputs)\n\n # Insert the first prediction\n predictions.append(prediction)\n\n # Run the rest of the prediction steps\n for n in range(1, self.out_steps):\n # Use the last prediction as input.\n x = prediction\n # Execute one lstm step.\n x, state = self.lstm_cell(x, states=state,\n training=training)\n # Convert the lstm output to a prediction.\n prediction = self.dense(x)\n # Add the prediction to the output\n predictions.append(prediction)\n\n # predictions.shape => (time, batch, features)\n predictions = tf.stack(predictions)\n # predictions.shape => (batch, time, features)\n predictions = tf.transpose(predictions, [1, 0, 2])\n return predictions\n\nFeedBack.call = call", "_____no_output_____" ] ], [ [ "Test run this model on the example inputs:", "_____no_output_____" ] ], [ [ "print('Output shape (batch, time, features): ', feedback_model(multi_window.example[0]).shape)", "_____no_output_____" ] ], [ [ "Now train the model:", "_____no_output_____" ] ], [ [ "history = compile_and_fit(feedback_model, multi_window)\n\nIPython.display.clear_output()\n\nmulti_val_performance['AR LSTM'] = feedback_model.evaluate(multi_window.val)\nmulti_performance['AR LSTM'] = feedback_model.evaluate(multi_window.test, verbose=0)\nmulti_window.plot(feedback_model)", "_____no_output_____" ] ], [ [ "### Performance", "_____no_output_____" ], [ "There are clearly diminishing returns as a function of model complexity on this problem.", "_____no_output_____" ] ], [ [ "x = np.arange(len(multi_performance))\nwidth = 0.3\n\n\nmetric_name = 'mean_absolute_error'\nmetric_index = lstm_model.metrics_names.index('mean_absolute_error')\nval_mae = [v[metric_index] for v in multi_val_performance.values()]\ntest_mae = [v[metric_index] for v in multi_performance.values()]\n\nplt.bar(x - 0.17, val_mae, width, label='Validation')\nplt.bar(x + 0.17, test_mae, width, label='Test')\nplt.xticks(ticks=x, labels=multi_performance.keys(),\n rotation=45)\nplt.ylabel('MAE (average over all times and outputs)')\n_ = plt.legend()", "_____no_output_____" ], [ "for name, value in multi_performance.items():\n print(f'{name:8s}: {value[1]:0.4f}')", "_____no_output_____" ] ], [ [ "The metrics for the multi-output models 
in the first half of this tutorial show the performance averaged across all output features. These performances are similar, but they are also averaged across output timesteps. ", "_____no_output_____" ] ], [ [ "for name, value in multi_performance.items():\n print(f'{name:8s}: {value[1]:0.4f}')", "_____no_output_____" ] ], [ [ "The gains achieved going from a dense model to convolutional and recurrent models are only a few percent (if any), and the autoregressive model performed clearly worse. So these more complex approaches may not be worthwhile on **this** problem, but there was no way to know without trying, and these models could be helpful for **your** problem.", "_____no_output_____" ], [ "## Next steps\n\nThis tutorial was a quick introduction to time series forecasting using TensorFlow.\n\n* For further understanding, see:\n * Chapter 15 of [Hands-on Machine Learning with Scikit-Learn, Keras, and TensorFlow](https://www.oreilly.com/library/view/hands-on-machine-learning/9781492032632/), 2nd Edition \n * Chapter 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).\n * Lesson 8 of [Udacity's intro to TensorFlow for deep learning](https://www.udacity.com/course/intro-to-tensorflow-for-deep-learning--ud187), and the [exercise notebooks](https://github.com/tensorflow/examples/tree/master/courses/udacity_intro_to_tensorflow_for_deep_learning) \n* Also remember that you can implement any [classical time series model](https://otexts.com/fpp2/index.html) in TensorFlow; this tutorial just focuses on TensorFlow's built-in functionality.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d06ece895db16b3f8607da08f5bc5104e7b4a998
17,697
ipynb
Jupyter Notebook
Word_Embeddings_MySQL.ipynb
krokodilj/word_embedding_storage
206c14cee1af0768b6e187167333dcccf0095e9d
[ "MIT" ]
37
2018-09-02T23:55:19.000Z
2022-01-05T00:51:08.000Z
Word_Embeddings_MySQL.ipynb
krokodilj/word_embedding_storage
206c14cee1af0768b6e187167333dcccf0095e9d
[ "MIT" ]
6
2018-09-03T06:56:07.000Z
2022-02-02T09:57:42.000Z
Word_Embeddings_MySQL.ipynb
krokodilj/word_embedding_storage
206c14cee1af0768b6e187167333dcccf0095e9d
[ "MIT" ]
7
2018-09-03T08:14:08.000Z
2021-08-25T10:18:33.000Z
32.176364
301
0.519241
[ [ [ "# Word Embeddings in MySQL\n\nThis example uses the official MySQL Connector within Python3 to store and retrieve various amounts of Word Embeddings.\n\nWe will use a local MySQL database running as a Docker Container for testing purposes. To start the database run:\n\n```\ndocker run -ti --rm --name ohmysql -e MYSQL_ROOT_PASSWORD=mikolov -e MYSQL_DATABASE=embeddings -p 3306:3306 mysql:5.7\n```", "_____no_output_____" ] ], [ [ "import mysql.connector\nimport io\nimport time\nimport numpy\nimport plotly\nfrom tqdm import tqdm_notebook as tqdm", "_____no_output_____" ] ], [ [ "# Dummy Embeddings\n\nFor testing purposes we will use randomly generated numpy arrays as dummy embeddings.", "_____no_output_____" ] ], [ [ "def embeddings(n=1000, dim=300):\n \"\"\"\n Yield n tuples of random numpy arrays of *dim* length, keyed by their index (0 to n-1).\n \"\"\"\n idx = 0\n while idx < n:\n yield (str(idx), numpy.random.rand(dim))\n idx += 1", "_____no_output_____" ] ], [ [ "# Conversion Functions\n\nSince we can't just save a NumPy array into the database, we will convert it into a BLOB.", "_____no_output_____" ] ], [ [ "def adapt_array(array):\n \"\"\"\n Using the numpy.save function to save a binary version of the array,\n and BytesIO to catch the stream of data and convert it into a BLOB.\n \"\"\"\n out = io.BytesIO()\n numpy.save(out, array)\n out.seek(0)\n\n return out.read()\n\ndef convert_array(blob):\n \"\"\"\n Using BytesIO to convert the binary version of the array back into a numpy array.\n \"\"\"\n out = io.BytesIO(blob)\n out.seek(0)\n\n return numpy.load(out)", "_____no_output_____" ], [ "connection = mysql.connector.connect(user='root', password='mikolov',\n host='127.0.0.1',\n database='embeddings')\n\ncursor = connection.cursor()\ncursor.execute('CREATE TABLE IF NOT EXISTS `embeddings` (`key` TEXT, `embedding` BLOB);')\nconnection.commit()", "_____no_output_____" ], [ "%%time\nfor key, emb in embeddings():\n arr = adapt_array(emb)\n cursor.execute('INSERT INTO `embeddings` (`key`, `embedding`) VALUES (%s, %s);', (key, arr))\n connection.commit()", "CPU times: user 1.58 s, sys: 170 ms, total: 1.75 s\nWall time: 1min 55s\n" ], [ "%%time\nfor key, _ in embeddings():\n cursor.execute('SELECT embedding FROM `embeddings` WHERE `key`=%s;', (key,))\n data = cursor.fetchone()\n arr = convert_array(data[0])", "CPU times: user 393 ms, sys: 42.7 ms, total: 435 ms\nWall time: 1.26 s\n" ] ], [ [ "# Sample some data\n\nTo test the I/O we will write and read some data from the database. This may take a while.
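\n\nBefore running the benchmark, it is worth sanity-checking that the BLOB round trip is lossless. A minimal sketch, reusing the helper functions defined above:\n\n```python\ntest = numpy.random.rand(300)\nassert numpy.allclose(test, convert_array(adapt_array(test)))\n```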
", "_____no_output_____" ] ], [ [ "write_times = []\nread_times = []\ncounts = [500, 1000, 2000, 3000, 4000, 5000]\n\nfor c in counts:\n print(c)\n cursor.execute('DROP TABLE IF EXISTS `embeddings`;')\n cursor.execute('CREATE TABLE IF NOT EXISTS `embeddings` (`key` TEXT, `embedding` BLOB);')\n connection.commit()\n\n start_time_write = time.time()\n for key, emb in tqdm(embeddings(c), total=c):\n arr = adapt_array(emb)\n cursor.execute('INSERT INTO `embeddings` (`key`, `embedding`) VALUES (%s, %s);', (key, arr))\n connection.commit()\n write_times.append(time.time() - start_time_write)\n \n start_time_read = time.time()\n for key, emb in embeddings(c):\n cursor.execute('SELECT embedding FROM `embeddings` WHERE `key`=%s;', (key,))\n data = cursor.fetchone()\n arr = convert_array(data[0])\n read_times.append(time.time() - start_time_read)\n \nprint('DONE')", "500\n" ] ], [ [ "# Results", "_____no_output_____" ] ], [ [ "plotly.offline.init_notebook_mode(connected=True)\ntrace = plotly.graph_objs.Scatter(\n y = write_times,\n x = counts,\n mode = 'lines+markers'\n)\nlayout = plotly.graph_objs.Layout(title=\"MySQL Write Times\",\n yaxis=dict(title='Time in Seconds'),\n xaxis=dict(title='Embedding Count'))\ndata = [trace]\nfig = plotly.graph_objs.Figure(data=data, layout=layout)\nplotly.offline.iplot(fig, filename='jupyter-scatter-write')", "_____no_output_____" ], [ "plotly.offline.init_notebook_mode(connected=True)\ntrace = plotly.graph_objs.Scatter(\n y = read_times,\n x = counts,\n mode = 'lines+markers'\n)\nlayout = plotly.graph_objs.Layout(title=\"MySQL Read Times\",\n yaxis=dict(title='Time in Seconds'),\n xaxis=dict(title='Embedding Count'))\ndata = [trace]\nfig = plotly.graph_objs.Figure(data=data, layout=layout)\nplotly.offline.iplot(fig, filename='jupyter-scatter-read')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d06ecf1a83e1e9319afe6938aeaf0a6e1f49932c
396,509
ipynb
Jupyter Notebook
dedupe/Product Data/product_amazon_walmart.ipynb
MauriceStr/StandardizedDI_Clients_SDASP
a63fd4978f3264457116ec4149ab43c50d43661f
[ "MIT" ]
null
null
null
dedupe/Product Data/product_amazon_walmart.ipynb
MauriceStr/StandardizedDI_Clients_SDASP
a63fd4978f3264457116ec4149ab43c50d43661f
[ "MIT" ]
null
null
null
dedupe/Product Data/product_amazon_walmart.ipynb
MauriceStr/StandardizedDI_Clients_SDASP
a63fd4978f3264457116ec4149ab43c50d43661f
[ "MIT" ]
null
null
null
112.933352
7,867
0.315229
[ [ [ "from future.builtins import next\nimport os\nimport csv\nimport re\nimport logging\nimport optparse\n\nimport dedupe\nfrom unidecode import unidecode\n\nimport pandas as pd", "_____no_output_____" ], [ "pd.options.display.float_format = '{:20,.2f}'.format\npd.set_option('display.max_rows', 5000)\npd.set_option('display.max_columns', 5000)\npd.set_option('display.width', 1000)\npd.set_option('display.max_colwidth', -1)", "_____no_output_____" ], [ "amazon_walmart_all_path = (r'/home/ubuntu/jupyter/ServerX/1_Standard Data Integration/Sample Datasets'\n r'/Processed Data/product_samples/amazon_walmart_all.csv')", "_____no_output_____" ] ], [ [ "## Prepare df and dict corpus", "_____no_output_____" ] ], [ [ "fields_of_interest = [\n 'Id',\n 'name',\n 'producer',\n 'description',\n 'price',\n 'category',\n 'source'\n]", "_____no_output_____" ], [ "amazon_walmart_all_df = pd.read_csv(amazon_walmart_all_path, sep=',', quotechar='\"')[fields_of_interest]", "_____no_output_____" ], [ "amazon_walmart_all_df.dtypes", "_____no_output_____" ], [ "x = amazon_walmart_all_df[amazon_walmart_all_df['category'].isnull()]\nx.head(1)", "_____no_output_____" ], [ "z = amazon_walmart_all_df[amazon_walmart_all_df['producer'].isnull()]\nz.head(1)", "_____no_output_____" ], [ "y = amazon_walmart_all_df[amazon_walmart_all_df['price'].isnull()]\ny.head(1)", "_____no_output_____" ], [ "h = amazon_walmart_all_df[amazon_walmart_all_df['description'].isnull()]\nh.head()", "_____no_output_____" ], [ "amazon_walmart_all_df[amazon_walmart_all_df['name'].isnull()]", "_____no_output_____" ], [ "description_corpus = amazon_walmart_all_df['description'].to_list()\ndescription_corpus = [x for x in description_corpus if str(x) != 'nan']", "_____no_output_____" ], [ "description_corpus[1]", "_____no_output_____" ], [ "category_corpus = amazon_walmart_all_df.drop_duplicates().to_dict('records')", "_____no_output_____" ], [ "categories = list(amazon_walmart_all_df['category'].unique())\ncategories = [x for x in categories if str(x) != 'nan']", "_____no_output_____" ], [ "producer_corpus = amazon_walmart_all_df.drop_duplicates().to_dict('records')", "_____no_output_____" ], [ "producers = list(amazon_walmart_all_df['producer'].unique())\nproducers = [x for x in producers if str(x) != 'nan']", "_____no_output_____" ], [ "producers.sort()\nproducers", "_____no_output_____" ], [ "input_file = amazon_walmart_all_path\noutput_file = 'amazon_walmart_output3.csv'\nsettings_file = 'amazon_walmart_learned_settings3'\ntraining_file = 'amazon_walmart_training3.json'", "_____no_output_____" ], [ "float('1.25')", "_____no_output_____" ], [ "def preProcess(key, column):\n \n try : # python 2/3 string differences\n column = column.decode('utf8')\n except AttributeError:\n pass\n column = unidecode(column)\n column = re.sub(' +', ' ', column)\n column = re.sub('\\n', ' ', column)\n column = column.strip().strip('\"').strip(\"'\").lower().strip()\n column = column.lower()\n if not column:\n return None\n \n if key == 'price':\n column = float(column) \n return column\n\ndef readData(filename):\n \n data_d = {}\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n clean_row = [(k, preProcess(k, v)) for (k, v) in row.items()]\n row_id = int(row['Id'])\n data_d[row_id] = dict(clean_row)\n\n return data_d ", "_____no_output_____" ], [ "print('importing data ...')\ndata_d = readData(input_file)", "importing data ...\n" ], [ "fields = [\n {'field' : 'name', 'type': 'Name'},\n {'field' : 'name', 'type': 'String'},\n {'field' 
: 'description', \n 'type': 'Text',\n 'corpus': description_corpus,\n 'has_missing': True\n },\n {'field' : 'category', \n 'type': 'FuzzyCategorical',\n 'categories': categories,\n 'corpus': category_corpus,\n 'has missing' : True\n }, \n {'field' : 'producer', \n 'type': 'FuzzyCategorical',\n 'categories': producers,\n 'corpus': producer_corpus,\n 'has_missing': True\n },\n {'field' : 'price', \n 'type': 'Price',\n 'has_missing': True\n },\n]", "_____no_output_____" ], [ "deduper = dedupe.Dedupe(fields)", "_____no_output_____" ], [ "# took about 20 min with blocked proportion 0.8\ndeduper.prepare_training(data_d)", "INFO:dedupe.canopy_index:Removing stop word with\nINFO:dedupe.canopy_index:Removing stop word 7\nINFO:dedupe.canopy_index:Removing stop word is\nINFO:dedupe.canopy_index:Removing stop word than\nINFO:dedupe.canopy_index:Removing stop word get\nINFO:dedupe.canopy_index:Removing stop word quickly\nINFO:dedupe.canopy_index:Removing stop word that\nINFO:dedupe.canopy_index:Removing stop word your\nINFO:dedupe.canopy_index:Removing stop word into\nINFO:dedupe.canopy_index:Removing stop word and\nINFO:dedupe.canopy_index:Removing stop word share\nINFO:dedupe.canopy_index:Removing stop word works\nINFO:dedupe.canopy_index:Removing stop word you\nINFO:dedupe.canopy_index:Removing stop word protect\nINFO:dedupe.canopy_index:Removing stop word of\nINFO:dedupe.canopy_index:Removing stop word access\nINFO:dedupe.canopy_index:Removing stop word are\nINFO:dedupe.canopy_index:Removing stop word a\nINFO:dedupe.canopy_index:Removing stop word easy\nINFO:dedupe.canopy_index:Removing stop word to\nINFO:dedupe.canopy_index:Removing stop word off\nINFO:dedupe.canopy_index:Removing stop word wide\nINFO:dedupe.canopy_index:Removing stop word or\nINFO:dedupe.canopy_index:Removing stop word dvd\nINFO:dedupe.canopy_index:Removing stop word play\nINFO:dedupe.canopy_index:Removing stop word other\nINFO:dedupe.canopy_index:Removing stop word media\nINFO:dedupe.canopy_index:Removing stop word automatically\nINFO:dedupe.canopy_index:Removing stop word based\nINFO:dedupe.canopy_index:Removing stop word such\nINFO:dedupe.canopy_index:Removing stop word make\nINFO:dedupe.canopy_index:Removing stop word the\nINFO:dedupe.canopy_index:Removing stop word for\nINFO:dedupe.canopy_index:Removing stop word go\nINFO:dedupe.canopy_index:Removing stop word camera\nINFO:dedupe.canopy_index:Removing stop word in\nINFO:dedupe.canopy_index:Removing stop word create\nINFO:dedupe.canopy_index:Removing stop word just\nINFO:dedupe.canopy_index:Removing stop word using\nINFO:dedupe.canopy_index:Removing stop word all\nINFO:dedupe.canopy_index:Removing stop word use\nINFO:dedupe.canopy_index:Removing stop word video\nINFO:dedupe.canopy_index:Removing stop word them\nINFO:dedupe.canopy_index:Removing stop word audio\nINFO:dedupe.canopy_index:Removing stop word view\nINFO:dedupe.canopy_index:Removing stop word only\nINFO:dedupe.canopy_index:Removing stop word s\nINFO:dedupe.canopy_index:Removing stop word help\nINFO:dedupe.canopy_index:Removing stop word hard\nINFO:dedupe.canopy_index:Removing stop word up\nINFO:dedupe.canopy_index:Removing stop word plus\nINFO:dedupe.canopy_index:Removing stop word any\nINFO:dedupe.canopy_index:Removing stop word more\nINFO:dedupe.canopy_index:Removing stop word most\nINFO:dedupe.canopy_index:Removing stop word music\nINFO:dedupe.canopy_index:Removing stop word if\nINFO:dedupe.canopy_index:Removing stop word technology\nINFO:dedupe.canopy_index:Removing stop word 
high\nINFO:dedupe.canopy_index:Removing stop word screen\nINFO:dedupe.canopy_index:Removing stop word fast\nINFO:dedupe.canopy_index:Removing stop word simply\nINFO:dedupe.canopy_index:Removing stop word convenient\nINFO:dedupe.canopy_index:Removing stop word large\nINFO:dedupe.canopy_index:Removing stop word easily\nINFO:dedupe.canopy_index:Removing stop word right\nINFO:dedupe.canopy_index:Removing stop word type\nINFO:dedupe.canopy_index:Removing stop word style\nINFO:dedupe.canopy_index:Removing stop word look\nINFO:dedupe.canopy_index:Removing stop word what\nINFO:dedupe.canopy_index:Removing stop word files\nINFO:dedupe.canopy_index:Removing stop word designed\nINFO:dedupe.canopy_index:Removing stop word including\nINFO:dedupe.canopy_index:Removing stop word digital\nINFO:dedupe.canopy_index:Removing stop word multi\nINFO:dedupe.canopy_index:Removing stop word 30\nINFO:dedupe.canopy_index:Removing stop word when\nINFO:dedupe.canopy_index:Removing stop word 5\nINFO:dedupe.canopy_index:Removing stop word supports\nINFO:dedupe.canopy_index:Removing stop word compatible\nINFO:dedupe.canopy_index:Removing stop word small\nINFO:dedupe.canopy_index:Removing stop word x\nINFO:dedupe.canopy_index:Removing stop word viewing\nINFO:dedupe.canopy_index:Removing stop word ideal\nINFO:dedupe.canopy_index:Removing stop word this\nINFO:dedupe.canopy_index:Removing stop word great\nINFO:dedupe.canopy_index:Removing stop word design\nINFO:dedupe.canopy_index:Removing stop word space\nINFO:dedupe.canopy_index:Removing stop word users\nINFO:dedupe.canopy_index:Removing stop word features\nINFO:dedupe.canopy_index:Removing stop word usb\nINFO:dedupe.canopy_index:Removing stop word plug\nINFO:dedupe.canopy_index:Removing stop word comes\nINFO:dedupe.canopy_index:Removing stop word pc\nINFO:dedupe.canopy_index:Removing stop word 10\nINFO:dedupe.canopy_index:Removing stop word inch\nINFO:dedupe.canopy_index:Removing stop word t\nINFO:dedupe.canopy_index:Removing stop word case\nINFO:dedupe.canopy_index:Removing stop word 100\nINFO:dedupe.canopy_index:Removing stop word home\nINFO:dedupe.canopy_index:Removing stop word 2\nINFO:dedupe.canopy_index:Removing stop word network\nINFO:dedupe.canopy_index:Removing stop word its\nINFO:dedupe.canopy_index:Removing stop word photo\nINFO:dedupe.canopy_index:Removing stop word front\nINFO:dedupe.canopy_index:Removing stop word larger\nINFO:dedupe.canopy_index:Removing stop word allows\nINFO:dedupe.canopy_index:Removing stop word 4\nINFO:dedupe.canopy_index:Removing stop word photos\nINFO:dedupe.canopy_index:Removing stop word hours\nINFO:dedupe.canopy_index:Removing stop word work\nINFO:dedupe.canopy_index:Removing stop word over\nINFO:dedupe.canopy_index:Removing stop word products\nINFO:dedupe.canopy_index:Removing stop word applications\nINFO:dedupe.canopy_index:Removing stop word wireless\nINFO:dedupe.canopy_index:Removing stop word box\nINFO:dedupe.canopy_index:Removing stop word without\nINFO:dedupe.canopy_index:Removing stop word top\nINFO:dedupe.canopy_index:Removing stop word includes\nINFO:dedupe.canopy_index:Removing stop word interface\nINFO:dedupe.canopy_index:Removing stop word compact\nINFO:dedupe.canopy_index:Removing stop word desktop\nINFO:dedupe.canopy_index:Removing stop word also\nINFO:dedupe.canopy_index:Removing stop word support\nINFO:dedupe.canopy_index:Removing stop word they\nINFO:dedupe.canopy_index:Removing stop word offers\nINFO:dedupe.canopy_index:Removing stop word mac\nINFO:dedupe.canopy_index:Removing stop word 
key\nINFO:dedupe.canopy_index:Removing stop word data\nINFO:dedupe.canopy_index:Removing stop word built\nINFO:dedupe.canopy_index:Removing stop word power\nINFO:dedupe.canopy_index:Removing stop word not\nINFO:dedupe.canopy_index:Removing stop word player\nINFO:dedupe.canopy_index:Removing stop word 3\nINFO:dedupe.canopy_index:Removing stop word set\nINFO:dedupe.canopy_index:Removing stop word protection\nINFO:dedupe.canopy_index:Removing stop word back\nINFO:dedupe.canopy_index:Removing stop word file\nINFO:dedupe.canopy_index:Removing stop word year\nINFO:dedupe.canopy_index:Removing stop word warranty\nINFO:dedupe.canopy_index:Removing stop word about\nINFO:dedupe.canopy_index:Removing stop word re\nINFO:dedupe.canopy_index:Removing stop word fit\nINFO:dedupe.canopy_index:Removing stop word has\nINFO:dedupe.canopy_index:Removing stop word per\nINFO:dedupe.canopy_index:Removing stop word like\nINFO:dedupe.canopy_index:Removing stop word have\nINFO:dedupe.canopy_index:Removing stop word full\nINFO:dedupe.canopy_index:Removing stop word standard\nINFO:dedupe.canopy_index:Removing stop word maximum\nINFO:dedupe.canopy_index:Removing stop word 0\nINFO:dedupe.canopy_index:Removing stop word which\nINFO:dedupe.canopy_index:Removing stop word each\nINFO:dedupe.canopy_index:Removing stop word while\nINFO:dedupe.canopy_index:Removing stop word through\nINFO:dedupe.canopy_index:Removing stop word life\nINFO:dedupe.canopy_index:Removing stop word adapter\nINFO:dedupe.canopy_index:Removing stop word installation\nINFO:dedupe.canopy_index:Removing stop word cd\nINFO:dedupe.canopy_index:Removing stop word be\nINFO:dedupe.canopy_index:Removing stop word capacity\nINFO:dedupe.canopy_index:Removing stop word system\nINFO:dedupe.canopy_index:Removing stop word extra\nINFO:dedupe.canopy_index:Removing stop word dual\nINFO:dedupe.canopy_index:Removing stop word systems\nINFO:dedupe.canopy_index:Removing stop word keep\n" ], [ "dedupe.consoleLabel(deduper)", "name : durable bridge\ncategory : audio video accessories\nproducer : durable\nprice : 203.86\n\nname : durable bridge\ncategory : audio video accessories\nproducer : durable\nprice : 203.86\n\n0/10 positive, 0/10 negative\nDo these records refer to the same thing?\n(y)es / (n)o / (u)nsure / (f)inished\n" ], [ "data_d[1]", "_____no_output_____" ], [ "deduper.train()", "INFO:rlr.crossvalidation:using cross validation to find optimum alpha...\n/home/ubuntu/anaconda3/lib/python3.7/site-packages/rlr/crossvalidation.py:122: RuntimeWarning: invalid value encountered in double_scalars\n * (true_distinct + false_distinct)))\nINFO:rlr.crossvalidation:optimum alpha: 0.000100, score 0.24869807198976637\nINFO:dedupe.training:Final predicate set:\nINFO:dedupe.training:(SimplePredicate: (oneGramFingerprint, name), SimplePredicate: (sortedAcronym, name))\nINFO:dedupe.training:(SimplePredicate: (roundTo1, price), TfidfTextCanopyPredicate: (0.8, name))\n" ], [ "threshold = deduper.threshold(data_d, recall_weight=1)\nthreshold", "INFO:dedupe.canopy_index:Removing stop word new\nINFO:dedupe.canopy_index:Removing stop word 4\nINFO:dedupe.canopy_index:Removing stop word black\nINFO:dedupe.canopy_index:Removing stop word digital\nINFO:dedupe.canopy_index:Removing stop word with\nINFO:dedupe.canopy_index:Removing stop word and\nINFO:dedupe.canopy_index:Removing stop word white\nINFO:dedupe.canopy_index:Removing stop word case\nINFO:dedupe.canopy_index:Removing stop word x\nINFO:dedupe.canopy_index:Removing stop word inch\nINFO:dedupe.canopy_index:Removing stop word 
1\nINFO:dedupe.canopy_index:Removing stop word for\nINFO:dedupe.canopy_index:Removing stop word gb\nINFO:dedupe.canopy_index:Removing stop word 2\nINFO:dedupe.canopy_index:Removing stop word usb\nINFO:dedupe.canopy_index:Removing stop word 8\nINFO:dedupe.canopy_index:Removing stop word 3\nINFO:dedupe.blocking:10000, 2.3534082 seconds\nINFO:dedupe.blocking:20000, 4.5438192 seconds\nINFO:dedupe.api:Maximum expected recall and precision\nINFO:dedupe.api:recall: 0.891\nINFO:dedupe.api:precision: 0.688\nINFO:dedupe.api:With threshold: 0.424\n" ], [ "print('clustering...')\nclustered_dupes = deduper.match(data_d, threshold)\n\nprint('# duplicate sets', len(clustered_dupes))", "clustering...\n" ], [ "for key, values in data_d.items():\n values['price'] = str(values['price']) ", "_____no_output_____" ], [ "cluster_membership = {}\ncluster_id = 0\nfor (cluster_id, cluster) in enumerate(clustered_dupes):\n id_set, scores = cluster\n cluster_d = [data_d[c] for c in id_set]\n \n \n canonical_rep = dedupe.canonicalize(cluster_d)\n for record_id, score in zip(id_set, scores):\n cluster_membership[record_id] = {\n \"cluster id\" : cluster_id,\n \"canonical representation\" : canonical_rep,\n \"confidence\": score\n }\n\nsingleton_id = cluster_id + 1\n\nwith open(output_file, 'w') as f_output, open(input_file) as f_input:\n writer = csv.writer(f_output)\n reader = csv.reader(f_input)\n\n heading_row = next(reader)\n heading_row.insert(0, 'confidence_score')\n heading_row.insert(0, 'Cluster ID')\n canonical_keys = canonical_rep.keys()\n for key in canonical_keys:\n heading_row.append('canonical_' + key)\n\n writer.writerow(heading_row)\n\n for row in reader:\n row_id = int(row[0])\n if row_id in cluster_membership:\n cluster_id = cluster_membership[row_id][\"cluster id\"]\n canonical_rep = cluster_membership[row_id][\"canonical representation\"]\n row.insert(0, cluster_membership[row_id]['confidence'])\n row.insert(0, cluster_id)\n for key in canonical_keys:\n row.append(canonical_rep[key].encode('utf8'))\n else:\n row.insert(0, None)\n row.insert(0, singleton_id)\n singleton_id += 1\n for key in canonical_keys:\n row.append(None)\n writer.writerow(row)\n ", "_____no_output_____" ], [ "fields_of_interest = ['Cluster ID', 'confidence_score', 'Id', 'name', 'producer', 'description', 'price']", "_____no_output_____" ], [ "# read back the file that was just written above\namazon_walmart_output = pd.read_csv(output_file, sep=',', quotechar='\"')[fields_of_interest]", "_____no_output_____" ], [ "# use isnull() rather than == None, which never matches in pandas\namazon_walmart_output[amazon_walmart_output['confidence_score'].isnull()]", "_____no_output_____" ], [ "amazon_walmart_output = amazon_walmart_output[fields_of_interest]", "_____no_output_____" ], [ "amazon_walmart_output[amazon_walmart_output['confidence_score'] > 0.9].sort_values('Cluster ID')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06edb781d7fa714883df05f336fbd5b4fa1bf52
98,107
ipynb
Jupyter Notebook
python/figS6_logit_twobins_MTurk.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
null
null
null
python/figS6_logit_twobins_MTurk.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
null
null
null
python/figS6_logit_twobins_MTurk.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
1
2020-11-29T18:12:33.000Z
2020-11-29T18:12:33.000Z
113.026498
31,968
0.811288
[ [ [ "%matplotlib inline\nimport io\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patch\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nsns.set_style('darkgrid')\nsns.set_context('notebook')", "c:\\users\\hjl161\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\statsmodels\\tools\\_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "datafile_AMT = '../data/MTurk_anonymous.xlsx'\n#datafile_DTU1 = '../data/DTU1_anonymous.xlsx'\n#datafile_DTU2 = '../data/DTU2_anonymous.xlsx'", "_____no_output_____" ], [ "df_MTurk = pd.DataFrame(pd.read_excel(datafile_AMT))\ndf_MTurk.drop(df_MTurk.columns[df_MTurk.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\n#df_DTU1 = pd.DataFrame(pd.read_excel(datafile_DTU1))\n#df_DTU1.drop(df_DTU1.columns[df_DTU1.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)\n#df_DTU2 = pd.DataFrame(pd.read_excel(datafile_DTU2))\n#df_DTU2.drop(df_DTU2.columns[df_DTU2.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)", "_____no_output_____" ], [ "df_MTurk.head()", "_____no_output_____" ] ], [ [ "In the following, we want to partition responses according to how many times a player has miscoordinated before. For a start, we concatenate all three experiments into one dataframe:", "_____no_output_____" ] ], [ [ "#df = pd.concat([\n# df_MTurk.join(pd.Series(['MTurk']*len(df_MTurk), name='experiment')), \n# df_DTU1.join(pd.Series(['DTU1']*len(df_DTU1), name='experiment')),\n# df_DTU2.join(pd.Series(['DTU2']*len(df_DTU2), name='experiment'))],\n# ignore_index=True)", "_____no_output_____" ], [ "#df.head(), len(df)", "_____no_output_____" ] ], [ [ "Now we group the data into response pairs", "_____no_output_____" ] ], [ [ "df = df_MTurk.groupby(['session', 'group', 'round'], as_index=False)[['code', 'id_in_group', 'arrival', 'choice', 'certainty']].agg(lambda x: tuple(x))\ndf.head()", "_____no_output_____" ], [ "#df0 = pd.DataFrame(columns=df.columns)\n#df1_ = pd.DataFrame(columns=df.columns)\n#sessions = df.session.unique()\n#for session in sessions:\n# df_session = df[df.session == session]\n# groups = df_session.group.unique()\n# for group in groups:\n# df_group = df_session[df_session.group == group]\n# miss = 0\n# for idx, row in df.iterrows():\n# if sum(row['choice']) != 1:\n# row['miss'] = miss\n# df0 = df0.append(row, ignore_index=True)\n# else:\n# miss += 1\n# df1_ = df1_.append(row, ignore_index=True)\n#df.head()", "_____no_output_____" ], [ "# initialize new dataframes that will hold data with a condition. 
df0 will contain all choices \n# having had zero previous miscoordinations, while df1_ will contain all choices having had one\n# or more previous miscoordinations\ndf0 = pd.DataFrame(columns=df.columns)\ndf1_ = pd.DataFrame(columns=df.columns)", "_____no_output_____" ], [ "# partition the dataframe into two bins - the first having 0 and 1, and the other 2 or more\n# miscoordinations:\nsessions = df.session.unique()\nfor session in sessions:\n df_session = df[df.session == session]\n groups = df_session.group.unique()\n for group in groups:\n df_group = df_session[df_session.group == group]\n miss = 0\n for idx, row in df_group.iterrows():\n if sum(row['choice']) != 1:\n if miss == 0:\n df0 = df0.append(row, ignore_index=True)\n else:\n df1_ = df1_.append(row, ignore_index=True)\n else:\n if miss == 0:\n df0 = df0.append(row, ignore_index=True)\n else:\n df1_ = df1_.append(row, ignore_index=True)\n miss += 1", "_____no_output_____" ] ], [ [ "Now a bit of magic: we need to separate the tuples again and create rows for each person: (see ref at https://stackoverflow.com/questions/53218931/how-to-unnest-explode-a-column-in-a-pandas-dataframe", "_____no_output_____" ] ], [ [ "def unnesting(df, explode):\n idx = df.index.repeat(df[explode[0]].str.len())\n df1 = pd.concat([\n pd.DataFrame({x: np.concatenate(df[x].values)}) for x in explode], axis=1)\n df1.index = idx\n\n return df1.join(df.drop(explode, 1), how='left')\n\n\ndfnew0 = unnesting(df0,['code', 'id_in_group', 'arrival', 'choice', 'certainty'])\ndfnew1_ = unnesting(df1_,['code', 'id_in_group', 'arrival', 'choice', 'certainty'])", "_____no_output_____" ], [ "dfnew0['arrival'].replace({9.0: 8.6, 9.1: 8.7}, inplace=True)\ndfnew1_['arrival'].replace({9.0: 8.6, 9.1: 8.7}, inplace=True)\nlen(dfnew0), len(dfnew1_)", "_____no_output_____" ], [ "dfnew0.head()", "_____no_output_____" ], [ "sns.regplot(x='arrival', y='choice', data=dfnew0, ci=95, logistic=True)\nsns.despine()", "_____no_output_____" ], [ "sns.regplot(x='arrival', y='choice', data=dfnew1_, ci=95, logistic=True)\nsns.despine()", "_____no_output_____" ], [ "dfall = pd.concat([dfnew0.join(pd.Series(['zero']*len(dfnew0), name='miss')), \n dfnew1_.join(pd.Series(['one_or_more']*len(dfnew1_), name='miss'))],\n ignore_index=True)", "_____no_output_____" ], [ "dfall.head()", "_____no_output_____" ], [ "pal = dict(zero=\"blue\", one_or_more=\"orange\")\ng = sns.lmplot(x=\"arrival\", y=\"choice\", hue=\"miss\", data=dfall, palette=pal, \n logistic=True, ci=95, n_boot=10000, x_estimator=np.mean, x_ci=\"ci\",\n y_jitter=.2, legend=False, height=3, aspect=1.5)\n#g = sns.FacetGrid(dfall, hue='miscoordinations', palette=pal, height=5);\n#g.map(plt.scatter, \"arrival\", \"choice\", marker='o', s=50, alpha=.7, linewidth=.5, color='white', edgecolor=\"white\")\n#g.map(sns.regplot, \"arrival\", \"choice\", scatter_kws={\"color\": \"grey\"}, ci=95, n_boot=100, logistic=True, height=3, aspect=1.5)\ng.set(xlim=(7.98, 8.72))\ng.set(ylabel='frequency of canteen choice')\nmy_ticks = [\"8:00\", \"8:10\", \"8:20\", \"8:30\", \"8:40\", \"8:50\", \"9:00\", \"9:10\"]\ng.set(xticks=[8,8.1,8.2,8.3,8.4,8.5,8.6,8.7], yticks=[0,.1,.2,.3,.4,.5,.6,.7,.8,.9,1])\ng.set(xticklabels = my_ticks) #, yticklabels = [\"office\",\"\",\"\",\"\",\"\",\".5\",\"\",\"\",\"\",\"\",\"canteen\"])\n\n# make my own legend:\nname_to_color = {\n ' 0 (n=2762)': 'blue',\n '>0 (n=1498)': 'orange',\n}\npatches = [patch.Patch(color=v, label=k) for k,v in name_to_color.items()]\nplt.legend(title='# miscoordinations', handles=patches, loc='lower 
left')\nplt.title('MTurk', fontsize=16)\nplt.tight_layout()\n\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nPLOTS_DIR = '../plots'\n\nif not os.path.exists(PLOTS_DIR):\n os.makedirs(PLOTS_DIR)\n\nplt.savefig(os.path.join(PLOTS_DIR, 'figS6_logit_2bins_MTurk.png'),\n bbox_inches='tight', transparent=True, dpi=300)\nplt.savefig(os.path.join(PLOTS_DIR, 'figS6_logit_2bins_MTurk.pdf'), transparent=True, dpi=300)\n\nsns.despine()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06edba58237a592e8babe8958b5cec7769bc0c0
17,449
ipynb
Jupyter Notebook
Notebooks/Index.ipynb
covid-multivent/covid-multivent.github.io
749579588bc090f7c595f54c284f7757d1c7f116
[ "MIT-0" ]
null
null
null
Notebooks/Index.ipynb
covid-multivent/covid-multivent.github.io
749579588bc090f7c595f54c284f7757d1c7f116
[ "MIT-0" ]
null
null
null
Notebooks/Index.ipynb
covid-multivent/covid-multivent.github.io
749579588bc090f7c595f54c284f7757d1c7f116
[ "MIT-0" ]
1
2021-01-13T16:49:53.000Z
2021-01-13T16:49:53.000Z
41.348341
892
0.453378
[ [ [ "# COVID-MultiVent\n\nThe COVID MultiVent System was developed through a collaboration between Dr. Arun Agarwal, pulmonary critical care fellow at the University of Missouri, and a group of medical students at the University of Pennsylvania with backgrounds in various types of engineering. The system allows for monitoring of pressure and volume delivered to each patient and incorporates valve components that allow for peak inspiratory pressure (PIP), positive end expiratory pressure (PEEP), and inspiratory volume to be modulated independently for each patient. We have created open source assembly resources for this system to expedite its adoption in hospitals around the world with dire need of ventilation resources. The system is assembled with components that can be readily found in most hospitals. If you have questions regarding the system please contact us at **[email protected]** \n\nDr. Arun Agarwal\n\nAlexander Lee\n\nFeyisope Eweje\n\nStephen Landy\n\nDavid Gordon\n\nRyan Gallagher \n\nAlfredo Lucas", "_____no_output_____" ] ], [ [ "%%html\n<iframe src=\"https://docs.google.com/presentation/d/e/2PACX-1vT2kAsF54vHJoIti5iQkk8zUTkcQuRBFnbtgAhCoHwxrThIzZ14mCpdgNkWrqS8bRf5xFB2ZoeXlfUk/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"480\" height=\"299\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\"></iframe>", "_____no_output_____" ] ], [ [ "### Original Video and Facebook Group", "_____no_output_____" ], [ "Please see below Dr. Agrawal's original video showing the proposed setup. A link to the MultiVent facebook group can be found [here](https://facebook.com/COVIDMULTIVENT/).", "_____no_output_____" ] ], [ [ "%%html\n\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/odnhnnlBlpM\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>", "_____no_output_____" ] ], [ [ "# Assembly Guide and Component List\n\nFor detailed instructions on how to build the MultiVent setup please click [here](https://docs.google.com/document/d/11Z7MpeGSC6m3ipsMVLe-Ofkw4HeqCN6x1Zhqy8FMRpc/edit?usp=sharing)\n\n\nBelow see the proposed parts list with the number of each part required for a single or a 2-patient setup. A list of vendors associated with each part is also included, however, note that almost all of these components will be readily available in many hospitals that already have ventilator setups. 
\nClick [here](https://docs.google.com/spreadsheets/d/1XikdFKNdgZAywoPw4oIWqJ9e0-a059ucXh8DSOGl4GA/edit?usp=sharing) for a Google Sheets version of this list.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ndf = pd.read_excel('Website Ventilator splitting components.xlsx')\ndf", "_____no_output_____" ] ], [ [ "## Additional Resources", "_____no_output_____" ], [ "[US Public Health Service guidelines for ventilator splitting](https://www.hhs.gov/sites/default/files/optimizing-ventilator-use-during-covid19-pandemic.pdf)\n\n[Ventilation Sharing Protocol developed by the Columbia University College of Physicians & Surgeons/New York-Presbyterian Hospital (3/24/20)](https://www.gnyha.org/wp-content/uploads/2020/03/Ventilator-Sharing-Protocol-Dual-Patient-Ventilation-with-a-Single-Mechanical-Ventilator-for-Use-during-Critical-Ventilator-Shortages.pdf)\n\n[PulmCrit - Splitting ventilators to provide titrated support to a large group of patients](https://emcrit.org/pulmcrit/split-ventilators/)\n\n[Medium article explain vent splitting methods - A better way of connecting multiple patients to a single ventilator](https://medium.com/@pinsonhannah/a-better-way-of-connecting-multiple-patients-to-a-single-ventilator-fa9cf42679c6)\n", "_____no_output_____" ], [ "# ***DISCLAIMER - This system is currently not endorsed by the University of Missouri nor the University of Pennsylvania. Multi-patient ventilation should be attempted with the greatest of caution as an option of last resort with appropriate clinical guidance. The COVID MultiVent team does not assume any liability for adverse events that may occur that could in any way be tied to use of the system.***\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d06f094f701ce4b6200aaa583fb30c1cba482745
8,847
ipynb
Jupyter Notebook
performance_test/helper_scripts/plot_logs.ipynb
ZhenshengLee/performance_test
d77820a8852d30a129a0fe845bf6e55bf5192c13
[ "Apache-2.0" ]
1
2021-07-22T09:41:28.000Z
2021-07-22T09:41:28.000Z
performance_test/helper_scripts/plot_logs.ipynb
ZhenshengLee/performance_test
d77820a8852d30a129a0fe845bf6e55bf5192c13
[ "Apache-2.0" ]
null
null
null
performance_test/helper_scripts/plot_logs.ipynb
ZhenshengLee/performance_test
d77820a8852d30a129a0fe845bf6e55bf5192c13
[ "Apache-2.0" ]
null
null
null
33.384906
128
0.507856
[ [ [ "import glob\nimport itertools\nfrom ipywidgets import widgets, Layout\nimport numpy as np\nimport os\nimport pandas as pd\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\nfrom apex_performance_plotter.apex_performance_plotter.load_logfiles import load_logfiles\n\npio.templates.default = \"plotly_white\"\nfrom IPython.core.interactiveshell import InteractiveShell", "_____no_output_____" ], [ "# Define the folders where to look for experiment outputs\nos.chdir('../../../../experiment')\nlogfiles = glob.glob('{}*'.format('log'))\nselected_logfiles = widgets.SelectMultiple(\n options=logfiles,\n description='Experiments',\n disabled=False,\n layout=Layout(width='100%')\n)\ndisplay(selected_logfiles)\n\n# Select the experiments to plot", "_____no_output_____" ], [ "# Display selected experiment properties\nInteractiveShell.ast_node_interactivity = \"all\"\n\nheaders, dataframes = load_logfiles(selected_logfiles)\n\nfor idx, header in enumerate(headers):\n display(header)\n \nInteractiveShell.ast_node_interactivity = \"last\"", "_____no_output_____" ], [ "colors = ['#4363d8','#800000','#f58231','#e6beff']\n\n# Plot latencies\nfigure_latencies = go.FigureWidget()\nfigure_latencies.layout.xaxis.title = 'Time (s)'\nfigure_latencies.layout.yaxis.title = 'Latencies (ms)'\n\nfor idx, experiment in enumerate(dataframes):\n\n figure_latencies.add_scatter(x=experiment['T_experiment'],\n y=experiment['latency_max (ms)'],\n mode='markers', marker_color=colors[idx],\n marker_symbol='x',\n name= 'latency_max',\n text=headers[idx]['Logfile name']);\n figure_latencies.add_scatter(x=experiment['T_experiment'],\n y=experiment['latency_mean (ms)'],\n mode='markers', marker_color=colors[idx],\n marker_symbol='triangle-up',\n name='latency_mean',\n text=headers[idx]['Logfile name']);\n figure_latencies.add_scatter(x=experiment['T_experiment'],\n y=experiment['latency_min (ms)'],\n mode='markers', marker_color=colors[idx],\n name='latency_min',\n text=headers[idx]['Logfile name'])\n\nfigure_latencies", "_____no_output_____" ], [ "# Plot CPU usage\nfigure_cpu_usage = go.FigureWidget()\nfigure_cpu_usage.layout.xaxis.title = 'Time (s)'\nfigure_cpu_usage.layout.yaxis.title = 'CPU usage (%)'\n\nfor idx, experiment in enumerate(dataframes):\n\n figure_cpu_usage.add_scatter(x=experiment['T_experiment'],\n y=experiment['cpu_usage (%)'],\n mode='markers', marker_color=colors[idx],\n marker_symbol='x',\n name= 'cpu_usage',\n text=headers[idx]['Logfile name']);\n\nfigure_cpu_usage", "_____no_output_____" ], [ "# Plot memory consumption\nfigure_memory_usage = go.FigureWidget()\nfigure_memory_usage.layout.xaxis.title = 'Time (s)'\nfigure_memory_usage.layout.yaxis.title = 'Memory consumption (MB)'\n\nfor idx, experiment in enumerate(dataframes):\n\n figure_memory_usage.add_scatter(x=experiment['T_experiment'],\n y=experiment['ru_maxrss']/1024,\n mode='markers', marker_color=colors[idx],\n marker_symbol='x',\n name= 'ru_maxrss',\n text=headers[idx]['Logfile name']);\n\nfigure_memory_usage", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d06f0cbe2366a155467a2c32a6917815607f4729
30,078
ipynb
Jupyter Notebook
Homework/HW10/HW10-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
Homework/HW10/HW10-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
Homework/HW10/HW10-final.ipynb
JasmineeeeeTONG/CS207_coursework
666239ee5f8bd7cbe04725a52870191a3d40d8c2
[ "MIT" ]
null
null
null
32.10032
294
0.433074
[ [ [ "# Homework 10: `SQL`\n\n## Due Date: Thursday, November 16th at 11:59 PM\n\nYou will create a database of the NASA polynomial coefficients for each specie.\n\n**Please turn in your database with your `Jupyter` notebook!**", "_____no_output_____" ], [ "# Question 1: Convert XML to a SQL database", "_____no_output_____" ], [ "Create two tables named `LOW` and `HIGH`, each corresponding to data given for the low and high temperature range.\nEach should have the following column names:\n\n- `SPECIES_NAME`\n- `TLOW`\n- `THIGH`\n- `COEFF_1`\n- `COEFF_2`\n- `COEFF_3`\n- `COEFF_4`\n- `COEFF_5`\n- `COEFF_6`\n- `COEFF_7`\n\nPopulate the tables using the XML file you created in last assignment. If you did not complete the last assignment, you may also use the `example_thermo.xml` file.\n\n`TLOW` should refer to the temperature at the low range and `THIGH` should refer to the temperature at the high range. For example, in the `LOW` table, $H$ would have `TLOW` at $200$ and `THIGH` at $1000$ and in the `HIGH` table, $H$ would have `TLOW` at $1000$ and `THIGH` at $3500$.\n\nFor both tables, `COEFF_1` through `COEFF_7` should be populated with the corresponding coefficients for the low temperature data and high temperature data.", "_____no_output_____" ] ], [ [ "import xml.etree.ElementTree as ET\nimport sqlite3\nimport pandas as pd\nfrom IPython.core.display import display\n\npd.set_option('display.width', 500)\npd.set_option('display.max_columns', 100)\npd.set_option('display.notebook_repr_html', True)\n\n# Create and connect to database\ndb = sqlite3.connect('NASA_coef.sqlite')\ncursor = db.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS LOW\")\ncursor.execute(\"DROP TABLE IF EXISTS HIGH\")\ncursor.execute(\"PRAGMA foreign_keys=1\")\n\n# Create the table for low temperature range\ncursor.execute('''CREATE TABLE LOW (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, \n SPECIES_NAME TEXT,\n TLOW REAL, \n THIGH REAL, \n COEFF_1 REAL, \n COEFF_2 REAL,\n COEFF_3 REAL,\n COEFF_4 REAL,\n COEFF_5 REAL,\n COEFF_6 REAL,\n COEFF_7 REAL)''')\n\ndb.commit()\n\n# Create the table for high temperature range\ncursor.execute('''CREATE TABLE HIGH (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, \n SPECIES_NAME TEXT,\n TLOW REAL, \n THIGH REAL, \n COEFF_1 REAL, \n COEFF_2 REAL,\n COEFF_3 REAL,\n COEFF_4 REAL,\n COEFF_5 REAL,\n COEFF_6 REAL,\n COEFF_7 REAL)''')\ndb.commit()\n\n# The given helper function (from L18) to visualize tables\ndef viz_tables(cols, query):\n q = cursor.execute(query).fetchall()\n framelist = []\n for i, col_name in enumerate(cols):\n framelist.append((col_name, [col[i] for col in q]))\n return pd.DataFrame.from_items(framelist)\n\n", "_____no_output_____" ], [ "tree = ET.parse('thermo.xml')\n\nspecies_data = tree.find('speciesData')\nspecies_list = species_data.findall('species') # a list of all species\n\nfor species in species_list:\n name = species.get('name')\n NASA_list = species.find('thermo').findall('NASA')\n for NASA in NASA_list:\n T_min = float(NASA.get('Tmin'))\n T_max = float(NASA.get('Tmax'))\n coefs = NASA.find('floatArray').text.split(',')\n vals_to_insert = (name, T_min, T_max, float(coefs[0].strip()), float(coefs[1].strip()), \n float(coefs[2].strip()), float(coefs[3].strip()), float(coefs[4].strip()), \n float(coefs[5].strip()), float(coefs[6].strip())) \n\n if T_max > 1000: # high range temperature\n cursor.execute('''INSERT INTO HIGH \n (SPECIES_NAME, TLOW, THIGH, COEFF_1, COEFF_2, COEFF_3, COEFF_4, COEFF_5, COEFF_6, COEFF_7)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 
?)''', vals_to_insert)\n \n else: # low range temperature\n cursor.execute('''INSERT INTO LOW \n (SPECIES_NAME, TLOW, THIGH, COEFF_1, COEFF_2, COEFF_3, COEFF_4, COEFF_5, COEFF_6, COEFF_7)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', vals_to_insert)", "_____no_output_____" ], [ "LOW_cols = [col[1] for col in cursor.execute(\"PRAGMA table_info(LOW)\")]\ndisplay(viz_tables(LOW_cols, '''SELECT * FROM LOW'''))", "_____no_output_____" ], [ "HIGH_cols = [col[1] for col in cursor.execute(\"PRAGMA table_info(HIGH)\")]\ndisplay(viz_tables(HIGH_cols, '''SELECT * FROM HIGH'''))", "_____no_output_____" ] ], [ [ "# Question 2: `WHERE` Statements", "_____no_output_____" ], [ "1. Write a `Python` function `get_coeffs` that returns an array of 7 coefficients. \n \n The function should take in two parameters: 1.) `species_name` and 2.) `temp_range`, an indicator variable ('low' or 'high') to indicate whether the coefficients should come from the low or high temperature range. \n The function should use `SQL` commands and `WHERE` statements on the table you just created in Question 1 (rather than taking data from the XML directly).\n```python\ndef get_coeffs(species_name, temp_range):\n ''' Fill in here'''\n return coeffs\n```\n\n2. Write a python function `get_species` that returns all species that have a temperature range above or below a given value. The function should take in two parameters: 1.) `temp` and 2.) `temp_range`, an indicator variable ('low' or 'high').\n\n When temp_range is 'low', we are looking for species with a temperature range lower than the given temperature, and for a 'high' temp_range, we want species with a temperature range higher than the given temperature.\n\n This exercise may be useful if different species have different `LOW` and `HIGH` ranges.\n\n And as before, you should accomplish this through `SQL` queries and where statements.\n\n```python\ndef get_species(temp, temp_range):\n ''' Fill in here'''\n return coeffs\n```", "_____no_output_____" ] ], [ [ "def get_coeffs(species_name, temp_range):\n query = '''SELECT COEFF_1, COEFF_2, COEFF_3, COEFF_4, COEFF_5, COEFF_6, COEFF_7\n FROM {} \n WHERE SPECIES_NAME = \"{}\"'''.format(temp_range.upper(), species_name)\n coeffs = list(cursor.execute(query).fetchall()[0])\n return coeffs", "_____no_output_____" ], [ "get_coeffs('H', 'low')", "_____no_output_____" ], [ "def get_species(temp, temp_range):\n if temp_range == 'low': # temp_range == 'low'\n query = '''SELECT SPECIES_NAME FROM {} WHERE TLOW < {}'''.format(temp_range.upper(), temp)\n else: # temp_range == 'high'\n query = '''SELECT SPECIES_NAME FROM {} WHERE THIGH > {}'''.format(temp_range.upper(), temp)\n species = []\n for s in cursor.execute(query).fetchall():\n species.append(s[0])\n return species", "_____no_output_____" ], [ "get_species(500, 'low')", "_____no_output_____" ], [ "get_species(100, 'low')", "_____no_output_____" ], [ "get_species(3000, 'high')", "_____no_output_____" ], [ "get_species(3500, 'high')", "_____no_output_____" ] ], [ [ "# Question 3: `JOIN` STATEMENTS", "_____no_output_____" ], [ "Create a table named `ALL_TEMPS` that has the following columns:\n\n- `SPECIES_NAME`\n- `TEMP_LOW`\n- `TEMP_HIGH`\n\nThis table should be created by joining the tables `LOW` and `HIGH` on the value `SPECIES_NAME`.", "_____no_output_____" ] ], [ [ "# Create the table for ALL_TEMPS\ncursor.execute(\"DROP TABLE IF EXISTS ALL_TEMPS\")\n\ncursor.execute('''CREATE TABLE ALL_TEMPS (\n id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, \n SPECIES_NAME TEXT,\n TEMP_LOW REAL, \n 
TEMP_HIGH REAL)''')\ndb.commit()\n\n# Insert TEMP_LOW and TEMP_HIGH of all species to ALL_TEMPS\nquery = '''SELECT LOW.SPECIES_NAME, LOW.TLOW AS TEMP_LOW, HIGH.THIGH AS TEMP_HIGH\n FROM LOW\n INNER JOIN HIGH ON LOW.SPECIES_NAME = HIGH.SPECIES_NAME'''\n\nfor record in cursor.execute(query).fetchall():\n cursor.execute('''INSERT INTO ALL_TEMPS \n (SPECIES_NAME, TEMP_LOW, TEMP_HIGH)\n VALUES (?, ?, ?)''', record)", "_____no_output_____" ], [ "ALL_TEMPS_cols = [col[1] for col in cursor.execute(\"PRAGMA table_info(ALL_TEMPS)\")]\ndisplay(viz_tables(ALL_TEMPS_cols, '''SELECT * FROM ALL_TEMPS'''))", "_____no_output_____" ] ], [ [ "1. Write a `Python` function `get_range` that returns the range of temperatures for a given species_name.\n\nThe range should be computed within the `SQL` query (i.e. you should subtract within the `SELECT` statement in the `SQL` query).\n```python\ndef get_range(species_name):\n '''Fill in here'''\n return range\n```\n\nNote that `TEMP_LOW` is the lowest temperature in the `LOW` range and `TEMP_HIGH` is the highest temperature in the `HIGH` range.", "_____no_output_____" ] ], [ [ "def get_range(species_name):\n query = '''SELECT (TEMP_HIGH - TEMP_LOW) AS T_range FROM ALL_TEMPS WHERE SPECIES_NAME = \"{}\"'''.format(species_name)\n T_range = cursor.execute(query).fetchall()[0][0]\n return T_range", "_____no_output_____" ], [ "get_range('O')", "_____no_output_____" ], [ "# Close the Database\ndb.commit()\ndb.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d06f0e52f14d544079fce1213c79d915a6a087f9
124,087
ipynb
Jupyter Notebook
src/drafts/check_results_bert_base_MNLI.ipynb
lukewanless/looking-for-equivalences
70cf55317d0a5b519b3a76e0f76dcba24847bc1b
[ "MIT" ]
null
null
null
src/drafts/check_results_bert_base_MNLI.ipynb
lukewanless/looking-for-equivalences
70cf55317d0a5b519b3a76e0f76dcba24847bc1b
[ "MIT" ]
null
null
null
src/drafts/check_results_bert_base_MNLI.ipynb
lukewanless/looking-for-equivalences
70cf55317d0a5b519b3a76e0f76dcba24847bc1b
[ "MIT" ]
1
2021-04-28T01:32:22.000Z
2021-04-28T01:32:22.000Z
333.567204
45,800
0.927454
[ [ [ "# Results for BERT when applying syn tranformation to both premise and hypothesis to the MNLI dataset", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom tqdm import tqdm\nfrom IPython.display import display, HTML \nfrom lr.analysis.util import get_ts_from_results_folder \nfrom lr.analysis.util import get_rho_stats_from_result_list\nfrom lr.stats.h_testing import get_ks_stats_from_p_values_compared_to_uniform_dist", "_____no_output_____" ] ], [ [ "## Get Results", "_____no_output_____" ] ], [ [ "all_accs = []\nall_transformed_accs = []\nall_paired_t_p_values = []\nall_dev_plus_diff = []\nall_time = []\n\nm_name = \"bert_base\"\nfolder = \"mnli\"\ntest_repetitions = 5\nbatchs = range(1, test_repetitions + 1)\n\nfor i in tqdm(batchs):\n test_accuracy = get_ts_from_results_folder(path=\"results/{}/{}/syn_p_h/batch{}/\".format(folder,m_name, i),\n stat=\"test_accuracy\")\n \n transformed_test_accuracy = get_ts_from_results_folder(path=\"results/{}/{}/syn_p_h/batch{}/\".format(folder,m_name, i),\n stat=\"transformed_test_accuracy\")\n \n paired_t_p_value = get_ts_from_results_folder(path=\"results/{}/{}/syn_p_h/batch{}/\".format(folder, m_name, i),\n stat=\"paired_t_p_value\")\n \n diff = get_ts_from_results_folder(path=\"results/{}/{}/syn_p_h/batch{}/\".format(folder, m_name, i),\n stat=\"dev_plus_accuracy_difference\")\n \n t_time = get_ts_from_results_folder(path=\"results/{}/{}/syn_p_h/batch{}/\".format(folder, m_name,i),\n stat=\"test_time\")\n\n \n all_accs.append(test_accuracy)\n all_transformed_accs.append(transformed_test_accuracy)\n all_paired_t_p_values.append(paired_t_p_value)\n all_dev_plus_diff.append(diff)\n all_time.append(t_time)", "100%|██████████| 5/5 [00:00<00:00, 10.57it/s]\n" ], [ "total_time = pd.concat(all_time,1).sum().sum()\nn_params = 109484547\nprint(\"Time for all experiments = {:.1f} hours\".format(total_time))\nprint(\"Number of paramaters for BERT = {}\".format(n_params))", "Time for all experiments = 204.6 hours\nNumber of paramaters for BERT = 109484547\n" ] ], [ [ "## Accuracy", "_____no_output_____" ] ], [ [ "rhos, mean_acc, error_acc, _ = get_rho_stats_from_result_list(all_accs)\n\n_, mean_acc_t, error_acc_t, _ = get_rho_stats_from_result_list(all_transformed_accs)\n\nfig, ax = plt.subplots(figsize=(12,6))\nax.errorbar(rhos, mean_acc, yerr=error_acc, fmt='-o', label=\"original test data\");\nax.errorbar(rhos, mean_acc_t, yerr=error_acc_t, fmt='-o', label=\"transformed test data\");\nax.legend(loc=\"best\");\nax.set_xlabel(r\"$\\rho$\", fontsize=14);\nax.set_ylabel(\"accuracy\", fontsize=14);\nax.set_title(\"BERT accuracy\\n\\ndataset: MNLI\\ntransformation: synonym substitution\\ntest repetitions: {}\\n\".format(test_repetitions));\nfig.tight_layout()\nfig.savefig('figs/bert_base_acc_mnli_syn_p_h.png', bbox_inches=None, pad_inches=0.5)", "_____no_output_____" ] ], [ [ "## P-values", "_____no_output_____" ] ], [ [ "rhos, mean_p_values, error_p_values, min_p_values = get_rho_stats_from_result_list(all_paired_t_p_values)\n\nalpha = 0.05\nalpha_adj = alpha / test_repetitions\n\nrejected_ids = []\nremain_ids = []\n\nfor i,p in enumerate(min_p_values):\n if p < alpha_adj:\n rejected_ids.append(i)\n else:\n remain_ids.append(i)\n \nrhos_rejected = rhos[rejected_ids]\nrhos_remain = rhos[remain_ids]\ny_rejected = mean_p_values[rejected_ids]\ny_remain = mean_p_values[remain_ids]\nerror_rejected = error_p_values[rejected_ids]\nerror_remain = 
error_p_values[remain_ids]\n\ntitle_msg = \"BERT p-values\\n\\ndataset:\"\ntitle_msg += \"MNLI\\ntransformation: synonym substitution\\ntest repetitions: {}\\n\".format(test_repetitions)\ntitle_msg += \"significance level = {:.1%} \\n\".format(alpha)\ntitle_msg += \"adjusted significance level = {:.2%} \\n\".format(alpha_adj)\n\n\nfig, ax = plt.subplots(figsize=(12,6))\nax.errorbar(rhos_rejected, y_rejected, yerr=error_rejected, fmt='o', linewidth=0.50, label=\"at least one p-value is smaller than {:.2%}\".format(alpha_adj));\nax.errorbar(rhos_remain, y_remain, yerr=error_remain, fmt='o', linewidth=0.50, label=\"all p-values are greater than {:.2%}\".format(alpha_adj));\nax.legend(loc=\"best\");\nax.set_xlabel(r\"$\\rho$\", fontsize=14);\nax.set_ylabel(\"p-value\", fontsize=14);\nax.set_title(title_msg);\nfig.tight_layout()\nfig.tight_layout()\nfig.savefig('figs/bert_p_values_mnli_syn_p_h.png', bbox_inches=None, pad_inches=0.5)", "_____no_output_____" ] ], [ [ "## Accuracy difference", "_____no_output_____" ] ], [ [ "rhos, diff, _,_ = get_rho_stats_from_result_list(all_dev_plus_diff)\n_, test_acc, _,_ = get_rho_stats_from_result_list(all_accs)\n_, test_acc_t, _,_ = get_rho_stats_from_result_list(all_transformed_accs)\ntest_diff = np.abs(test_acc - test_acc_t)\n\nfig, ax = plt.subplots(figsize=(12,6))\nax.errorbar(rhos, diff, fmt='-o', label=\"validation\");\nax.errorbar(rhos, test_diff, fmt='-o', label=\"test\");\n\nax.legend(loc=\"best\");\nax.set_xlabel(r\"$\\rho$\", fontsize=14);\nax.set_ylabel(\"average accuracy difference\", fontsize=14);\nax.set_title(\"BERT accuracy difference\\n\\ndataset: MNLI\\ntransformation: synonym substitution\\ntest repetitions: {}\\n\".format(test_repetitions));\nfig.tight_layout()\nfig.savefig('figs/bert_acc_diff_mnli_syn_p_h.png', bbox_inches=None, pad_inches=0.5)", "_____no_output_____" ] ], [ [ "## Selecting the best $\\rho$", "_____no_output_____" ] ], [ [ "id_min = np.argmin(diff)\nmin_rho = rhos[id_min]\nmin_rho_test_acc = test_acc[id_min]\nmin_rho_transformed_test_acc = test_acc_t[id_min]\ntest_accuracy_loss_pct = np.round(((min_rho_test_acc - test_acc[0]) / test_acc[0]) * 100, 1)\n\nanalysis = {\"dataset\":\"mnli\",\n \"model\": \"BERT\",\n \"rho\":min_rho,\n \"test_accuracy_loss_pct\": test_accuracy_loss_pct,\n \"average_test_accuracy\": min_rho_test_acc,\n \"average_transformed_test_accuracy\": min_rho_transformed_test_acc,\n \"combined_accuracy\": np.mean([min_rho_test_acc,min_rho_transformed_test_acc])}\nanalysis = pd.DataFrame(analysis, index=[0])\nanalysis", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06f1b9e341ed8f674548b1114f5cdfa9183c9cb
15,185
ipynb
Jupyter Notebook
1_intro_to_python.ipynb
stickbass/buenosaires2018
138efbed6d47e79daa48ab41f4b8c69f433a4ff5
[ "MIT" ]
null
null
null
1_intro_to_python.ipynb
stickbass/buenosaires2018
138efbed6d47e79daa48ab41f4b8c69f433a4ff5
[ "MIT" ]
null
null
null
1_intro_to_python.ipynb
stickbass/buenosaires2018
138efbed6d47e79daa48ab41f4b8c69f433a4ff5
[ "MIT" ]
null
null
null
24.975329
320
0.476852
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/tompollard/buenosaires2018/blob/master/1_intro_to_python.ipynb)", "_____no_output_____" ], [ "# Programming in Python", "_____no_output_____" ], [ "In this part of the workshop, we will introduce some basic programming concepts in Python. We will then explore how these concepts allow us to carry out an anlysis that can be reproduced.", "_____no_output_____" ], [ "## Working with variables", "_____no_output_____" ], [ "You can get output from Python by typing math into a code cell. Try executing a sum below (for example: 3 + 5).", "_____no_output_____" ] ], [ [ "3+5", "_____no_output_____" ] ], [ [ "However, to do anything useful, we will need to assign values to `variables`. Assign a height in cm to a variable in the cell below.", "_____no_output_____" ] ], [ [ "height_cm = 180", "_____no_output_____" ] ], [ [ "Now the value has been assigned to our variable, we can print it in the console with `print`.", "_____no_output_____" ] ], [ [ "print('Height in cm is:', height_cm)", "_____no_output_____" ] ], [ [ "We can also do arithmetic with the variable. Convert the height in cm to metres, then print the new value as before (Warning! In Python 2, dividing an integer by an integer will return an integer.)", "_____no_output_____" ] ], [ [ "height_m = height_cm / 100\nprint('height in metres:',height_m)", "_____no_output_____" ] ], [ [ "We can check which variables are available in memory with the special command: `%whos`", "_____no_output_____" ] ], [ [ "%whos", "_____no_output_____" ] ], [ [ "We can see that each of our variables has a type (in this case `int` and `float`), describing the type of data held by the variable. We can use `type` to check the data type of a variable.", "_____no_output_____" ] ], [ [ "type(height_cm)", "_____no_output_____" ] ], [ [ "Another data type is a `list`, which can hold a series of items. For example, we might measure a patient's heart rate several times over a period. ", "_____no_output_____" ] ], [ [ "heartrate = [66,64,63,62,66,69,70,75,76]", "_____no_output_____" ], [ "print(heartrate)", "_____no_output_____" ], [ "type(heartrate)", "_____no_output_____" ] ], [ [ "## Repeating actions in loops", "_____no_output_____" ], [ "We can access individual items in a list using an index (note, in Python, indexing begins with 0!). For example, let's view the first `[0]` and second `[1]` heart rate measurements.", "_____no_output_____" ] ], [ [ "print(heartrate[0])", "_____no_output_____" ], [ "print(heartrate[1])", "_____no_output_____" ] ], [ [ "We can iterate through a list with the help of a `for` loop. Let's try looping over our list of heart rates, printing each item as we go.", "_____no_output_____" ] ], [ [ "for i in heartrate:\n print('the heart rate is:', i)", "_____no_output_____" ] ], [ [ "## Making choices", "_____no_output_____" ], [ "Sometimes we want to take different actions depending on a set of conditions. We can do this using an `if/else` statement. Let's write a statement to test if a mean arterial pressure (`meanpressure`) is high or low.", "_____no_output_____" ] ], [ [ "meanpressure = 70\n\nif meanpressure < 60:\n print('Low pressure')\nelif meanpressure > 100:\n print('High pressure')\nelse:\n print('Normal pressure')", "_____no_output_____" ] ], [ [ "## Writing our own functions", "_____no_output_____" ], [ "To help organise our code and to avoid replicating the same code again and again, we can create functions. 
\n\nLet's create a function to convert temperature in fahrenheit to celsius, using the following formula:\n\n`celsius = (fahrenheit - 32) * 5/9`", "_____no_output_____" ] ], [ [ "def fahr_to_celsius(temp):\n celsius = (temp - 32) * 5/9\n return celsius\n ", "_____no_output_____" ] ], [ [ "Now we can call the function `fahr_to_celsius` to convert a temperature from celsius to fahrenheit.\n\n", "_____no_output_____" ] ], [ [ "body_temp_f = 98.6\nbody_temp_c = fahr_to_celsius(body_temp_f)\nprint('Patient body temperature is:', body_temp_c, 'celsius')", "_____no_output_____" ] ], [ [ "## Reusing code with libraries", "_____no_output_____" ], [ "Python is a popular language for data analysis, so thankfully we can benefit from the hard work of others with the use of libraries. Pandas, a popular library for data analysis, introduces the `DataFrame`, a convenient data structure similar to a spreadsheet. Before using a library, we will need to import it.", "_____no_output_____" ] ], [ [ "# let's assign pandas an alias, pd, for brevity\nimport pandas as pd", "_____no_output_____" ] ], [ [ "We have shared a demo dataset online containing physiological data relating to 1000 patients admitted to an intensive care unit in Boston, Massachussetts, USA. Let's load this data into our new data structure.\n", "_____no_output_____" ] ], [ [ "url=\"https://raw.githubusercontent.com/tompollard/tableone/master/data/pn2012_demo.csv\"\ndata=pd.read_csv(url)", "_____no_output_____" ] ], [ [ "The variable `data` should now contain our new dataset. Let's view the first few rows using `head()`. Note: parentheses `\"()\"` are generally required when we are performing an action/operation. In this case, the action is to select a limited number of rows.", "_____no_output_____" ] ], [ [ "data.head()", "_____no_output_____" ] ], [ [ "We can perform other operations on the dataframe. For example, using `mean()` to get an average of the columns.", "_____no_output_____" ] ], [ [ "data.mean()", "_____no_output_____" ] ], [ [ "If we are unsure of the meaning of a method, we can check by adding `?` after the method. For example, what is `max`?", "_____no_output_____" ] ], [ [ "data.max?", "_____no_output_____" ], [ "data.max()", "_____no_output_____" ] ], [ [ "We can access a single column in the data by specifying the column name after the variable. For example, we can select a list of ages with `data.Age`, and then find the mean for this column in a similar way to before.", "_____no_output_____" ] ], [ [ "print('The mean age of patients is:', data.Age.mean())", "_____no_output_____" ] ], [ [ "Pandas also provides a convenient method `plot` for plotting data. Let's plot a distribution of the patient ages in our dataset.", "_____no_output_____" ] ], [ [ "data.Age.plot(kind='kde', title='Age of patients in years')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06f28a898fc931ec80f423393e2a22d00d1e1d2
162,974
ipynb
Jupyter Notebook
Python/Pandas/07-Sorting-Data/Pandas-Demo.ipynb
marflejs/code_snippets
2e928ed7f5eb3a21cccc3915cf797ab6a8d30e2b
[ "MIT" ]
9,588
2017-03-21T16:07:40.000Z
2022-03-31T08:43:39.000Z
Python/Pandas/07-Sorting-Data/Pandas-Demo.ipynb
marflejs/code_snippets
2e928ed7f5eb3a21cccc3915cf797ab6a8d30e2b
[ "MIT" ]
135
2017-04-29T15:28:11.000Z
2022-03-27T19:20:49.000Z
Python/Pandas/07-Sorting-Data/Pandas-Demo.ipynb
marflejs/code_snippets
2e928ed7f5eb3a21cccc3915cf797ab6a8d30e2b
[ "MIT" ]
20,939
2017-03-27T14:42:56.000Z
2022-03-31T16:41:14.000Z
49.581381
93
0.355572
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('data/survey_results_public.csv', index_col='Respondent')\nschema_df = pd.read_csv('data/survey_results_schema.csv', index_col='Column')", "_____no_output_____" ], [ "pd.set_option('display.max_columns', 85)\npd.set_option('display.max_rows', 85)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.sort_values(by=['Country', 'ConvertedComp'], ascending=[True, False], inplace=True)", "_____no_output_____" ], [ "df[['Country', 'ConvertedComp']].head(50)", "_____no_output_____" ], [ "df['ConvertedComp'].nlargest(10)", "_____no_output_____" ], [ "df.nsmallest(10, 'ConvertedComp')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06f4345c0012f4276c2243c887b09faf42cbae5
52,724
ipynb
Jupyter Notebook
01_ICST2020/03_BUildup_transformation.ipynb
achmfirmansyah/sweet_project
6fd272013d87f3fecc358c70a53171360d413d80
[ "MIT" ]
null
null
null
01_ICST2020/03_BUildup_transformation.ipynb
achmfirmansyah/sweet_project
6fd272013d87f3fecc358c70a53171360d413d80
[ "MIT" ]
null
null
null
01_ICST2020/03_BUildup_transformation.ipynb
achmfirmansyah/sweet_project
6fd272013d87f3fecc358c70a53171360d413d80
[ "MIT" ]
null
null
null
59.981797
483
0.587702
[ [ [ "<a href=\"https://colab.research.google.com/github/achmfirmansyah/sweet_project/blob/master/ICST2020/03_BUildup_transformation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\n!pip install rasterio\nimport rasterio\n!pip install geopandas\nimport geopandas as gpd\nimport numpy as np\nfrom google.colab import drive\ndrive.mount('/content/gdrive')\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n!pip install xgboost\nfrom xgboost import XGBClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix", "Collecting rasterio\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/02/7e/eed7dfd109fc89ed3cf8b5ed3f26f841b03b92f6ca1c31c4745f938a081b/rasterio-1.1.5-cp36-cp36m-manylinux1_x86_64.whl (18.2MB)\n\u001b[K |████████████████████████████████| 18.2MB 1.3MB/s \n\u001b[?25hCollecting affine\n Downloading https://files.pythonhosted.org/packages/ac/a6/1a39a1ede71210e3ddaf623982b06ecfc5c5c03741ae659073159184cd3e/affine-2.3.0-py2.py3-none-any.whl\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from rasterio) (1.18.5)\nRequirement already satisfied: click<8,>=4.0 in /usr/local/lib/python3.6/dist-packages (from rasterio) (7.1.2)\nRequirement already satisfied: attrs in /usr/local/lib/python3.6/dist-packages (from rasterio) (19.3.0)\nCollecting snuggs>=1.4.1\n Downloading https://files.pythonhosted.org/packages/cc/0e/d27d6e806d6c0d1a2cfdc5d1f088e42339a0a54a09c3343f7f81ec8947ea/snuggs-1.4.7-py3-none-any.whl\nCollecting cligj>=0.5\n Downloading https://files.pythonhosted.org/packages/e4/be/30a58b4b0733850280d01f8bd132591b4668ed5c7046761098d665ac2174/cligj-0.5.0-py3-none-any.whl\nCollecting click-plugins\n Downloading https://files.pythonhosted.org/packages/e9/da/824b92d9942f4e472702488857914bdd50f73021efea15b4cad9aca8ecef/click_plugins-1.1.1-py2.py3-none-any.whl\nRequirement already satisfied: pyparsing>=2.1.6 in /usr/local/lib/python3.6/dist-packages (from snuggs>=1.4.1->rasterio) (2.4.7)\nInstalling collected packages: affine, snuggs, cligj, click-plugins, rasterio\nSuccessfully installed affine-2.3.0 click-plugins-1.1.1 cligj-0.5.0 rasterio-1.1.5 snuggs-1.4.7\nCollecting geopandas\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/f8/dd/c0a6429cc7692efd5c99420c9df525c40f472b50705871a770449027e244/geopandas-0.8.0-py2.py3-none-any.whl (962kB)\n\u001b[K |████████████████████████████████| 962kB 2.8MB/s \n\u001b[?25hCollecting pyproj>=2.2.0\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/e5/c3/071e080230ac4b6c64f1a2e2f9161c9737a2bc7b683d2c90b024825000c0/pyproj-2.6.1.post1-cp36-cp36m-manylinux2010_x86_64.whl (10.9MB)\n\u001b[K |████████████████████████████████| 10.9MB 197kB/s \n\u001b[?25hCollecting fiona\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ec/20/4e63bc5c6e62df889297b382c3ccd4a7a488b00946aaaf81a118158c6f09/Fiona-1.8.13.post1-cp36-cp36m-manylinux1_x86_64.whl (14.7MB)\n\u001b[K |████████████████████████████████| 14.7MB 299kB/s \n\u001b[?25hRequirement already satisfied: shapely in /usr/local/lib/python3.6/dist-packages (from geopandas) (1.7.0)\nRequirement already satisfied: pandas>=0.23.0 in /usr/local/lib/python3.6/dist-packages (from geopandas) (1.0.5)\nCollecting munch\n Downloading 
https://files.pythonhosted.org/packages/cc/ab/85d8da5c9a45e072301beb37ad7f833cd344e04c817d97e0cc75681d248f/munch-2.5.0-py2.py3-none-any.whl\nRequirement already satisfied: click<8,>=4.0 in /usr/local/lib/python3.6/dist-packages (from fiona->geopandas) (7.1.2)\nRequirement already satisfied: cligj>=0.5 in /usr/local/lib/python3.6/dist-packages (from fiona->geopandas) (0.5.0)\nRequirement already satisfied: click-plugins>=1.0 in /usr/local/lib/python3.6/dist-packages (from fiona->geopandas) (1.1.1)\nRequirement already satisfied: six>=1.7 in /usr/local/lib/python3.6/dist-packages (from fiona->geopandas) (1.12.0)\nRequirement already satisfied: attrs>=17 in /usr/local/lib/python3.6/dist-packages (from fiona->geopandas) (19.3.0)\nRequirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.23.0->geopandas) (1.18.5)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.23.0->geopandas) (2018.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.23.0->geopandas) (2.8.1)\nInstalling collected packages: pyproj, munch, fiona, geopandas\nSuccessfully installed fiona-1.8.13.post1 geopandas-0.8.0 munch-2.5.0 pyproj-2.6.1.post1\nGo to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/gdrive\nRequirement already satisfied: xgboost in /usr/local/lib/python3.6/dist-packages (0.90)\nRequirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.18.5)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from xgboost) (1.4.1)\n" ], [ "lokus=['jabodetabek','mebidangro','maminasata','kedungsepur']\nconfussion_matrix_list=[]\nclassification_report_list=[]", "_____no_output_____" ], [ "#Create model\nfor lokasi in lokus:\n print(lokasi)\n dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n dataset_bands=pd.DataFrame()\n for i in dataset.indexes:\n temp=dataset.read(i)\n temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n dataset_bands=temp.join(dataset_bands)\n dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n dataset[['U_class']], test_size = 0.20)\n 
xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n learning_rate=0.05, max_depth= 4, \n min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n seed= 123, silent=1, subsample= 0.8)\n xgclassifier.fit(X_train.values,y_train.values)\n y_pred=xgclassifier.predict(X_test.values)\n #confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n classification_report_list.append(classification_report(y_test, y_pred))\n dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n class_2014_=xgclassifier.predict(dataset.values)\n confussion_matrix_list.append(confusion_matrix(dataset_bands[['U_class']],class_2014_))\n temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n temp = temp_.read(1)\n \n array_class_2014=class_2014_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n \n profile = temp_.profile\n profile.update(\n dtype=array_class_2014.dtype,\n count=1,\n compress='lzw')\n with rasterio.open(\n '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2014_'+lokasi+'.tif','w',**profile) as dst2:\n dst2.write(array_class_2014,1)\n dst2.close()", "jabodetabek\n" ], [ "confussion_matrix_list[3]", "_____no_output_____" ], [ "#Create model\nfor lokasi in lokus:\n print(lokasi)\n dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n dataset_bands=pd.DataFrame()\n for i in dataset.indexes:\n temp=dataset.read(i)\n temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n dataset_bands=temp.join(dataset_bands)\n dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n dataset[['U_class']], test_size = 0.20)\n xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n learning_rate=0.05, max_depth= 4, \n min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n seed= 123, silent=1, subsample= 0.8)\n xgclassifier.fit(X_train.values,y_train.values)\n y_pred=xgclassifier.predict(X_test.values)\n confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2019:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 
5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2019_=xgclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2019=class_2019_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2019.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2019_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2019,1)\n # dst2.close()", "jabodetabek\n" ], [ "#Create model\nfor lokasi in lokus:\n #print(lokasi)\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n #dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n #X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n # dataset[['U_class']], test_size = 0.20)\n #xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n # learning_rate=0.05, max_depth= 4, \n # min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n # seed= 123, silent=1, subsample= 0.8)\n #xgclassifier.fit(X_train.values,y_train.values)\n #y_pred=xgclassifier.predict(X_test.values)\n\n #classification_2015:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n 
#dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2015_=xgclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2015=class_2015_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2015.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2015_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2015,1)\n # dst2.close()", "jabodetabek\n" ], [ "lokus=['gerbangkertasusila']\n#Create model\nfor lokasi in lokus:\n print(lokasi)\n dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n dataset_bands=pd.DataFrame()\n for i in dataset.indexes:\n temp=dataset.read(i)\n temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n dataset_bands=temp.join(dataset_bands)\n dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n dataset[['U_class']], test_size = 0.20)\n xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n learning_rate=0.05, max_depth= 5, \n min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n seed= 123, silent=1, subsample= 0.8)\n xgclassifier.fit(X_train.values,y_train.values)\n #y_pred=xgclassifier.predict(X_test.values)\n y_pred=xgclassifier.predict(dataset_bands[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']].values)\n confussion_matrix_list.append(confusion_matrix(dataset_bands[['U_class']],y_pred))\n #confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n #classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2015:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n 
#dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2015_=xgclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2015=class_2015_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2015.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2015_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2015,1)\n # dst2.close()\n#Create model\n#for lokasi in lokus:\n # print(lokasi)\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n #dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n #X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n # dataset[['U_class']], test_size = 0.20)\n #xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n # learning_rate=0.05, max_depth= 5, \n # min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n # seed= 123, silent=1, subsample= 0.8)\n #xgclassifier.fit(X_train.values,y_train.values)\n #y_pred=xgclassifier.predict(X_test.values)\n #confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n #classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2019:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2019_=xgclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My 
Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2019=class_2019_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2019.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2019_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2019,1)\n # dst2.close()\n\n#Create model\n#for lokasi in lokus:\n #print(lokasi)\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n #dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n #X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n # dataset[['U_class']], test_size = 0.20)\n #xgclassifier = XGBClassifier(random_state=123,colsample_bytree= 0.7, \n # learning_rate=0.05, max_depth= 5, \n # min_child_weight=11, n_estimators= 500, nthread= 4, objective= 'binary:logistic', \n # seed= 123, silent=1, subsample= 0.8)\n #xgclassifier.fit(X_train.values,y_train.values)\n #y_pred=xgclassifier.predict(X_test.values)\n #confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n #classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2019:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2014_=xgclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2014=class_2014_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # 
dtype=array_class_2014.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2014_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2014,1)\n # dst2.close()", "gerbangkertasusila\n" ], [ "confussion_matrix_list[4]", "_____no_output_____" ], [ "lokus=['bandungraya']\nfrom sklearn.ensemble import RandomForestClassifier\n#Create model\nfor lokasi in lokus:\n print(lokasi)\n dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n dataset_bands=pd.DataFrame()\n for i in dataset.indexes:\n temp=dataset.read(i)\n temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n dataset_bands=temp.join(dataset_bands)\n dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n dataset[['U_class']], test_size = 0.20)\n rfclassifier = RandomForestClassifier(random_state=123,max_depth=6,n_estimators=500,criterion='entropy')\n rfclassifier.fit(X_train.values,y_train.values)\n y_pred=rfclassifier.predict(dataset_bands[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']].values)\n confussion_matrix_list.append(confusion_matrix(dataset_bands['U_class'],y_pred))\n #y_pred=rfclassifier.predict(X_test.values)\n\n #classification_2014:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n ##dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2015_=rfclassifier.predict(dataset.values)\n #dataset_bands['U_class']\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2015_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2015=class_2015_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2015.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My 
Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2015_'+lokasi+'_2.tif','w',**profile) as dst2:\n # dst2.write(array_class_2015,1)\n # dst2.close()\n#Create model\n#for lokasi in lokus:\n # print(lokasi)\n # dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n # dataset_bands=pd.DataFrame()\n # for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n # dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n #dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n #X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n # dataset[['U_class']], test_size = 0.20)\n #rfclassifier = RandomForestClassifier(random_state=123,max_depth=6,n_estimators=500,criterion='entropy')\n #rfclassifier.fit(X_train.values,y_train.values)\n #y_pred=rfclassifier.predict(X_test.values)\n #confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n #classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2019:\n #dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #dataset_bands=pd.DataFrame()\n #for i in dataset.indexes:\n # temp=dataset.read(i)\n # temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n # dataset_bands=temp.join(dataset_bands)\n #dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n # 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n #dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n #dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n # 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n #dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n #class_2019_=rfclassifier.predict(dataset.values)\n #temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2019_'+lokasi+'.tif')\n #temp = temp_.read(1)\n #array_class_2019=class_2019_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n #profile = temp_.profile\n #profile.update(\n # dtype=array_class_2019.dtype,\n # count=1,\n # compress='lzw')\n #with rasterio.open(\n # '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2019_'+lokasi+'.tif','w',**profile) as dst2:\n # dst2.write(array_class_2019,1)\n # dst2.close()\n\n#Create model\n#for lokasi in lokus:\n# print(lokasi)\n# dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n# dataset_bands=pd.DataFrame()\n# for i in 
dataset.indexes:\n# temp=dataset.read(i)\n# temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n# dataset_bands=temp.join(dataset_bands)\n# dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n# 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n# dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n# dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n# 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n# dataset=dataset_bands.query('U_class==0').sample(10000).append(dataset_bands.query('U_class==1').sample(10000))\n# dataset=dataset.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7','U_class']]\n# X_train, X_test, y_train, y_test = train_test_split(dataset[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDVI_landsat7','NDBI_landsat7','LSE_der_landsat7']],\n# dataset[['U_class']], test_size = 0.20)\n# rfclassifier = RandomForestClassifier(random_state=123,max_depth=6,n_estimators=500,criterion='entropy')\n# rfclassifier.fit(X_train.values,y_train.values)\n# y_pred=rfclassifier.predict(X_test.values)\n# confussion_matrix_list.append(confusion_matrix(y_test, y_pred))\n# classification_report_list.append(classification_report(y_test, y_pred))\n\n #classification_2019:\n# dataset = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n# dataset_bands=pd.DataFrame()\n# for i in dataset.indexes:\n# temp=dataset.read(i)\n# temp=pd.DataFrame(data=np.array(temp).flatten()).rename(columns={0:i})\n# dataset_bands=temp.join(dataset_bands)\n# dataset_bands.rename(columns={1:'BU_class',2:'NDVI_landsat7',3:'NDBI_landsat7',4:'MNDWI_landsat7',\n# 5:'SAVI_landsat7',6:'LSE_landsat7',7:'rad_VIIRS'},inplace=True)\n# dataset_bands['U_class']=dataset_bands.BU_class.apply(lambda y: 1 if y>=3 else 0)\n# dataset_bands['LSE_der_landsat7']=dataset_bands.NDVI_landsat7.apply(lambda y: 0.995 if y < -0.185 else (\n# 0.970 if y< 0.157 else (1.0098+0.047*np.log(y) if y<0.727 else 0.990)))\n# dataset=dataset_bands.reset_index()[['rad_VIIRS','SAVI_landsat7','MNDWI_landsat7','NDBI_landsat7','NDVI_landsat7','LSE_der_landsat7']]\n# class_2014_=rfclassifier.predict(dataset.values)\n# temp_ = rasterio.open('/content/gdrive/My Drive/Urban_monitoring/Urban_/compiled_GHSL_30_train_2014_'+lokasi+'.tif')\n# temp = temp_.read(1)\n# array_class_2014=class_2014_.reshape(temp.shape[0],temp.shape[1]).astype(np.uint8)\n# profile = temp_.profile\n# profile.update(\n# dtype=array_class_2014.dtype,\n# count=1,\n# compress='lzw')\n# with rasterio.open(\n# '/content/gdrive/My Drive/Urban_monitoring/Urban_/GHSL_/GHSL_rev/rev_class_2014_'+lokasi+'.tif','w',**profile) as dst2:\n# dst2.write(array_class_2014,1)\n# dst2.close()", "bandungraya\n" ], [ "confussion_matrix_list[5]", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06f46cf1627736523708137dc57373fb80a291f
8,007
ipynb
Jupyter Notebook
music-app/Data_Scraping/applescraper.ipynb
DL-ify/Music-app
9410469a87930201f46e05dc79e4a2c5bc73f098
[ "MIT" ]
1
2020-08-16T09:59:57.000Z
2020-08-16T09:59:57.000Z
music-app/Data_Scraping/applescraper.ipynb
DL-ify/Music-app
9410469a87930201f46e05dc79e4a2c5bc73f098
[ "MIT" ]
null
null
null
music-app/Data_Scraping/applescraper.ipynb
DL-ify/Music-app
9410469a87930201f46e05dc79e4a2c5bc73f098
[ "MIT" ]
2
2020-08-16T10:00:00.000Z
2020-08-16T11:02:05.000Z
29.116364
91
0.49719
[ [ [ "'''\n\nManual AppleMusic Scraper\n\n'''", "_____no_output_____" ], [ "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport datetime\nimport json\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://music.apple.com/\")", "_____no_output_____" ] ], [ [ "### Sign in to Apple Music and navigate to required song class-name", "_____no_output_____" ] ], [ [ "songs_wrap = driver.find_elements_by_class_name(\"song-name\")\nsongs = [x.text for x in song_wrap]\n\nsong_subdiv = driver.find_elements_by_class_name(\"song-name-wrapper\")\n\nartists_wrap = [x.find_element_by_class_name(\"dt-link-to\") for x in song_subdiv]\nartists = [x.text for x in artists_wrap]", "_____no_output_____" ], [ "len(songs)", "_____no_output_____" ], [ "len(artists)", "_____no_output_____" ], [ "'''\nTo combine song and artists lists into a single list of dictionaries\n'''\nl = []\nd = {}\n\nfor x in range(0,99):\n d[\"artist\"] = artists[x]\n d[\"song\"] = songs[x]\n l.append(d)\n d = {} ", "_____no_output_____" ], [ "l", "_____no_output_____" ], [ "#Write list as Json file\nwith open(\"Love100++.json\", \"w\") as read_file:\n json.dump(l, read_file)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d06f64fd3d3dbbb4eda7c30f9c8b279d6fafe651
237,174
ipynb
Jupyter Notebook
colabs/QNN_hands_on.ipynb
derekchen-2/physics-math-tutorials
cc3a4d89133d27d7f31628d4aefe12b1576fef5e
[ "Apache-2.0" ]
4
2022-01-05T14:58:41.000Z
2022-03-30T17:33:38.000Z
colabs/QNN_hands_on.ipynb
derekchen-2/physics-math-tutorials
cc3a4d89133d27d7f31628d4aefe12b1576fef5e
[ "Apache-2.0" ]
null
null
null
colabs/QNN_hands_on.ipynb
derekchen-2/physics-math-tutorials
cc3a4d89133d27d7f31628d4aefe12b1576fef5e
[ "Apache-2.0" ]
4
2022-02-18T23:07:55.000Z
2022-02-27T23:17:13.000Z
237,174
237,174
0.550381
[ [ [ "Copyright 2021 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.", "_____no_output_____" ], [ "# Quantum Neural Networks\n\nThis notebook provides an introduction to Quantum Neural Networks (QNNs) using the Cirq. The presentation mostly follows [Farhi and Neven](https://arxiv.org/abs/1802.06002). We will construct a simple network for classification to demonstrate its utility on some randomly generated toy data.\n\nFirst we need to install cirq, which has to be done each time this notebook is run. Executing the following cell will do that.", "_____no_output_____" ] ], [ [ "# install published dev version\n# !pip install cirq~=0.4.0.dev\n\n# install directly from HEAD:\n!pip install git+https://github.com/quantumlib/Cirq.git@8c59dd97f8880ac5a70c39affa64d5024a2364d0", "Collecting git+https://github.com/quantumlib/Cirq.git@8c59dd97f8880ac5a70c39affa64d5024a2364d0\n Cloning https://github.com/quantumlib/Cirq.git (to revision 8c59dd97f8880ac5a70c39affa64d5024a2364d0) to /tmp/pip-req-build-p85k13_x\nRequirement already satisfied: google-api-python-client~=1.6 in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (1.6.7)\nCollecting matplotlib~=2.2 (from cirq==0.4.0.dev42)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/9e/59/f235ab21bbe7b7c6570c4abf17ffb893071f4fa3b9cf557b09b60359ad9a/matplotlib-2.2.3-cp36-cp36m-manylinux1_x86_64.whl (12.6MB)\n\u001b[K 100% |████████████████████████████████| 12.6MB 837kB/s \n\u001b[?25hRequirement already satisfied: networkx~=2.1 in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (2.2)\nRequirement already satisfied: numpy~=1.12 in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (1.14.6)\nRequirement already satisfied: protobuf~=3.5 in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (3.6.1)\nRequirement already satisfied: requests~=2.18 in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (2.18.4)\nCollecting sortedcontainers~=2.0 (from cirq==0.4.0.dev42)\n Downloading https://files.pythonhosted.org/packages/be/e3/a065de5fdd5849450a8a16a52a96c8db5f498f245e7eda06cc6725d04b80/sortedcontainers-2.0.5-py2.py3-none-any.whl\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from cirq==0.4.0.dev42) (1.1.0)\nCollecting typing_extensions (from cirq==0.4.0.dev42)\n Downloading https://files.pythonhosted.org/packages/62/4f/392a1fa2873e646f5990eb6f956e662d8a235ab474450c72487745f67276/typing_extensions-3.6.6-py3-none-any.whl\nRequirement already satisfied: oauth2client<5.0.0dev,>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client~=1.6->cirq==0.4.0.dev42) (4.1.3)\nRequirement already satisfied: uritemplate<4dev,>=3.0.0 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client~=1.6->cirq==0.4.0.dev42) (3.0.0)\nRequirement already satisfied: six<2dev,>=1.6.1 in /usr/local/lib/python3.6/dist-packages (from google-api-python-client~=1.6->cirq==0.4.0.dev42) (1.11.0)\nRequirement already satisfied: httplib2<1dev,>=0.9.2 in 
/usr/local/lib/python3.6/dist-packages (from google-api-python-client~=1.6->cirq==0.4.0.dev42) (0.11.3)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib~=2.2->cirq==0.4.0.dev42) (2.3.0)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib~=2.2->cirq==0.4.0.dev42) (2.5.3)\nRequirement already satisfied: pytz in /usr/local/lib/python3.6/dist-packages (from matplotlib~=2.2->cirq==0.4.0.dev42) (2018.7)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib~=2.2->cirq==0.4.0.dev42) (0.10.0)\nCollecting kiwisolver>=1.0.1 (from matplotlib~=2.2->cirq==0.4.0.dev42)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/a7/88719d132b18300b4369fbffa741841cfd36d1e637e1990f27929945b538/kiwisolver-1.0.1-cp36-cp36m-manylinux1_x86_64.whl (949kB)\n\u001b[K 100% |████████████████████████████████| 952kB 10.0MB/s \n\u001b[?25hRequirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx~=2.1->cirq==0.4.0.dev42) (4.3.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from protobuf~=3.5->cirq==0.4.0.dev42) (40.6.2)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests~=2.18->cirq==0.4.0.dev42) (2018.10.15)\nRequirement already satisfied: idna<2.7,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests~=2.18->cirq==0.4.0.dev42) (2.6)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests~=2.18->cirq==0.4.0.dev42) (3.0.4)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests~=2.18->cirq==0.4.0.dev42) (1.22)\nRequirement already satisfied: pyasn1>=0.1.7 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5.0.0dev,>=1.5.0->google-api-python-client~=1.6->cirq==0.4.0.dev42) (0.4.4)\nRequirement already satisfied: rsa>=3.1.4 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5.0.0dev,>=1.5.0->google-api-python-client~=1.6->cirq==0.4.0.dev42) (4.0)\nRequirement already satisfied: pyasn1-modules>=0.0.5 in /usr/local/lib/python3.6/dist-packages (from oauth2client<5.0.0dev,>=1.5.0->google-api-python-client~=1.6->cirq==0.4.0.dev42) (0.2.2)\nBuilding wheels for collected packages: cirq\n Running setup.py bdist_wheel for cirq ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \bdone\n\u001b[?25h Stored in directory: /tmp/pip-ephem-wheel-cache-ub0vqjao/wheels/ae/db/83/edbb28e59157c931c9342e7ae581bebdf4939c0465a413aaaf\nSuccessfully built cirq\nInstalling collected packages: kiwisolver, matplotlib, sortedcontainers, typing-extensions, cirq\n Found existing installation: matplotlib 2.1.2\n Uninstalling matplotlib-2.1.2:\n Successfully uninstalled matplotlib-2.1.2\nSuccessfully installed cirq-0.4.0.dev42 kiwisolver-1.0.1 matplotlib-2.2.3 sortedcontainers-2.0.5 typing-extensions-3.6.6\n" ] ], [ [ "To verify that Cirq is installed in your environment, try to `import cirq` and print out a diagram of the Foxtail device. 
It should produce a 2x11 grid of qubits.", "_____no_output_____" ] ], [ [ "import cirq\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(cirq.google.Foxtail)", "(0, 0)───(0, 1)───(0, 2)───(0, 3)───(0, 4)───(0, 5)───(0, 6)───(0, 7)───(0, 8)───(0, 9)───(0, 10)\n│ │ │ │ │ │ │ │ │ │ │\n│ │ │ │ │ │ │ │ │ │ │\n(1, 0)───(1, 1)───(1, 2)───(1, 3)───(1, 4)───(1, 5)───(1, 6)───(1, 7)───(1, 8)───(1, 9)───(1, 10)\n" ] ], [ [ "### The QNN Idea\nWe'll begin by describing here the QNN model we are pursuing. We'll discuss the quantum circuit describing a very simple neuron, and how it can be trained.", "_____no_output_____" ], [ "As in an ordinary neural network, a QNN takes in data, processes that data, and then returns an answer. In the quantum case, the data will be encoded into the initial quantum state, and the processing step is the action of a quantum circuit on that quantum state. At the end we will measure one or more of the qubits, and the statistics of those measurements are the output of the net.\n\n", "_____no_output_____" ], [ "#### Classical vs Quantum", "_____no_output_____" ], [ "An ordinary neural network can only handle classical input. The input to a QNN, though, is a quantum state, which consists of $2^n$ complex amplitudes for $n$-qubits. If you attached your quantum computer directly to some physics experiment, for example, then you could have a QNN do some post-processing on the experimental wavefunction in lieu of a more traditional measurement. There are some very exciting possiblities there, but unfortunately we wil not be considering them in this Colab. It requires significantly more quantum background to understand what's going on, and it's harder to give examples because the input states themselves can be quite complicated. For recent examples of that kind of network, though, check out [this](https://arxiv.org/abs/1805.08654) paper and [this](https://arxiv.org/abs/1810.03787) paper. The basic ingredients are similar to what we'll cover here", "_____no_output_____" ], [ "In this Colab we'll focus on classical inputs, by which I mean the specification of one of the computational basis states as the initial state. There are a total of $2^n$ of these states for $n$ qubits. Note the crucial difference between this case and the quantum case: in the quantum case the input is $2^n$-*dimensional*, while in the classical case there are $2^n$ *possible* inputs. The quantum neural network can process these inputs in a \"quantum\" way, meaning that it may be able to evaluate certain functions on these inputs more efficiently than a classical network. Whether the \"quantum\" processing is actually useful in practice remains to be seen, and in this Colab we will not have time to really get into that aspect of a QNN.", "_____no_output_____" ], [ "#### Data Processing", "_____no_output_____" ], [ "Given the classical input state, what will we do with it? At this stage it's helpful to be more specific and definite about the problem we are trying to solve. The problem we're going to focus on in this Colab is __two-category classicfication__. That means that after the quantum circuit has finished running, we measure one of the qubits, the *readout* qubit, and the value of that qubit will tell us which of the two categories our classical input state belonged to. Since this is quantum, the output that qubit is going to be random according to some probability distributuion. 
So really we're going to repeat the computation many times and take a majority vote.", "_____no_output_____" ], [ "Our classical input data is a bitstring that is converted into a computational basis state. We want to influence the readout qubit in a way that depends on this state. Our main tool for this a gate we call the $ZX$-gate, which acts on two qubits as\n$$\n\\exp(i \\pi w Z \\otimes X) = \\begin{bmatrix}\n\\cos \\pi w & i\\sin\\pi w &0&0\\\\\ni\\sin\\pi w & \\cos \\pi w &0&0\\\\\n0&0& \\cos \\pi w & -i\\sin\\pi w \\\\\n0&0 & -i\\sin\\pi w & \\cos\\pi w \n\\end{bmatrix},\n$$\nwhere $w$ is a free parameter ($w$ stands for weight). This gate rotates the second qubit around the $X$-axis (on the Bloch sphere) either clockwise or counterclockwise depending on the state of the first qubit as seen in the computational basis. The amount of the rotation is determined by $w$.", "_____no_output_____" ], [ "If we connect each of our input qubits to the readout qubit using one of these gates, then the result is that the readout qubit will be rotated in a way that depeonds the initial state in a straightforward way. This rotation is in the $YZ$ plane, so will change the statistics of measurements in either the $Z$ basis or the $Y$ basis for the readout qubit. We're going to choose to have the initial state of the readout qubit to be a standard computational basis state as usual, which is a $Z$ eigenstate but \"neutral\" with respect to $Y$ (i.e., 50/50 probabilty of $Y=+1$ or $Y=-1$). Then after all of the rotations are complete we'll measure the readout qubit in the $Y$ basis. If all goes well, then the net rotation induced by the $ZX$ gates will place the readout qubit near one of the two $Y$ eigenstates in a way that depends on the initial data.", "_____no_output_____" ], [ "To summarize, here is our strategy for processing the two-category classification problem:\n\n1) Prepare a computational basis state corresponding to the input that should be categorized.\n\n2) Use $ZX$ gates to rotate the state of the readout qubit in a way that depends on the input.\n\n3) Measure the readout qubit in the $Y$ basis to get the predicted label. Take a majority vote after many repetitions.", "_____no_output_____" ], [ "This is the simplest possible kind of network, and really only corresponds to a single neuron. We'll talk about more complicated possibilities after understanding how to implement this one.", "_____no_output_____" ], [ "### Custom Two-Qubit Gate\n\nOur first task is to code up the $ZX$ gate described above, which is given by the matrix\n$$\n\\exp(i \\pi w Z \\otimes X) = \\begin{bmatrix}\n\\cos \\pi w & i\\sin\\pi w &0&0\\\\\ni\\sin\\pi w & \\cos \\pi w &0&0\\\\\n0&0& \\cos \\pi w & -i\\sin\\pi w \\\\\n0&0 & -i\\sin\\pi w & \\cos\\pi w \n\\end{bmatrix},\n$$", "_____no_output_____" ], [ "Just from the form of the gate we can see that it performs a rotation by angle $\\pm \\pi w$ on the second qubit depending on the value of the first qubit. If we only had one or the other of these two blocks, then this gate would literally be a controlled rotation. 
For example, using the Cirq conventions,\n$$\nCR_X(\\theta) = \\begin{bmatrix}\n1 & 0 &0&0\\\\\n0 & 1 &0&0\\\\\n0&0& \\cos \\theta/2 & -i\\sin \\theta/2 \\\\\n0&0 & -i\\sin\\theta/2 & \\cos\\theta/2 \n\\end{bmatrix},\n$$\nwhich means that setting $\\theta = 2\\pi w$ should give us (part) of our desired transformation.", "_____no_output_____" ] ], [ [ "a = cirq.NamedQubit(\"a\")\nb = cirq.NamedQubit(\"b\")\nw = .25 # Put your own weight here.\nangle = 2*np.pi*w\ncircuit = cirq.Circuit.from_ops(cirq.ControlledGate(cirq.Rx(angle)).on(a,b))\nprint(circuit)\ncircuit.to_unitary_matrix().round(2)", "a: ───@──────────\n │\nb: ───Rx(0.5π)───\n" ] ], [ [ "__Question__: The rotation in the upper-left block is by the opposite angle. But how do we get the rotation to happen in the upper-left block of the $4\\times 4$ matrix in the first place? What is the circuit?", "_____no_output_____" ], [ "#### Solution", "_____no_output_____" ], [ "Switching the upper-left and lower-right blocks of a controlled gate corresponds to activating when the control qubit is in the $|0\\rangle$ state instead of the $|1\\rangle$ state. We can arrange this to happen by taking the control gate we already have and conjugating the control qubit by $X$ gates (which implement the NOT operation). Don't forget to also rotate by the opposite angle.", "_____no_output_____" ] ], [ [ "a = cirq.NamedQubit(\"a\")\nb = cirq.NamedQubit(\"b\")\nw = 0.25 # Put your own weight here.\nangle = 2*np.pi*w\ncircuit = cirq.Circuit.from_ops([cirq.X(a),\n cirq.ControlledGate(cirq.Rx(-angle)).on(a,b),\n cirq.X(a)])\nprint(circuit)\ncircuit.to_unitary_matrix().round(2)", "a: ───X───@───────────X───\n │\nb: ───────Rx(-0.5π)───────\n" ] ], [ [ "#### The Full $ZX$ Gate", "_____no_output_____" ], [ "We can put together the two controlled rotations to make the full $ZX$ gate. Having discussed the decomposition already, we can make our own class and specify its action using the `_decpompose_` method. Fill in the following code block to implement this gate.", "_____no_output_____" ] ], [ [ "class ZXGate(cirq.ops.gate_features.TwoQubitGate):\n \"\"\"ZXGate with variable weight.\"\"\"\n \n def __init__(self, weight=1):\n \"\"\"Initializes the ZX Gate up to phase.\n\n Args:\n weight: rotation angle, period 2 \n \"\"\"\n self.weight = weight\n \n def _decompose_(self, qubits):\n a, b = qubits\n ## YOUR CODE HERE\n\n # This lets the weight be a Symbol. Useful for paramterization.\n def _resolve_parameters_(self, param_resolver):\n return ZXGate(weight=param_resolver.value_of(self.weight))\n\n # How should the gate look in ASCII diagrams?\n def _circuit_diagram_info_(self, args): \n return cirq.protocols.CircuitDiagramInfo(\n wire_symbols=('Z', 'X'),\n exponent=self.weight) ", "_____no_output_____" ] ], [ [ "#### Solution", "_____no_output_____" ] ], [ [ "class ZXGate(cirq.ops.gate_features.TwoQubitGate):\n \"\"\"ZXGate with variable weight.\"\"\"\n \n def __init__(self, weight=1):\n \"\"\"Initializes the ZX Gate up to phase.\n\n Args:\n weight: rotation angle, period 2 \n \"\"\"\n self.weight = weight\n \n def _decompose_(self, qubits):\n a, b = qubits\n yield cirq.ControlledGate(cirq.Rx(2*np.pi*self.weight)).on(a,b)\n yield cirq.X(a)\n yield cirq.ControlledGate(cirq.Rx(-2*np.pi*self.weight)).on(a,b)\n yield cirq.X(a)\n\n # This lets the weight be a Symbol. 
Useful for paramterization.\n def _resolve_parameters_(self, param_resolver):\n return ZXGate(weight=param_resolver.value_of(self.weight))\n\n # How should the gate look in ASCII diagrams?\n def _circuit_diagram_info_(self, args): \n return cirq.protocols.CircuitDiagramInfo(\n wire_symbols=('Z', 'X'),\n exponent=self.weight) ", "_____no_output_____" ] ], [ [ "#### EigenGate Implementation\n\nAnother way to specify how a gate works is by an explicit eigen-action. In our case that is also easy, since we know that the gate acts as a phase (the eigenvalue) when the first qubit is in a $Z$ eigenstate (i.e., a computational basis state) and the second qubit is in an $X$ eigenstate.\n\nThe way we specify eigen-actions in Cirq is through the `_eigen_components` method, where we need to specify the eigenvalue as a phase together with a projector onto the eigenspace of that phase. We also conventionally specify the gate at $w=1$ and set $w$ internally to be the `exponent` of the gate, which automatically implements other values of $w$ for us.\n\nIn the case of the $ZX$ gate with $w=1$, one of our eigenvalues is $\\exp(+i\\pi)$, which is specified as $1$ in Cirq. (Because $1$ is the coefficeint of $i\\pi$ in the exponential.) This is the phase when when the first qubit is in the $Z=+1$ state and the second qubit is in the $X=+1$ state, or when the first qubit is in the $Z=-1$ state and the second qubit is in the $X=-1$ state. The projector onto these states is\n$$\n\\begin{align}\nP &= |0+\\rangle \\langle 0{+}| + |1-\\rangle \\langle 1{-}|\\\\\n&= \\frac{1}{2}\\big(|00\\rangle \\langle 00| +|00\\rangle \\langle 01|+|01\\rangle \\langle 00|+|01\\rangle \\langle 01|+ |10\\rangle \\langle 10|-|10\\rangle \\langle 11|-|11\\rangle \\langle 10|+|11\\rangle \\langle 11|\\big)\\\\\n&=\\frac{1}{2}\\begin{bmatrix}\n1 & 1 &0&0\\\\\n1 & 1 &0&0\\\\\n0&0& 1 & -1 \\\\\n0&0 & -1 & 1\n\\end{bmatrix}\n\\end{align}\n$$\nA similar formula holds for the eigenvalue $\\exp(-i\\pi)$ with the two blocks in the projector flipped.\n\n\n__Exercise__: Implement the $ZX$ gate as an `EigenGate` using this decomposition. The following codeblock will get you started.\n", "_____no_output_____" ] ], [ [ "class ZXGate(cirq.ops.eigen_gate.EigenGate,\n cirq.ops.gate_features.TwoQubitGate):\n \"\"\"ZXGate with variable weight.\"\"\"\n \n def __init__(self, weight=1):\n \"\"\"Initializes the ZX Gate up to phase.\n\n Args:\n weight: rotation angle, period 2 \n \"\"\"\n self.weight = weight\n super().__init__(exponent=weight) # Automatically handles weights other than 1\n \n def _eigen_components(self):\n return [\n (1, np.array([[0.5, 0.5, 0, 0],\n [ 0.5, 0.5, 0, 0],\n [0, 0, 0.5, -0.5],\n [0, 0, -0.5, 0.5]])),\n (??, ??) # YOUR CODE HERE: phase and projector for the other eigenvalue\n ]\n\n # This lets the weight be a Symbol. 
Useful for parameterization.\n def _resolve_parameters_(self, param_resolver):\n return ZXGate(weight=param_resolver.value_of(self.weight))\n\n # How should the gate look in ASCII diagrams?\n def _circuit_diagram_info_(self, args): \n return cirq.protocols.CircuitDiagramInfo(\n wire_symbols=('Z', 'X'),\n exponent=self.weight) ", "_____no_output_____" ] ], [ [ "#### Solution", "_____no_output_____" ] ], [ [ "class ZXGate(cirq.ops.eigen_gate.EigenGate,\n cirq.ops.gate_features.TwoQubitGate):\n \"\"\"ZXGate with variable weight.\"\"\"\n \n def __init__(self, weight=1):\n \"\"\"Initializes the ZX Gate up to phase.\n\n Args:\n weight: rotation angle, period 2 \n \"\"\"\n self.weight = weight\n super().__init__(exponent=weight) # Automatically handles weights other than 1\n \n def _eigen_components(self):\n return [\n (1, np.array([[0.5, 0.5, 0, 0],\n [ 0.5, 0.5, 0, 0],\n [0, 0, 0.5, -0.5],\n [0, 0, -0.5, 0.5]])),\n (-1, np.array([[0.5, -0.5, 0, 0],\n [ -0.5, 0.5, 0, 0],\n [0, 0, 0.5, 0.5],\n [0, 0, 0.5, 0.5]]))\n ]\n\n # This lets the weight be a Symbol. Useful for parameterization.\n def _resolve_parameters_(self, param_resolver):\n return ZXGate(weight=param_resolver.value_of(self.weight))\n\n # How should the gate look in ASCII diagrams?\n def _circuit_diagram_info_(self, args): \n return cirq.protocols.CircuitDiagramInfo(\n wire_symbols=('Z', 'X'),\n exponent=self.weight) ", "_____no_output_____" ] ], [ [ "#### Testing the Gate", "_____no_output_____" ], [ "__BEFORE MOVING ON__ make sure you've executed the `EigenGate` solution of the $ZX$ gate implementation. That's the one assumed for the code below, though other implementations may work just as well. In general, the cells in this Colab may depend on previous cells.\n\nLet's test out our gate. First we'll make a simple test circuit to see that the ASCII diagrams are diplaying properly:", "_____no_output_____" ] ], [ [ "a = cirq.NamedQubit(\"a\")\nb = cirq.NamedQubit(\"b\")\nw = .15 # Put your own weight here. Try using a cirq.Symbol.\ncircuit = cirq.Circuit.from_ops(ZXGate(w).on(a,b))\nprint(circuit)", "a: ───Z────────\n │\nb: ───X^0.15───\n" ] ], [ [ "We should also check that the matrix is what we expect:", "_____no_output_____" ] ], [ [ "test_matrix = np.array([[np.cos(np.pi*w), 1j*np.sin(np.pi*w), 0, 0],\n [1j*np.sin(np.pi*w), np.cos(np.pi*w), 0, 0],\n [0, 0, np.cos(np.pi*w), -1j*np.sin(np.pi*w)],\n [0, 0, -1j*np.sin(np.pi*w),np.cos(np.pi*w)]])\n# Test for five digits of accuracy. Won't work with cirq.Symbol\nassert (circuit.to_unitary_matrix().round(5) == test_matrix.round(5)).all()", "_____no_output_____" ] ], [ [ "### Create Circuit\n\nNow we have to create the QNN circuit. We are simply going to let a $ZX$ gate act between each data qubit and the readout qubit. For simplicity, let's share a single weight between all of the gates. You are invited to experiment with making the weights different, but in our example problem below we can set them all equal by symmetry.\n\n__Question__: What about the order of these actions? Which data qubits should interact with the readout qubit first?\n\nRemember that we also want to measure the readout qubit in the $Y$ basis. Fundamentally speaking, all measurements in Cirq are computational basis measurements, and so we have to implement the change of basis by hand.\n\n\n__Question__: What is the circuit for a basis transformation from the $Y$ basis to the computational basis? 
We want to choose our transformation so that an eigenstate with $Y=+1$ becomes an eigenstate with $Z=+1$ prior to measurement.", "_____no_output_____" ], [ "#### Solutions", "_____no_output_____" ], [ "* The $ZX$ gates all commute with each other, so the order of implementation doesn't matter!\n\n* We want a transformation that maps $\\big(|0\\rangle + i |1\\rangle\\big)/\\sqrt{2}$ to $|0\\rangle$ and $\\big(|0\\rangle - i |1\\rangle\\big)\\sqrt{2}$ to $|1\\rangle$. Recall that the phase gate $S$ is given in matrix form by\n$$\nS = \\begin{bmatrix}\n1 & 0 \\\\\n0 & i\n\\end{bmatrix},\n$$\nand the Hadamard transform is given by\n$$\nH = \\frac{1}{\\sqrt{2}}\\begin{bmatrix}\n1 & 1 \\\\\n1 & -1\n\\end{bmatrix},\n$$\nSo acting with $S^{-1}$ and then $H$ gives what we want. We'll add these two gates to the end of the circuit on the readout qubit so that the final measurement effectively occurs in the $Y$ basis.\n\n", "_____no_output_____" ], [ "#### Make Circuit", "_____no_output_____" ], [ "A clean way of making circuits is to define generators for logically-related circuit elements, and then `append` those to the circuit you want to make. Here is a code snippet that initializes our qubits and defines a generator for a single layer of $ZX$ gates:", "_____no_output_____" ] ], [ [ "# Total number of data qubits\nINPUT_SIZE = 9\n\ndata_qubits = cirq.LineQubit.range(INPUT_SIZE)\nreadout = cirq.NamedQubit('r')\n\n# Initialize parameters of the circuit\nparams = {'w': 0}\n\ndef ZX_layer():\n \"\"\"Adds a ZX gate between each data qubit and the readout.\n All gates are given the same cirq.Symbol for a weight.\"\"\"\n for qubit in data_qubits:\n yield ZXGate(cirq.Symbol('w')).on(qubit, readout)", "_____no_output_____" ] ], [ [ "Use this generator to create the QNN circuit. Don't forget to add the basis change for the readout qubit at the end!", "_____no_output_____" ] ], [ [ "qnn = cirq.Circuit()\nqnn.append(???) # YOUR CODE HERE", "_____no_output_____" ] ], [ [ "#### Solution ", "_____no_output_____" ] ], [ [ "qnn = cirq.Circuit()\nqnn.append(ZX_layer())\nqnn.append([cirq.S(readout)**-1, cirq.H(readout)]) # Basis transformation", "_____no_output_____" ] ], [ [ "#### View the Circuit", "_____no_output_____" ], [ "It's usually a good idea to view the ASCII diagram of your circuit to make sure it's doing what you want. This can be displayed by printing the circuit.", "_____no_output_____" ] ], [ [ "print(qnn)", "0: ───Z────────────────────────────────────────────────────────────────\n │\n1: ───┼─────Z──────────────────────────────────────────────────────────\n │ │\n2: ───┼─────┼─────Z────────────────────────────────────────────────────\n │ │ │\n3: ───┼─────┼─────┼─────Z──────────────────────────────────────────────\n │ │ │ │\n4: ───┼─────┼─────┼─────┼─────Z────────────────────────────────────────\n │ │ │ │ │\n5: ───┼─────┼─────┼─────┼─────┼─────Z──────────────────────────────────\n │ │ │ │ │ │\n6: ───┼─────┼─────┼─────┼─────┼─────┼─────Z────────────────────────────\n │ │ │ │ │ │ │\n7: ───┼─────┼─────┼─────┼─────┼─────┼─────┼─────Z──────────────────────\n │ │ │ │ │ │ │ │\n8: ───┼─────┼─────┼─────┼─────┼─────┼─────┼─────┼─────Z────────────────\n │ │ │ │ │ │ │ │ │\nr: ───X^w───X^w───X^w───X^w───X^w───X^w───X^w───X^w───X^w───S^-1───H───\n" ] ], [ [ "You can experiment with adding more layers of $ZX$ gates (or adding other kinds of transformations!) 
to your QNN, but we can use this simplest kind of circuit to analyze a simple toy problem, which is what we will do next.", "_____no_output_____" ], [ "### A Toy Problem: Biased Coin Flips\n\nAs a toy problem, let's get our quantum neuron to decide whether a coin is biased toward heads or toward tails based on a sequence of coin flips.", "_____no_output_____" ], [ "To be specific, let's try to train a QNN to distinguish between a coin that yields \"heads\" with probability $p$, and one that yields \"heads\" with probability $1-p$. Without loss of generality, let's say that $p\\leq 0.5$. We don't need a fancy QNN to come up with a winning strategy: given a series of coin flips, you guess $p$ if the majority of flips are \"tails\" and $1-p$ if the majority are \"heads.\" But for purposes of illustration, let's do it the fancy way.", "_____no_output_____" ], [ "To translate this problem into our QNN language, we need to encode the sequence of coin flips into a computational basis state. Let's associate $0$ with tails and $1$ with heads. So the sequence of coin flips becomes a sequence of $0$s and $1$s, and these define a computational basis state.", "_____no_output_____" ], [ "We also need to define a convention for our labeling of the two coins. We'll say that the $p$ coin (majority tails) gets the label $-1$ and the $1-p$ coin (majority heads) gets the label $+1$. So when we measure $Y$ at the end of the computation we can say that the majority-vote of the $Y$ outcome is our predicted label.", "_____no_output_____" ], [ "To be a little more nuanced (and to aid the formulation of the problem), let's say that the expectation value $\\langle Y \\rangle$ for a given input state defines our estimator for the label of that state. We're going to use that to define a loss function for training next.\n", "_____no_output_____" ], [ "### Define Loss Function", "_____no_output_____" ], [ "Suppose we have a collection of $N$ (bitstring, label) pairs. A useful loss function to characterize the effectiveness of our QNN on this collection is\n$$\n\\text{Loss} = \\frac{1}{2N}\\sum_{j=1}^n (1- \\ell_j\\langle Y \\rangle_j),\n$$\nwhere $\\ell_j$ is the label of the $j$th pair and $\\langle Y \\rangle_j$ is the expectation value of $Y$ on the readout qubit using the $j$th bitstring as input. If the network is perfect, the loss is equal to zero. If the network is maximally unsure about the labels (so that $\\langle Y \\rangle_j = 0$ for all $j$) then the loss is equal to $1/2$. And if the network gets everything wrong, then the loss is equal to $1$. We're going to train our network using this loss function, so next we'll write some functions to compute the loss.", "_____no_output_____" ], [ "Another useful function to have around is the average classification error. Recall that our prescription was to execute the quantum circuit many times and take a majority vote to compute the predicted label. 
The majority vote for the readout is the same as $\\text{sign}(\\langle Y \\rangle)$, so we can write a formula for the error in this procedure as\n$$\n\\text{Error} = \\frac{1}{2N}\\sum_{j=1}^n \\big(1- \\ell_j\\text{sign}\\big(\\langle Y \\rangle_j\\big)\\big).\n$$\nThis is not so useful as a loss function because it is not smooth and does not provide an incentive to make $|\\langle Y \\rangle|$ large, but it can be an informative quantity to compute.\n\n__Question__: Why would we want $|\\langle Y \\rangle|$ to be large?", "_____no_output_____" ], [ "#### Solution", "_____no_output_____" ], [ "When we implement this algorithm on the actual hardware, $\\langle Y \\rangle$ can only be estimated by repeatedly executing the circuit and measuring the result. The more measurements we make, the better our estimate of $\\langle Y \\rangle$ will be. Even if we are only interested in $\\text{sign}\\big(\\langle Y \\rangle\\big)$, we will need to meake enough measurements to be sure that our estimate has the correct sign, and if $|\\langle Y \\rangle|$ is large then fewer measurements will be required to have high confidence in the sign.\n\n\nFurthermore, if the machine is noisy (which it will be), then the noise will induce some errors in our estimate of $\\langle Y \\rangle$. If $|\\langle Y \\rangle|$ is small then it's likely that the noise will lead to the wrong sign.", "_____no_output_____" ], [ "#### Expectation Value\n\nOur first function computes the expectation value of the readout qubit for our circuit given a specification of the initial state. Rather than a bitstring, we'll specify the initial state as an array of $0$s and $1$s. These are the outputs of the coin flips in our toy problem. We'll compute the expectation value exactly using the wavefunction for now.", "_____no_output_____" ] ], [ [ "def readout_expectation(state):\n \"\"\"Takes in a specification of a state as an array of 0s and 1s\n and returns the expectation value of Z on ther readout qubit.\n Uses the XmonSimulator to calculate the wavefunction exactly.\"\"\" \n \n # A convenient representation of the state as an integer\n state_num = int(np.sum(state*2**np.arange(len(state))))\n\n resolver = cirq.ParamResolver(params)\n simulator = cirq.Simulator()\n\n # Specify an explicit qubit order so that we know which qubit is the readout\n result = simulator.simulate(qnn, resolver, qubit_order=[readout]+data_qubits,\n initial_state=state_num)\n wf = result.final_state\n\n # Becase we specified qubit order, the Z value of the readout is the most\n # significant bit.\n Z_readout = np.append(np.ones(2**INPUT_SIZE), -np.ones(2**INPUT_SIZE))\n\n return np.sum(np.abs(wf)**2 * Z_readout)", "_____no_output_____" ] ], [ [ "#### Loss and Error\n\nThe next functions take a list of states (each specified as an array of $0$s and $1$s as before) and a corresponding list of labels and computes the loss and error, respectively, of that list.", "_____no_output_____" ] ], [ [ "def loss(states, labels):\n loss=0\n for state, label in zip(states,labels):\n loss += 1 - label*readout_expectation(state)\n return loss/(2*len(states))\n \ndef classification_error(states, labels):\n error=0\n for state,label in zip(states,labels):\n error += 1 - label*np.sign(readout_expectation(state))\n return error/(2*len(states))", "_____no_output_____" ] ], [ [ "#### Generating Data", "_____no_output_____" ], [ "For our toy problem we'll want to be able to generate a batch of data. 
Here is a helper function for that task:", "_____no_output_____" ] ], [ [ "def make_batch():\n \"\"\"Generates a set of labels, then uses those labels to generate inputs.\n label = -1 corresponds to majority 0 in the sate, label = +1 corresponds to\n majority 1.\n \"\"\"\n np.random.seed(0) # For consistency in demo\n labels = (-1)**np.random.choice(2, size=100) # Smaller batch sizes will speed up computation\n states = []\n for label in labels:\n states.append(np.random.choice(2, size=INPUT_SIZE, p=[0.5-label*0.2,0.5+label*0.2]))\n return states, labels\n\nstates, labels = make_batch()", "_____no_output_____" ] ], [ [ "### Training\n\nNow we'll try to find the optimal weight to solve our toy problem. For illustration, we'll do both a brute force search of the paramter space as well as a stochastic gradient descent.", "_____no_output_____" ], [ "#### Brute Force Search\n\nLet's compute both the loss and error rate on a batch of data as a function of the shared weight between all the gates.", "_____no_output_____" ] ], [ [ "# Using cirq.Simulator with the EigenGate implementation of ZZ, this takes\n# about 30s to run. Using the XmonSimulator took about 40 minutes the last\n# time I tried it!\n%%time\nlinspace = np.linspace(start=-1, stop=1, num=80)\ntrain_losses = []\nerror_rates = []\nfor p in linspace:\n params = {'w': p}\n train_losses.append(loss(states, labels))\n error_rates.append(classification_error(states, labels))", "CPU times: user 29.5 s, sys: 2.93 ms, total: 29.5 s\nWall time: 29.6 s\n" ], [ "plt.plot(linspace, train_losses)\nplt.xlabel('Weight')\nplt.ylabel('Loss')\nplt.title('Loss as a Function of Weight')\nplt.show()\nplt.plot(linspace, error_rates)\nplt.xlabel('Weight')\nplt.ylabel('Error Rate')\nplt.title('Error Rate as a Function of Weight')\nplt.show()", "_____no_output_____" ] ], [ [ "__Question__: Why are the loss and error functions periodic with period $1$ when the $ZX$ gate is periodic with period $2$?", "_____no_output_____" ], [ "#### Solution", "_____no_output_____" ], [ "This kind of \"halving\" of the periodicity of $\\langle Y \\rangle$ compared to the period of the gates itself is typical of qubit systems. We can analyze how it works mathematically in a simpler setting. Instead of the $ZX$ Gate, let's just imagine that we rotate the readout qubit around the $X$ axis by some fixed amout. This is the effective calculation for a single fixed data input.\n$$\n\\begin{align}\n\\langle Y \\rangle &= \\langle 0 |\\exp(-i \\pi w X) Y \\exp(i \\pi w X) |0 \\rangle\\\\\n&= \\langle 0 |\\big(\\cos \\pi w - i X\\sin \\pi w \\big) Y \\big(\\cos \\pi w + i X \\sin \\pi w \\big) |0 \\rangle\\\\\n&= \\langle 0 |\\big(Y\\cos 2\\pi w +Z \\sin 2\\pi w \\big) |0 \\rangle\\\\\n&= \\sin 2\\pi w.\n\\end{align}\n$$", "_____no_output_____" ], [ "#### Stochastic Gradient Descent\n\nTo train the network we'll use stochastic gradient descent. Note that this isn't necessarily a good idea since the loss function is far from convex, and there's a good chance we'll get stuck in very inefficient local minimum if we initialize the paramters randomly. But as an exercise we'll do it anyway. In the next section we'll discuss other ways to train these sorts of networks.", "_____no_output_____" ], [ "We'll compute the gradient of the loss function using a symmetric finite-difference approximation: $f'(x) \\approx (f(x + \\epsilon) - f(x-\\epsilon))/2\\epsilon$. This is the most straightforward way to do it using the quantum computer. 
We'll also generate a new instance of the problem each time.", "_____no_output_____" ] ], [ [ "def stochastic_grad_loss():\n \"\"\"Generates a new data point and computes the gradient of the loss\n using that data point.\"\"\"\n \n # Randomly generate the data point.\n label = (-1)**np.random.choice(2)\n state = np.random.choice(2, size=INPUT_SIZE, p=[0.5-label*0.2,0.5+label*0.2])\n \n # Compute the gradient using finite difference\n eps = 10**-5 # Discretization of gradient. Try different values.\n params['w'] -= eps\n loss1 = loss([state],[label])\n params['w'] += 2*eps\n grad = (loss([state],[label])-loss1)/(2*eps)\n params['w'] -= eps # Reset the parameter value\n return grad", "_____no_output_____" ] ], [ [ "We can apply this function repeatedly to flow toward the minimum:", "_____no_output_____" ] ], [ [ "eta = 10**-4 # Learning rate. Try different values.\nparams = {'w': 0} # Initialize weight. Try different values.\nfor i in range(201):\n if not i%25:\n print('Step: {} Loss: {}'.format(i, loss(states, labels)))\n grad = stochastic_grad_loss()\n params['w'] += -eta*grad\nprint('Final Weight: {}'.format(params['w']))", "Step: 0 Loss: 0.5\nStep: 25 Loss: 0.29664170142263174\nStep: 50 Loss: 0.21596111725317313\nStep: 75 Loss: 0.19353972657117993\nStep: 100 Loss: 0.1930989919230342\nStep: 125 Loss: 0.19318223176524044\nStep: 150 Loss: 0.19358215024578385\nStep: 175 Loss: 0.1965144828868506\nStep: 200 Loss: 0.1930640292633325\nFinal Weight: -0.0443500901565141\n" ] ], [ [ "### Use Sampling Instead of Calculating from the Wavefunction\n\nOn real hardware we will have to use sampling to find results instead of computing the exact wavefunction. Rewrite the `readout_expectation` function to compute the expectation value using sampling instead. Unlike with the wavefunction calculation, we also need to build our circuit in a way that accounts for the initial state (we are always assumed to start in the all $|0\\rangle$ state)\n\n\n", "_____no_output_____" ] ], [ [ "def readout_expectation_sample(state):\n \"\"\"Takes in a specification of a state as an array of 0s and 1s\n and returns the expectation value of Z on ther readout qubit.\n Uses the XmonSimulator to sample the final wavefunction.\"\"\"\n \n # We still need to resolve the parameters in the circuit.\n resolver = cirq.ParamResolver(params)\n \n # Make a copy of the QNN to avoid making changes to the global variable.\n measurement_circuit = qnn.copy()\n \n # Modify the measurement circuit to account for the desired input state. \n # YOUR CODE HERE\n \n # Add appropriate measurement gate(s) to the circuit.\n # YOUR CODE HERE\n \n simulator = cirq.google.XmonSimulator() \n result = simulator.run(measurement_circuit, resolver, repetitions=10**6) # Try adjusting the repetitions\n \n # Return the Z expectation value\n return ((-1)**result.measurements['m']).mean()", "_____no_output_____" ] ], [ [ "#### Solution", "_____no_output_____" ] ], [ [ "def readout_expectation_sample(state):\n \"\"\"Takes in a specification of a state as an array of 0s and 1s\n and returns the expectation value of Z on ther readout qubit.\n Uses the XmonSimulator to sample the final wavefunction.\"\"\"\n \n # We still need to resolve the parameters in the circuit.\n resolver = cirq.ParamResolver(params)\n \n # Make a copy of the QNN to avoid making changes to the global variable.\n measurement_circuit = qnn.copy()\n \n # Modify the measurement circuit to account for the desired input state. 
\n for i, qubit in enumerate(data_qubits):\n if state[i]:\n measurement_circuit.insert(0,cirq.X(qubit))\n \n # Add appropriate measurement gate(s) to the circuit.\n measurement_circuit.append(cirq.measure(readout, key='m'))\n \n simulator = cirq.Simulator() \n result = simulator.run(measurement_circuit, resolver, repetitions=10**6) # Try adjusting the repetitions\n \n # Return the Z expectation value\n return ((-1)**result.measurements['m']).mean()", "_____no_output_____" ] ], [ [ "#### Comparison of Sampling with the Exact Wavefunction", "_____no_output_____" ], [ "Just to illustrate the difference between sampling and using the wavefunction, try running the two methods several times on identical input:\n", "_____no_output_____" ] ], [ [ "state = [0,0,0,1,0,1,1,0,1] # Try different initial states.\nparams = {'w': 0.05} # Try different weights.\n\nprint(\"Exact expectation value: {}\".format(readout_expectation(state)))\nprint(\"Estimates from sampling:\")\nfor _ in range(5):\n print(readout_expectation_sample(state))", "Exact expectation value: 0.3090169429779053\nEstimates from sampling:\n0.308354\n0.306674\n0.309026\n0.310854\n0.30854\n" ] ], [ [ "As an exercise, try repeating some of the above calculations (e.g., the SGD optimization) using `readout_expectation_sample` in place of `readout_expectation`. How many repetitions should you use? How should the hyperparameters `eps` and `eta` be adjusted in response to the number of repetitions?", "_____no_output_____" ], [ "### Optimizing For Hardware\n\nThere are more issues to think about if you want to run your network on real hardware. First is the connectivity issue, and second is minimizing the number of two-qubit operations.", "_____no_output_____" ], [ "Consider the Foxtail device:", "_____no_output_____" ] ], [ [ "print(cirq.google.Foxtail)", "(0, 0)───(0, 1)───(0, 2)───(0, 3)───(0, 4)───(0, 5)───(0, 6)───(0, 7)───(0, 8)───(0, 9)───(0, 10)\n│ │ │ │ │ │ │ │ │ │ │\n│ │ │ │ │ │ │ │ │ │ │\n(1, 0)───(1, 1)───(1, 2)───(1, 3)───(1, 4)───(1, 5)───(1, 6)───(1, 7)───(1, 8)───(1, 9)───(1, 10)\n" ] ], [ [ "The qubits are arranged in two rows of eleven qubits each, and qubits can only communicate to their nearest neighbors along the horizontal and vertial connections. That does not mesh well with the QNN we designed, where all of the data qubits need to interact with the readout qubit.", "_____no_output_____" ], [ "There is no *in-principle* restriction on the kinds of algorithms you are allowed to run. The solution to the connectivity problem is to make use of SWAP gates, which have the effect of exchanging the states of two (neighboring) qubits. It's equivalent to what you would get if you physically exchanged the positions of two of the qubits in the grid. The problem is that each SWAP operation is costly, so you want to avoid SWAPing as much as possible. We need to think carefully about our algorithm design to minimize the number of SWAPs performed as the circuit is executed.\n\n__Question__: How should we modify our QNN circuit so that it can runs efficiently on the Foxtail device?", "_____no_output_____" ], [ "#### Solution", "_____no_output_____" ], [ "One strategy is to move the readout qubit around as it talks to the other qubits. Suppose the readout qubit starts in the $(0,0)$ position. First it can interact with the qubits in the $(1,0)$ and $(0,1)$ positons like normal, then SWAP with the $(0,1)$ qubit. 
Now the readout qubit is in the $(0,1)$ position and can interact with the $(1,1)$ and $(0,2)$ qubits before SWAPing with the $(0,2)$ qubit. It continues down the line in this fashion.\n\nLet's code up this circuit:", "_____no_output_____" ] ], [ [ "qnn_fox = cirq.Circuit()\n\nw = 0.2 # Want an explicit numerical weight for later \nfor i in range(10):\n qnn_fox.append([ZXGate(w).on(cirq.GridQubit(1,i), cirq.GridQubit(0,i)),\n ZXGate(w).on(cirq.GridQubit(0,i+1), cirq.GridQubit(0,i)),\n cirq.SWAP(cirq.GridQubit(0,i), cirq.GridQubit(0,i+1))])\n \nqnn_fox.append(ZXGate(w).on(cirq.GridQubit(1,10), cirq.GridQubit(0,10)))\n\nqnn_fox.append([(cirq.S**-1)(cirq.GridQubit(0,10)),cirq.H(cirq.GridQubit(0,10)),\n cirq.measure(cirq.GridQubit(0,10))])\n\nprint(qnn_fox)", "(0, 0): ────X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │\n(0, 1): ────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │\n(0, 2): ────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │ │\n(0, 3): ────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │ │ │\n(0, 4): ────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │ │ │ │\n(0, 5): ────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │ │ │ │ │\n(0, 6): ────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────────────────────────\n │ │ │ │ │ │ │ │ │\n(0, 7): ────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────────────────────────\n │ │ │ │ │ │ │ │ │ │\n(0, 8): ────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────────────────────────\n │ │ │ │ │ │ │ │ │ │ │\n(0, 9): ────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────X───────×──────────────────────────\n │ │ │ │ │ │ │ │ │ │ │ │\n(0, 10): ───┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────Z^0.2───×───X───────S^-1───H───M───\n │ │ │ │ │ 
│ │ │ │ │ │\n(1, 0): ────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │ │ │ │ │ │\n(1, 1): ────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │ │ │ │ │\n(1, 2): ────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │ │ │ │\n(1, 3): ────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │ │ │\n(1, 4): ────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │ │\n(1, 5): ────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │ │\n(1, 6): ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │ │\n(1, 7): ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼───────────────────┼──────────────────────\n │ │ │\n(1, 8): ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼───────────────────┼──────────────────────\n │ │\n(1, 9): ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2───────────────┼──────────────────────\n │\n(1, 10): ───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────Z^0.2──────────────────\n" ] ], [ [ "As coded, this circuit still won't run on the Foxtail device. That's because the gates we've defined are not native gates. Cirq has a built-in method that will convert our gates to Xmon gates (which are native for the Foxtail device) and attempt to optimze the circuit by reducing the total number of gates:", "_____no_output_____" ] ], [ [ "cirq.google.optimized_for_xmon(qnn_fox, new_device=cirq.google.Foxtail, allow_partial_czs=True)", "_____no_output_____" ] ], [ [ "Notice how we were able to pass in the `new_device` argument without getting an error messgae. That means the circuit will run properly on the Foxtail.", "_____no_output_____" ], [ "\n__Question__: We were smart to place the SWAP gates and $ZX$ gates next to each other where possible. 
Why?\n\n__Question__: Can you see any ways to further optimize this circuit by hand? Hint: not all of the qubits are being measured.", "_____no_output_____" ], [ "#### Solutions", "_____no_output_____" ], [ "* Placing the SWAP and $ZX$ gates next to each other lets the optimizer treat the combination of them as a single gate, which leads to fewer total two-qubit gates. \n\n* The state of any qubit which is not being measured does not matter. In particular, any single-qubit gate acting on a non-measured qubit after the last two-qubit gate acting on that qubit will not affect the state of the measured qubit and so can be dropped.", "_____no_output_____" ], [ "### Exercise: Multiple Weights\n\nInstead of just a single weight, create a neuron with multiple weights (a minimal sketch is given below). How will you optimize those weights?", "_____no_output_____" ], [ "### Exercise: Analytic Calculation\n\nBecause we stuck to such a simple example, essentially everything in this notebook can be calculated analytically. Do those calculations.", "_____no_output_____" ], [ "### Exercise: Add More \"Quantum\" Operations\n\nThe neuron we constructed essentially does a classical calculation. You can add more ingredients that make the data processing more \"quantum.\" For example, you can add layers of Hadamard gates in between additional layers of $ZX$ gates. This sort of thing was explored in [Farhi and Neven](https://arxiv.org/abs/1802.06002). Try playing around with it.", "_____no_output_____" ] ] ]
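A minimal sketch of the "Multiple Weights" exercise above, assuming the notebook's `ZXGate`, `data_qubits` and `readout` objects are in scope and that `ZXGate` accepts `sympy` symbols the same way the single-weight circuit does; the per-qubit symbol names (`w0`, `w1`, ...) are illustrative:

```python
import sympy

# One trainable weight per data qubit instead of a single shared 'w'.
weights = [sympy.Symbol('w{}'.format(i)) for i in range(len(data_qubits))]
multi_qnn = cirq.Circuit(
    ZXGate(w).on(q, readout) for w, q in zip(weights, data_qubits))
multi_qnn.append([(cirq.S**-1)(readout), cirq.H(readout)])

# SGD now updates a parameter vector: perturb one weight at a time to get
# per-component finite-difference gradients.
params = {'w{}'.format(i): 0.0 for i in range(len(data_qubits))}
```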
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d06f6b9208f86e4a3de1cff611f259f3e0f172b3
29,226
ipynb
Jupyter Notebook
Examples/2-Content/2.03-News/EX-2.03.01-News.ipynb
Refinitiv-API-Samples/Example.DataLibrary.Python
12f6527e6857e7a939728215aab4c74fab74f6e6
[ "Apache-2.0" ]
2
2021-12-21T13:31:51.000Z
2022-02-17T15:57:00.000Z
Examples/2-Content/2.03-News/EX-2.03.01-News.ipynb
Refinitiv-API-Samples/Example.DataLibrary.Python
12f6527e6857e7a939728215aab4c74fab74f6e6
[ "Apache-2.0" ]
null
null
null
Examples/2-Content/2.03-News/EX-2.03.01-News.ipynb
Refinitiv-API-Samples/Example.DataLibrary.Python
12f6527e6857e7a939728215aab4c74fab74f6e6
[ "Apache-2.0" ]
2
2022-03-07T08:52:26.000Z
2022-03-17T04:34:13.000Z
40.200825
461
0.515158
[ [ [ "----\n<img src=\"../../../files/refinitiv.png\" width=\"20%\" style=\"vertical-align: top;\">\n\n# Data Library for Python\n\n----", "_____no_output_____" ], [ "## Content layer - News\nThis notebook demonstrates how to retrieve News.", "_____no_output_____" ], [ "#### Learn more\n\nTo learn more about the Refinitiv Data Library for Python please join the Refinitiv Developer Community. By [registering](https://developers.refinitiv.com/iam/register) and [logging](https://developers.refinitiv.com/content/devportal/en_us/initCookie.html) into the Refinitiv Developer Community portal you will have free access to a number of learning materials like \n [Quick Start guides](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/quick-start), \n [Tutorials](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/learning), \n [Documentation](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-library-for-python/docs)\n and much more.\n\n#### Getting Help and Support\n\nIf you have any questions regarding using the API, please post them on \nthe [Refinitiv Data Q&A Forum](https://community.developers.refinitiv.com/spaces/321/index.html). \nThe Refinitiv Developer Community will be happy to help. ", "_____no_output_____" ], [ "## Set the configuration file location\nFor a better ease of use, you have the option to set initialization parameters of the Refinitiv Data Library in the _refinitiv-data.config.json_ configuration file. This file must be located beside your notebook, in your user folder or in a folder defined by the _RD_LIB_CONFIG_PATH_ environment variable. The _RD_LIB_CONFIG_PATH_ environment variable is the option used by this series of examples. The following code sets this environment variable. ", "_____no_output_____" ] ], [ [ "import os\nos.environ[\"RD_LIB_CONFIG_PATH\"] = \"../../../Configuration\"", "_____no_output_____" ] ], [ [ "## Some Imports to start with", "_____no_output_____" ] ], [ [ "import refinitiv.data as rd\nfrom refinitiv.data.content import news\nfrom datetime import timedelta", "_____no_output_____" ] ], [ [ "## Open the data session\n\nThe open_session() function creates and open sessions based on the information contained in the refinitiv-data.config.json configuration file. 
Please edit this file to set the session type and other parameters required for the session you want to open.", "_____no_output_____" ] ], [ [ "rd.open_session('platform.rdp')", "_____no_output_____" ] ], [ [ "## Retrieve data", "_____no_output_____" ], [ "### Headlines", "_____no_output_____" ], [ "#### Get headlines", "_____no_output_____" ] ], [ [ "response = news.headlines.Definition(\"Apple\").get_data()\nresponse.data.df", "_____no_output_____" ] ], [ [ "#### Get headlines within a range of dates", "_____no_output_____" ] ], [ [ "response = news.headlines.Definition(\n query=\"Refinitiv\",\n date_from=\"20.03.2021\", \n date_to=timedelta(days=-4), \n count=3\n).get_data()\nresponse.data.df", "_____no_output_____" ] ], [ [ "#### Get a limited number of headlines", "_____no_output_____" ] ], [ [ "response = news.headlines.Definition(query = \"Google\", count = 350).get_data()\nresponse.data.df", "_____no_output_____" ] ], [ [ "### Story", "_____no_output_____" ] ], [ [ "response = news.story.Definition(\"urn:newsml:reuters.com:20211003:nNRAgvhyiu:1\").get_data()\nprint(response.data.story.title, '\\n')\nprint(response.data.story.content)", "Google Doodle marks birthday of Spanish ocean scientist María de los Ángeles Alvariño González \n\nFor best results when printing this announcement, please click on link below:\nhttp://newsfile.refinitiv.com/getnewsfile/v1/story?guid=urn:newsml:reuters.com:20211003:nNRAgvhyiu&default-theme=true\n\n\nThe Google Doodle today (3 OCtober) celebrates the 105th birthday of\nSpanish-American professor and marine research biologist María de los\nÁngeles Alvariño González, who is regarded as one of the most important\nSpanish scientists of all time.\n\nBorn in 1916, her love of natural history began with her father's library and\ndeepened as she pursued coastline oceanography research.\n\nAlthough the Spanish Institute of Oceanography (IEO) only accepted men at the\ntime, her university work impressed the organization so much that they\nappointed her as a marine biologist in 1952.\n\nBased in Vigo, she began her pioneering research on zooplankton, tiny\norganisms that serve as the foundation of the oceanic food chain and\nidentified some species to be the best indicators of ocean health.\n\nIn 1953, the British Council awarded Ángeles Alvariño a fellowship that\nresulted in her becoming the first woman to work as a scientist aboard a\nBritish research vessel.\n\nFollowing several expeditions, she furthered her studies in the United States\nwhere she retired as one of the world's most prestigious marine biologists in\n1987.\n\nDuring her career she discovered 22 new species of zooplankton and published\nover 100 scientific papers. Even today she is the only Spanish scientist of\n1,000 in the \"Encyclopedia of World Scientists,\" and a modern research vessel\nin IEO's fleet bears her name.\n\nRead More\n\nAP News Digest 6:30 a.m.\n(https://www.independent.co.uk/news/world/europe/joe-biden-communist-party-east-african-covid-child-b1931394.html)\n\nLa Palma volcano turns 'much more aggressive' and blows open new fissures\n(https://www.independent.co.uk/news/world/europe/la-palma-volcano-eruption-fissures-b1931367.html)\n\nHow security fears for Rutte revealed power of Dutch drug gangs\n(https://www.independent.co.uk/independentpremium/rutte-netherlands-drug-gangs-gangsters-b1931165.html)\n\n\n\nCopyright © 2021 Independent.co.ukk. 
All rights reserved.\n" ] ], [ [ "## Close the session", "_____no_output_____" ] ], [ [ "rd.close_session()", "_____no_output_____" ] ] ]
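A small composite usage sketch that chains the two content objects shown above: pull a few headlines, then fetch the full story behind each one. The `storyId` column name is an assumption about the headlines DataFrame schema:

```python
response = news.headlines.Definition(query="Tesla", count=3).get_data()
for story_id in response.data.df["storyId"]:  # assumed column name
    story = news.story.Definition(story_id).get_data()
    print(story.data.story.title)
```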
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06f75d9c81c82faa4c027434afac290b8e12bee
180,037
ipynb
Jupyter Notebook
spotify_user_behaviour_predictor/4_Stat_Infer.ipynb
MacyChan/spotify-user-behaviour-predictor
9f8bd80643802c4bda63db58c47e98a4ad4ae6a5
[ "MIT" ]
1
2022-01-06T23:40:18.000Z
2022-01-06T23:40:18.000Z
spotify_user_behaviour_predictor/_build/html/_sources/4_Stat_Infer.ipynb
MacyChan/spotify-user-behaviour-predictor
9f8bd80643802c4bda63db58c47e98a4ad4ae6a5
[ "MIT" ]
null
null
null
spotify_user_behaviour_predictor/_build/html/_sources/4_Stat_Infer.ipynb
MacyChan/spotify-user-behaviour-predictor
9f8bd80643802c4bda63db58c47e98a4ad4ae6a5
[ "MIT" ]
null
null
null
562.615625
170,403
0.935713
[ [ [ "# Statistic Inference\n- This is a Python base notebook\n- Using `rpy2` for R functions\n\nWe saw some pattern in EDA, naturally, we would like to see if the different between feature are significantly related to the target.", "_____no_output_____" ], [ "## Import libaries", "_____no_output_____" ] ], [ [ "import rpy2\nimport rpy2.robjects as robjects\nfrom rpy2.robjects.packages import importr", "_____no_output_____" ], [ "%load_ext rpy2.ipython", "_____no_output_____" ], [ "%%R\nlibrary(tidyverse)\nlibrary(broom)\nlibrary(GGally)", "R[write to console]: ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──\n\nR[write to console]: ✔ ggplot2 3.3.5 ✔ purrr 0.3.4\n✔ tibble 3.1.6 ✔ dplyr 1.0.7\n✔ tidyr 1.1.4 ✔ stringr 1.4.0\n✔ readr 2.1.1 ✔ forcats 0.5.1\n\nR[write to console]: ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──\n✖ dplyr::filter() masks stats::filter()\n✖ dplyr::lag() masks stats::lag()\n\nR[write to console]: Registered S3 method overwritten by 'GGally':\n method from \n +.gg ggplot2\n\n" ] ], [ [ "## Reading the data CSV\nRead in the data CSV and store it as a pandas dataframe named `spotify_df`. ", "_____no_output_____" ] ], [ [ "%%R\nspotify_df <- read_csv(\"data/spotify_data.csv\")\nhead(spotify_df)", "R[write to console]: New names:\n* `` -> ...1\n\n" ] ], [ [ "## Regression", "_____no_output_____" ], [ "### Data Wrangle\n- Remove `song_title` and `artist` for relationship study by regression. As both of them are neither numerical nor categorical features.", "_____no_output_____" ] ], [ [ "%%R\nspotify_df_num <- spotify_df[2:15]\nhead(spotify_df_num)", "# A tibble: 6 × 14\n acousticness danceability duration_ms energy instrumentalness key liveness\n <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>\n1 0.0102 0.833 204600 0.434 0.0219 2 0.165 \n2 0.199 0.743 326933 0.359 0.00611 1 0.137 \n3 0.0344 0.838 185707 0.412 0.000234 2 0.159 \n4 0.604 0.494 199413 0.338 0.51 5 0.0922\n5 0.18 0.678 392893 0.561 0.512 5 0.439 \n6 0.00479 0.804 251333 0.56 0 8 0.164 \n# … with 7 more variables: loudness <dbl>, mode <dbl>, speechiness <dbl>,\n# tempo <dbl>, time_signature <dbl>, valence <dbl>, target <dbl>\n" ] ], [ [ "## Set up regression model", "_____no_output_____" ], [ "Here, I am interested in determining factors associated with `target`. 
In particular, I will use a Multiple Linear Regression (MLR) Model to study the relation between `target` and all other features.", "_____no_output_____" ] ], [ [ "%%R\nML_reg <- lm( target ~ ., data = spotify_df_num) |> tidy(conf.int = TRUE)\n\nML_reg<- ML_reg |>\n mutate(Significant = p.value < 0.05) |>\n mutate_if(is.numeric, round, 3)\n\nML_reg", "# A tibble: 14 × 8\n term estimate std.error statistic p.value conf.low conf.high Significant\n <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <lgl> \n 1 (Interce… -0.313 0.206 -1.52 0.128 -0.717 0.09 FALSE \n 2 acoustic… -0.325 0.055 -5.92 0 -0.433 -0.217 TRUE \n 3 danceabi… 0.415 0.078 5.33 0 0.262 0.568 TRUE \n 4 duration… 0 0 4.08 0 0 0 TRUE \n 5 energy 0.09 0.093 0.974 0.33 -0.092 0.272 FALSE \n 6 instrume… 0.268 0.044 6.05 0 0.181 0.354 TRUE \n 7 key 0.001 0.003 0.334 0.739 -0.005 0.007 FALSE \n 8 liveness 0.098 0.07 1.4 0.162 -0.039 0.236 FALSE \n 9 loudness -0.023 0.005 -4.81 0 -0.033 -0.014 TRUE \n10 mode -0.035 0.022 -1.58 0.113 -0.078 0.008 FALSE \n11 speechin… 0.816 0.121 6.74 0 0.579 1.05 TRUE \n12 tempo 0.001 0 1.95 0.052 0 0.002 FALSE \n13 time_sig… -0.009 0.042 -0.205 0.838 -0.091 0.074 FALSE \n14 valence 0.165 0.051 3.24 0.001 0.065 0.265 TRUE \n" ] ], [ [ "- We can see that a lot of features are statistically correlated with the target. They are listed in the table below.", "_____no_output_____" ] ], [ [ "%%R\nML_reg |>\n filter(Significant == TRUE) |>\n select(term) ", "# A tibble: 7 × 1\n term \n <chr> \n1 acousticness \n2 danceability \n3 duration_ms \n4 instrumentalness\n5 loudness \n6 speechiness \n7 valence \n" ] ], [ [ "### GGpairs\nBelow are the ggpairs plots visualizing the correlations between the different features.", "_____no_output_____" ] ], [ [ "%%R\nggpairs(data = spotify_df_num)", "_____no_output_____" ] ] ]
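As a follow-up to the significance table above, the significant predictors can also be ranked by effect size. A sketch reusing the notebook's `%%R` magic, the `ML_reg` tibble, and the dplyr verbs already loaded:

```
%%R
# Rank the significant predictors by absolute coefficient estimate.
ML_reg |>
  filter(Significant == TRUE) |>
  arrange(desc(abs(estimate)))
```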
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06f9039c88af0dc745e54826b2fe2c749bb66d5
8,733
ipynb
Jupyter Notebook
sagemaker-python-sdk/tensorflow_moving_from_framework_mode_to_script_mode/tensorflow_moving_from_framework_mode_to_script_mode.ipynb
dleen/amazon-sagemaker-examples
8edd462adf37b5b173098ffea729b7198ce8d41c
[ "Apache-2.0" ]
3
2019-07-23T16:24:55.000Z
2019-07-24T06:28:48.000Z
sagemaker-python-sdk/tensorflow_moving_from_framework_mode_to_script_mode/tensorflow_moving_from_framework_mode_to_script_mode.ipynb
lokeshinumpudi/amazon-sagemaker-examples
bb0a5fcafac9e1735672e72c263f017edebecaaa
[ "Apache-2.0" ]
1
2019-06-28T19:48:53.000Z
2019-06-28T19:48:53.000Z
sagemaker-python-sdk/tensorflow_moving_from_framework_mode_to_script_mode/tensorflow_moving_from_framework_mode_to_script_mode.ipynb
lokeshinumpudi/amazon-sagemaker-examples
bb0a5fcafac9e1735672e72c263f017edebecaaa
[ "Apache-2.0" ]
1
2019-12-31T10:11:42.000Z
2019-12-31T10:11:42.000Z
31.301075
557
0.589717
[ [ [ "# Migrating scripts from Framework Mode to Script Mode\n\nThis notebook focus on how to migrate scripts using Framework Mode to Script Mode. The original notebook using Framework Mode can be find here https://github.com/awslabs/amazon-sagemaker-examples/blob/4c2a93114104e0b9555d7c10aaab018cac3d7c04/sagemaker-python-sdk/tensorflow_distributed_mnist/tensorflow_local_mode_mnist.ipynb", "_____no_output_____" ], [ "### Set up the environment", "_____no_output_____" ] ], [ [ "import os\nimport subprocess\nimport sagemaker\nfrom sagemaker import get_execution_role\n\nsagemaker_session = sagemaker.Session()\n\nrole = get_execution_role()", "_____no_output_____" ] ], [ [ "### Download the MNIST dataset", "_____no_output_____" ] ], [ [ "import utils\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\ndata_sets = input_data.read_data_sets('data', dtype=tf.uint8, reshape=False, validation_size=5000)\n\nutils.convert_to(data_sets.train, 'train', 'data')\nutils.convert_to(data_sets.validation, 'validation', 'data')\nutils.convert_to(data_sets.test, 'test', 'data')", "_____no_output_____" ] ], [ [ "### Upload the data\nWe use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value inputs identifies the location -- we will use this later when we start the training job.", "_____no_output_____" ] ], [ [ "inputs = sagemaker_session.upload_data(path='data', key_prefix='data/mnist')", "_____no_output_____" ] ], [ [ "# Construct an entry point script for training \nOn this example, we assume that you aready have a Framework Mode training script named `mnist.py`:", "_____no_output_____" ] ], [ [ "!pygmentize 'mnist.py'", "_____no_output_____" ] ], [ [ "The training script `mnist.py` include the Framework Mode functions ```model_fn```, ```train_input_fn```, ```eval_input_fn```, and ```serving_input_fn```. 
We need to create an entry point script that uses the functions above to create a ```tf.estimator```:", "_____no_output_____" ] ], [ [ "%%writefile train.py\n\nimport argparse\n# import original framework mode script\nimport mnist\n\nimport tensorflow as tf\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # read hyperparameters as script arguments\n parser.add_argument('--training_steps', type=int)\n parser.add_argument('--evaluation_steps', type=int)\n\n args, _ = parser.parse_known_args()\n\n # creates a tf.Estimator using `model_fn` that saves models to /opt/ml/model\n estimator = tf.estimator.Estimator(model_fn=mnist.model_fn, model_dir='/opt/ml/model')\n\n\n # creates parameterless input_fn function required by the estimator\n def input_fn():\n return mnist.train_input_fn(training_dir='/opt/ml/input/data/training', params=None)\n\n\n train_spec = tf.estimator.TrainSpec(input_fn, max_steps=args.training_steps)\n\n\n # creates parameterless serving_input_receiver_fn function required by the exporter\n def serving_input_receiver_fn():\n return mnist.serving_input_fn(params=None)\n\n\n exporter = tf.estimator.LatestExporter('Servo',\n serving_input_receiver_fn=serving_input_receiver_fn)\n\n\n # creates parameterless eval input function required by the evaluation\n # (named differently from input_fn above to avoid shadowing it)\n def eval_input_fn():\n return mnist.eval_input_fn(training_dir='/opt/ml/input/data/training', params=None)\n\n\n eval_spec = tf.estimator.EvalSpec(eval_input_fn, steps=args.evaluation_steps, exporters=exporter)\n \n # start training and evaluation\n tf.estimator.train_and_evaluate(estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)", "_____no_output_____" ] ], [ [ "## Changes in the SageMaker TensorFlow estimator\n\nWe need to create a TensorFlow estimator pointing to ```train.py``` as the entry point:", "_____no_output_____" ] ], [ [ "from sagemaker.tensorflow import TensorFlow\n\nmnist_estimator = TensorFlow(entry_point='train.py',\n dependencies=['mnist.py'],\n role='SageMakerRole',\n framework_version='1.13',\n hyperparameters={'training_steps':10, 'evaluation_steps':10},\n py_version='py3',\n train_instance_count=1,\n train_instance_type='local')\n\nmnist_estimator.fit(inputs)", "_____no_output_____" ] ], [ [ "# Deploy the trained model to prepare for predictions\n\nThe deploy() method creates an endpoint (in this case locally) which serves prediction requests in real-time.", "_____no_output_____" ] ], [ [ "mnist_predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type='local')", "_____no_output_____" ] ], [ [ "# Invoking the endpoint", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nfor i in range(10):\n data = mnist.test.images[i].tolist()\n\n predict_response = mnist_predictor.predict(data)\n \n print(\"========================================\")\n label = np.argmax(mnist.test.labels[i])\n print(\"label is {}\".format(label))\n print(\"prediction is {}\".format(predict_response))", "_____no_output_____" ] ], [ [ "# Clean-up\n\nDeleting the local endpoint when you're finished is important since you can only run one local endpoint at a time.", "_____no_output_____" ] ], [ [ "mnist_estimator.delete_endpoint()", "_____no_output_____" ] ] ]
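The same estimator definition also covers remote training. A sketch with a managed instance type substituted for local mode; the instance type and step counts here are illustrative, and `role` is the execution role fetched earlier in the notebook:

```python
from sagemaker.tensorflow import TensorFlow

mnist_estimator = TensorFlow(entry_point='train.py',
                             dependencies=['mnist.py'],
                             role=role,
                             framework_version='1.13',
                             hyperparameters={'training_steps': 1000,
                                              'evaluation_steps': 100},
                             py_version='py3',
                             train_instance_count=1,
                             train_instance_type='ml.c5.xlarge')
mnist_estimator.fit(inputs)
```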
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06f95b1aee5c619a6f836db1ad8574687349353
11,894
ipynb
Jupyter Notebook
2020_TaCo/FBA/TaCo_cost_comparison.ipynb
he-hai/PubSuppl
0af6863948c717a8f4ca6197bfa7f7428aa054f9
[ "CC-BY-4.0" ]
null
null
null
2020_TaCo/FBA/TaCo_cost_comparison.ipynb
he-hai/PubSuppl
0af6863948c717a8f4ca6197bfa7f7428aa054f9
[ "CC-BY-4.0" ]
null
null
null
2020_TaCo/FBA/TaCo_cost_comparison.ipynb
he-hai/PubSuppl
0af6863948c717a8f4ca6197bfa7f7428aa054f9
[ "CC-BY-4.0" ]
null
null
null
49.352697
1,712
0.577014
[ [ [ "# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport cobra\n\nprint('Python version:', sys.version)\nprint('numpy version:', np.__version__)\nprint('pandas version:', pd.__version__)\nprint('cobrapy version:', cobra.__version__)", "Python version: 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]\nnumpy version: 1.16.4\npandas version: 1.0.3\ncobrapy version: 0.15.4\n" ], [ "def AddRxn(model, newRxnFile):\n \"\"\"Function of adding new reactions to the model.\"\"\"\n n1 = len(model.reactions)\n AllAddRxn = pd.read_csv(newRxnFile, sep=',', index_col='RxnID', skipinitialspace=True)\n n2 = len(AllAddRxn)\n for i in range(n2):\n ID = AllAddRxn.index.values[i]\n addRxn = cobra.Reaction(ID)\n model.add_reactions([addRxn])\n addRxnInf = model.reactions[n1 + i]\n addRxnInf.name = AllAddRxn.loc[ID, 'RxnName']\n addRxnInf.reaction = AllAddRxn.loc[ID, 'RxnFormula']\n addRxnInf.subsystem = AllAddRxn.loc[ID, 'Subsystem']\n addRxnInf.lower_bound = AllAddRxn.loc[ID, 'LowerBound']\n addRxnInf.upper_bound = AllAddRxn.loc[ID, 'UpperBound']\n return model\n", "_____no_output_____" ], [ "def flux2file(model, product, psw, output_dir='tmp'):\n \"\"\"Function of exporting flux data.\"\"\"\n n = len(model.reactions)\n modelMatrix = np.empty([n, 9], dtype = object)\n for i in range(len(model.reactions)):\n x = model.reactions[i]\n modelMatrix[i, 0] = i + 1\n modelMatrix[i, 1] = x.id\n modelMatrix[i, 2] = x.name\n modelMatrix[i, 3] = x.reaction\n modelMatrix[i, 4] = x.subsystem\n modelMatrix[i, 5] = x.lower_bound\n modelMatrix[i, 6] = x.upper_bound\n modelMatrix[i, 7] = x.flux\n modelMatrix[i, 8] = abs(x.flux)\n \n df = pd.DataFrame(data = modelMatrix, \n columns = ['N', 'RxnID', 'RxnName', 'Reaction', 'SubSystem', \n 'LowerBound', 'UpperBound', 'Flux-core', 'abs(Flux)'])\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n filepath = os.path.join(output_dir, '{}_{}.xlsx'.format(product, psw))\n df.to_excel(filepath, index=False)\n ", "_____no_output_____" ], [ "model = cobra.Model()\n\nAddRxn(model,'CBBrxns.csv')\n\n# the model has a constraint of producing 1 of 3pg (sink_3pg)\n# set the objective to minimize ATP cost\nmodel.objective = {model.reactions.DM_atp: 1}\nmodel.objective_direction = 'min'\n\n# set the carboxylation and oxygenation reaction ratio \n# of RuBisCO to be 3:1\nrubisco_flux = model.problem.Constraint(\n model.reactions.RBPC.flux_expression - 3 * model.reactions.RBPO.flux_expression,\n lb = 0, \n ub = 0\n)\nmodel.add_cons_vars(rubisco_flux)", "unknown metabolite 'co2' created\nunknown metabolite 'h2o' created\nunknown metabolite 'rb15bp' created\nunknown metabolite '3pg' created\nunknown metabolite 'h' created\nunknown metabolite 'o2' created\nunknown metabolite '2pglyc' created\nunknown metabolite 'atp' created\nunknown metabolite '13dpg' created\nunknown metabolite 'adp' created\nunknown metabolite 'nadph' created\nunknown metabolite 'g3p' created\nunknown metabolite 'nadp' created\nunknown metabolite 'pi' created\nunknown metabolite 'dhap' created\nunknown metabolite 'fdp' created\nunknown metabolite 'f6p' created\nunknown metabolite 'e4p' created\nunknown metabolite 'xu5p' created\nunknown metabolite 's17bp' created\nunknown metabolite 's7p' created\nunknown metabolite 'r5p' created\nunknown metabolite 'ru5p' created\nunknown metabolite 'amp' created\nunknown metabolite 'nad' created\nunknown metabolite 'nadh' created\nunknown metabolite 'fdxo' created\nunknown metabolite 'fdxrd' created\nunknown metabolite 'ppi' 
created\nunknown metabolite 'hco3' created\nunknown metabolite 'nh3' created\n" ] ], [ [ "photores = {\n 'NPR': 'a_NPRrxns.csv', # natural photorespiration\n 'GLC': 'b_GLCrxns.csv', # glycerate bypass\n 'OX': 'c_OXrxns.csv', # glycolate oxidation pathway\n 'A5P': 'd_A5Prxns.csv', # arabinose-5-phosphate shunt\n '3OHP': 'e_3OHPrxns.csv', # 3-hydroxypropionate bypass\n 'TACO': 'f_TACOrxns.csv', # tartronyl-CoA pathway\n}\n\ncost_df = pd.DataFrame()", "_____no_output_____" ], [ "for psw, rxns in photores.items():\n with model as m:\n AddRxn(m, rxns)\n m.optimize()\n flux2file(m,'3pg',psw,'output')\n for cost in ['DM_atp', 'DM_e', 'Fdr', 'EX_co2', 'RBPC', 'RBPO']:\n cost_df.loc[cost, psw] = abs(m.reactions.get_by_id(cost).flux)\n ", "unknown metabolite 'glyclt' created\nunknown metabolite 'glx' created\nunknown metabolite 'h2o2' created\nunknown metabolite 'ser' created\nunknown metabolite 'gly' created\nunknown metabolite 'hpyr' created\nunknown metabolite 'glu' created\nunknown metabolite 'akg' created\nunknown metabolite 'gln' created\nunknown metabolite 'mlthf' created\nunknown metabolite 'thf' created\nunknown metabolite 'glyc' created\nunknown metabolite 'glyclt' created\nunknown metabolite 'glx' created\nunknown metabolite '2h3oppan' created\nunknown metabolite 'glyc' created\nunknown metabolite 'glyclt' created\nunknown metabolite 'glx' created\nunknown metabolite 'h2o2' created\nunknown metabolite 'accoa' created\nunknown metabolite 'mal' created\nunknown metabolite 'coa' created\nunknown metabolite 'pyr' created\nunknown metabolite 'glyclt' created\nunknown metabolite 'coa' created\nunknown metabolite 'glyccoa' created\nunknown metabolite 'gcald' created\nunknown metabolite 'ara5p' created\nunknown metabolite 'glyclt' created\nunknown metabolite 'glx' created\nunknown metabolite 'ppcoa' created\nunknown metabolite 'mmcoa' created\nunknown metabolite 'citmcoa' created\nunknown metabolite 'pyr' created\nunknown metabolite 'accoa' created\nunknown metabolite 'malcoa' created\nunknown metabolite '3hpp' created\nunknown metabolite 'coa' created\nunknown metabolite 'pep' created\nunknown metabolite '2pg' created\nunknown metabolite 'glyclt' created\nunknown metabolite 'coa' created\nunknown metabolite 'glyccoa' created\nunknown metabolite 'tarcoa' created\nunknown metabolite '2h3oppan' created\nunknown metabolite 'glyc' created\n" ], [ "# Assuming we cannot improve the GCC hydrolysis\n# The GCC M5 hydrolyses 3.9 ATP per carboxylation (Fig. 2c)\nwith model as m:\n AddRxn(m, 'f_TACOrxns.csv')\n m.reactions.GCC.add_metabolites({'atp': -2.9, 'adp': 2.9, 'pi': 2.9})\n m.optimize()\n flux2file(m,'3pg', 'TACO_2','output')\n for cost in ['DM_atp', 'DM_e', 'Fdr', 'EX_co2', 'RBPC', 'RBPO']:\n cost_df.loc[cost, 'TACO_2'] = abs(m.reactions.get_by_id(cost).flux)", "unknown metabolite 'glyclt' created\nunknown metabolite 'coa' created\nunknown metabolite 'glyccoa' created\nunknown metabolite 'tarcoa' created\nunknown metabolite '2h3oppan' created\nunknown metabolite 'glyc' created\n" ], [ "cost_df.to_excel('TaCo costs comparison.xlsx')\ncost_df", "_____no_output_____" ] ] ]
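A quick sanity check (sketch) that the imposed RuBisCO constraint holds in a solved model. It reuses the `photores` mapping and cobrapy's `Solution.fluxes`, and assumes `RBPO` carries nonzero flux at the optimum:

```python
with model as m:
    AddRxn(m, photores['TACO'])
    sol = m.optimize()
    # Carboxylation:oxygenation should come out at the imposed 3:1 ratio.
    print(sol.fluxes['RBPC'] / sol.fluxes['RBPO'])
```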
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06fa64fd9513893886e20d1f9d652384c3cd73b
13,417
ipynb
Jupyter Notebook
2-Regression/1-Tools/linnerud.ipynb
pr0gmtc/ML-For-Beginners
88fb680172cca0cb3a4be4d82571b7d5bbbb5556
[ "MIT" ]
null
null
null
2-Regression/1-Tools/linnerud.ipynb
pr0gmtc/ML-For-Beginners
88fb680172cca0cb3a4be4d82571b7d5bbbb5556
[ "MIT" ]
null
null
null
2-Regression/1-Tools/linnerud.ipynb
pr0gmtc/ML-For-Beginners
88fb680172cca0cb3a4be4d82571b7d5bbbb5556
[ "MIT" ]
null
null
null
72.524324
9,321
0.836327
[ [ [ "The Linnerud dataset is a multi-output regression dataset. It consists of three excercise (data) and three physiological (target) variables collected from twenty middle-aged men in a fitness club:\n\nphysiological - CSV containing 20 observations on 3 physiological variables:\nWeight, Waist and Pulse.\n\nexercise - CSV containing 20 observations on 3 exercise variables:\nChins, Situps and Jumps.", "_____no_output_____" ], [ "To create a Regression model that would plot the relationship between the waistline and how many situps are accomplished we need to take both of the variables into separate 1 dimensional vectors, divide them into training and testing sets, train a linear regression model using train set and check its output on the test set", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model, model_selection", "_____no_output_____" ], [ "X, y = datasets.load_linnerud(return_X_y=True)\nprint(X.shape)\nprint(X[0])", "(20, 3)\n[ 5. 162. 60.]\n" ] ], [ [ "Taking Waist and Situps variables:", "_____no_output_____" ] ], [ [ "X = X[:, np.newaxis, 1]\ny = y[:, np.newaxis, 1]", "_____no_output_____" ], [ "print(X.shape)\nprint(y.shape)", "(20, 1)\n(20, 1)\n" ], [ "X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.33)", "_____no_output_____" ], [ "model = linear_model.LinearRegression()\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred = model.predict(X_test)", "_____no_output_____" ], [ "plt.scatter(X_test, y_test, color='black')\nplt.plot(X_test, y_pred, color='blue', linewidth=3)\nplt.show()", "_____no_output_____" ] ], [ [ "We can see that there is an inverse correlation between number of situps someone does and the waistline", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d06fac4d27e9270ad004f3e7638d3d8aa4b32630
7,060
ipynb
Jupyter Notebook
get_test_masks/get_test_mask_from_DilatedSegUNet_new_version_coord_at_middle.ipynb
ydxb7/graduate
836c47f881ff6c4edfdf1a0ee23bd04602788ca3
[ "Unlicense" ]
null
null
null
get_test_masks/get_test_mask_from_DilatedSegUNet_new_version_coord_at_middle.ipynb
ydxb7/graduate
836c47f881ff6c4edfdf1a0ee23bd04602788ca3
[ "Unlicense" ]
null
null
null
get_test_masks/get_test_mask_from_DilatedSegUNet_new_version_coord_at_middle.ipynb
ydxb7/graduate
836c47f881ff6c4edfdf1a0ee23bd04602788ca3
[ "Unlicense" ]
null
null
null
31.801802
110
0.514873
[ [ [ "import numpy as np\nimport torch\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\n# %matplotlib inline\n# plt.rcParams['figure.figsize'] = (20, 20)\n# plt.rcParams['image.interpolation'] = 'bilinear'\n\nimport sys\nsys.path.append('../train/')\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader, Dataset\nimport torchvision.datasets as datasets\nimport torchvision\nimport torchvision.transforms as T\nimport torch.nn.functional as F\nimport torch.nn as nn\n\nimport collections\nimport numbers\nimport random\nimport math\nfrom PIL import Image, ImageOps, ImageEnhance\nimport time\nfrom torch.utils.data import Dataset\n\nfrom networks.DilatedSegUNet_new_version import DilatedSegUNet_new_version\nimport tool\nfrom tqdm import tqdm\n\nflip_index = ['16', '15', '14', '13', '12', '11', '10']", "_____no_output_____" ], [ "NUM_CHANNELS = 3\nNUM_CLASSES = 2 \nBATCH_SIZE = 2\nW, H = 1918, 1280\nSTRIDE = 512\nIMAGE_SIZE = 1024\ntest_mask_path = '../../data/test_masks/DilatedSegUNet_new_version/'\nweight_path = '../_weights/DilatedSegUNet_new_version-fold0-end.pth'", "_____no_output_____" ], [ "def load_model(filename, model):\n checkpoint = torch.load(filename)\n model.load_state_dict(checkpoint['model_state'])", "_____no_output_____" ], [ "model = DilatedSegUNet_new_version()\nmodel = model.cuda()\nmodel.eval()\nload_model(weight_path, model)", "_____no_output_____" ], [ "test_path = '../../data/images/test/'\n\nif not os.path.exists(test_mask_path):\n os.makedirs(test_mask_path)", "_____no_output_____" ], [ "test_names = os.listdir(test_path)\ntest_names = sorted(test_names)", "_____no_output_____" ], [ "coords_full = np.zeros((BATCH_SIZE, 2, H, W), dtype='float32')\nxx,yy = np.meshgrid(np.linspace(-0.5,0.5,W), np.linspace(-0.5,0.5,H))\ncoord = np.concatenate([xx[np.newaxis,...], yy[np.newaxis,...]],0).astype('float32')\nfor i in range(BATCH_SIZE):\n coords_full[i] = coord", "_____no_output_____" ], [ "with torch.no_grad():\n batch_size = BATCH_SIZE\n normalize_mean = [.485, .456, .406]\n normalize_std = [.229, .224, .225]\n\n test_names = sorted(os.listdir(test_path))\n for image_pack in tqdm(range(len(test_names) // batch_size)):\n images = np.zeros((batch_size, 3, H, W), dtype='float32')\n test_masks = np.zeros((batch_size, 2, H, W), dtype='float32')\n ifflip = [False] * batch_size\n image_batch_names = test_names[image_pack * batch_size: image_pack * batch_size + batch_size]\n mask_names = [input_name.split('.')[0] + '.png' for input_name in image_batch_names]\n \n for idx, image_name in enumerate(image_batch_names):\n image = Image.open(os.path.join(test_path, image_name))\n angle = image_name.split('.')[0].split('_')[-1]\n if angle in flip_index:\n ifflip[idx] = True\n image = ImageOps.mirror(image)\n\n image = np.array(image).astype('float') / 255\n image = image.transpose(2, 0, 1)\n\n for i in range(3):\n image[i] = (image[i] - normalize_mean[i]) / normalize_std[i]\n\n images[idx] = image\n \n\n\n for h_idx in range(int(math.ceil((H - STRIDE) / STRIDE))):\n h_start = h_idx * STRIDE\n h_end = h_start + IMAGE_SIZE\n if h_end > H:\n h_end = H\n h_start = h_end - IMAGE_SIZE\n for w_idx in range(int(math.ceil((W - STRIDE) / STRIDE))):\n w_start = w_idx * STRIDE\n w_end = w_start + IMAGE_SIZE\n if w_end > W:\n w_end = W\n w_start = w_end - IMAGE_SIZE\n\n input_batchs = images[:, :, h_start:h_end, w_start:w_end]\n input_tensor = torch.from_numpy(input_batchs).cuda()\n inputs = Variable(input_tensor)\n \n coord_batchs = coords_full[:, :, h_start:h_end, 
w_start:w_end]\n coord_batchs = coord_batchs[:, :, ::16, ::16]\n coord_tensor = torch.from_numpy(coord_batchs).cuda()\n coords = Variable(coord_tensor)\n \n outputs = model(inputs, coords)\n outputs = outputs.cpu().data.numpy()\n\n # Accumulate logits from the overlapping sliding-window tiles.\n test_masks[:, :, h_start:h_end, w_start:w_end] += outputs\n \n # Fuse tile predictions, un-mirror the flipped angles, and save binary masks.\n test_masks = np.argmax(test_masks, axis=1).astype('uint8')\n for idx in range(batch_size):\n output_PIL = Image.fromarray(test_masks[idx].astype('uint8')*255).convert('1')\n if ifflip[idx]:\n output_PIL = ImageOps.mirror(output_PIL)\n mask_name = mask_names[idx]\n output_PIL.save(test_mask_path + mask_name)\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d06facdf88869e5eb76a4d8ea1305677d99fa476
3,692
ipynb
Jupyter Notebook
notebook/procs-hi-transliteration.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
3
2020-01-11T13:55:38.000Z
2020-08-25T22:34:15.000Z
notebook/procs-hi-transliteration.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
null
null
null
notebook/procs-hi-transliteration.ipynb
samlet/stack
47db17fd4fdab264032f224dca31a4bb1d19b754
[ "Apache-2.0" ]
1
2021-01-01T05:21:44.000Z
2021-01-01T05:21:44.000Z
22.375758
157
0.555525
[ [ [ "⊕ [sanskrit-coders/indic_transliteration: Python package for indic script transliteration](https://github.com/sanskrit-coders/indic_transliteration)\n\n```\npip install indic_transliteration\npip install git+https://github.com/sanskrit-coders/indic_transliteration/@master \n```", "_____no_output_____" ] ], [ [ "from indic_transliteration import sanscript\nfrom indic_transliteration.sanscript import SchemeMap, SCHEMES, transliterate\n\ndata = 'idam adbhutam'\nprint(transliterate(data, sanscript.HK, sanscript.TELUGU))", "ఇదమ్ అద్భుతమ్\n" ], [ "print(transliterate(data, sanscript.ITRANS, sanscript.DEVANAGARI))", "इदम् अद्भुतम्\n" ], [ "scheme_map = SchemeMap(SCHEMES[sanscript.VELTHUIS], SCHEMES[sanscript.TELUGU])\nprint(transliterate(data, scheme_map=scheme_map))", "ఇదమ్ అద్భుతమ్\n" ], [ "# Dravidian language extension (印度南部达罗毗荼人)\nfrom indic_transliteration import xsanscript\n# from indic_transliteration.xsanscript import SchemeMap, SCHEMES, transliterate\n\ndata = 'असय औषधिः ग्रन्थः। ऎ ऒ यॆक्ककॊ?'\nprint(transliterate(data, xsanscript.DEVANAGARI, xsanscript.KANNADA))", "ಅಸಯ ಔಷಧಿಃ ಗ್ರನ್ಥಃ। ಎ ಒ ಯೆಕ್ಕಕೊ?\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06fc5f9a05be40e1a95d184d8f4c626a481d4a6
25,450
ipynb
Jupyter Notebook
docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb
EvelynQiang/analytics-zoo
be5dd08abe9b14ac085817decd017862a273985a
[ "Apache-2.0" ]
null
null
null
docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb
EvelynQiang/analytics-zoo
be5dd08abe9b14ac085817decd017862a273985a
[ "Apache-2.0" ]
null
null
null
docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb
EvelynQiang/analytics-zoo
be5dd08abe9b14ac085817decd017862a273985a
[ "Apache-2.0" ]
null
null
null
44.337979
7,181
0.631788
[ [ [ "<a href=\"https://colab.research.google.com/github/yushan111/analytics-zoo/blob/add-autoestimator-quick-start/docs/docs/colab-notebook/orca/quickstart/autoestimator_pytorch_lenet_mnist.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "![image.png](data:image/png;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAUDBAQEAwUEBAQFBQUGBwwIBwcHBw8LCwkMEQ8SEhEPERETFhwXExQaFRERGCEYGh0dHx8fExciJCIeJBweHx7/2wBDAQUFBQcGBw4ICA4eFBEUHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh7/wAARCABNAI0DASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD7LrzPT/i1p958WpvAy2O2NZHgjvjPw8yrkptxxyGXOeorrfiDr0XhnwbqmuyEFrW3Zogf4pDwg/FiK+WW8OajpHw10T4lwM51E6w0zuTyU3fu2P1dG/77r2crwNLEQlKr192P+J6/16niZpj6uHnGNLp70v8ACnY+gP2hfiafhR4AXxUNGGr5vYrX7P8AafJ++GO7dtbpt6Y710Hwx8baL8QPBVh4q0Gbfa3aZaNj88Eg+/G47Mp4/IjgivC/25dVt9b/AGY9P1i0IMF5qVnOnPQMkhx+HT8K84+Gt/q/7OmseF9du5bm7+HXjfTrSa6Yjd9humiUs3Hdck/7UeRyUrx3FxbT3R7UZKSTWzPozQvjRp+q/HrVPhPHol3HeafE8j3rTKY3Coj8L1/jH5VN+0Z8XLP4Q+DrbWpdN/tW7vLsW1tZ/aPJ3/KWdi21sBQB26kV4d8Npobj/goV4puYJUlhlsZHjkRgyupt4CGBHUEHOaZ8W7WD42/te2Pw8lkaTQPDVhMLwoxwJSm52/B2hT/gJpDPqD4aeLbLxx4C0bxZp6eXBqdqk3lltxifo6E9yrAr+Fcd8IvjNp/xD8beKfDFpol3Yy+HpWilmlmVlmIlePIA5HKZ5ryv9gjxDeadbeKvhRrRKaj4fv3lijbqEL7JVHsJFB/7aVmfsV/8l7+Lv/X6/wD6VzUAeufDf456d4p+Kmr/AA41Pw/e+H9d00SYS5nR1nMZG4IV/wBkhx6rk1j+Pf2kNM0D4jap4J0TwnqniW90q2ee9ks5kVY/LQvIvIJJUYB/2jt61xX7bXhTUPC+r6J8cvCMyWesaTcRW962B+8BJWJyP4sZMbDurDsK3P2Gvh22j+Crj4i60/2nX/FJM4mc7nS2LEjJ/vO2Xb/gPpQBiN+2ZpiXq2T/AA38RrdPysBlQSN9F25PQ16T4d+OVpq0XhJpPC+p2UniMkJHPIoa3xcND8wIyc7d3HY15N49z/w8U8Jcn/jyj7/9O9xXffG8Z+PfgEHPL24/8ma78uw8K9ZxntZv7k2efmVedCipw3ul97Ol+O/x48I/CdYbTUkuNT1q4TzINNtSN+zON7seEUkEDqT2B5rkPhB+0u/jX4g6d4O1f4fap4duNUEjWc0s+9HCIznIZEOMKeRnnFec/s8WFr4+/bA8f+JvE0aXlzo885sYZhuETLP5MbYP9xFwPQkHrX2HdWFndTW01xbQzS2snmwO6BmifBBZSeVOCRx2JrgPQPn/AMdftQ22m+Pb3wn4K8Cax40n01mW+msnIVChw+wKjlgp4LHAyOM9a9b+EHjyy+I/ge18U2Gm6hp0M7vH5N7HtcMh2tgjhlzkbh6HoQRXyVc6b8Vf2Z/iN4k8T6X4dTxF4S1SUyT3IUsvlb2dd7L80LruIJIKnPfjH1N8C/iXoHxR8FLr+hQyWnlymC7s5cb7eYAMVyOGBDAhh1z2OQADvaKKKAPF/wBpaHxDrseieE9D0u+uI7m4E11PHAzRJztQMwGAASzHPoKgv/2edCXSp0tNb1p7pYW8lZJU8oyAHbkbemcd69b8Wa7Y+GfDOpeIdT837FptrJdT+Um5tiAk4Hc4HSvD/wDhsH4Qf89Nf/8ABd/9lXpU80r0aUKVF8qV/m/M8yplVCtVlUqrmb/BeR5n8T9H8a6v+y5N4RHhXXZr/TtegeCBLCVnaBlkJ2gDJCtuyR03CvoTRPAmneL/ANnXQvBfivT5Ujl0G0hmjkTbNbSrCuGAPKujD9CDxmuH/wCGwfhB/wA9Nf8A/Bd/9nR/w2D8IP8Anpr/AP4Lv/s65MTX9vVlVta+tjrwtD6vRjSve3U8V+BXgrxj8JP2gtefVtF1PUxo+h3rWs9vaySJfKqKYVjIByWAAC9RyO1aXwJ/ZsvfiFpus+MfiVd+J/D+rX2oyFIYlFvLID8zyOJEJwXYgdPu19g+BvEll4v8K2HiTTre9gsr+LzbdbuLypGQk7W25OARyPUEGsP4mfFfwH8OrcP4r8QW1nO67orRMy3Eg9RGuWx7nA96wOg+aLD4Xa98Df2m/C+p+E7HxH4g8NahGIb+7+ztO0SysY5RK0agAKdkgyOg9qwPh5rXxM+E/wAXPH2r6f8ACTxF4gh1jUZ1R1tZ40CrcSMHVhGwYENXo+qftseB4rpo9O8K+IbuJTgSSNFFu9wNzfrW/wCC/wBr74Wa3cpa6sNW8PSM20SXsAeHP+/GWx9SAKAD9omTxP8AED9kz7XH4T1O21vUHtJpNIjgklnhInGVK7Q3AGTwK9I/Zzsb3Tfgb4PsNRtJ7O7g0uJJoJ4ykkbDOQynkH2Ndpo+p6frGmw6l
pV9bX1lOu+G4t5RJHIPUMODVugD5a8beGvEU/7efhjxDBoOqS6PDaRrLfpaubdD5E4wZMbRyQOvcV23xi0fV7741+Cb+z0u9uLW3aHz54oGZIsXGTuYDA455r2+vLfih8fvhl8PbmSx1nXRdanGcPYaennzIfRsEKh9mYGunCYl4apzpX0a+9WOXF4VYmnyN21T+53PGvjF8P8A4h/C7403Pxi+FmmPrNlqJZtV02KMyMC+DKCi/MyMQHDLkq3bGM9P8Lfjl8R/iH8RNH0aL4Y3vh7RQZG1W9uEllAxE+xQzIgQF9vqx6cVhy/tteDhORF4N8QPDn77Swq2P93J/nXe/Dv9qP4UeLrqKxk1S40C9lICRatGIkZs4wJQSn5kVzHUeZ678aPjT4Th8R+D/G3wzudc1O8edNNvbGFzaGOTKqoCo3mxjPHIbHDc816B+xF8N9d+H/wyu5PEts9lqWsXgufsj/fgiVAqBx2Y/MSOwIB5yK94jdJUDowZWAKlTkEHvT6ACiiigDO8T6Jp/iPw7qGg6rG0thqFu9tcIrlC0bghgCORweorxv8A4ZM+Cv8A0L99/wCDSf8A+Kr3WigD548T/sz/AAE8OeHr/XtX0e9t7Cwge4uJDqk/yooJP8XJ7AdyQK+TvgL8OrT4sfG37Dp+mSWHheCdr27h8xnMForfLCXPJZuEz15Y9q9s/wCCgfxSMstr8K9FnJOUutYMZySesMBx+Dkf7nvXtP7JXwvX4Z/C2CO/gEevattvNTLDDRkj5Ifoinn/AGi1AGH+1f8AGyD4TeGrbw74ZW3/AOElvoMWqBQUsIB8olK9M8YRenBJ4GD4P8CP2cPEXxXc+PPiNrGoWmm6g3noWbfe6gCfvlnzsQ9iQSR0AGDXN+HbZvjz+13I+ps02l3OoyTSqScCxt87Y/YMqqv1cmvsXxn8YNF8F+NIvCtzo84tIIo/PuYjgQKy5GyMDLALjp9ADitqGHqV5ONNXaV/kjGviKdCKlUdk3b5kWi/s2/BjS7NbdPBNndEDDS3c0szt7ks3H4AVy3xD/ZJ+F+v2UreH7a58MagV/dy2srSw7u26Jycj/dKmtL4gWvjH4oabKnh+8sbDSbVopEia4Ia5ZwSN8qEqNqFG2DIBcZYkYHf/Dx9V0SztfCfiO7W71C3tVe3uxuxdRjAYZPJeMkA+qlG7kBypQVFVFPVvbqvMmNabrSpuGiW/R+R8PeHNf8AiR+y38Tzouro93otwwkmtUcm2voc482En7kg9eCCMMCK+/8Awj4g0vxT4asPEOi3K3OnX8CzwSDup7EdiDkEdiCK8q/bF8C2fjb4I6tdJCj6locbajZSryRsGZUz6Mgbj1C+leG/sY/FSXw38GfiBYXcnmDw5aNq2no/I+dWBT6eYEP1c1gdBv8A7ZHx/wBS07VZvhn4BupYr7iLVL+3JMqM3/LvERyG5G5hyM7Rg5qh8Dv2P473T4dc+KV3dJNOBIukW0mxkzz++k5O71Vends8Vx/7CHg8eNfi/qnjXXs3v9igXW6UZ8y9mZtrn1IxI312ntX35QB5Xb/s7fBiC1FsngHTGQDG52ld/wDvotn9a8v+LX7HfhHU9PnvPh/dT6DqagtHazytNaSn+6S2XT65Ye1fUlFAHwT+zr8ZPFHwf8cH4afEn7TFosdwLZkujl9Lc9HU94TkEgcYIZe4P3qjK6B0YMpGQQcgivkj/gop4CtZ/DWlfEK0hRL20nWwvWUYMkL7jGW/3WBH0f2FeofsW+L7jxd8BdKe9mM15pMj6ZM7NksIsGMn/tmyD8KAPaKKKKACuP8AjL470/4c/DvVPFeobX+yx7baAnBnnbiOMfU9fQAntXYV8Cftj+PL/wCJ/wAXrH4b+FN15Z6Zdi0ijiORc37nY7fRfuA9vnPQ0AM/Y+8Cah8VPjFf/EbxXuvLLTLv7bPJIPlub5zuRPov3yO2EHQ197ajG8un3EURw7xOq/UggVynwV8Baf8ADf4c6Z4Usdjtbx77qcDH2i4bmST8TwPRQB2rsz0oA/PH9gaRLL9oh7a7+WeXS7uBFbr5gZGI+uEavqP4mfEbRNB+KVvpdz4ITWL23hULdLGrXJMikqkK7SW6469SQPf5U+Omkav8Df2nk8V6TARaTXx1fTichJUdj50BPbBZ0I67WU96+5/h94g8JfEPw/pnjXREtLwSR4jleJTPav8AxRMeqMpOCM+44INdGGq06Um6kbqzW9jmxVKpVilTlZ3T2ueK2ngHVPEngKSztdestLa3uUuJbRpz9luFkDFZncEjzASY+OP3RB3EAjubvwbq91pGk+DLfxhqkl/Y2yzXF4oj2Wi7GRQp2eZmTLIAWzsDk/w5vfGH4TW3jCIXWj3EWmam0gNwW3eTcqM8uinG8E5DYzyQfbr/AIe+GYfCfhe10lZ2up40BuLl87pnwBnkk4AAUDsqgdq3lXthIRU9U27W2879fQ540b4ucnDRxSvffyt09TzTw94M1D4b/Bzxy3iLVbe5il065l8qJmMcarA4J+bHLZGeOw618W/BCxu7r4ffFiW3VikXhdN+P+vuF/8A0GN/1r6R/bz+L1jp3hiX4ZaJdLLquobW1Qxtn7Nbg7hGx7O5A47KDn7wrT/Yt+FCWPwN1e48RWpSTxnEwkjZcMLIoyR/i293Hsy1zYivPEVHUnuzrw9CGHpqnDZHN/8ABNO7tzpPjWxDKLhbi0lI7lCsgH6g/nX2BX5u/CHxHqX7O/7Q15pniSOUWCSNp+phVPzwMQY7hB3AwrjuVJHU1+jGk6hZarp1vqOnXUN3Z3MaywzwuGSRCMhgR1BrE2LVFFITigDwr9u+5gg/Zx1iKYgPc3dpFDnu4mV+P+Ao1cr/AME4baeP4R67cuCIZtccR577YYgT+teT/twfFWHx74vsPAXhSX+0NP0q4PmyW/zi6vW+QKmPvBASoI6szY4AJ+t/2dvArfDv4Q6J4ZnC/bo4jPfEHObiQ7nGe+0kLn0WgD0GiiigDyD9rH4or8M/hdc3FlOE17VN1ppig/MjEfPN9EXn/eKjvX5//Bz4iXPw38ajxZbaLYaxqEcTpAb4uRCz8NINpBLYyMn+8a/Vi5s7W5Km4t4Ziv3TJGGx9M1F/ZWm/wDQPtP+/C/4UAfDn/DbXjv/AKFLw3+c/wD8XR/w2147/wChS8N/nP8A/F19x/2Vpv8A0D7T/vwv+FH9lab/ANA+0/78L/hQB4b4bsdP/ad/Z7tb3xjp9tp17NcT/ZJ7IEm0kjcoHXeSSCB8yk4I9OCPmS+8NfHD9mzxLPqWkm5OlO3zXltEZ7C6QdPNT+Bsf3sMOcHvX6K28EVvH5cMaRoOiooUfkKe6hlKsAQRggjrQB8UaH+3BqkVqE1rwDZ3VwF5ktNRaFSf91kfH51znjP9rX4m+MlOieC9Eh0OS5yi/Yle7vGz2RsYB9wufQivs/VPhl8O9UnNxqPgbw1dzE5Mkulwlj9Tt5rW0Dwz4d0BCmhaFpelKRgiztI4c/8AfIFAHx3+zv8Asta3qutxeMPizFLF
b+b9oXSrh99xdyZzuuDztUnkqTubvgdftiKNIkWONVVFACqBgADsKcKKAPF/2mfgNpXxZ0qO9tJotN8T2cZW1vWX5JU5PlS45K56MOVJPUEg/JmgeL/jl+zhqTaNfWNxFpPmEizv4zNYyknlopFOFJ6/Kw68jNfo3UN5a295bvb3UEU8LjDRyIGVh7g8GgD40tv247lbIrcfDqF7oDG6PVyqE/Qxk/rXBeMvjz8ZfjNK/hbwtpk1naXI2SWWixO8sintJMeQvrjaMda+3J/hV8NJ7n7TN8P/AAs82c7zpMOc/wDfNdNpGk6ZpFr9l0rTrOwt/wDnlbQLEn5KAKAPm39lX9mdfBF5B4x8ceRc+IYxus7KMh4rEn+Mt0eX0xwvYk4I+nqKKACiiigD/9k=)\n---", "_____no_output_____" ], [ "##### Copyright 2018 Analytics Zoo Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#", "_____no_output_____" ] ], [ [ "## **Environment Preparation**", "_____no_output_____" ], [ "**Install Java 8**\n\nRun the cell on the **Google Colab** to install jdk 1.8.\n\n**Note:** if you run this notebook on your computer, root permission is required when running the cell to install Java 8. (You may ignore this cell if Java 8 has already been set up in your computer).", "_____no_output_____" ] ], [ [ "# Install jdk8\n!apt-get install openjdk-8-jdk-headless -qq > /dev/null\nimport os\n# Set environment variable JAVA_HOME.\nos.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\n!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java\n!java -version", "_____no_output_____" ] ], [ [ "**Install Analytics Zoo**\n\n ", "_____no_output_____" ], [ "[Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) is needed to prepare the Python environment for running this example. 
\n\n**Note**: The following code cell is specific for setting up conda environment on Colab; for general conda installation, please refer to the [install guide](https://docs.conda.io/projects/conda/en/latest/user-guide/install/) for more details.", "_____no_output_____" ] ], [ [ "import sys\n\n# Get current python version\nversion_info = sys.version_info\npython_version = f\"{version_info.major}.{version_info.minor}.{version_info.micro}\"", "_____no_output_____" ], [ "# Install Miniconda\n!wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh\n!chmod +x Miniconda3-4.5.4-Linux-x86_64.sh\n!./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local\n\n# Update Conda\n!conda install --channel defaults conda python=$python_version --yes\n!conda update --channel defaults --all --yes\n\n# Append to the sys.path\n_ = (sys.path\n .append(f\"/usr/local/lib/python{version_info.major}.{version_info.minor}/site-packages\"))\n\nos.environ['PYTHONHOME']=\"/usr/local\"", "_____no_output_____" ] ], [ [ "You can install the latest pre-release version using `pip install --pre --upgrade analytics-zoo[ray]`.", "_____no_output_____" ] ], [ [ "# Install latest pre-release version of Analytics Zoo \n# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.\n!pip install --pre --upgrade analytics-zoo[ray]", "_____no_output_____" ], [ "# Install python dependencies\n!pip install torch==1.7.1 torchvision==0.8.2", "_____no_output_____" ] ], [ [ "## **Automated hyper-parameter search for PyTorch using Orca APIs**\n\nIn this guide we will describe how to enable automated hyper-parameter search for PyTorch using Orca `AutoEstimator` in 5 simple steps.", "_____no_output_____" ] ], [ [ "# import necesary libraries and modules\nfrom __future__ import print_function\nimport os\nimport argparse\n\nfrom zoo.orca import init_orca_context, stop_orca_context\nfrom zoo.orca import OrcaContext", "_____no_output_____" ] ], [ [ "### **Step 1: Init Orca Context**", "_____no_output_____" ] ], [ [ "# recommended to set it to True when running Analytics Zoo in Jupyter notebook. \nOrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook).\n\ncluster_mode = \"local\"\n\nif cluster_mode == \"local\":\n init_orca_context(cores=4, memory=\"2g\", init_ray_on_spark=True) # run in local mode\nelif cluster_mode == \"k8s\":\n init_orca_context(cluster_mode=\"k8s\", num_nodes=2, cores=4, init_ray_on_spark=True) # run on K8s cluster\nelif cluster_mode == \"yarn\":\n init_orca_context(\n cluster_mode=\"yarn-client\", cores=4, num_nodes=2, memory=\"2g\", init_ray_on_spark=True, \n driver_memory=\"10g\", driver_cores=1) # run on Hadoop YARN cluster", "_____no_output_____" ] ], [ [ "This is the only place where you need to specify local or distributed mode. 
View [Orca Context](https://analytics-zoo.readthedocs.io/en/latest/doc/Orca/Overview/orca-context.html) for more details.\n\n**Note**: You should export HADOOP_CONF_DIR=/path/to/hadoop/conf/dir when you run on Hadoop YARN cluster.", "_____no_output_____" ], [ "### **Step 2: Define the Model**\nYou may define your model, loss and optimizer in the same way as in any standard PyTorch program.", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass LeNet(nn.Module):\n def __init__(self, fc1_hidden_size=500):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, fc1_hidden_size)\n self.fc2 = nn.Linear(fc1_hidden_size, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\ncriterion = nn.NLLLoss()", "_____no_output_____" ] ], [ [ "After defining your model, you need to define a *Model Creator Function* that returns an instance of your model, and a *Optimizer Creator Function* that returns a PyTorch optimizer. Note that both the *Model Creator Function* and the *Optimizer Creator Function* should take `config` as input and get the hyper-parameter values from `config`.", "_____no_output_____" ] ], [ [ "def model_creator(config):\n model = LeNet(fc1_hidden_size=config[\"fc1_hidden_size\"])\n return model\n\ndef optim_creator(model, config):\n return torch.optim.Adam(model.parameters(), lr=config[\"lr\"])", "_____no_output_____" ] ], [ [ "### **Step 3: Define Dataset**\n\nYou can define the train and validation datasets using *Data Creator Function* that has one parameter of `config` and returns a PyTorch `DataLoader`.", "_____no_output_____" ] ], [ [ "import torch\nfrom torchvision import datasets, transforms\n\ntorch.manual_seed(0)\ndir = './dataset'\ntest_batch_size = 640\n\ndef train_loader_creator(config):\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(dir, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=config[\"batch_size\"], shuffle=True)\n return train_loader\n\ndef test_loader_creator(config):\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(dir, train=False, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=test_batch_size, shuffle=False)\n return test_loader", "_____no_output_____" ] ], [ [ "### **Step 4: Define search space**\n\nYou should define a dictionary as your hyper-parameter search space. The keys are hyper-parameter names which should be the same with those in your creators, and you can specify how you want to sample each hyper-parameter in the values of the search space. See more about [automl.hp](https://analytics-zoo.readthedocs.io/en/latest/doc/PythonAPI/AutoML/automl.html#orca-automl-hp).\n", "_____no_output_____" ] ], [ [ "from zoo.orca.automl import hp\n\nsearch_space = {\n \"fc1_hidden_size\": hp.choice([500, 600]),\n \"lr\": hp.choice([0.001, 0.003]),\n \"batch_size\": hp.choice([160, 320, 640]),\n}", "_____no_output_____" ] ], [ [ "### **Step 5: Automatically fit and search *with* Orca AutoEstimator**", "_____no_output_____" ], [ "First, create an AutoEstimator. 
You can refer to [AutoEstimator API doc](https://analytics-zoo.readthedocs.io/en/latest/doc/PythonAPI/AutoML/automl.html#orca-automl-auto-estimator) for more details.", "_____no_output_____" ] ], [ [ "from zoo.orca.automl.auto_estimator import AutoEstimator\n\nauto_est = AutoEstimator.from_torch(model_creator=model_creator,\n optimizer=optim_creator,\n loss=criterion,\n logs_dir=\"/tmp/zoo_automl_logs\",\n resources_per_trial={\"cpu\": 2},\n name=\"lenet_mnist\")", "_____no_output_____" ] ], [ [ "Next, use the auto estimator to fit and search for the best hyper-parameter set.", "_____no_output_____" ] ], [ [ "auto_est.fit(data=train_loader_creator,\n validation_data=test_loader_creator,\n search_space=search_space,\n n_sampling=2,\n epochs=1,\n metric=\"accuracy\")", "_____no_output_____" ] ], [ [ "Finally, you can get the best learned model and the best hyper-parameters.", "_____no_output_____" ] ], [ [ "best_model = auto_est.get_best_model().model # will change later", "_____no_output_____" ], [ "best_config = auto_est.get_best_config()\nprint(best_config)", "_____no_output_____" ] ], [ [ "You can use the best learned model and the best hyper-parameters as you want. Here, we demonstrate how to evaluate on the test dataset.", "_____no_output_____" ] ], [ [ "test_loader = test_loader_creator(best_config)\nbest_model.eval()\ncorrect = 0\nwith torch.no_grad():\n for data, target in test_loader:\n output = best_model(data)\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).sum().numpy()\naccuracy = 100. * correct / len(test_loader.dataset)\nprint(f\"accuracy is {accuracy}%\")", "_____no_output_____" ] ], [ [ "You can find the accuracy of the best model has reached 98%.", "_____no_output_____" ] ], [ [ "# stop orca context when program finishes\nstop_orca_context()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06fd54463088bdc177a5356657d261971a1b6b7
35,680
ipynb
Jupyter Notebook
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial1.ipynb
haltakov/course-content-dl
0131a691d687075551053a4f8b79f65535150330
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial1.ipynb
haltakov/course-content-dl
0131a691d687075551053a4f8b79f65535150330
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial1.ipynb
haltakov/course-content-dl
0131a691d687075551053a4f8b79f65535150330
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
38.406889
603
0.592853
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W1D2_LinearDeepLearning/student/W1D2_Tutorial1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "\n# DL Neuromatch Academy: Week 1, Day 2, Tutorial 1\n# Gradients and AutoGrad\n\n__Content creators:__ Saeed Salehi, Vladimir Haltakov, Andrew Saxe\n\n\n\n__Content reviewers:__ Polina Turishcheva, Atnafu Lambebo, Yu-Fang Yang\n\n__Content editors:__ Anoop Kulkarni\n\n__Production editors:__ Khalid Almubarak, Spiros Chavlis", "_____no_output_____" ], [ "---\n#Tutorial Objectives\n\nDay 2 Tutorial 1 will continue on buiding PyTorch skillset and motivate its core functionality, Autograd. In this notebook, we will cover the key concepts and ideas of:\n\n* Gradient descent\n* PyTorch Autograd\n* PyTorch nn module\n\n\n", "_____no_output_____" ] ], [ [ "#@markdown Tutorial slides\n# you should link the slides for all tutorial videos here (we will store pdfs on osf)\n\nfrom IPython.display import HTML\nHTML('<iframe src=\"https://docs.google.com/presentation/d/1kfWWYhSIkczYfjebhMaqQILTCu7g94Q-o_ZcWb1QAKs/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\"></iframe>')", "_____no_output_____" ] ], [ [ "---\n# Setup\n", "_____no_output_____" ] ], [ [ "# Imports\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn\nimport time\nimport random\n\nfrom tqdm.notebook import tqdm, trange", "_____no_output_____" ], [ "#@title Figure settings\n\nimport ipywidgets as widgets # interactive display\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")", "_____no_output_____" ], [ "#@title Plotting functions\n\ndef ex3_plot(epochs, losses, values, gradients):\n f, (plot1, plot2, plot3) = plt.subplots(3, 1, sharex=True, figsize=(10, 7))\n plot1.set_title(\"Cross Entropy Loss\")\n plot1.plot(np.linspace(1, epochs, epochs), losses, color='r')\n\n plot2.set_title(\"First Parameter value\")\n plot2.plot(np.linspace(1, epochs, epochs), values, color='c')\n\n plot3.set_title(\"First Parameter gradient\")\n plot3.plot(np.linspace(1, epochs, epochs), gradients, color='m')\n plot3.set_xlabel(\"Epoch\")\n plt.show()", "_____no_output_____" ], [ "#@title Helper functions\n\nseed = 1943 # McCulloch & Pitts (1943)\nrandom.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\ntorch.cuda.manual_seed(seed)\nnp.random.seed(seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False", "_____no_output_____" ] ], [ [ "---\n# Section 1: Gradient Descent Algorithm\n\nSince the goal of most learning algorithms is **minimizing the risk (cost) function**, optimization is the soul of learning! 
The gradient descent algorithm, along with its variations such as stochastic gradient descent, is one of the most powerful and popular optimization methods used for deep learning.\n\n## 1.1: Gradient Descent\n\n", "_____no_output_____" ] ], [ [ "#@title Video 1.1: Introduction\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"PFQeUDxQFls\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ], [ "#@title Video 1.2: Gradient Descent\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"Z3dyRLR8GbM\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "\nGradient Descent (introduced by Augustin-Louis Cauchy in 1847) is an **iterative method** to **minimize** a **continuous** and (ideally) **differentiable function** of **many variables**.\n\n### Definition\nLet $f(\\mathbf{w}): \\mathbb{R}^d \\rightarrow \\mathbb{R}$ be a differentiable function. Gradient Descent is an iterative algorithm for minimizing the function $f$, starting with an initial value for variables $\\mathbf{w}$, taking steps of size $\\eta$ in the direction of the negative gradient at the current point to update the variables $\\mathbf{w}$.\n\n$$ \\mathbf{w}^{(t+1)} = \\mathbf{w}^{(t)} - \\eta \\nabla f (\\mathbf{w}^{(t)}) $$\n\nwhere $\\eta > 0$ and $\\nabla f (\\mathbf{w})= \\left( \\frac{\\delta f(\\mathbf{w})}{\\delta w_1}, ..., \\frac{\\delta f(\\mathbf{w})}{\\delta w_d} \\right)$. Since negative gradients always point locally in the direction of steepest descent, the algorithm makes small steps at each point **towards** the minimum.\n \n<br/>\n\n### Vanilla Algorithm\n\n---\n> *inputs*: initial guess $\\mathbf{w}^{(0)}$, step size $\\eta > 0$, number of steps $T$\n\n> *For* *t = 1, 2, ..., T* *do* \\\n$\\qquad$ $\\mathbf{w}^{(t+1)} = \\mathbf{w}^{(t)} - \\eta \\nabla f (\\mathbf{w}^{(t)})$\\\n*end*\n\n> *return*: $\\mathbf{w}^{(t+1)}$\n\n---\n\n<br/>\n\nTo be able to use this algorithm, we need to calculate the gradient of the loss with respect to the learnable parameters.\n", "_____no_output_____" ], [ "## 1.2: Calculating Gradients\n\nTo minimize the empirical risk (loss) function using gradient descent, we need to calculate the vector of partial derivatives:\n\n$$\\dfrac{\\partial Loss}{\\partial \\mathbf{w}} = \\left[ \\dfrac{\\partial Loss}{\\partial w_1}, \\dfrac{\\partial Loss}{\\partial w_2} , ..., \\dfrac{\\partial Loss}{\\partial w_d} \\right]^{\\top} $$\n\nAlthough PyTorch and other deep learning frameworks (e.g. JAX and TensorFlow) provide us with incredibly powerful and efficient algorithms for automatic differentiation, calculating few derivatives with hand would be fun.\n\n### Exercise 1.2\n1. Given $L(w_1, w_2) = w_1^2 - 2w_1 w_2$ find $\\dfrac{\\partial L}{\\partial w_1}$ and $\\dfrac{\\partial L}{\\partial w_1}$.\n\n<br/>\n\n2. Given $f(x) = sin(x)$ and $g(x) = \\ln(x)$, find the derivative of their composite function $\\dfrac{d (f \\circ g)(x)}{d x}$ (*hint: chain rule*).\n\n **Chain rule**: For a composite function $F(x) = f(g(x)) \\equiv (f \\circ g)(x)$:\n$$F'(x) = f'(g(x)) \\cdot g'(x)$$\nor differently denoted:\n$$ \\frac{dF}{dx} = \\frac{df}{dg} ~ \\frac{dg}{dx} $$\n\n<br/>\n\n3. Given $f(x, y, z) = \\tanh \\left( \\ln \\left[1 + z \\frac{2x}{sin(y)} \\right] \\right)$, how easy is it to derive $\\dfrac{\\partial f}{\\partial x}$, $\\dfrac{\\partial f}{\\partial y}$ and $\\dfrac{\\partial f}{\\partial z}$? 
(*hint: you don't have to actually calculate them!*)\n", "_____no_output_____" ], [ "## 1.3: Computational Graphs and Backprop\n", "_____no_output_____" ] ], [ [ "#@title Video 1.3: Computational Graph\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"7c8iCHcVgVs\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "\nThe last function in *Exercise 1.2* is an example of how overwhelming the derivation of gradients can get, as the number of variables and nested functions increases. This function is still extremely simple compared to the loss functions of modern neural networks. So how do PyTorch and similar frameworks approach such beasts?\n\n### 1.3.1: Computational Graphs\n\nLet’s look at the function again:\n\n$$f(x, y, z) = \\tanh \\left(\\ln \\left[1 + z \\frac{2x}{sin(y)} \\right] \\right)$$\n\nwe can build a so-called computational graph (shown below) to break the original function to smaller and more approachable expressions. If this \"reverse engineering\" approach seems unintuitive and arbitrary, it's because it is! Usually, the graph is built first.\n\n<center><img src=\"https://raw.githubusercontent.com/ssnio/statics/main/neuromatch/comput_graph.png\" alt=\"Computation Graph\" width=\"800\"/></center>\n\nStarting from $x$, $y$, and $z$ and following the arrows and expressions, you would see that our graph returns the same function as $f$. It does so by calculating intermediate variables $a,b,c,d,$ and $e$. This is called the **forward pass**.\n\nNow, let’s start from $f$, and work our way against the arrows while calculating the gradient of each expression as we go. This is called **backward pass**, which later inspires **backpropagation of errors**.\n\n<center><img src=\"https://raw.githubusercontent.com/ssnio/statics/main/neuromatch/comput_graph_full.png\" alt=\"Computation Graph full\" width=\"1200\"/></center>\n\nBecause we've split the computation into simple operations on intermediate variables, I hope you can appreciate how easy it now is to calculate the partial derivatives. \n\nNow we can use chain rule and simply calculate any gradient:\n\n$$ \\dfrac{\\partial f}{\\partial x} = \\dfrac{\\partial f}{\\partial e}~\\dfrac{\\partial e}{\\partial d}~\\dfrac{\\partial d}{\\partial c}~\\dfrac{\\partial c}{\\partial a}~\\dfrac{\\partial a}{\\partial x} = \\left( 1-\\tanh^2(e) \\right) \\cdot \\frac{1}{d}\\cdot z \\cdot \\frac{1}{b} \\cdot 2$$\n\nConveniently, the values for $e$, $b$, and $d$ are available to us from when we did the forward pass through the graph. 
That is, the partial derivatives have simple expressions in terms of the intermediate variables $a,b,c,d,e$ that we calculated and stored during the forward pass.\n", "_____no_output_____" ], [ "### Exercise 1.3\nFor the function above, calculate the $\\dfrac{\\partial f}{\\partial y}$ and $\\dfrac{\\partial f}{\\partial z}$ using the computational graph and chain rule.", "_____no_output_____" ], [ "For more: [Calculus on Computational Graphs: Backpropagation](https://colah.github.io/posts/2015-08-Backprop/)", "_____no_output_____" ], [ "---\n# Section 2: PyTorch AutoGrad", "_____no_output_____" ] ], [ [ "#@title Video 2.1: Automatic Differentiation\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"h8B8Nlcz7yY\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "Deep learning frameworks such as PyTorch, JAX, and TensorFlow come with a very efficient and sophisticated set of algorithms, commonly known as Automatic differentiation. AutoGrad is PyTorch's automatic differentiation engine. Here we start by covering the essentials of AutoGrad, but you will learn more in the coming days.\n\n", "_____no_output_____" ], [ "## Section 2.1: Forward Propagation\n\nEverything starts with the forward propagation (pass). PyTorch plans the computational graph, as we declare the variables and operations, and it builds the graph when we call the backward pass. PyTorch rebuilds the graph every time we iterate or change it (or simply put, PyTorch uses a dynamic graph).\n\nBefore we start our first example, let's recall gradient descent algorithm. In gradient descent algorithm, it is only required to have the gradient of our cost function with respect to variables which are accessible to us for updating (changing). These variables are often called \"learnable parameters\" or simply parameter in PyTorch. In the case of neural networks, weights and biases are often the learnable parameters.", "_____no_output_____" ], [ "### Exercise 2.1\n\nIn PyTorch, we can set the optional argument `requires_grad` in tensors to `True`, so PyTorch would track every operation on them while configuring the computational graph. 
For this exercise, use the provided tensors to build the following graph.\n\n<br/>\n\n<center><img src=\"https://raw.githubusercontent.com/ssnio/statics/main/neuromatch/simple_graph.png\" alt=\"Simple nn graph\" width=\"600\"/></center>", "_____no_output_____" ] ], [ [ "class SimpleGraph:\n def __init__(self, w=None, b=None):\n \"\"\"Initializing the SimpleGraph\n\n Args:\n w (float): initial value for weight\n b (float): initial value for bias\n \"\"\"\n if w is None:\n self.w = torch.randn(1, requires_grad=True)\n else:\n self.w = torch.tensor([w], requires_grad=True)\n if b is None:\n self.b = torch.randn(1, requires_grad=True)\n else:\n self.b = torch.tensor([b], requires_grad=True)\n\n def forward(self, x):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor): 1D tensor of features\n\n Returns:\n torch.Tensor: model predictions\n \"\"\"\n assert isinstance(x, torch.Tensor)\n #################################################\n ## Implement the the forward pass to calculate prediction\n ## Note that prediction is not the loss, but the value after `tanh`\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Forward Pass `forward`\")\n #################################################\n prediction = ...\n return prediction\n\n\ndef sq_loss(y_true, y_prediction):\n \"\"\"L2 loss function\n\n Args:\n y_true (torch.Tensor): 1D tensor of target labels\n y_true (torch.Tensor): 1D tensor of predictions\n\n Returns:\n torch.Tensor: L2-loss (squared error)\n \"\"\"\n assert isinstance(y_true, torch.Tensor)\n assert isinstance(y_prediction, torch.Tensor)\n #################################################\n ## Implement the L2-loss (squred error) given true label and prediction\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Loss function `sq_loss`\")\n #################################################\n loss = ...\n return loss\n\n\n# # Uncomment to run\n# feature = torch.tensor([1]) # input tensor\n# target = torch.tensor([7]) # target tensor\n\n# simple_graph = SimpleGraph(-0.5, 0.5)\n# print(\"initial weight = {} \\ninitial bias = {}\".format(simple_graph.w.item(),\n# simple_graph.b.item()))\n\n# prediction = simple_graph.forward(feature)\n# square_loss = sq_loss(target, prediction)\n\n# print(\"for x={} and y={}, prediction={} and L2 Loss = {}\".format(feature.item(),\n# target.item(),\n# prediction.item(),\n# square_loss.item()))", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D2_LinearDeepLearning/solutions/W1D2_Tutorial1_Solution_b9fccdbe.py)\n\n", "_____no_output_____" ], [ "It is important to appreciate the fact that PyTorch can follow our operations as we arbitrary go through classes and functions.", "_____no_output_____" ], [ "## 2.2 Backward Propagation\n\nHere is where all the magic lies. We can first look at the operations that PyTorch kept track of. Tensor property `grad_fn` keeps reference to backward propagation functions.", "_____no_output_____" ] ], [ [ "print('Gradient function for prediction =', prediction.grad_fn)\nprint('Gradient function for loss =', square_loss.grad_fn)", "_____no_output_____" ] ], [ [ "Now let's kick off backward pass to calculate the gradients by calling the `.backward()` on the tensor we wish to initiate the backpropagation from. Often, `.backward()` is called on the loss, which is the last on the graph. 
Before doing that, let's calculate the loss gradients:\n\n$$\\frac{\\partial{loss}}{\\partial{w}} = - 2 x (y_t - y_p)(1 - y_p^2)$$\n\n$$\\frac{\\partial{loss}}{\\partial{b}} = - 2 (y_t - y_p)(1 - y_p^2)$$\n\nWe can then compare it to PyTorch gradients, which can be obtained by calling `.grad` on tensors.\n\n**Important Notes**\n* Always keep in mind that PyTorch is tracking all the operations for tensors that require grad. To stop this tracking, we use `.detach()`.\n\n* PyTorch builds the graph only when `.backward()` is called and then it is set free. If you try calling `.backward()` after it is already called, you get the following error:\n\n *`Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling .backward() or autograd.grad() the first time.`*\n\n* Learnable parameters are \"contagious\". If you recall from our computational graph, we need all the intermediate gradients to be able to use the chain rule. Therefore, we need to `.detach()` any tensor that was on the path of gradient flow (e.g. prediction tensor).\n\n* `.backward()` accumulates gradients in the leaves. For most of training methods, we call `.zero_grad()` on the loss or optimizer to zero `.grad` attributes (see [autograd.backward](https://pytorch.org/docs/stable/autograd.html#torch.autograd.backward) for more).", "_____no_output_____" ] ], [ [ "# analytical gradients (remember detaching)\nana_dloss_dw = - 2 * feature * (target - prediction.detach())*(1 - prediction.detach()**2)\nana_dloss_db = - 2 * (target - prediction.detach())*(1 - prediction.detach()**2)\n\nsquare_loss.backward() # first we should call the backward to build the graph\nautograd_dloss_dw = simple_graph.w.grad\nautograd_dloss_db = simple_graph.b.grad\n\nprint(ana_dloss_dw == autograd_dloss_dw)\nprint(ana_dloss_db == autograd_dloss_db)", "_____no_output_____" ] ], [ [ "References and more:\n* [A GENTLE INTRODUCTION TO TORCH.AUTOGRAD](https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html)\n\n* [AUTOMATIC DIFFERENTIATION PACKAGE - TORCH.AUTOGRAD](https://pytorch.org/docs/stable/autograd.html)\n\n* [AUTOGRAD MECHANICS](https://pytorch.org/docs/stable/notes/autograd.html)\n\n* [AUTOMATIC DIFFERENTIATION WITH TORCH.AUTOGRAD](https://pytorch.org/tutorials/beginner/basics/autogradqs_tutorial.html)", "_____no_output_____" ], [ "---\n# Section 3: PyTorch's Neural Net module (`nn.Module`)", "_____no_output_____" ] ], [ [ "#@title Video 3.1: Putting it together\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"rUChBWj9ihw\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "In this section we will focus on training the simple neural network model from yesterday.", "_____no_output_____" ] ], [ [ "#@title Generate the sample dataset\nimport sklearn.datasets\n\n# Create a dataset of 256 points with a little noise\nX_orig, y_orig = sklearn.datasets.make_moons(256, noise=0.1)\n\n# Plot the dataset\nplt.figure(figsize=(9, 7))\nplt.scatter(X_orig[:,0], X_orig[:,1], s=40, c=y_orig)\nplt.xlabel(\"$x_1$\")\nplt.ylabel(\"$x_2$\")\nplt.show()\n\n# Select the appropriate device (GPU or CPU)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# Convert the 2D points to a float tensor\nX = torch.from_numpy(X_orig).type(torch.FloatTensor)\nX = X.to(device)\n\n# Convert the labels to a long interger tensor\ny = torch.from_numpy(y_orig).type(torch.LongTensor)\ny = 
y.to(device)", "_____no_output_____" ] ], [ [ "Let's define the same simple neural network model as in Day 1. This time we will not define a `train` method, but instead implement it outside of the class so we can better inspect it.", "_____no_output_____" ] ], [ [ "# Simple neural network with a single hidden layer\nclass NaiveNet(nn.Module):\n\n def __init__(self):\n \"\"\"\n Initializing the NaiveNet\n \"\"\"\n super(NaiveNet, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(2, 16),\n nn.ReLU(),\n nn.Linear(16, 2),\n )\n\n def forward(self, x):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor): 2D tensor of features\n\n Returns:\n torch.Tensor: model predictions\n \"\"\"\n return self.layers(x)", "_____no_output_____" ] ], [ [ "PyTorch provides us with ready to use neural network building blocks, such as linear or recurrent layers, activation functions and loss functions, packed in the [`torch.nn`](https://pytorch.org/docs/stable/nn.html) module. If we build a neural network using the `torch.nn` layers, the weights and biases are already in `requires_grad` mode. \n\nNow let's prepare the training! We need 3 things for that:\n\n* **Model parameters** - Model parameters refer to all the learnable parameters' of the model which are accessible by calling `.parameters()` on the model. Please note that not all the `requires_grad` are seen as model parameters. To create a custom model parameter, you can use [`nn.Parameter`](https://pytorch.org/docs/stable/generated/torch.nn.parameter.Parameter.html) (*A kind of Tensor that is to be considered a module parameter*). When we create a new instace of our model, layer parameters are initialized using a uniform distribution (more on that in the coming tutorials and days).\n\n* **Loss function** - we need to define the loss that we are going to be optimizing. The cross entropy loss is suitable for classification problems.\n\n* **Optimizer** - the optimizer will perform the adaptation of the model parameters according to the chosen loss function. The optimizer takes the parameters of the model (often by calling `.parameters()` on the model) as its input to be adapted.\n\nYou will learn more details about choosing the right loss function and optimizer later in the course.", "_____no_output_____" ] ], [ [ "# Create an instance of our network\nnaive_model = NaiveNet().to(device)\n\n# Create a cross entropy loss function\ncross_entropy_loss = nn.CrossEntropyLoss()\n\n# Stochstic Gradient Descent optimizer with a learning rate of 1e-1\nlearning_rate = 1e-1\nsgd_optimizer = torch.optim.SGD(naive_model.parameters(), lr=learning_rate)", "_____no_output_____" ] ], [ [ "The training process in PyTorch is interactive - you can perform training iterations as you wish and inspect the results after each iteration. We encourage leaving the loss function outside the explicit forward pass function, and rather calculate it on the output (prediction).\n\nLet's perform one training iteration. You can run the cell multiple times and see how the parameters are being updated and the loss is reducing. We pick the parameters of the first neuron in the first layer. 
Please make sure you go through all the commands and discuss their purpose with the pod.", "_____no_output_____" ] ], [ [ "# Reset all gradients to 0\nsgd_optimizer.zero_grad()\n\n# Forward pass (Compute the output of the model on the data)\ny_logits = naive_model(X)\n\n# Compute the loss\nloss = cross_entropy_loss(y_logits, y)\nprint('Loss:', loss.item())\n\n# Perform backpropagation to build the graph and compute the gradients\nloss.backward()\n\n# `.parameters()` returns a generator\nprint('Gradients:', next(naive_model.parameters()).grad[0].detach().numpy())\n\n# Print model's first learnable parameters\nprint('Parameters before:', next(naive_model.parameters())[0].detach().numpy())\n\n# Optimizer takes a step in the steepest direction (negative of gradient)\n# and \"updates\" the weights and biases of the network\nsgd_optimizer.step()\n\n# Print model's first learnable parameters\nprint('Parameters after: ', next(naive_model.parameters())[0].detach().numpy())", "_____no_output_____" ] ], [ [ "## Exercise 3\nFollowing everything we learned so far, we ask you to complete the `train` function below.", "_____no_output_____" ] ], [ [ "def train(features, labels, model, loss_fun, optimizer, n_epochs):\n\n \"\"\"Training function\n\n Args:\n features (torch.Tensor): features (input) with shape torch.Size([n_samples, 2])\n labels (torch.Tensor): labels (targets) with shape torch.Size([n_samples, 2])\n model (torch nn.Module): the neural network\n loss_fun (function): loss function\n optimizer(function): optimizer\n n_epochs (int): number of training epochs\n\n Returns:\n list: record (evolution) of losses\n list: record (evolution) of value of the first parameter\n list: record (evolution) of gradient of the first parameter\n \"\"\"\n loss_record = [] # keeping recods of loss\n par_values = [] # keeping recods of first parameter\n par_grads = [] # keeping recods of gradient of first parameter\n\n # we use `tqdm` methods for progress bar\n epoch_range = trange(n_epochs, desc='loss: ', leave=True)\n for i in epoch_range:\n\n if loss_record:\n epoch_range.set_description(\"loss: {:.4f}\".format(loss_record[-1]))\n epoch_range.refresh() # to show immediately the update\n time.sleep(0.01)\n\n #################################################\n ## Implement the missing parts of the training loop\n # Complete the function and remove or comment the line below\n raise NotImplementedError(\"Training setup `train`\")\n #################################################\n ... # Initialize gradients to 0\n predictions = ... # Compute model prediction (output)\n loss = ... # Compute the loss\n ... # Compute gradients (backward pass)\n ... 
# update parameters (optimizer takes a step)\n\n loss_record.append(loss.item())\n par_values.append(next(model.parameters())[0][0].item())\n par_grads.append(next(model.parameters()).grad[0][0].item())\n\n return loss_record, par_values, par_grads\n\n# # Uncomment to run\n# epochs = 5000\n# losses, values, gradients = train(X, y,\n# naive_model,\n# cross_entropy_loss,\n# sgd_optimizer,\n# epochs)\n\n# ex3_plot(epochs, losses, values, gradients)", "_____no_output_____" ] ], [ [ "[*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W1D2_LinearDeepLearning/solutions/W1D2_Tutorial1_Solution_364cd4e2.py)\n\n*Example output:*\n\n<img alt='Solution hint' align='left' width=704 height=488 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W1D2_LinearDeepLearning/static/W1D2_Tutorial1_Solution_364cd4e2_2.png>\n\n", "_____no_output_____" ] ], [ [ "#@title Video 3.2: Wrap-up\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"zFmWs6doqhM\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtu.be/\" + video.id)\nvideo", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06fe4bf87a59f3e15ec9bf10405a0bbb73368f4
16,607
ipynb
Jupyter Notebook
DataWrangling/ClassifyingTimeStamps.ipynb
diliprk/SmartCityVisualization
618cd433c2f6bb55042c643ccaef12b5814ccb77
[ "MIT" ]
null
null
null
DataWrangling/ClassifyingTimeStamps.ipynb
diliprk/SmartCityVisualization
618cd433c2f6bb55042c643ccaef12b5814ccb77
[ "MIT" ]
null
null
null
DataWrangling/ClassifyingTimeStamps.ipynb
diliprk/SmartCityVisualization
618cd433c2f6bb55042c643ccaef12b5814ccb77
[ "MIT" ]
null
null
null
31.632381
274
0.425724
[ [ [ "## Classifying Hourly Timestamps into Day or Night", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport warnings\nfrom df2gspread import df2gspread as d2g\nwarnings.simplefilter('ignore') ", "_____no_output_____" ], [ "## Import Raw TTN Data from Google SpreadSheet\nurl = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRlXVQ6c3fKWvtQlFRSRUs5TI3soU7EghlypcptOM8paKXcUH8HjYv90VoJBncuEKYIZGLq477xE58C/pub?gid=0&single=true&output=csv'\ndf_hourly = pd.read_csv(url,parse_dates = ['time'],infer_datetime_format = True,usecols = [0,3])\ndf_hourly.head()", "_____no_output_____" ], [ "## Cleaning and re-organizing the DataFrame\ndf_hourly.rename(columns={'time': 'TimeStamps'}, inplace=True)\ndf_hourly.rename(columns={'CarCount': 'VehicleCountperHour'}, inplace=True)\n\n## Strip the Microseconds from the time column\ndf_hourly['TimeStamps'] = df_hourly['TimeStamps'].values.astype('datetime64[s]')\n\n## Reorder the dataframe\ndf_hourly = df_hourly.reindex(['TimeStamps','VehicleCountperHour'], axis=1)\ndf_hourly.tail()", "_____no_output_____" ] ], [ [ "### Importing SunTime Chart for adding Day or Night Classification\n Here we add Day or Night classification to Hourly Vehicle Count Data using sunrise and sunset times dataframe of Saarbrucken. These times were acquired from [timeanddate.com](https://www.timeanddate.com/sun/germany/saarbrucken) and the dataframe was manually created.", "_____no_output_____" ] ], [ [ "# Reading SunTime Chart of Saarbrücken\nurl = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRJCZQkgUHTCcx-wvJOog87qq4EFiQ1W6T4akxLSpiqCb3KjYDf_43coltDGG0YcjjsDTxjeXE-O_NH/pub?gid=0&single=true&output=csv'\ndf_suntimes = pd.read_csv(url)\n# Modified Sun Timings DataFrame\ndf_suntimes_mod = pd.DataFrame()\ndf_suntimes_mod['SunriseTimeStamp']= pd.to_datetime(df_suntimes['Date'] + ' ' + df_suntimes['Sunrise'])\ndf_suntimes_mod['SunsetTimeStamp']= pd.to_datetime(df_suntimes['Date'] + ' ' + df_suntimes['Sunset'])\n# Querying values in the dataframe to selected dates\nstart_time = '2018-02-21 07:25:00'\ndf_suntimes_mod = df_suntimes_mod.loc[df_suntimes_mod.SunriseTimeStamp >= start_time,:]\nend_time = '2018-02-25 18:15:00'\ndf_suntimes_mod = df_suntimes_mod.loc[df_suntimes_mod.SunriseTimeStamp <= end_time,:]\ndf_suntimes_mod = df_suntimes_mod.reset_index()\ndf_suntimes_mod = df_suntimes_mod.drop(['index'], 1)\ndf_suntimes_mod", "_____no_output_____" ], [ "## Creating a new Dataframe from original dataframe to classify Hourly TimeStamps as 'Day' or 'Night':\ndf_dn = df_hourly\n\n## Set Everything to Day First\ndf_dn['DayorNight'] = 'Day'\n\n## Manually fixing first day's night timestamps to 'Night'\nnight_index = (df_dn.loc[df_dn.TimeStamps <= (df_suntimes_mod['SunriseTimeStamp'][0]),:]).index\n\n# Select Night Time Traffic Only from Sunset today to next day Sunrise\nn_days = len(df_suntimes_mod['SunriseTimeStamp'])\nfor i,j in zip(range(n_days),range(1,n_days)): \n start_time = df_suntimes_mod['SunsetTimeStamp'][i]\n end_time = df_suntimes_mod['SunriseTimeStamp'][j]\n data = df_dn[(df_dn['TimeStamps'] > start_time) & (df_dn['TimeStamps'] < end_time)]\n night_index = night_index.append(data.index)\n \n## Set all the Night TimeStamps to 'Night\ndf_dn['DayorNight'].iloc[night_index] = 'Night'\ndf_dn.head(15)", "_____no_output_____" ], [ "# Writing the file as csv\ndf_dn.to_csv('data/DayorNight.csv', date_format=\"%d/%m/%Y %H:%M:%S\",index=False)", "_____no_output_____" ], [ "## Write pandas dataframe to a Google Sheet Using df2spread:\n\n# Insert ID of Google 
Spreadsheet\nspreadsheet = '1LTXIPNb7MX0qEOU_DbBKC-OwE080kyRvt-i_ejFM-Yg'\n\n# Insert Sheet Name\nwks_name = 'CleanedData'\n\nd2g.upload(df_dn,spreadsheet,wks_name,col_names=True,clean=True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d06fedac235d0cde30dae4eb132ea937811d1cce
8,191
ipynb
Jupyter Notebook
4_6_Matrices_and_Transformation_of_State/2_matrices_in_python.ipynb
mustafa1adel/CVND_Localization_Exercises
bff1f6879458b78d89a5fdce79a6e7da434f3a2a
[ "MIT" ]
121
2018-06-05T02:46:52.000Z
2022-03-23T10:11:21.000Z
4_6_Matrices_and_Transformation_of_State/2_matrices_in_python.ipynb
mustafa1adel/CVND_Localization_Exercises
bff1f6879458b78d89a5fdce79a6e7da434f3a2a
[ "MIT" ]
4
2021-03-19T02:34:33.000Z
2022-03-11T23:56:20.000Z
4_6_Matrices_and_Transformation_of_State/2_matrices_in_python.ipynb
mustafa1adel/CVND_Localization_Exercises
bff1f6879458b78d89a5fdce79a6e7da434f3a2a
[ "MIT" ]
133
2018-06-01T02:38:57.000Z
2021-12-27T18:53:58.000Z
25.596875
257
0.503357
[ [ [ "## Coding Matrices\n\nHere are a few exercises to get you started with coding matrices. The exercises start off with vectors and then get more challenging\n\n### Vectors", "_____no_output_____" ] ], [ [ "### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable v\nv = []", "_____no_output_____" ] ], [ [ "The v variable contains a Python list. This list could also be thought of as a 1x5 matrix with 1 row and 5 columns. How would you represent this list as a matrix?", "_____no_output_____" ] ], [ [ "### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable mv\n### The difference between a vector and a matrix in Python is that\n### a matrix is a list of lists.\n\n### Hint: See the last quiz on the previous page\n\nmv = [[]]", "_____no_output_____" ] ], [ [ "How would you represent this vector in its vertical form with 5 rows and 1 column? When defining matrices in Python, each row is a list. So in this case, you have 5 rows and thus will need 5 lists.\n\nAs an example, this is what the vector $$<5, 7>$$ would look like as a 1x2 matrix in Python: \n```python\nmatrix1by2 = [\n [5, 7]\n]\n```\n\nAnd here is what the same vector would look like as a 2x1 matrix:\n```python\nmatrix2by1 = [\n [5], \n [7]\n]\n```", "_____no_output_____" ] ], [ [ "### TODO: Assign the vector <5, 10, 2, 6, 1> to the variable vT\n### vT is a 5x1 matrix\nvT = []", "_____no_output_____" ] ], [ [ "### Assigning Matrices to Variables", "_____no_output_____" ] ], [ [ "### TODO: Assign the following matrix to the variable m\n### 8 7 1 2 3\n### 1 5 2 9 0\n### 8 2 2 4 1\n\nm = [[]]", "_____no_output_____" ] ], [ [ "### Accessing Matrix Values", "_____no_output_____" ] ], [ [ "### TODO: In matrix m, change the value \n### in the second row last column from 0 to 5\n### Hint: You do not need to rewrite the entire matrix\n", "_____no_output_____" ] ], [ [ "### Looping through Matrices to do Math\n\nCoding mathematical operations with matrices can be tricky. Because matrices are lists of lists, you will need to use a for loop inside another for loop. The outside for loop iterates over the rows and the inside for loop iterates over the columns.\n\n\nHere is some pseudo code\n```python\nfor i in number of rows:\n for j in number of columns:\n mymatrix[i][j]\n```\n\nTo figure out how many times to loop over the matrix, you need to know the number of rows and number of columns. \n\n\nIf you have a variable with a matrix in it, how could you figure out the number of rows? How could you figure out the number of columns? The [len](https://docs.python.org/2/library/functions.html#len) function in Python might be helpful.", "_____no_output_____" ], [ "### Scalar Multiplication", "_____no_output_____" ] ], [ [ "### TODO: Use for loops to multiply each matrix element by 5\n### Store the answer in the r variable. This is called scalar\n### multiplication\n###\n### HINT: First write a for loop that iterates through the rows\n### one row at a time\n###\n### Then write another for loop within the for loop that\n### iterates through the columns\n###\n### If you used the variable i to represent rows and j\n### to represent columns, then m[i][j] would give you\n### access to each element in the matrix\n###\n### Because r is an empty list, you cannot directly assign\n### a value like r[i][j] = m[i][j]. 
You might have to\n### work on one row at a time and then use r.append(row).\nr = []", "_____no_output_____" ] ], [ [ "### Printing Out a Matrix", "_____no_output_____" ] ], [ [ "### TODO: Write a function called matrix_print() \n### that prints out a matrix in\n### a way that is easy to read.\n### Each element in a row should be separated by a tab\n### And each row should have its own line\n### You can test out your results with the m matrix\n\n### HINT: You can use a for loop within a for loop\n### In Python, the print() function will be useful\n### print(5, '\\t', end = '') will print out the integer 5, \n### then add a tab after the 5. The end = '' makes sure that\n### the print function does not print out a new line if you do\n### not want a new line.\n\n### Your output should look like this\n### 8    7    1    2    3 \n### 1    5    2    9    5 \n### 8    2    2    4    1\n\ndef matrix_print(matrix):\n    return\n\nm = [\n    [8, 7, 1, 2, 3],\n    [1, 5, 2, 9, 5],\n    [8, 2, 2, 4, 1]\n]\n\nmatrix_print(m)", "_____no_output_____" ] ], [ [ "### Test Your Results", "_____no_output_____" ] ], [ [ "### You can run these tests to see if you have the expected\n### results. If everything is correct, this cell has no output\n\nassert v == [5, 10, 2, 6, 1]\nassert mv == [\n    [5, 10, 2, 6, 1]\n]\n\nassert vT == [\n    [5], \n    [10], \n    [2], \n    [6], \n    [1]]\n\nassert m == [\n    [8, 7, 1, 2, 3], \n    [1, 5, 2, 9, 5], \n    [8, 2, 2, 4, 1]\n]\n\nassert r == [\n    [40, 35, 5, 10, 15], \n    [5, 25, 10, 45, 25], \n    [40, 10, 10, 20, 5]\n]", "_____no_output_____" ] ], [ [ "### Print Out Your Results", "_____no_output_____" ] ], [ [ "### Run this cell to print out your answers\nprint(v)\nprint(mv)\nprint(vT)\nprint(m)\nprint(r)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d06ff474f601c7f2bc493d7669de1fde9b384f79
16,889
ipynb
Jupyter Notebook
Assignments1/HW1.ipynb
resourceful/lecture_db
83d9911dbc0a2647258d5c02ecb8152edbd04522
[ "MIT" ]
1
2022-03-30T12:53:49.000Z
2022-03-30T12:53:49.000Z
Assignments1/HW1.ipynb
resourceful/lecture_db
83d9911dbc0a2647258d5c02ecb8152edbd04522
[ "MIT" ]
null
null
null
Assignments1/HW1.ipynb
resourceful/lecture_db
83d9911dbc0a2647258d5c02ecb8152edbd04522
[ "MIT" ]
3
2018-09-06T07:18:33.000Z
2022-03-30T12:53:48.000Z
28.722789
413
0.462017
[ [ [ "%load_ext sql\n%sql sqlite:///flights.db", "_____no_output_____" ] ], [ [ "숙제 1\n=======\n\n### 일러두기 :\n\n**_꼼꼼하게 읽어보기 바랍니다_**\n\n\n* `prettytable` 모듈을 설치해야 스크립트를 실행할 수 있음. (설치 방법: `pip install --user prettytable`)\n * `flights.db` 파일이 숙제용 Jupyter notebook과 같은 디렉터리에 있어야 함 (없다면 [여기서](http://open.gnu.ac.kr/lecslides/2018-2-DB/Assignments1/flights.db.zip) 다운 받기) 압축을 해제해야 함. `flights.db.zip`이 있는 곳에서 `unzip flights.db.zip`으로 압축을 해제하면 됨\n* 데이터베이스 `flights.db`를 다운 받은 후 가장 위의 셀의 명령 실행하기\n* 테스트, 디버그, 탐색하기 등을 위해서 새로운 셀을 생성하는 것을 적극 권장함\n* 셀을 실행시키고 셀 왼 편에 `In [*]:` 이 보인다면 _실행 중_ 을 의미함\n * **만약 셀이 오랫 동안 결과를 내 놓지 않고 멈춘 것 같다면: SQL 에 다시 연결하도록 python kernel을 다시 시작해야 함**\n * 커널을 다시 시작하는 방법: \"Kernel >> Restart & Clear Output\", 그리고 위의 셀부터 아래로 하나씩 실행 함 \n * 다른 버전의 데이터베이스를 로드하기 위해서도 마찬가지를 새로운 연결을 만들어야 함\n* 기억하기:\n * `%sql [SQL 질의문;]` 은 _한 줄짜리_ SQL 질의문에 사용\n * `%%sql \n [SQL 질의문;]` 은 _여러 줄짜리_ SQL 질의문에 사용\n* `submit.py` 을 실행하면 질의문을 처리하고 출력함 \n* 실행의 결과는 `correct_output.txt` 파일에 나와 있음.\n * 실행 결과의 비교를 원한다면 `python sanity_check.py` 을 실행하거나, 다음의 명령을 실행하여 결과를 얻을 수 있음 `python submit.py > my_output; diff my_output correct_output.txt` 터미널에서 입력해야 함\n * **숙제로 작성한 `submit.py` 파일은 아래의 형식을 절대적으로 따라야 함.** 형식이라 함은:\n * 컬럼의 이름은 `correct_output.txt` 에 나와 있는 이름과 **똑같은 이름**이어야 함\n * 컬럼의 순서도 `correct_output.txt` 에 나와 있는 순서와 **똑같은 순서**이어야 함 \n\n### 제출 방법:\n * iPython notebook 자체를 제출하지 말 것\n * 대신에, `submit.py` 에 작성한 번호에 맞게 질의문을 복사 붙여 넣기 할 것\n * `%sql` 또는 `%%sql` 명령은 SQL 문에 포함시키지 말 것\n * 제출한 질의문을 똑같은 스키마에서 임의로 선택된 값에 대상으로 실행시켜 평가를 할 것임. 그렇기 때문에 해답과 똑같은 결과가 나오도록 상수등을 써서 조작하지 말것\n * **`submission_instructions.txt` 에 설명된 방법으로 해답을 제출할 것**\n\n_즐겁게 시작해봅시다!_", "_____no_output_____" ], [ "개요: 여행 일정 지연\n------------------------\n\n여행 일정이 지연 되는 것만큼 짜증 나는 일은 없습니다. 그렇지 않나요?\n\n여행 일정이 지연되지 않도록 여러가지 새로운 방법을 찾아봅니다. 최근에 찾은 데이터가 왜 일정이 지연되는지 이유와 무엇을 포기할지를 잘 설명해주고 있습니다. \n\nSQL을 사용해서 한 번 그 이유들을 알아봅시다.", "_____no_output_____" ], [ "----", "_____no_output_____" ], [ "이 과제에서는 2017년 7월의 여객기의 지연 정보의 정보를 담고 있습니다. 데이터베이스의 기본 릴레이션에 대한 정보를 알아 봅시다.", "_____no_output_____" ] ], [ [ "%%sql\nSELECT * \nFROM flight_delays \nLIMIT 1;", " * sqlite:///flights.db\nDone.\n" ] ], [ [ "굉장히 많은 컬럼들이 있는 것을 알 수 있는데, 그러면 몇 줄이나 될까요?", "_____no_output_____" ] ], [ [ "%%sql\nSELECT COUNT(*) AS num_rows\nFROM flight_delays", " * sqlite:///flights.db\nDone.\n" ] ], [ [ "데이터의 양이 상당합니다! 손과 머리로만 해답을 찾지 못할 것 같군요. \n\n데이터베이스에 더 많은 데이터를 넣을 필요는 없겠군요. 컬럼들이 어떤 의미를 갖는지 알아 보려면 [이 링크](https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236)를 따라가기 바랍니다. \n\n몇 개의 추가적인 테이블들을 같이 포함해 놓았습니다. 이 테이블들을 사용하면 `airline_id`, `airport_id`, 그리고 `day_of_week` 을 사람이 읽기 편한 정보로 변환할 수 있습니다. \n\n아래의 셀을 이용하여 `airlines`과 `weekdays` 의 정보를 확인해보기 바랍니다:", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "좋습니다. 이제 시작해봅시다.", "_____no_output_____" ], [ "# SQL 질의문", "_____no_output_____" ], [ "질의문 1: 항공편의 평균 지연 시간은? \n------------------------\n데이터에 대한 이해를 돕기 위해, 간단한 질의문을 작성해봅시다.\n\n아래의 셀에 2017년 7월동안 모든 항공편의 평균 지연시간을 구하는 질의문을 작성해봅시다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 2: 가장 긴 지연 시간은?\n------------------------\n평균은 그리 크지 않군요. 하지만 _최장_ 지연 시간은 어떻게 되나요?\n\n아래의 셀에 2017년 7월동안 가장 늦게 도착한 시간을 찾는 질의문을 작성해봅시다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 3: 어떤 항공편을 피하는 것이 정신 건강에 좋을까요?\n------------------------\n\n어떤 항공편이 가장 늦었나요?\n\n\n아래의 셀에 2017년 7월에 가장 늦게 도착한 항공사(`carrier`)와 항공편 명, 출발 도시 명, 도착 도시 명, 항공 일정을 출력하는 질의문을 작성 바랍니다. 
앞에서 얻은 정보를 질의문에 삽입해서 계산하지 말고 중첩 질의문을 쓰기 바랍니다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 4: 어떤 요일이 여행하기 가장 안좋은 날인가요?\n------------------------\n\n학기가 시작되었으니 먼 곳으로 여행을 할 수는 없지만, 출장은 가야하겠지요. 비행기를 타기 가장 안좋은 날은 무슨 요일인가요?\n\n아래의 셀에 요일마다 평균 지연 시간이 어떻게 되는지 내림차순으로 정렬하여 결과를 출력하도록 질의문을 작성하기 바랍니다. 출력 결과의 스키마는 (`weekday_name`, `average_delay`)의 형태를 갖고 있어야 합니다.\n\n**Note: 요일의 ID를 그대로 출력하지 말기 바랍니다.** (Hint: `weekdays` 테이블을 사용하여 join하여 요일의 이름을 출력하도록 합시다.)", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 5: SFO에서 출발하는 항공사 중 지연 시간이 가장 긴 항공사는 어디입니까?\n------------------------\n\n어떤 요일을 피해야 할지 알았으니 SFO에서 출발하는 항공사 중 한 곳을 정해야 합니다. 어디로 갈지는 말하지 않았으니, SFO에서 출발하는 모든 항공사의 항공편들의 평균 지연시간을 구해 봅시다.\n\n아래의 셀에 2017년 7월에 SFO에서 출발한 각 항공사 별로 모든 항공편에 대해 평균 지연시간을 내림차순으로 구하는 질의문을 작성해봅시다.\n\n**Note: 항공사 ID를 그대로 출력하지 맙시다.** (Hint: 중첩 질의문으로 `airlines` 테이블을 join 하여 항공사 이름을 출력합시다.)", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 6: 항공사들의 지연 비율을 알아 봅시다\n------------------------\n지연되는 항공편이 많습니다. 어떤 항공사가 지연시간이 많은 알아봅시다.\n\n아래의 셀에 평균 10분 이상 지연되는 항공편이 있었던 항공사들의 비율을 구해봅시다. 전체 항공사의 수를 세서 질의문에 포함시키지 말기 바랍니다. 그리고 질의문에는 최소한 하나 이상의 `HAVING` 절을 사용합시다.\n\nNote: sqlite 의 `COUNT(*)`는 정수형을 리턴하기 때문에 실수형으로 결과를 출력하려면 최소한 한 번 이상 `SELECT CAST (COUNT(*) AS float)` 또는 `COUNT(*)*1.0` 절을 써야 합니다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 7: 출발 지연이 도착 지연에 어떤 영향을 미치나요?\n------------------------\n\n비행기가 지연 출발하면 도착 시간에 얼마나 영향을 주는지 알고 싶습니다.\n\n\n[샘플 공분산](https://en.wikipedia.org/wiki/Covariance) 은 두 변수 간의 분산량을 측정하여 상관관계가 있는지 알려주는 통계치입니다. 공분산이 클수록 상관관계가 높고 음수인 경우 역상관관계가 있습니다. 샘플 공분산의 계산 식은 다음과 같습니다:\n$$\nCov(X,Y) = \\frac{1}{n-1} \\sum_{i=1}^n (x_i-\\hat{x})(y_i-\\hat{y})\n$$\n이 때, $x_i$ 는 $X$의 $i$번째 값이고, $y_i$는 $Y$의 $i$번째 값입니다. $X$ 와 $Y$의 평균은 $\\bar{x}$ 과 $\\bar{y}$으로 표현 하였습니다.\n\n아래의 셀에 도착 지연과 출발 지연 시간의 공분산을 구하는 하나의 질의문을 작성 해보기 바랍니다.\n\n*Note: [Pearson correlation coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) 으로 구할 수도 있습니다. 그 결과는 정규화 되어 1 부터 -1의 값으로 상관관계를 알려 줍니다. 하지만, SQLite는 루트 계산 함수가 들어 있지 않기 때문에 이 계산식을 쓸 수 가 없습니다. 다른 보편적인 데이터베이스(PostgreSQL와 MySQL)에는 루트 계산 함수가 구현되어 있습니다.*", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 8: 한 주가 엉망이었습니다...\n------------------------\n\n7월 어떤 항공사의 마지막 한 주(24일 이후)의 평균 지연 시간이 그 이전 주(24일 이전)들의 평균 지연 시간보다 절대적으로 길었나요?\n\n아래의 셀에 1일부터 23일까지의 평균 지연 시간 대비 24일 부터 31일 사이의 평균 지연 시간이 절대적으로 길었던 항공사의 이름을 출력하는 질의문을 작성하기 바랍니다.\n\nNote: [sqlite에서 날짜 다루기](http://www.sqlite.org/lang_datefunc.html)에 따라 `day_of_month`을 사용하여 질의문을 작성하는 것이 편할 것입니다.\n\nNote 2: 아마 과제 중 가장 어려운 질의문이 될 수도 있는데, 작은 단위로 질의문을 작성하여 한 부분씩 해결하고, 그 질의문을 합쳐서 최종 질의문을 작성하는 것이 좋습니다.\n\nHint: 두 개의 하위 질의문으로 계산할 수 있습니다. 하나의 질의문이 24일 이후의 평균 도착 시간을 계산하고, 다른 질의문이 24일 이전의 도착 시간을 계산하고, 두 질의문을 join하여 지연 시간의 차를 계산하면 됩니다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 9: 진보적인 그리고 혁명적인\n------------------------\n포트랜드 (PDX)와 유진 (EUG)로 가기를 원하지만, 한 번에 가기가 쉽지 않군요. 우수 고객 마일리지를 채우기 위해 같은 항공편으로 각 도시로 이동하기를 원합니다. SFO -> PDF와 SFO -> EUG 로 가는 같은 항공사가 있는지 알고 싶습니다.\n\n아래의 셀에 2017년 7월에 SFO -> PDX 과 SFO -> EUG 을 출항한 항공사의 유일한 이름(중복 없음 ID 가 아님)을 출력하는 하나의 SQL 질의문을 작성하기 바랍니다.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "질의문 10: 피로도와 등거리 간의 결정\n------------------------\n\n시카고에서 캘리포니아로 이동하려고 합니다. Midway (MDW) 또는 O'Hare (ORD) 에서 샌프란시스코 (SFO), 산호세 (SJC), 오크랜드 (OAK)로 도착하면 좋겟습니다. 
만약 이 번 달이 7월이라고 하면 시카고에서 현지 시간 14시에 출발하는 경로 중 지연 시간이 가장 짧은 경로가 어떤 것입니까?\n\n아래의 셀에 MDW 또는 ORD 에서 현지 시간 14시(`crs_dep_time`)에 출발하고 SFO, SJC, 또는 OAK에 도착하는 항공편들의 평균 지연 시간을 구하는 하나의 질의문을 작성하기 바랍니다. 출발과 도착 공항을 Group by로 묶고 지연 시간의 내림차순으로 출력하기 바랍니다.\n\nNote: `crs_dep_time` 필드는 정수 형을 갖고 있으며 hhmm (e.g. 4:15pm 은 1615 임) 형을 따름.", "_____no_output_____" ] ], [ [ "%%sql\n", "_____no_output_____" ] ], [ [ "## 다 끝났습니다. 이제 제출합시다.\n * 제출하는 방법은 가장 위의 설명문을 참고하기 바랍니다.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d06ff679fd22bbe132845ba9c3b90ba71cfe18a9
2,871
ipynb
Jupyter Notebook
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
harrywang/PyTorch-Tutorial
ad68467770ef8c3a0cd88f16d604334af299eca1
[ "MIT" ]
null
null
null
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
harrywang/PyTorch-Tutorial
ad68467770ef8c3a0cd88f16d604334af299eca1
[ "MIT" ]
null
null
null
tutorial-contents-notebooks/303_build_nn_quickly.ipynb
harrywang/PyTorch-Tutorial
ad68467770ef8c3a0cd88f16d604334af299eca1
[ "MIT" ]
null
null
null
23.341463
87
0.518635
[ [ [ "# 303 Build NN Quickly\n\nView more, visit my tutorial page: https://morvanzhou.github.io/tutorials/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn.functional as F", "_____no_output_____" ], [ "# replace following class code with an easy sequential network\nclass Net(torch.nn.Module):\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer\n self.predict = torch.nn.Linear(n_hidden, n_output) # output layer\n\n def forward(self, x):\n x = F.relu(self.hidden(x)) # activation function for hidden layer\n x = self.predict(x) # linear output\n return x", "_____no_output_____" ], [ "net1 = Net(1, 10, 1)", "_____no_output_____" ], [ "# easy and fast way to build your network\nnet2 = torch.nn.Sequential(\n torch.nn.Linear(1, 10),\n torch.nn.ReLU(),\n torch.nn.Linear(10, 1)\n)\n", "_____no_output_____" ], [ "print(net1) # net1 architecture\nprint(net2) # net2 architecture", "Net(\n (hidden): Linear(in_features=1, out_features=10, bias=True)\n (predict): Linear(in_features=10, out_features=1, bias=True)\n)\nSequential(\n (0): Linear(in_features=1, out_features=10, bias=True)\n (1): ReLU()\n (2): Linear(in_features=10, out_features=1, bias=True)\n)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d07001fd9722bf73a21ef6542e18561a127c7abc
28,655
ipynb
Jupyter Notebook
Tutorial-ScalarWaveCurvilinear.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
null
null
null
Tutorial-ScalarWaveCurvilinear.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
null
null
null
Tutorial-ScalarWaveCurvilinear.ipynb
Steve-Hawk/nrpytutorial
42d7450dba8bf43aa9c2d8f38f85f18803de69b7
[ "BSD-2-Clause" ]
1
2021-03-02T12:51:56.000Z
2021-03-02T12:51:56.000Z
51.630631
581
0.591031
[ [ [ "<script async src=\"https://www.googletagmanager.com/gtag/js?id=UA-59152712-8\"></script>\n<script>\n window.dataLayer = window.dataLayer || [];\n function gtag(){dataLayer.push(arguments);}\n gtag('js', new Date());\n\n gtag('config', 'UA-59152712-8');\n</script>\n\n# Generating C code for the right-hand-side of the scalar wave equation, in ***curvilinear*** coordinates, using a reference metric formalism\n\n## Author: Zach Etienne\n### Formatting improvements courtesy Brandon Clark\n\n[comment]: <> (Abstract: TODO)\n\n**Notebook Status:** <font color='green'><b> Validated </b></font>\n\n**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). In addition, all expressions have been validated against a trusted code (the [original SENR/NRPy+ code](https://bitbucket.org/zach_etienne/nrpy)).\n\n### NRPy+ Source Code for this module: [ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py)\n\n[comment]: <> (Introduction: TODO)", "_____no_output_____" ], [ "<a id='toc'></a>\n\n# Table of Contents\n$$\\label{toc}$$\n\nThis notebook is organized as follows\n\n0. [Preliminaries](#prelim): Reference Metrics and Picking Best Coordinate System to Solve the PDE\n1. [Example](#example): The scalar wave equation in spherical coordinates\n1. [Step 1](#contracted_christoffel): Contracted Christoffel symbols $\\hat{\\Gamma}^i = \\hat{g}^{ij}\\hat{\\Gamma}^k_{ij}$ in spherical coordinates, using NRPy+\n1. [Step 2](#rhs_scalarwave_spherical): The right-hand side of the scalar wave equation in spherical coordinates, using NRPy+\n1. [Step 3](#code_validation): Code Validation against `ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs` NRPy+ Module\n1. [Step 5](#latex_pdf_output): Output this notebook to $\\LaTeX$-formatted PDF file", "_____no_output_____" ], [ "<a id='prelim'></a>\n\n# Preliminaries: Reference Metrics and Picking Best Coordinate System to Solve the PDE \\[Back to [top](#toc)\\]\n$$\\label{prelim}$$\n\nRecall from [NRPy+ tutorial notebook on the Cartesian scalar wave equation](Tutorial-ScalarWave.ipynb), the scalar wave equation in 3D Cartesian coordinates is given by\n\n$$\\partial_t^2 u = c^2 \\nabla^2 u \\text{,}$$\nwhere $u$ (the amplitude of the wave) is a function of time and Cartesian coordinates in space: $u = u(t,x,y,z)$ (spatial dimension as-yet unspecified), and subject to some initial condition\n$$u(0,x,y,z) = f(x,y,z),$$\n\nwith suitable (sometimes approximate) spatial boundary conditions.\n\nTo simplify this equation, let's first choose units such that $c=1$. Alternative wave speeds can be constructed\nby simply rescaling the time coordinate, with the net effect being that the time $t$ is replaced with time in dimensions of space; i.e., $t\\to c t$:\n\n$$\\partial_t^2 u = \\nabla^2 u.$$\n\nAs we learned in the [NRPy+ tutorial notebook on reference metrics](Tutorial-Reference_Metric.ipynb), reference metrics are a means to pick the best coordinate system for the PDE we wish to solve. However, to take advantage of reference metrics requires first that we generalize the PDE. 
In the case of the scalar wave equation, this involves first rewriting in [Einstein notation](https://en.wikipedia.org/wiki/Einstein_notation) (with implied summation over repeated indices) via\n\n$$(-\\partial_t^2 + \\nabla^2) u = \\eta^{\\mu\\nu} u_{,\\ \\mu\\nu} = 0,$$\n\nwhere $u_{,\\mu\\nu} = \\partial_\\mu \\partial_\\nu u$, and $\\eta^{\\mu\\nu}$ is the contravariant flat-space metric tensor with components $\\text{diag}(-1,1,1,1)$.\n\nNext we apply the \"comma-goes-to-semicolon rule\" and replace $\\eta^{\\mu\\nu}$ with $\\hat{g}^{\\mu\\nu}$ to generalize the scalar wave equation to an arbitrary reference metric $\\hat{g}^{\\mu\\nu}$:\n\n$$\\hat{g}^{\\mu\\nu} u_{;\\ \\mu\\nu} = \\hat{\\nabla}_{\\mu} \\hat{\\nabla}_{\\nu} u = 0,$$\n\nwhere $\\hat{\\nabla}_{\\mu}$ denotes the [covariant derivative](https://en.wikipedia.org/wiki/Covariant_derivative) with respect to the reference metric basis vectors $\\hat{x}^{\\mu}$, and $\\hat{g}^{\\mu \\nu} \\hat{\\nabla}_{\\mu} \\hat{\\nabla}_{\\nu} u$ is the covariant\n[D'Alembertian](https://en.wikipedia.org/wiki/D%27Alembert_operator) of $u$.\n\nFor example, suppose we wish to model a short-wavelength wave that is nearly spherical. In this case, if we were to solve the wave equation PDE in Cartesian coordinates, we would in principle need high resolution in all three cardinal directions. If instead we chose spherical coordinates centered at the center of the wave, we might need high resolution only in the radial direction, with only a few points required in the angular directions. Thus choosing spherical coordinates would be far more computationally efficient than modeling the wave in Cartesian coordinates.\n\nLet's now expand the covariant scalar wave equation in arbitrary coordinates. Since the covariant derivative of a scalar is equivalent to its partial derivative, we have\n\\begin{align}\n0 &= \\hat{g}^{\\mu \\nu} \\hat{\\nabla}_{\\mu} \\hat{\\nabla}_{\\nu} u \\\\\n&= \\hat{g}^{\\mu \\nu} \\hat{\\nabla}_{\\mu} \\partial_{\\nu} u.\n\\end{align}\n\n$\\partial_{\\nu} u$ transforms as a one-form under covariant differentiation, so we have\n$$\\hat{\\nabla}_{\\mu} \\partial_{\\nu} u = \\partial_{\\mu} \\partial_{\\nu} u - \\hat{\\Gamma}^\\tau_{\\mu\\nu} \\partial_\\tau u,$$\nwhere \n\n$$\\hat{\\Gamma}^\\tau_{\\mu\\nu} = \\frac{1}{2} \\hat{g}^{\\tau\\alpha} \\left(\\partial_\\nu \\hat{g}_{\\alpha\\mu} + \\partial_\\mu \\hat{g}_{\\alpha\\nu} - \\partial_\\alpha \\hat{g}_{\\mu\\nu} \\right)$$\nare the [Christoffel symbols](https://en.wikipedia.org/wiki/Christoffel_symbols) associated with the reference metric $\\hat{g}_{\\mu\\nu}$.\n\nThen the scalar wave equation is written:\n$$0 = \\hat{g}^{\\mu \\nu} \\left( \\partial_{\\mu} \\partial_{\\nu} u - \\hat{\\Gamma}^\\tau_{\\mu\\nu} \\partial_\\tau u\\right).$$\n\nDefine the contracted Christoffel symbols:\n$$\\hat{\\Gamma}^\\tau = \\hat{g}^{\\mu\\nu} \\hat{\\Gamma}^\\tau_{\\mu\\nu}.$$\n\nThen the scalar wave equation is given by\n$$0 = \\hat{g}^{\\mu \\nu} \\partial_{\\mu} \\partial_{\\nu} u - \\hat{\\Gamma}^\\tau \\partial_\\tau u.$$\n\nThe reference metrics we adopt satisfy\n$$\\hat{g}^{t \\nu} = -\\delta^{t \\nu},$$\nwhere $\\delta^{t \\nu}$ is the [Kronecker delta](https://en.wikipedia.org/wiki/Kronecker_delta). 
Therefore the scalar wave equation in curvilinear coordinates can be written\n\\begin{align}\n0 &= \\hat{g}^{\\mu \\nu} \\partial_{\\mu} \\partial_{\\nu} u - \\hat{\\Gamma}^\\tau \\partial_\\tau u \\\\\n&= -\\partial_t^2 u + \\hat{g}^{i j} \\partial_{i} \\partial_{j} u - \\hat{\\Gamma}^i \\partial_i u \\\\\n\\implies \\partial_t^2 u &= \\hat{g}^{i j} \\partial_{i} \\partial_{j} u - \\hat{\\Gamma}^i \\partial_i u,\n\\end{align}\nwhere repeated Latin indices denote implied summation over *spatial* components only. This module implements the bottom equation for arbitrary reference metrics satisfying $\\hat{g}^{t \\nu} = -\\delta^{t \\nu}$. To gain an appreciation for what NRPy+ accomplishes automatically, let's first work out the scalar wave equation in spherical coordinates by hand:", "_____no_output_____" ], [ "<a id='example'></a>\n\n# Example: The scalar wave equation in spherical coordinates \\[Back to [top](#toc)\\]\n$$\\label{example}$$\n\nFor example, the spherical reference metric is written\n\n$$\\hat{g}_{\\mu\\nu} = \\begin{pmatrix}\n-1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & r^2 & 0 \\\\\n 0 & 0 & 0 & r^2 \\sin^2 \\theta \\\\\n\\end{pmatrix}.\n$$\n\nSince the inverse of a diagonal matrix is simply the inverse of the diagonal elements, we can write \n$$\\hat{g}^{\\mu\\nu} = \\begin{pmatrix}\n-1 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 \\\\\n 0 & 0 & \\frac{1}{r^2} & 0 \\\\\n 0 & 0 & 0 & \\frac{1}{r^2 \\sin^2 \\theta} \\\\\n\\end{pmatrix}.$$\n\nThe scalar wave equation in these coordinates can thus be written\n\\begin{align}\n0 &= \\hat{g}^{\\mu \\nu} \\partial_{\\mu} \\partial_{\\nu} u - \\hat{\\Gamma}^\\tau \\partial_\\tau u \\\\\n&= \\hat{g}^{tt} \\partial_t^2 u + \\hat{g}^{rr} \\partial_r^2 u + \\hat{g}^{\\theta\\theta} \\partial_\\theta^2 u + \\hat{g}^{\\phi\\phi} \\partial_\\phi^2 u - \\hat{\\Gamma}^\\tau \\partial_\\tau u \\\\\n&= -\\partial_t^2 u + \\partial_r^2 u + \\frac{1}{r^2} \\partial_\\theta^2\nu + \\frac{1}{r^2 \\sin^2 \\theta} \\partial_\\phi^2 u - \\hat{\\Gamma}^\\tau \\partial_\\tau u\\\\\n\\implies \\partial_t^2 u &= \\partial_r^2 u + \\frac{1}{r^2} \\partial_\\theta^2\nu + \\frac{1}{r^2 \\sin^2 \\theta} \\partial_\\phi^2 u - \\hat{\\Gamma}^\\tau \\partial_\\tau u\n\\end{align}\n\nThe contracted Christoffel symbols \n$\\hat{\\Gamma}^\\tau$ can then be computed directly from the metric $\\hat{g}_{\\mu\\nu}$.\n\nIt can be shown (exercise to the reader) that the only nonzero\ncomponents of $\\hat{\\Gamma}^\\tau$ in static spherical polar coordinates are\ngiven by\n\\begin{align}\n\\hat{\\Gamma}^r &= -\\frac{2}{r} \\\\\n\\hat{\\Gamma}^\\theta &= -\\frac{\\cos\\theta}{r^2 \\sin\\theta}.\n\\end{align}\n\nThus we have found the Laplacian in spherical coordinates is simply:\n\n\\begin{align}\n\\nabla^2 u &= \n\\partial_r^2 u + \\frac{1}{r^2} \\partial_\\theta^2 u + \\frac{1}{r^2 \\sin^2 \\theta} \\partial_\\phi^2 u - \\hat{\\Gamma}^\\tau \\partial_\\tau u\\\\\n&= \\partial_r^2 u + \\frac{1}{r^2} \\partial_\\theta^2 u + \\frac{1}{r^2 \\sin^2 \\theta} \\partial_\\phi^2 u + \\frac{2}{r} \\partial_r u + \\frac{\\cos\\theta}{r^2 \\sin\\theta} \\partial_\\theta u\n\\end{align}\n(cf. 
http://mathworld.wolfram.com/SphericalCoordinates.html; though note that they defined the angle $\\phi$ as $\\theta$ and $\\theta$ as $\\phi$.)", "_____no_output_____" ], [ "<a id='contracted_christoffel'></a>\n\n# Step 1: Contracted Christoffel symbols $\\hat{\\Gamma}^i = \\hat{g}^{ij}\\hat{\\Gamma}^k_{ij}$ in spherical coordinates, using NRPy+ \\[Back to [top](#toc)\\]\n$$\\label{contracted_christoffel}$$\n\nLet's next use NRPy+ to derive the contracted Christoffel symbols\n$$\\hat{g}^{ij} \\hat{\\Gamma}^k_{ij}$$\nin spherical coordinates, where $i\\in\\{1,2,3\\}$ and $j\\in\\{1,2,3\\}$ are spatial indices.\n\nAs discussed in the [NRPy+ tutorial notebook on reference metrics](Tutorial-Reference_Metric.ipynb), several reference-metric-related quantities in spherical coordinates are computed in NRPy+ (provided the parameter **`reference_metric::CoordSystem`** is set to **`\"Spherical\"`**), including the inverse spatial spherical reference metric $\\hat{g}^{ij}$ and the Christoffel symbols from this reference metric $\\hat{\\Gamma}^{i}_{jk}$. ", "_____no_output_____" ] ], [ [ "import sympy as sp\nimport NRPy_param_funcs as par\nimport indexedexp as ixp\nimport reference_metric as rfm\n\n# reference_metric::CoordSystem can be set to Spherical, SinhSpherical, SinhSphericalv2, \n# Cylindrical, SinhCylindrical, SinhCylindricalv2, etc.\n# See reference_metric.py and NRPy+ tutorial notebook on \n# reference metrics for full list and description of how\n# to extend.\npar.set_parval_from_str(\"reference_metric::CoordSystem\",\"Spherical\")\npar.set_parval_from_str(\"grid::DIM\",3)\n\nrfm.reference_metric()\n\ncontractedGammahatU = ixp.zerorank1()\nfor k in range(3):\n for i in range(3):\n for j in range(3):\n contractedGammahatU[k] += rfm.ghatUU[i][j] * rfm.GammahatUDD[k][i][j]\n\nfor k in range(3):\n print(\"contracted GammahatU[\"+str(k)+\"]:\")\n sp.pretty_print(sp.simplify(contractedGammahatU[k]))\n if k<2:\n print(\"\\n\\n\")", "contracted GammahatU[0]:\n-2 \n───\nxx₀\n\n\n\ncontracted GammahatU[1]:\n -1 \n─────────────\n 2 \nxx₀ ⋅tan(xx₁)\n\n\n\ncontracted GammahatU[2]:\n0\n" ] ], [ [ "<a id='rhs_scalarwave_spherical'></a>\n\n# Step 2: The right-hand side of the scalar wave equation in spherical coordinates, using NRPy+ \\[Back to [top](#toc)\\]\n$$\\label{rhs_scalarwave_spherical}$$\n\nFollowing our [implementation of the scalar wave equation in Cartesian coordinates](Tutorial-ScalarWave.ipynb), we will introduce a new variable $v=\\partial_t u$ that will enable us to split the second time derivative into two first-order time derivatives:\n\n\\begin{align}\n\\partial_t u &= v \\\\\n\\partial_t v &= \\hat{g}^{ij} \\partial_{i} \\partial_{j} u - \\hat{\\Gamma}^i \\partial_i u.\n\\end{align}\n\nAdding back the sound speed $c$, we have a choice of a single factor of $c$ multiplying both right-hand sides, or a factor of $c^2$ multiplying the second equation only. We'll choose the latter:\n\n\\begin{align}\n\\partial_t u &= v \\\\\n\\partial_t v &= c^2 \\left(\\hat{g}^{ij} \\partial_{i} \\partial_{j} u - \\hat{\\Gamma}^i \\partial_i u\\right).\n\\end{align}\n\nNow let's generate the C code for the finite-difference representations of the right-hand sides of the above \"time evolution\" equations for $u$ and $v$. 
Since the right-hand side of $\\partial_t v$ contains implied sums over $i$ and $j$ in the first term, and an implied sum over $k$ in the second term, we'll find it useful to split the right-hand side into two parts\n\n\\begin{equation}\n\\partial_t v = c^2 \\left(\n{\\underbrace {\\textstyle \\hat{g}^{ij} \\partial_{i} \\partial_{j} u}_{\\text{Part 1}}} \n{\\underbrace {\\textstyle -\\hat{\\Gamma}^i \\partial_i u}_{\\text{Part 2}}}\\right),\n\\end{equation}\n\nand perform the implied sums in two pieces:", "_____no_output_____" ] ], [ [ "import NRPy_param_funcs as par\nimport indexedexp as ixp\nimport grid as gri\nimport finite_difference as fin\nimport reference_metric as rfm\nfrom outputC import *", "_____no_output_____" ], [ "# The name of this module (\"scalarwave\") is given by __name__:\nthismodule = __name__\n\n# Step 0: Read the spatial dimension parameter as DIM.\nDIM = par.parval_from_str(\"grid::DIM\")\n\n# Step 1: Set the finite differencing order to 4.\npar.set_parval_from_str(\"finite_difference::FD_CENTDERIVS_ORDER\",4)\n\n# Step 2a: Reset the gridfunctions list; below we define the\n# full complement of gridfunctions needed by this\n# tutorial. This line of code enables us to re-run this\n# tutorial without resetting the running Python kernel.\ngri.glb_gridfcs_list = []\n# Step 2b: Register gridfunctions that are needed as input\n# to the scalar wave RHS expressions.\nuu, vv = gri.register_gridfunctions(\"EVOL\",[\"uu\",\"vv\"])\n\n# Step 3a: Declare the rank-1 indexed expression \\partial_{i} u,\n# Derivative variables like these must have an underscore\n# in them, so the finite difference module can parse the\n# variable name properly.\nuu_dD = ixp.declarerank1(\"uu_dD\")\n\n# Step 3b: Declare the rank-2 indexed expression \\partial_{ij} u,\n# which is symmetric about interchange of indices i and j\n# Derivative variables like these must have an underscore\n# in them, so the finite difference module can parse the\n# variable name properly.\nuu_dDD = ixp.declarerank2(\"uu_dDD\",\"sym01\")\n\n# Step 4: Define the C parameter wavespeed. The `wavespeed`\n# variable is a proper SymPy variable, so it can be\n# used in below expressions. 
In the C code, it acts\n# just like a usual parameter, whose value is \n# specified in the parameter file.\nwavespeed = par.Cparameters(\"REAL\",thismodule,\"wavespeed\", 1.0)\n\n# Step 5: Define right-hand sides for the evolution.\nuu_rhs = vv\n# Step 5b: The right-hand side of the \\partial_t v equation\n# is given by:\n# \\hat{g}^{ij} \\partial_i \\partial_j u - \\hat{\\Gamma}^i \\partial_i u.\n# ^^^^^^^^^^^^ PART 1 ^^^^^^^^^^^^^^^^ ^^^^^^^^^^ PART 2 ^^^^^^^^^^^\nvv_rhs = 0\nfor i in range(DIM):\n # PART 2:\n vv_rhs -= contractedGammahatU[i]*uu_dD[i]\n for j in range(DIM):\n # PART 1:\n vv_rhs += rfm.ghatUU[i][j]*uu_dDD[i][j]\n\nvv_rhs *= wavespeed*wavespeed\n\n# Step 6: Generate C code for scalarwave evolution equations,\n# print output to the screen (standard out, or stdout).\nfin.FD_outputC(\"stdout\",\n [lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"uu\"),rhs=uu_rhs),\n lhrh(lhs=gri.gfaccess(\"rhs_gfs\",\"vv\"),rhs=vv_rhs)])", "{\n /* \n * NRPy+ Finite Difference Code Generation, Step 1 of 2: Read from main memory and compute finite difference stencils:\n */\n /*\n * Original SymPy expressions:\n * \"[const double uu_dD0 = invdx0*(-2*uu_i0m1_i1_i2/3 + uu_i0m2_i1_i2/12 + 2*uu_i0p1_i1_i2/3 - uu_i0p2_i1_i2/12),\n * const double uu_dD1 = invdx1*(-2*uu_i0_i1m1_i2/3 + uu_i0_i1m2_i2/12 + 2*uu_i0_i1p1_i2/3 - uu_i0_i1p2_i2/12),\n * const double uu_dDD00 = invdx0**2*(-5*uu/2 + 4*uu_i0m1_i1_i2/3 - uu_i0m2_i1_i2/12 + 4*uu_i0p1_i1_i2/3 - uu_i0p2_i1_i2/12),\n * const double uu_dDD11 = invdx1**2*(-5*uu/2 + 4*uu_i0_i1m1_i2/3 - uu_i0_i1m2_i2/12 + 4*uu_i0_i1p1_i2/3 - uu_i0_i1p2_i2/12),\n * const double uu_dDD22 = invdx2**2*(-5*uu/2 + 4*uu_i0_i1_i2m1/3 - uu_i0_i1_i2m2/12 + 4*uu_i0_i1_i2p1/3 - uu_i0_i1_i2p2/12)]\"\n */\n const double uu_i0_i1_i2m2 = in_gfs[IDX4(UUGF, i0,i1,i2-2)];\n const double uu_i0_i1_i2m1 = in_gfs[IDX4(UUGF, i0,i1,i2-1)];\n const double uu_i0_i1m2_i2 = in_gfs[IDX4(UUGF, i0,i1-2,i2)];\n const double uu_i0_i1m1_i2 = in_gfs[IDX4(UUGF, i0,i1-1,i2)];\n const double uu_i0m2_i1_i2 = in_gfs[IDX4(UUGF, i0-2,i1,i2)];\n const double uu_i0m1_i1_i2 = in_gfs[IDX4(UUGF, i0-1,i1,i2)];\n const double uu = in_gfs[IDX4(UUGF, i0,i1,i2)];\n const double uu_i0p1_i1_i2 = in_gfs[IDX4(UUGF, i0+1,i1,i2)];\n const double uu_i0p2_i1_i2 = in_gfs[IDX4(UUGF, i0+2,i1,i2)];\n const double uu_i0_i1p1_i2 = in_gfs[IDX4(UUGF, i0,i1+1,i2)];\n const double uu_i0_i1p2_i2 = in_gfs[IDX4(UUGF, i0,i1+2,i2)];\n const double uu_i0_i1_i2p1 = in_gfs[IDX4(UUGF, i0,i1,i2+1)];\n const double uu_i0_i1_i2p2 = in_gfs[IDX4(UUGF, i0,i1,i2+2)];\n const double vv = in_gfs[IDX4(VVGF, i0,i1,i2)];\n const double tmpFD0 = (1.0/12.0)*uu_i0m2_i1_i2;\n const double tmpFD1 = -1.0/12.0*uu_i0p2_i1_i2;\n const double tmpFD2 = (1.0/12.0)*uu_i0_i1m2_i2;\n const double tmpFD3 = -1.0/12.0*uu_i0_i1p2_i2;\n const double tmpFD4 = -5.0/2.0*uu;\n const double uu_dD0 = invdx0*(tmpFD0 + tmpFD1 - 2.0/3.0*uu_i0m1_i1_i2 + (2.0/3.0)*uu_i0p1_i1_i2);\n const double uu_dD1 = invdx1*(tmpFD2 + tmpFD3 - 2.0/3.0*uu_i0_i1m1_i2 + (2.0/3.0)*uu_i0_i1p1_i2);\n const double uu_dDD00 = ((invdx0)*(invdx0))*(-tmpFD0 + tmpFD1 + tmpFD4 + (4.0/3.0)*uu_i0m1_i1_i2 + (4.0/3.0)*uu_i0p1_i1_i2);\n const double uu_dDD11 = ((invdx1)*(invdx1))*(-tmpFD2 + tmpFD3 + tmpFD4 + (4.0/3.0)*uu_i0_i1m1_i2 + (4.0/3.0)*uu_i0_i1p1_i2);\n const double uu_dDD22 = ((invdx2)*(invdx2))*(tmpFD4 + (4.0/3.0)*uu_i0_i1_i2m1 - 1.0/12.0*uu_i0_i1_i2m2 + (4.0/3.0)*uu_i0_i1_i2p1 - 1.0/12.0*uu_i0_i1_i2p2);\n /* \n * NRPy+ Finite Difference Code Generation, Step 2 of 2: Evaluate SymPy expressions and write to main memory:\n */\n 
/*\n * Original SymPy expressions:\n * \"[rhs_gfs[IDX4(UUGF, i0, i1, i2)] = vv,\n * rhs_gfs[IDX4(VVGF, i0, i1, i2)] = wavespeed**2*(2*uu_dD0/xx0 + uu_dD1*sin(2*xx1)/(2*xx0**2*sin(xx1)**2) + uu_dDD00 + uu_dDD11/xx0**2 + uu_dDD22/(xx0**2*sin(xx1)**2))]\"\n */\n const double tmp0 = (1.0/((xx0)*(xx0)));\n const double tmp1 = tmp0/((sin(xx1))*(sin(xx1)));\n rhs_gfs[IDX4(UUGF, i0, i1, i2)] = vv;\n rhs_gfs[IDX4(VVGF, i0, i1, i2)] = ((wavespeed)*(wavespeed))*(tmp0*uu_dDD11 + (1.0/2.0)*tmp1*uu_dD1*sin(2*xx1) + tmp1*uu_dDD22 + 2*uu_dD0/xx0 + uu_dDD00);\n}\n\n" ] ], [ [ "<a id='code_validation'></a>\n\n# Step 3: Code Validation against `ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs` NRPy+ Module \\[Back to [top](#toc)\\]\n$$\\label{code_validation}$$\n\nHere, as a code validation check, we verify agreement in the SymPy expressions for the RHSs of the Curvilinear Scalar Wave equation (i.e., uu_rhs and vv_rhs) between\n\n1. this tutorial and \n2. the NRPy+ [ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs](../edit/ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py) module.\n\nBy default, we analyze the RHSs in Spherical coordinates, though other coordinate systems may be chosen.", "_____no_output_____" ] ], [ [ "# Step 7: We already have SymPy expressions for uu_rhs and vv_rhs in\n# terms of other SymPy variables. Even if we reset the list\n# of NRPy+ gridfunctions, these *SymPy* expressions for\n# uu_rhs and vv_rhs *will remain unaffected*. \n# \n# Here, we will use the above-defined uu_rhs and vv_rhs to \n# validate against the same expressions in the \n# ScalarWaveCurvilinear/ScalarWaveCurvilinear module,\n# to ensure consistency between the tutorial and the\n# module itself.\n#\n# Reset the list of gridfunctions, as registering a gridfunction\n# twice will spawn an error.\ngri.glb_gridfcs_list = []\n\n# Step 8: Call the ScalarWaveCurvilinear_RHSs() function from within the\n# ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py module,\n# which should do exactly the same as in Steps 1-6 above.\nimport ScalarWaveCurvilinear.ScalarWaveCurvilinear_RHSs as swcrhs\nswcrhs.ScalarWaveCurvilinear_RHSs()\n\n# Step 9: Consistency check between the tutorial notebook above\n# and the ScalarWaveCurvilinear_RHSs() function from within the\n# ScalarWaveCurvilinear/ScalarWaveCurvilinear_RHSs.py module.\nprint(\"Consistency check between ScalarWaveCurvilinear tutorial and NRPy+ module:\")\nprint(\"uu_rhs - swcrhs.uu_rhs: \"+str(sp.simplify(uu_rhs - swcrhs.uu_rhs))+\"\\t\\t (should be zero)\")\nprint(\"vv_rhs - swcrhs.vv_rhs: \"+str(sp.simplify(vv_rhs - swcrhs.vv_rhs))+\"\\t\\t (should be zero)\")", "Consistency check between ScalarWaveCurvilinear tutorial and NRPy+ module:\nuu_rhs - swcrhs.uu_rhs: 0\t\t (should be zero)\nvv_rhs - swcrhs.vv_rhs: 0\t\t (should be zero)\n" ] ], [ [ "<a id='latex_pdf_output'></a>\n\n# Step 4: Output this notebook to $\\LaTeX$-formatted PDF file \\[Back to [top](#toc)\\]\n$$\\label{latex_pdf_output}$$\n\nThe following code cell converts this Jupyter notebook into a proper, clickable $\\LaTeX$-formatted PDF file. 
After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename\n[Tutorial-ScalarWaveCurvilinear.pdf](Tutorial-ScalarWaveCurvilinear.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)", "_____no_output_____" ] ], [ [ "!jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-ScalarWaveCurvilinear.ipynb\n!pdflatex -interaction=batchmode Tutorial-ScalarWaveCurvilinear.tex\n!pdflatex -interaction=batchmode Tutorial-ScalarWaveCurvilinear.tex\n!pdflatex -interaction=batchmode Tutorial-ScalarWaveCurvilinear.tex\n!rm -f Tut*.out Tut*.aux Tut*.log", "[pandoc warning] Duplicate link reference `[comment]' \"source\" (line 23, column 1)\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\nThis is pdfTeX, Version 3.14159265-2.6-1.40.18 (TeX Live 2017/Debian) (preloaded format=pdflatex)\n restricted \\write18 enabled.\nentering extended mode\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d07005eea539d37dce8b7f01311ac40adaa75f32
6172
ipynb
Jupyter Notebook
PortingTest.ipynb
kylejshaffer/CoVe
400029e04aa55296329e1c8f09ca32fc3cede51f
[ "BSD-3-Clause" ]
53
2018-01-18T19:50:40.000Z
2020-07-21T06:56:17.000Z
PortingTest.ipynb
xiaohui-victor-li/CoVe
2f3681e87bb8dce5a936526cfeedf7d661961f92
[ "BSD-3-Clause" ]
3
2018-03-02T20:55:35.000Z
2019-01-25T06:52:51.000Z
PortingTest.ipynb
xiaohui-victor-li/CoVe
2f3681e87bb8dce5a936526cfeedf7d661961f92
[ "BSD-3-Clause" ]
11
2018-01-30T06:06:41.000Z
2021-06-27T21:59:59.000Z
29.673077
249
0.58571
[ [ [ "import os\nimport urllib\nfrom zipfile import ZipFile\nimport fileinput\nimport numpy as np\nimport gc\nimport urllib.request", "_____no_output_____" ], [ "if not os.path.exists('glove.840B.300d.txt'):\n if not os.path.exists('glove.840B.300d.zip'):\n print('downloading GloVe')\n urllib.request.urlretrieve(\"http://nlp.stanford.edu/data/glove.840B.300d.zip\", \"glove.840B.300d.zip\")\n zip = ZipFile('glove.840B.300d.zip')\n zip.extractall()", "_____no_output_____" ], [ "import torch\nfrom torchtext import data\nfrom torchtext import datasets\nfrom torchtext.vocab import GloVe\nimport fileinput\nimport numpy as np\nfrom cove import MTLSTM\n\ninputs = data.Field(lower=True, include_lengths=True, batch_first=True)\nanswers = data.Field(sequential=False)\n\nprint('Generating train, dev, test splits')\ntrain, dev, test = datasets.SNLI.splits(inputs, answers)\n\nprint('Building vocabulary')\ninputs.build_vocab(train, dev, test)\n\ng = GloVe(name='840B', dim=300)\ngc.collect()\ninputs.vocab.load_vectors(vectors=g)\ngc.collect()\n\nanswers.build_vocab(train)\n\nmodel = MTLSTM(n_vocab=len(inputs.vocab), vectors=inputs.vocab.vectors)\nmodel.cuda(0)\n\ntrain_iter, dev_iter, test_iter = data.BucketIterator.splits(\n (train, dev, test), batch_size=100, device=0)\n\ntrain_iter.init_epoch()", "Generating train, dev, test splits\nBuilding vocabulary\n" ], [ "from keras.models import load_model\nimport tensorflow as tf", "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "# To prevent Tensorflow from being greedy and allocating all GPU memory for itself\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n# Loading loding saved Keras CoVe model\ncove_model = load_model('Keras_CoVe.h5')", "/usr/local/lib/python3.5/dist-packages/keras/models.py:255: UserWarning: No training configuration found in save file: the model was *not* compiled. 
Compile it manually.\n warnings.warn('No training configuration found in save file: '\n" ], [ "TOTAL_NUM_TEST_SENTENCE = 10000\nprint('Comparing Keras CoVe prediction with Pytorch CoVe')\nabs_error_per_dim = 0\ntotal_num_of_dim = 0\nnum_test_sentence = 0\nmodel.train()\nfor batch_idx, batch in enumerate(train_iter):\n if num_test_sentence > TOTAL_NUM_TEST_SENTENCE:\n # It takes a long time to run through all examples hence restricting the test set \n break\n cove_premise = model(*batch.premise)\n #cove_hypothesis = model(*batch.hypothesis)\n sentence_sparse_vector = batch.premise[0].data.cpu().numpy()\n for i in range(len(sentence_sparse_vector)):\n sentence = sentence_sparse_vector[i]\n sentence_glove = []\n for word in sentence:\n sentence_glove.append(inputs.vocab.vectors[word].numpy())\n sentence_glove = np.expand_dims(np.array(sentence_glove),0)\n if np.any(np.sum(sentence_glove,axis=2)==0):\n break\n keras_cove_sentence = cove_model.predict(sentence_glove)\n keras_cove_sentence = np.squeeze(keras_cove_sentence,0)\n pytorch_cove_sentence = cove_premise.data.cpu().numpy()[i]\n\n abs_error_per_dim+=np.sum(np.abs(keras_cove_sentence - pytorch_cove_sentence))\n total_num_of_dim+=np.prod(sentence_glove.shape)\n num_test_sentence+=1\nabs_error_per_dim/=total_num_of_dim\nprint('abs error per dim:'+str(abs_error_per_dim))", "Comparing Keras CoVe prediction with Pytorch CoVe\nabs error per dim:2.4579888586353267e-08\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d07009b0af73a94ed954b9b6ab3b7f8ee5410e46
13599
ipynb
Jupyter Notebook
examples/ConsumptionSaving/example_ConsLaborModel.ipynb
feifei107/HARK
a96223aadc9d51ba6ecee1388c6afce4fd85de84
[ "Apache-2.0" ]
264
2016-06-20T18:06:44.000Z
2022-03-31T12:40:31.000Z
examples/ConsumptionSaving/example_ConsLaborModel.ipynb
PelyYan/HARK
3c17c2767cf0539ff4c2048d96ede6b2922fd608
[ "Apache-2.0" ]
876
2016-06-16T19:41:13.000Z
2022-03-30T20:44:52.000Z
examples/ConsumptionSaving/example_ConsLaborModel.ipynb
PelyYan/HARK
3c17c2767cf0539ff4c2048d96ede6b2922fd608
[ "Apache-2.0" ]
194
2016-06-17T16:12:29.000Z
2022-03-01T15:11:43.000Z
32.378571
101
0.564894
[ [ [ "from HARK.ConsumptionSaving.ConsLaborModel import (\n LaborIntMargConsumerType,\n init_labor_lifecycle,\n)\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import process_time", "_____no_output_____" ], [ "mystr = lambda number: \"{:.4f}\".format(number) # Format numbers as strings", "_____no_output_____" ], [ "do_simulation = True", "_____no_output_____" ], [ "# Make and solve a labor intensive margin consumer i.e. a consumer with utility for leisure\nLaborIntMargExample = LaborIntMargConsumerType(verbose=0)\nLaborIntMargExample.cycles = 0", "_____no_output_____" ], [ "t_start = process_time()\nLaborIntMargExample.solve()\nt_end = process_time()\nprint(\n \"Solving a labor intensive margin consumer took \"\n + str(t_end - t_start)\n + \" seconds.\"\n)", "_____no_output_____" ], [ "t = 0\nbMin_orig = 0.0\nbMax = 100.0", "_____no_output_____" ], [ "# Plot the consumption function at various transitory productivity shocks\nTranShkSet = LaborIntMargExample.TranShkGrid[t]\nbMin = bMin_orig\nB = np.linspace(bMin, bMax, 300)\nbMin = bMin_orig\nfor Shk in TranShkSet:\n B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk)\n C = LaborIntMargExample.solution[t].cFunc(B_temp, Shk * np.ones_like(B_temp))\n plt.plot(B_temp, C)\n bMin = np.minimum(bMin, B_temp[0])\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Normalized consumption level\")\nplt.xlim(bMin, bMax - bMin_orig + bMin)\nplt.ylim(0.0, None)\nplt.show()", "_____no_output_____" ], [ "# Plot the marginal consumption function at various transitory productivity shocks\nTranShkSet = LaborIntMargExample.TranShkGrid[t]\nbMin = bMin_orig\nB = np.linspace(bMin, bMax, 300)\nfor Shk in TranShkSet:\n B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk)\n C = LaborIntMargExample.solution[t].cFunc.derivativeX(\n B_temp, Shk * np.ones_like(B_temp)\n )\n plt.plot(B_temp, C)\n bMin = np.minimum(bMin, B_temp[0])\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Marginal propensity to consume\")\nplt.xlim(bMin, bMax - bMin_orig + bMin)\nplt.ylim(0.0, 1.0)\nplt.show()", "_____no_output_____" ], [ "# Plot the labor function at various transitory productivity shocks\nTranShkSet = LaborIntMargExample.TranShkGrid[t]\nbMin = bMin_orig\nB = np.linspace(0.0, bMax, 300)\nfor Shk in TranShkSet:\n B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk)\n Lbr = LaborIntMargExample.solution[t].LbrFunc(B_temp, Shk * np.ones_like(B_temp))\n bMin = np.minimum(bMin, B_temp[0])\n plt.plot(B_temp, Lbr)\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Labor supply\")\nplt.xlim(bMin, bMax - bMin_orig + bMin)\nplt.ylim(0.0, 1.0)\nplt.show()", "_____no_output_____" ], [ "# Plot the marginal value function at various transitory productivity shocks\npseudo_inverse = True\nTranShkSet = LaborIntMargExample.TranShkGrid[t]\nbMin = bMin_orig\nB = np.linspace(0.0, bMax, 300)\nfor Shk in TranShkSet:\n B_temp = B + LaborIntMargExample.solution[t].bNrmMin(Shk)\n if pseudo_inverse:\n vP = LaborIntMargExample.solution[t].vPfunc.cFunc(\n B_temp, Shk * np.ones_like(B_temp)\n )\n else:\n vP = LaborIntMargExample.solution[t].vPfunc(B_temp, Shk * np.ones_like(B_temp))\n bMin = np.minimum(bMin, B_temp[0])\n plt.plot(B_temp, vP)\nplt.xlabel(\"Beginning of period bank balances\")\nif pseudo_inverse:\n plt.ylabel(\"Pseudo inverse marginal value\")\nelse:\n plt.ylabel(\"Marginal value\")\nplt.xlim(bMin, bMax - bMin_orig + bMin)\nplt.ylim(0.0, None)\nplt.show()", "_____no_output_____" ], [ "if do_simulation:\n t_start = 
process_time()\n LaborIntMargExample.T_sim = 120 # Set number of simulation periods\n LaborIntMargExample.track_vars = [\"bNrm\", 'cNrm']\n LaborIntMargExample.initialize_sim()\n LaborIntMargExample.simulate()\n t_end = process_time()\n print(\n \"Simulating \"\n + str(LaborIntMargExample.AgentCount)\n + \" intensive-margin labor supply consumers for \"\n + str(LaborIntMargExample.T_sim)\n + \" periods took \"\n + mystr(t_end - t_start)\n + \" seconds.\"\n )\n\n N = LaborIntMargExample.AgentCount\n CDF = np.linspace(0.0, 1, N)\n\n plt.plot(np.sort(LaborIntMargExample.controls['cNrm']), CDF)\n plt.xlabel(\n \"Consumption cNrm in \" + str(LaborIntMargExample.T_sim) + \"th simulated period\"\n )\n plt.ylabel(\"Cumulative distribution\")\n plt.xlim(0.0, None)\n plt.ylim(0.0, 1.0)\n plt.show()\n\n plt.plot(np.sort(LaborIntMargExample.controls['Lbr']), CDF)\n plt.xlabel(\n \"Labor supply Lbr in \" + str(LaborIntMargExample.T_sim) + \"th simulated period\"\n )\n plt.ylabel(\"Cumulative distribution\")\n plt.xlim(0.0, 1.0)\n plt.ylim(0.0, 1.0)\n plt.show()\n\n plt.plot(np.sort(LaborIntMargExample.state_now['aNrm']), CDF)\n plt.xlabel(\n \"End-of-period assets aNrm in \"\n + str(LaborIntMargExample.T_sim)\n + \"th simulated period\"\n )\n plt.ylabel(\"Cumulative distribution\")\n plt.xlim(0.0, 20.0)\n plt.ylim(0.0, 1.0)\n plt.show()", "_____no_output_____" ], [ "# Make and solve a labor intensive margin consumer with a finite lifecycle\nLifecycleExample = LaborIntMargConsumerType(**init_labor_lifecycle)\nLifecycleExample.cycles = (\n 1 # Make this consumer live a sequence of periods exactly once\n)", "_____no_output_____" ], [ "start_time = process_time()\nLifecycleExample.solve()\nend_time = process_time()\nprint(\n \"Solving a lifecycle labor intensive margin consumer took \"\n + str(end_time - start_time)\n + \" seconds.\"\n)\nLifecycleExample.unpack('cFunc')", "_____no_output_____" ], [ "bMax = 20.0", "_____no_output_____" ], [ "# Plot the consumption function in each period of the lifecycle, using median shock\nB = np.linspace(0.0, bMax, 300)\nb_min = np.inf\nb_max = -np.inf\nfor t in range(LifecycleExample.T_cycle):\n TranShkSet = LifecycleExample.TranShkGrid[t]\n Shk = TranShkSet[int(len(TranShkSet) // 2)] # Use the median shock, more or less\n B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)\n C = LifecycleExample.solution[t].cFunc(B_temp, Shk * np.ones_like(B_temp))\n plt.plot(B_temp, C)\n b_min = np.minimum(b_min, B_temp[0])\n b_max = np.maximum(b_min, B_temp[-1])\nplt.title(\"Consumption function across periods of the lifecycle\")\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Normalized consumption level\")\nplt.xlim(b_min, b_max)\nplt.ylim(0.0, None)\nplt.show()", "_____no_output_____" ], [ "# Plot the marginal consumption function in each period of the lifecycle, using median shock\nB = np.linspace(0.0, bMax, 300)\nb_min = np.inf\nb_max = -np.inf\nfor t in range(LifecycleExample.T_cycle):\n TranShkSet = LifecycleExample.TranShkGrid[t]\n Shk = TranShkSet[int(len(TranShkSet) // 2)] # Use the median shock, more or less\n B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)\n MPC = LifecycleExample.solution[t].cFunc.derivativeX(\n B_temp, Shk * np.ones_like(B_temp)\n )\n plt.plot(B_temp, MPC)\n b_min = np.minimum(b_min, B_temp[0])\n b_max = np.maximum(b_min, B_temp[-1])\nplt.title(\"Marginal consumption function across periods of the lifecycle\")\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Marginal propensity to consume\")\nplt.xlim(b_min, 
b_max)\nplt.ylim(0.0, 1.0)\nplt.show()", "_____no_output_____" ], [ "# Plot the labor supply function in each period of the lifecycle, using median shock\nB = np.linspace(0.0, bMax, 300)\nb_min = np.inf\nb_max = -np.inf\nfor t in range(LifecycleExample.T_cycle):\n TranShkSet = LifecycleExample.TranShkGrid[t]\n Shk = TranShkSet[int(len(TranShkSet) // 2)] # Use the median shock, more or less\n B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)\n L = LifecycleExample.solution[t].LbrFunc(B_temp, Shk * np.ones_like(B_temp))\n plt.plot(B_temp, L)\n b_min = np.minimum(b_min, B_temp[0])\n b_max = np.maximum(b_min, B_temp[-1])\nplt.title(\"Labor supply function across periods of the lifecycle\")\nplt.xlabel(\"Beginning of period bank balances\")\nplt.ylabel(\"Labor supply\")\nplt.xlim(b_min, b_max)\nplt.ylim(0.0, 1.01)\nplt.show()", "_____no_output_____" ], [ "# Plot the marginal value function at various transitory productivity shocks\npseudo_inverse = True\nTranShkSet = LifecycleExample.TranShkGrid[t]\nB = np.linspace(0.0, bMax, 300)\nb_min = np.inf\nb_max = -np.inf\nfor t in range(LifecycleExample.T_cycle):\n TranShkSet = LifecycleExample.TranShkGrid[t]\n Shk = TranShkSet[int(len(TranShkSet) / 2)] # Use the median shock, more or less\n B_temp = B + LifecycleExample.solution[t].bNrmMin(Shk)\n if pseudo_inverse:\n vP = LifecycleExample.solution[t].vPfunc.cFunc(\n B_temp, Shk * np.ones_like(B_temp)\n )\n else:\n vP = LifecycleExample.solution[t].vPfunc(B_temp, Shk * np.ones_like(B_temp))\n plt.plot(B_temp, vP)\n b_min = np.minimum(b_min, B_temp[0])\n b_max = np.maximum(b_min, B_temp[-1])\nplt.xlabel(\"Beginning of period bank balances\")\nif pseudo_inverse:\n plt.ylabel(\"Pseudo inverse marginal value\")\nelse:\n plt.ylabel(\"Marginal value\")\nplt.title(\"Marginal value across periods of the lifecycle\")\nplt.xlim(b_min, b_max)\nplt.ylim(0.0, None)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0700bf0701ed70ae7d731662b9d405468f65db0
8894
ipynb
Jupyter Notebook
sources/nfirs/scripts/popular-room-ignitions.ipynb
tbuffington7/data
6ef19c9ad5b3c9cea6fcdf13d04e7edf51aa7eb9
[ "MIT" ]
null
null
null
sources/nfirs/scripts/popular-room-ignitions.ipynb
tbuffington7/data
6ef19c9ad5b3c9cea6fcdf13d04e7edf51aa7eb9
[ "MIT" ]
null
null
null
sources/nfirs/scripts/popular-room-ignitions.ipynb
tbuffington7/data
6ef19c9ad5b3c9cea6fcdf13d04e7edf51aa7eb9
[ "MIT" ]
null
null
null
32.341818
181
0.484596
[ [ [ "### Find the top rooms ignited and the top materials in those rooms that were first ignited", "_____no_output_____" ] ], [ [ "import psycopg2\nimport pandas as pd\nfrom IPython.display import display\n\nconn = psycopg2.connect(service='nfirs')\npd.options.display.max_rows = 1000\n\ndf = pd.read_sql_query(\"select * from codelookup where fieldid = 'PROP_USE' and length(code_value) = 3 order by code_value\", conn)['code_value']\ncodes = list(df.values)", "_____no_output_____" ] ], [ [ "#### By property use type (batch by property type)", "_____no_output_____" ] ], [ [ "# Create a CSV for each property use type\n\nq = \"\"\"SELECT x.prop_use,\n area_orig,\n first_ign,\n x.civ_inj,\n x.civ_death,\n x.flame_sprd,\n x.item_sprd, \n x.cnt\nFROM\n ( SELECT *,\n row_number() over (partition BY area_orig\n ORDER BY area_orig, w.cnt DESC, first_ign, w.flame_sprd,w.item_sprd, w.civ_death, w.civ_inj DESC) row_num\n FROM\n (SELECT distinct bf.area_orig,\n bf.first_ign,\n bf.prop_use,\n bf.flame_sprd,\n bf.item_sprd,\n COALESCE(bf.oth_death, 0) as civ_death,\n COALESCE(bf.oth_inj,0) as civ_inj,\n count(*) OVER ( PARTITION BY bf.area_orig, bf.first_ign, bf.flame_sprd, bf.item_sprd, COALESCE(bf.oth_death, 0)+COALESCE(bf.oth_inj,0) ) AS cnt,\n row_number() OVER ( PARTITION BY bf.area_orig, bf.first_ign, bf.flame_sprd, bf.item_sprd, COALESCE(bf.oth_death, 0)+COALESCE(bf.oth_inj,0) ) AS row_numbers\n FROM joint_buildingfires bf\n WHERE bf.area_orig IN\n ( SELECT area_orig\n FROM joint_buildingfires\n WHERE prop_use = %(use)s\n AND area_orig != 'UU'\n AND extract(year from inc_date) > 2011\n GROUP BY area_orig\n ORDER BY count(1) DESC LIMIT 8)\n AND bf.prop_use = %(use)s\n AND bf.first_ign != 'UU'\n AND extract(year from inc_date) > 2011\n ORDER BY area_orig,\n first_ign ) w\n WHERE w.row_numbers = 1) x\nORDER BY area_orig,\n x.cnt DESC,\n first_ign\n\"\"\"\n\n# for c in codes[1:2]:\n# df = pd.read_sql_query(q, conn, params=dict(use=c))\n# display(df)\n\nfor c in codes:\n df = pd.read_sql_query(q, conn, params=dict(use=c))\n df.to_csv('/tmp/{}.csv'.format(c))", "_____no_output_____" ], [ "# Testing/sanity checks\n\nq = \"\"\"SELECT bf.prop_use, bf.area_orig,\n bf.first_ign,\n bf.flame_sprd,\n COALESCE(bf.oth_death, 0) + COALESCE(bf.oth_inj,0) as civ_inj_death,\n count(*) OVER ( PARTITION BY bf.area_orig, bf.first_ign, bf.flame_sprd, COALESCE(bf.oth_death, 0)+COALESCE(bf.oth_inj,0) ) AS cnt,\n row_number() OVER ( PARTITION BY bf.area_orig, bf.first_ign, bf.flame_sprd, COALESCE(bf.oth_death, 0)+COALESCE(bf.oth_inj,0) ) AS row_numbers\n FROM buildingfires bf\n WHERE bf.area_orig IN\n ( SELECT area_orig\n FROM buildingfires\n WHERE prop_use = %(use)s\n AND area_orig != 'UU'\n GROUP BY area_orig\n ORDER BY count(1) DESC LIMIT 8)\n AND bf.prop_use = %(use)s\n AND bf.first_ign != 'UU'\n ORDER BY area_orig,\n first_ign,\n cnt desc\"\"\"\n\npd.read_sql_query(q, conn, params=dict(use='100'))", "_____no_output_____" ], [ "q = \"\"\"\nselect count(1)\nfrom joint_buildingfires\nwhere prop_use='100'\n and area_orig = '00'\n and first_ign = '00'\n and COALESCE(oth_death, 0) + COALESCE(oth_inj, 0) = 0\n and flame_sprd = 'N'\n\"\"\"\n\npd.read_sql_query(q, conn)", "_____no_output_____" ], [ "# Sanity checks\n\nq = \"\"\"\nselect area_orig, first_ign, count(1)\nfrom joint_buildingfires\nwhere area_orig != 'UU'\n and first_ign != 'UU'\ngroup by area_orig, first_ign\norder by count desc\n\"\"\"\n\npd.read_sql_query(q, conn)", "_____no_output_____" ], [ "# More sanity checks, including civ death/inj + flame spread\n\nq = 
\"\"\"\nselect area_orig, first_ign, flame_sprd, COALESCE(oth_death, 0)+COALESCE(oth_inj,0) as civ_death_inj, count(1)\nfrom joint_buildingfires\nwhere area_orig != 'UU'\n and first_ign != 'UU'\ngroup by area_orig, first_ign, flame_sprd, civ_death_inj\norder by count desc\"\"\"\n\npd.read_sql_query(q, conn)", "_____no_output_____" ], [ "# For grouped propety usage only 6 most popular ignition sources\n\nq = \"\"\"\n-- \n\nSELECT area_orig,\n first_ign,\n x.cnt\nFROM\n ( SELECT *,\n row_number() over (partition BY area_orig\n ORDER BY area_orig, w.cnt DESC, first_ign) row_num\n FROM\n (SELECT bf.area_orig,\n bf.first_ign,\n count(*) OVER ( PARTITION BY bf.area_orig, bf.first_ign ) AS cnt,\n row_number() OVER ( PARTITION BY bf.area_orig, bf.first_ign ) AS row_numbers\n FROM joint_buildingfires bf\n WHERE bf.area_orig IN\n ( SELECT area_orig\n FROM joint_buildingfires\n WHERE prop_use in ('120', '121', '122', '123', '124', '129')\n AND area_orig != 'UU'\n GROUP BY area_orig\n ORDER BY count(1) DESC LIMIT 8)\n AND bf.prop_use in ('120', '121', '122', '123', '124', '129')\n AND bf.first_ign != 'UU'\n ORDER BY area_orig,\n first_ign ) w\n WHERE w.row_numbers = 1) x\nWHERE x.row_num < 7\nORDER BY area_orig,\n x.cnt DESC,\n first_ign\n\"\"\"\n\ndf = pd.read_sql_query(q, conn)\ndisplay(df)", "_____no_output_____" ], [ "# Pull all from buildingfires to CSV\n\nq = \"\"\"\nselect prop_use, area_orig, first_ign, oth_inj, oth_death, flame_sprd\nfrom joint_buildingfires\"\"\"\n\ndf = pd.read_sql_query(q, conn)\ndf.to_csv('/tmp/buildingfires.csv')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d070233d29714d61dd7af1b14d9c89246510f0cb
30875
ipynb
Jupyter Notebook
3_DL_Libraries_Review/3_1_SpaCy/Exploring Spacy.ipynb
TheManohar/DLearn_Pro
6f479745510640080923321452488f10f2a08b17
[ "MIT" ]
null
null
null
3_DL_Libraries_Review/3_1_SpaCy/Exploring Spacy.ipynb
TheManohar/DLearn_Pro
6f479745510640080923321452488f10f2a08b17
[ "MIT" ]
null
null
null
3_DL_Libraries_Review/3_1_SpaCy/Exploring Spacy.ipynb
TheManohar/DLearn_Pro
6f479745510640080923321452488f10f2a08b17
[ "MIT" ]
null
null
null
37.020384
488
0.576097
[ [ [ "import pandas as pd\nimport spacy", "_____no_output_____" ], [ "dir(spacy)", "_____no_output_____" ] ], [ [ "# Linguistic Features", "_____no_output_____" ], [ "## Part-of-speech tagging\n After tokenization, spaCy can parse and tag a given Doc. This is where the statistical model comes in, which enables spaCy to make a prediction of which tag or label most likely applies in this context. A model consists of binary data and is produced by showing a system enough examples for it to make predictions that generalise across the language – for example, a word following \"the\" in English is most likely a noun.\n\nLinguistic annotations are available as Token attributes . Like many NLP libraries, spaCy encodes all strings to hash values to reduce memory usage and improve efficiency. So to get the readable string representation of an attribute, we need to add an underscore _ to its name:", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u'Apple is looking at buying U.K. startup for $1 billion')\n\nfor token in doc:\n print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,\n token.shape_, token.is_alpha, token.is_stop)", "Apple apple PROPN NNP nsubj Xxxxx True False\nis be VERB VBZ aux xx True True\nlooking look VERB VBG ROOT xxxx True False\nat at ADP IN prep xx True True\nbuying buy VERB VBG pcomp xxxx True False\nU.K. u.k. PROPN NNP compound X.X. False False\nstartup startup NOUN NN dobj xxxx True False\nfor for ADP IN prep xxx True True\n$ $ SYM $ quantmod $ False False\n1 1 NUM CD compound d False False\nbillion billion NUM CD pobj xxxx True False\n" ] ], [ [ "# Dependency parsing", "_____no_output_____" ], [ "## Noun chunks\nNoun chunks are \"base noun phrases\" – flat phrases that have a noun as their head. You can think of noun chunks as a noun plus the words describing the noun – for example, \"the lavish green grass\" or \"the world’s largest tech fund\". To get the noun chunks in a document, simply iterate over Doc.noun_chunks .", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Autonomous cars shift insurance liability toward manufacturers\")\n\nfor chunk in doc.noun_chunks:\n print(chunk.text, chunk.root.text, chunk.root.dep_,\n chunk.root.head.text)", "Autonomous cars cars nsubj shift\ninsurance liability liability dobj shift\nmanufacturers manufacturers pobj toward\n" ] ], [ [ "## Navigating the parse tree\nspaCy uses the terms head and child to describe the words connected by a single arc in the dependency tree. The term dep is used for the arc label, which describes the type of syntactic relation that connects the child to the head. As with other attributes, the value of .dep is a hash value. You can get the string value with .dep_", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Autonomous cars shift insurance liability toward manufacturers\")\n\nfor token in doc:\n print(token.text, token.dep_, token.head.text, token.head.pos_,\n [child for child in token.children])", "Autonomous amod cars NOUN []\ncars nsubj shift VERB [Autonomous]\nshift ROOT shift VERB [cars, liability, toward]\ninsurance compound liability NOUN []\nliability dobj shift VERB [insurance]\ntoward prep shift VERB [manufacturers]\nmanufacturers pobj toward ADP []\n" ] ], [ [ "Because the syntactic relations form a tree, every word has exactly one head. You can therefore iterate over the arcs in the tree by iterating over the words in the sentence. 
This is usually the best way to match an arc of interest — from below:", "_____no_output_____" ] ], [ [ "from spacy.symbols import nsubj, VERB\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Autonomous cars shift insurance liability toward manufacturers\")\n\n# Finding a verb with a subject from below — good\nverbs = set()\n\nfor possible_subject in doc:\n if possible_subject.dep == nsubj and possible_subject.head.pos == VERB:\n verbs.add(possible_subject.head)\nprint(verbs)", "{shift}\n" ] ], [ [ " If you try to match from above, you'll have to iterate twice: once for the head, and then again through the children:", "_____no_output_____" ] ], [ [ "# Finding a verb with a subject from above — less good\nverbs = []\n\nfor possible_verb in doc:\n if possible_verb.pos == VERB:\n for possible_subject in possible_verb.children:\n if possible_subject.dep == nsubj:\n verbs.append(possible_verb)\n break", "_____no_output_____" ], [ "verbs", "_____no_output_____" ] ], [ [ "## Iterating around the local tree\nA few more convenience attributes are provided for iterating around the local tree from the token. The Token.lefts and Token.rights attributes provide sequences of syntactic children that occur before and after the token. Both sequences are in sentence order. There are also two integer-typed attributes, Token.n_rights and Token.n_lefts , that give the number of left and right children.", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"bright red apples on the tree\")\n\nprint([token.text for token in doc[2].lefts]) # ['bright', 'red']\nprint([token.text for token in doc[2].rights]) # ['on']\nprint(doc[2].n_lefts) # 2\nprint(doc[2].n_rights) # 1", "['bright', 'red']\n['on']\n2\n1\n" ], [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Credit and mortgage account holders must submit their requests\")\n\nroot = [token for token in doc if token.head == token][0]\nsubject = list(root.lefts)[0]\n\nfor descendant in subject.subtree:\n assert subject is descendant or subject.is_ancestor(descendant)\n print(descendant.text, descendant.dep_, descendant.n_lefts,\n descendant.n_rights,\n [ancestor.text for ancestor in descendant.ancestors])", "Credit nmod 0 2 ['holders', 'submit']\nand cc 0 0 ['Credit', 'holders', 'submit']\nmortgage compound 0 0 ['account', 'Credit', 'holders', 'submit']\naccount conj 1 0 ['Credit', 'holders', 'submit']\nholders nsubj 1 0 ['submit']\n" ] ], [ [ "You can get a whole phrase by its syntactic head using the Token.subtree attribute. This returns an ordered sequence of tokens. You can walk up the tree with the Token.ancestors attribute, and check dominance with Token.is_ancestor() .", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Credit and mortgage account holders must submit their requests\")\n\nroot = [token for token in doc if token.head == token][0]\nsubject = list(root.lefts)[0]\nfor descendant in subject.subtree:\n assert subject is descendant or subject.is_ancestor(descendant)\n print(descendant.text, descendant.dep_, descendant.n_lefts,\n descendant.n_rights,\n [ancestor.text for ancestor in descendant.ancestors])", "Credit nmod 0 2 ['holders', 'submit']\nand cc 0 0 ['Credit', 'holders', 'submit']\nmortgage compound 0 0 ['account', 'Credit', 'holders', 'submit']\naccount conj 1 0 ['Credit', 'holders', 'submit']\nholders nsubj 1 0 ['submit']\n" ] ], [ [ "Finally, the .left_edge and .right_edge attributes can be especially useful, because they give you the first and last token of the subtree. 
This is the easiest way to create a Span object for a syntactic phrase. Note that .right_edge gives a token within the subtree — so if you use it as the end-point of a range, don't forget to +1!", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Credit and mortgage account holders must submit their requests\")\n\nspan = doc[doc[4].left_edge.i : doc[4].right_edge.i+1]\nspan.merge()\nfor token in doc:\n print(token.text, token.pos_, token.dep_, token.head.text)", "Credit and mortgage account holders NOUN nsubj submit\nmust VERB aux submit\nsubmit VERB ROOT submit\ntheir ADJ poss requests\nrequests NOUN dobj submit\n" ] ], [ [ "## Visualizing dependencies\nThe best way to understand spaCy's dependency parser is interactively. To make this easier, spaCy v2.0+ comes with a visualization module. You can pass a Doc or a list of Doc objects to displaCy and run displacy.serve to run the web server, or displacy.render to generate the raw markup. If you want to know how to write rules that hook into some type of syntactic construction, just plug the sentence into the visualizer and see how spaCy annotates it.", "_____no_output_____" ] ], [ [ "from spacy import displacy\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"Autonomous cars shift insurance liability toward manufacturers\")\n\ndisplacy.render(doc, style='dep', jupyter=True)", "_____no_output_____" ] ], [ [ "## Disabling the parser\nIn the default models, the parser is loaded and enabled as part of the standard processing pipeline. If you don't need any of the syntactic information, you should disable the parser. Disabling the parser will make spaCy load and run much faster. If you want to load the parser, but need to disable it for specific documents, you can also control its use on the nlp object.", "_____no_output_____" ] ], [ [ "from spacy.lang.en import English\n\nnlp = spacy.load('en', disable=['parser'])\n#nlp = English().from_disk('/model', disable=['parser'])\ndoc = nlp(u\"I don't want parsed\", disable=['parser'])", "_____no_output_____" ] ], [ [ "# Named Entities\nspaCy features an extremely fast statistical entity recognition system, that assigns labels to contiguous spans of tokens. The default model identifies a variety of named and numeric entities, including companies, locations, organizations and products. You can add arbitrary classes to the entity recognition system, and update the model with new examples.", "_____no_output_____" ], [ "## Named Entity Recognition 101\n\nA named entity is a \"real-world object\" that's assigned a name – for example, a person, a country, a product or a book title. spaCy can recognise various types of named entities in a document, by asking the model for a prediction. Because models are statistical and strongly depend on the examples they were trained on, this doesn't always work perfectly and might need some tuning later, depending on your use case.\n\nNamed entities are available as the ents property of a Doc:", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u'Apple is looking at buying U.K. startup for $1 billion')\n\nfor ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)", "Apple 0 5 ORG\nU.K. 27 31 GPE\n$1 billion 44 54 MONEY\n" ] ], [ [ "## Accessing entity annotations\n\nThe standard way to access entity annotations is the doc.ents property, which produces a sequence of Span objects. The entity type is accessible either as a hash value or as a string, using the attributes ent.label and ent.label_. 
The Span object acts as a sequence of tokens, so you can iterate over the entity or index into it. You can also get the text form of the whole entity, as though it were a single token.\n\nYou can also access token entity annotations using the token.ent_iob and token.ent_type attributes. token.ent_iob indicates whether an entity starts, continues or ends on the tag. If no entity type is set on a token, it will return an empty string.", "_____no_output_____" ] ], [ [ "nlp = spacy.load('en_core_web_sm')\ndoc = nlp(u'San Francisco considers banning sidewalk delivery robots')\n\n# document level\nents = [(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents]\nprint(ents)\n\n# token level\nent_san = [doc[0].text, doc[0].ent_iob_, doc[0].ent_type_]\nent_francisco = [doc[1].text, doc[1].ent_iob_, doc[1].ent_type_]\nprint(ent_san) # [u'San', u'B', u'GPE']\nprint(ent_francisco) # [u'Francisco', u'I', u'GPE']", "[('San Francisco', 0, 13, 'GPE')]\n['San', 'B', 'GPE']\n['Francisco', 'I', 'GPE']\n" ] ], [ [ "## Setting entity annotations\n\nTo ensure that the sequence of token annotations remains consistent, you have to set entity annotations at the document level. However, you can't write directly to the token.ent_iob or token.ent_type attributes, so the easiest way to set entities is to assign to the doc.ents attribute and create the new entity as a Span .\n\nKeep in mind that you need to create a Span with the start and end index of the token, not the start and end index of the entity in the document. In this case, \"FB\" is token (0, 1) – but at the document level, the entity will have the start and end indices (0, 2).", "_____no_output_____" ] ], [ [ "from spacy.tokens import Span\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(u\"FB is hiring a new Vice President of global policy\")\nents = [(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents]\nprint('Before', ents)\n# the model didn't recognise \"FB\" as an entity :(\n\nORG = doc.vocab.strings[u'ORG'] # get hash value of entity label\nfb_ent = Span(doc, 0, 1, label=ORG) # create a Span for the new entity\ndoc.ents = list(doc.ents) + [fb_ent]\n\nents = [(e.text, e.start_char, e.end_char, e.label_) for e in doc.ents]\nprint('After', ents)\n# [(u'FB', 0, 2, 'ORG')] 🎉", "Before []\nAfter [('FB', 0, 2, 'ORG')]\n" ] ], [ [ "## Setting entity annotations from array\n\nYou can also assign entity annotations using the doc.from_array() method. To do this, you should include both the `ENT_TYPE` and the ENT_IOB attributes in the array you're importing from.", "_____no_output_____" ] ], [ [ "import numpy\n\nfrom spacy.attrs import ENT_IOB, ENT_TYPE\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp.make_doc(u'London is a big city in the United Kingdom.')\nprint('Before', list(doc.ents)) # []\n\nheader = [ENT_IOB, ENT_TYPE]\nattr_array = numpy.zeros((len(doc), len(header)))\nattr_array[0, 0] = 3 # B\nattr_array[0, 1] = doc.vocab.strings[u'GPE']\ndoc.from_array(header, attr_array)\nprint('After', list(doc.ents)) # [London", "Before []\nAfter [London]\n" ] ], [ [ "## Setting entity annotations in Cython\n\nFinally, you can always write to the underlying struct, if you compile a Cython function. 
This is easy to do, and allows you to write efficient native code.\n\nThis code needs cython to work.\n\n```cython\n\n# cython: infer_types=True\nfrom spacy.tokens.doc cimport Doc\n\ncpdef set_entity(Doc doc, int start, int end, int ent_type):\n for i in range(start, end):\n doc.c[i].ent_type = ent_type\n doc.c[start].ent_iob = 3\n for i in range(start+1, end):\n doc.c[i].ent_iob = 2\n```", "_____no_output_____" ], [ "## Training and updating\n\nTo provide training examples to the entity recogniser, you'll first need to create an instance of the GoldParse class. You can specify your annotations in a stand-off format or as token tags. If a character offset in your entity annotations don't fall on a token boundary, the GoldParse class will treat that annotation as a missing value. This allows for more realistic training, because the entity recogniser is allowed to learn from examples that may feature tokenizer errors.\n\n```python\ntrain_data = [('Who is Chaka Khan?', [(7, 17, 'PERSON')]),\n ('I like London and Berlin.', [(7, 13, 'LOC'), (18, 24, 'LOC')])]\n\ndoc = Doc(nlp.vocab, [u'rats', u'make', u'good', u'pets'])\ngold = GoldParse(doc, entities=[u'U-ANIMAL', u'O', u'O', u'O'])\n```", "_____no_output_____" ], [ "## Visualizing named entities\n\nThe displaCy ENT visualizer lets you explore an entity recognition model's behaviour interactively. If you're training a model, it's very useful to run the visualization yourself. To help you do that, spaCy v2.0+ comes with a visualization module. You can pass a Doc or a list of Doc objects to displaCy and run displacy.serve to run the web server, or displacy.render to generate the raw markup.", "_____no_output_____" ] ], [ [ "from spacy import displacy\n\ntext = \"\"\"But Google is starting from behind. The company made a late push\ninto hardware, and Apple’s Siri, available on iPhones, and Amazon’s Alexa\nsoftware, which runs on its Echo and Dot devices, have clear leads in\nconsumer adoption.\"\"\"\n\nnlp = spacy.load('xx_ent_wiki_sm')\ndoc = nlp(text)\ndisplacy.serve(doc, style='ent')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
d0702888c82ec087ad51f7381aa5e772cf56a50e
530,322
ipynb
Jupyter Notebook
model_baseline.ipynb
Yeolnim/HappinessAnalysis
fe5aedf4591f086417b6ba5471f48a7ee765f508
[ "MIT" ]
null
null
null
model_baseline.ipynb
Yeolnim/HappinessAnalysis
fe5aedf4591f086417b6ba5471f48a7ee765f508
[ "MIT" ]
null
null
null
model_baseline.ipynb
Yeolnim/HappinessAnalysis
fe5aedf4591f086417b6ba5471f48a7ee765f508
[ "MIT" ]
null
null
null
285.272727
148,400
0.913268
[ [ [ "# coding=utf-8\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\n\ndf=pd.read_csv(r'./data/happiness_train_complete.csv',encoding='GB2312',index_col='id')\n\ndf = df[df[\"happiness\"]>0] #原表中幸福度非正的都是错误数据,可以剔除12条错误数据\n\ndf.dtypes[df.dtypes==object] #查得有四列不是数据类型,需要对其进行转化\nfor i in range(df.dtypes[df.dtypes==object].shape[0]):\n print(df.dtypes[df.dtypes==object].index[i])\n \n \n#转化四列数据,转换后df全为数值格式\ndf[\"survey_month\"] = df[\"survey_time\"].transform(lambda line:line.split(\" \")[0].split(\"/\")[1]).astype(\"int64\") #返回调查月:用空格来切分日期和时间,日期中第1项为月\ndf[\"survey_day\"] = df[\"survey_time\"].transform(lambda line:line.split(\" \")[0].split(\"/\")[2]).astype(\"int64\") #返回调查日\ndf[\"survey_hour\"] = df[\"survey_time\"].transform(lambda line:line.split(\" \")[1].split(\":\")[0]).astype(\"int64\") #返回调查小时\ndf=df.drop(columns='survey_time')\n\nenc1=preprocessing.OrdinalEncoder()\nenc2=preprocessing.OrdinalEncoder()\nenc3=preprocessing.OrdinalEncoder()\ndf['edu_other']=enc1.fit_transform(df['edu_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))\nprint(enc1.categories_) #查看编码类型\n\ndf['property_other']=enc2.fit_transform(df['property_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))\nprint(enc2.categories_) #查看编码类型\n\ndf['invest_other']=enc3.fit_transform(df['invest_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))\nprint(enc3.categories_) #查看编码类型\n\n\n#确定X和Y\nX=df.drop(columns='happiness').fillna(0)\nY=df.happiness", "survey_time\nedu_other\nproperty_other\ninvest_other\n[array(['0', '夜校'], dtype=object)]\n[array(['0', '两人共有', '两兄弟共有', '但目前房产证还没办下来', '兄弟三人共同所有', '兄弟共有', '全家人共同所有',\n '全家人共有', '全家共有', '全家所有', '共同所有', '共有', '农村没有产权,最近才开始办理',\n '原为配偶所有,配偶刚去世,还未办理转移手续', '受访者称该房全家人共有', '合建房', '多人共有', '多人拥有',\n '家人共有', '家人所有', '家庭共同所有', '家庭成员共有', '家庭成员所有', '小产权', '小产权房',\n '尚未办理房产证', '已购买,但未过户', '已购买,未过户', '待办', '户主所有', '房产证未办好',\n '才买,还无房产证', '拆迁分配,还没房产证', '无产权', '无房产证', '暂时没有房产证,已经开始办理了。',\n '未分家,全家所有', '未办', '未办理房产证', '未过户', '没办理房产证,未验收合格',\n '没时间和金钱过户,但是东西都在本人手里拿着', '没有产权', '没有产权,还没办理完手续', '没有房产证', '没有房产证。',\n '没有房照', '父亲共同所有', '祖宗', '自己房子被烧毁', '访户说是虹园新村大队分的房子', '还未过户',\n '还没办下房产证'], dtype=object)]\n[array(['0', '个人融资', '储蓄存款', '其他理财产品', '商业万能保险', '字画、茶壶', '家中有部分土地承包出去',\n '彩票', '投资开发区', '投资服务业、家具业', '民间借贷', '没有', '活期储蓄', '理财', '理财产品',\n '福利车票', '租房', '统筹', '网上理财', '老人家不清楚', '自己没有,儿女不清楚', '银行存款',\n '银行存款利息', '银行理财', '高利贷'], dtype=object)]\n" ], [ "from sklearn import metrics\nfrom sklearn import linear_model\nfrom sklearn import model_selection\n#=============\n#1、线性回归\n#=============\n\n#=============\n#1.1、普通线性回归\n#=============\nreg1=linear_model.LinearRegression()\n#交叉验证确定准确率,因为对回归值会采用取整操作,所以不用自带的交叉验证模型\n#mes1是未取整,mes2是四舍五入取整,mes3是向下取整,mes4是向上取整\nmes1=[]\nmes2=[]\nmes3=[]\nmes4=[]\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=reg1.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n e2=metrics.mean_squared_error(np.round(y_pred),y_test)\n e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)\n e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)\n mes1.append(e1)\n mes2.append(e2)\n mes3.append(e3)\n 
mes4.append(e4)\nprint('normal_liner:')\nprint(mes1)\nprint(np.mean(mes1))\nprint('-------------')\nprint(mes2)\nprint(np.mean(mes2))\nprint('-------------')\nprint(mes3)\nprint(np.mean(mes3))\nprint('-------------')\nprint(mes4)\nprint(np.mean(mes4))\nprint()\nprint()\n", "normal_liner:\n[0.5218405862145089, 0.4852965015965814, 0.5628881992065076, 0.5314535604995132, 0.4744568322991874, 0.5188017606112356, 0.5693976215767934, 0.4845385085246459, 0.4835650851423975, 0.5085809076421898]\n0.5140819563313561\n-------------\n[0.5794743429286608, 0.5506883604505632, 0.6420525657071339, 0.5894868585732165, 0.5193992490613266, 0.5794743429286608, 0.6245306633291614, 0.5569461827284106, 0.5526315789473685, 0.5852130325814536]\n0.5779897177235955\n-------------\n[0.83729662077597, 0.8347934918648311, 0.9299123904881101, 0.8560700876095119, 0.8811013767209012, 0.8811013767209012, 0.918648310387985, 0.8710888610763454, 0.9172932330827067, 0.8395989974937343]\n0.8766904746220998\n-------------\n[0.9086357947434293, 0.8760951188986232, 0.8961201501877347, 0.9098873591989988, 0.8347934918648311, 0.8648310387984981, 0.9173967459324155, 0.8448060075093867, 0.8245614035087719, 0.8671679197994987]\n0.8744295030442188\n\n\n" ], [ "#=============\n#1.2、L1的lasso回归\n#=============\nreg2=linear_model.Lasso()\n#交叉验证确定准确率,因为对回归值会采用取整操作,所以不用自带的交叉验证模型\n#mes1是未取整,mes2是四舍五入取整,mes3是向下取整,mes4是向上取整\nmes1=[]\nmes2=[]\nmes3=[]\nmes4=[]\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=reg2.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n e2=metrics.mean_squared_error(np.round(y_pred),y_test)\n e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)\n e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)\n mes1.append(e1)\n mes2.append(e2)\n mes3.append(e3)\n mes4.append(e4)\nprint('Lasso:')\nprint(mes1)\nprint(np.mean(mes1))\nprint('-------------')\nprint(mes2)\nprint(np.mean(mes2))\nprint('-------------')\nprint(mes3)\nprint(np.mean(mes3))\nprint('-------------')\nprint(mes4)\nprint(np.mean(mes4))\nprint()\nprint()\n\n", "Lasso:\n[0.6908454965905734, 0.6833639432743945, 0.6813263778114521, 0.6367774189797439, 0.6236189455246643, 0.6007060953680261, 0.6395669020321127, 0.6251623294394346, 0.6069819797102257, 0.6577096239839919]\n0.6446059112714619\n-------------\n[0.7409261576971214, 0.7409261576971214, 0.7321652065081352, 0.6795994993742178, 0.6695869837296621, 0.6307884856070087, 0.6683354192740926, 0.672090112640801, 0.6591478696741855, 0.7030075187969925]\n0.6896573410999338\n-------------\n[1.2853566958698373, 1.2528160200250313, 1.2690863579474343, 1.2403003754693367, 1.2690863579474343, 1.2841051314142677, 1.2715894868585733, 1.1364205256570714, 1.2418546365914787, 1.2969924812030076]\n1.2547608068983471\n-------------\n[0.8210262828535669, 0.8335419274092616, 0.8172715894868585, 0.7759699624530664, 0.7571964956195244, 0.7321652065081352, 0.7647058823529411, 0.7747183979974969, 0.7355889724310777, 0.8258145363408521]\n0.7837999253452781\n\n\n" ], [ "#=============\n#1.3、L2的岭回归\n#=============\nreg3=linear_model.Ridge()\n#交叉验证确定准确率,因为对回归值会采用取整操作,所以不用自带的交叉验证模型\n#mes1是未取整,mes2是四舍五入取整,mes3是向下取整,mes4是向上取整\nmes1=[]\nmes2=[]\nmes3=[]\nmes4=[]\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n 
y_pred=reg3.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n e2=metrics.mean_squared_error(np.round(y_pred),y_test)\n e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)\n e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)\n mes1.append(e1)\n mes2.append(e2)\n mes3.append(e3)\n mes4.append(e4)\nprint('Ridge:')\nprint(mes1)\nprint(np.mean(mes1))\nprint('-------------')\nprint(mes2)\nprint(np.mean(mes2))\nprint('-------------')\nprint(mes3)\nprint(np.mean(mes3))\nprint('-------------')\nprint(mes4)\nprint(np.mean(mes4))\nprint()\nprint()\n", "D:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.6104e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.64623e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.64179e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.41335e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.41695e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.42328e-17): result may not be accurate.\n overwrite_a=True).T\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\ridge.py:125: LinAlgWarning: Ill-conditioned matrix (rcond=1.56501e-17): result may not be accurate.\n overwrite_a=True).T\n" ], [ "\n#=============\n#1.4、逻辑回归\n#=============\nreg4=linear_model.LogisticRegression(penalty='l2',solver='saga') #正则会导致准确率下降,所以不正则\n#交叉验证确定准确率,因为对回归值会采用取整操作,所以不用自带的交叉验证模型\nmes1=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=reg4.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes1.append(e1)\nprint('LR:')\nprint(mes1)\nprint(np.mean(mes1))\nprint()\nprint()\n", "D:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. 
Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:460: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. 
Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\sag.py:334: ConvergenceWarning: The max_iter was reached which means the coef_ did not converge\n \"the coef_ did not converge\", ConvergenceWarning)\n" ], [ "from sklearn import metrics\nfrom sklearn import svm\nfrom sklearn import model_selection\n#=============\n# 2. SVM\n#=============\nclf2=svm.SVC() # gamma and C are left at their defaults; no tuning\n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf2.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('SVM:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "D:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\svm\\base.py:196: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.\n \"avoid this warning.\", FutureWarning)\n" ], [ "from sklearn import metrics\nfrom sklearn import neighbors\nfrom sklearn import model_selection\n#=============\n# 3. KNN\n#=============\n\nfor n in range(10,101,10): # the choice of K certainly matters\n clf3=neighbors.KNeighborsClassifier(n_neighbors=n) \n # cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\n mes=[]\n\n kf=model_selection.KFold(10,shuffle=True)\n for train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n\n y_pred=clf3.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\n print('KNN(n=%d):'%n)\n print(mes)\n print(np.mean(mes))\n print()\n print()\n", "KNN(n=10):\n[0.8235294117647058, 0.8135168961201502, 0.7609511889862328, 0.7897371714643304, 0.8923654568210263, 0.8473091364205256, 0.6871088861076345, 0.7884856070087609, 0.7807017543859649, 0.7142857142857143]\n0.7897991223365046\n\n\nKNN(n=20):\n[0.6370463078848561, 0.6833541927409261, 0.8172715894868585, 0.7033792240300375, 0.8035043804755945, 0.6458072590738423, 0.6921151439299124, 0.851063829787234, 0.6641604010025063, 0.62531328320802]\n0.7123015611619788\n\n\nKNN(n=30):\n[0.7271589486858573, 0.704630788485607, 0.6195244055068836, 0.7221526908635795, 0.6783479349186483, 0.7033792240300375, 0.704630788485607, 0.7284105131414268, 0.6090225563909775, 0.7330827067669173]\n0.6930340557275542\n\n\nKNN(n=40):\n[0.723404255319149, 0.688360450563204, 0.6370463078848561, 0.6182728410513142, 0.7008760951188986, 0.6307884856070087, 0.718397997496871, 0.7634543178973717, 0.7080200501253133, 0.7092731829573935]\n0.689789398402138\n\n\nKNN(n=50):\n[0.6770963704630788, 0.7434292866082604, 0.6770963704630788, 0.6645807259073843, 0.7108886107634543, 0.690863579474343, 0.655819774718398, 0.6921151439299124, 0.7280701754385965, 0.6466165413533834]\n0.688657657911989\n\n\nKNN(n=60):\n[0.6971214017521903, 0.769712140175219, 0.7259073842302879, 0.6795994993742178, 0.6533166458072591, 0.672090112640801, 0.688360450563204, 0.6533166458072591, 0.6967418546365914, 0.6441102756892231]\n0.6880276410676253\n\n\nKNN(n=70):\n[0.72090112640801, 0.7171464330413017, 0.7296620775969962, 0.655819774718398, 0.7496871088861077, 0.6583229036295369, 0.6132665832290363, 0.6871088861076345, 0.6892230576441103, 0.6553884711779449]\n0.6876526422439075\n\n\nKNN(n=80):\n[0.672090112640801, 0.6708385481852316, 0.672090112640801, 0.6458072590738423, 0.6445556946182729, 0.8135168961201502, 0.7171464330413017, 0.6545682102628285, 0.6641604010025063, 0.7218045112781954]\n0.6876578178863932\n\n\nKNN(n=90):\n[0.6307884856070087, 0.6508135168961201, 0.818523153942428, 0.7121401752190237, 0.6633291614518148, 0.6745932415519399, 0.6708385481852316, 0.7121401752190237, 0.6190476190476191, 0.7243107769423559]\n0.6876524854062566\n\n\nKNN(n=100):\n[0.7421777221526908, 0.7008760951188986, 0.6896120150187734, 0.6846057571964956, 0.704630788485607, 0.6545682102628285, 0.6733416770963705, 0.7596996245306633, 0.6829573934837093, 0.5839598997493735]\n0.687642918309541\n\n\n" ], [ "from sklearn import metrics\nfrom sklearn import naive_bayes\nfrom sklearn import model_selection\n\nX_new=X # standardization was tried here, but it made the results worse, so it is skipped\n#=============\n# 4. Naive Bayes\n#=============\nclf4=naive_bayes.GaussianNB() # multinomial naive Bayes would not run (it needs a positive-definite matrix or the like), so the Gaussian variant is used\n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X_new.iloc[train]\n y_train = Y.iloc[train]\n X_test = X_new.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf4.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('bayes:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "bayes:\n[5.035043804755945, 4.742177722152691, 4.693366708385482, 4.53566958698373, 5.8986232790988735, 3.892365456821026, 5.496871088861076, 5.166458072590738, 3.43734335839599, 5.62406015037594]\n4.85219792284215\n\n\n" ], [ "from sklearn import metrics\nfrom sklearn import tree\nfrom sklearn import model_selection\n#=============\n# 5. Decision tree\n#=============\n\nclf5=tree.DecisionTreeClassifier() \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf5.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "Tree:\n[0.8723404255319149, 0.9949937421777222, 1.0613266583229037, 0.9111389236545682, 1.016270337922403, 0.9787234042553191, 1.0713391739674594, 1.0200250312891115, 0.9586466165413534, 1.0150375939849625]\n0.9899841907647717\n\n\n" ], [ "from sklearn import metrics\nfrom sklearn import neural_network\nfrom sklearn import model_selection\n#=============\n# 6. MLP\n#=============\n\nclf6=neural_network.MLPClassifier(hidden_layer_sizes=(10,8,5,3,2),activation='logistic') # hidden-layer sizes chosen arbitrarily \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf6.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "Tree:\n[0.7033792240300375, 0.6445556946182729, 0.7221526908635795, 0.6445556946182729, 0.7384230287859824, 0.7384230287859824, 0.6846057571964956, 0.6420525657071339, 0.7117794486215538, 0.6466165413533834]\n0.6876543674580694\n\n\n" ], [ "from sklearn import metrics\nfrom sklearn import ensemble\nfrom sklearn import model_selection\n#=============\n# 7. Random forest\n#=============\n\nclf7=ensemble.RandomForestRegressor(n_estimators=20,n_jobs=-1) \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf7.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "Tree:\n[0.49832603254067587, 0.48535669586983726, 0.4775719649561953, 0.5056007509386734, 0.49263767209011255, 0.527872340425532, 0.5293898623279099, 0.5403754693366708, 0.528859649122807, 0.4932111528822056]\n0.5079201590490618\n\n\n" ], [ "#=============\n# rank features by importance\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\na=ensemble.RandomForestRegressor(n_estimators=20).fit(X,Y).feature_importances_\ntemp=np.argsort(a) # returns the sorted indices\n\na=list(a)\na.sort()\n\nb=[]\nfor i in temp:\n b.append(X.columns[i])\n\nplt.figure(figsize=(10,40))\nplt.grid()\nplt.barh(b,a,)\n\n# Findings on the features:\n# 1. The three encoded columns edu_other, property_other and invest_other matter little, and none of the property/invest columns seem important\n# 2. Among the top ten, equity and depresion reflect social attitude and state of mind;\n# class, family_income and floor_area reflect wealth;\n# birth, marital_1st, weight_jin and country reflect objective status\n# why survey_day also has an influence is the most puzzling question\n \n ", "_____no_output_____" ], [ "from sklearn import metrics\nfrom sklearn import ensemble\nfrom sklearn import model_selection\n#=============\n# 8. GBDT\n#=============\nclf8=ensemble.GradientBoostingRegressor(max_features=20) # parameters must be set, otherwise it runs far too slowly\n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf8.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "Tree:\n[0.47633642591899, 0.4020312917792966, 0.5166238715526613, 0.4585656966229876, 0.46647299843701556, 0.4615686153095699, 0.5101788848159109, 0.4242939227167187, 0.4411693407003206, 0.5124651959031992]\n0.46697062437566705\n\n\n" ], [ "#=============\n# rank features by importance\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\na=ensemble.GradientBoostingClassifier().fit(X,Y).feature_importances_\ntemp=np.argsort(a) # returns the sorted indices\n\na=list(a)\na.sort()\n\nb=[]\nfor i in temp:\n b.append(X.columns[i])\n\nplt.figure(figsize=(10,40))\nplt.grid()\nplt.barh(b,a,)", "_____no_output_____" ], [ "from sklearn import metrics\nimport xgboost\nfrom sklearn import model_selection\n#=============\n# 9. XGBoost\n#=============\n\nclf9=xgboost.XGBRegressor() \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf9.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "D:\\Python\\Anaconda3\\lib\\site-packages\\xgboost\\core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "#=============\n# rank features by importance\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\na=xgboost.XGBRegressor().fit(X,Y).feature_importances_\ntemp=np.argsort(a) # returns the sorted indices\n\na=list(a)\na.sort()\n\nb=[]\nfor i in temp:\n b.append(X.columns[i])\n\nplt.figure(figsize=(10,40))\nplt.grid()\nplt.barh(b,a,)", "_____no_output_____" ], [ "#=============\n# rank features by importance\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\na=xgboost.XGBRegressor().fit(X,Y).feature_importances_\ntemp=np.argsort(a) # returns the sorted indices\n\na=list(a)\na.sort()\n\nb=[]\nfor i in temp:\n b.append(X.columns[i])\n\nplt.figure(figsize=(10,40))\nplt.grid()\nplt.barh(b,a,)", "_____no_output_____" ], [ "from sklearn import metrics\nimport lightgbm\nfrom sklearn import model_selection\n# guard against a LightGBM runtime error\nimport os\nos.environ[\"KMP_DUPLICATE_LIB_OK\"]=\"TRUE\"\n\n#=============\n# 10. LightGBM\n#=============\n\nclf10=lightgbm.LGBMRegressor() \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test = X.iloc[test]\n y_test = Y.iloc[test]\n \n y_pred=clf10.fit(X_train,y_train).predict(X_test)\n e1=metrics.mean_squared_error(y_pred,y_test)\n mes.append(e1)\nprint('Tree:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()\n", "Tree:\n[0.46138956396442127, 0.4839184463201312, 0.4732814344369148, 0.428570037681299, 0.4760503906588419, 0.4625872568518952, 0.43628526690552966, 0.49643610942149685, 0.5001793091860733, 0.4640331618613549]\n0.46827309772879583\n\n\n" ], [ "#=============\n# rank features by importance\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\na=lightgbm.LGBMRegressor().fit(X,Y).feature_importances_\ntemp=np.argsort(a) # returns the sorted indices\n\na=list(a)\na.sort()\n\nb=[]\nfor i in temp:\n b.append(X.columns[i])\n\nplt.figure(figsize=(10,40))\nplt.grid()\nplt.barh(b,a,)", "_____no_output_____" ], [ "df1=pd.read_csv(r'./data/happiness_test_complete.csv',encoding='GB2312',index_col='id')\n\n \n \n# convert the four columns; afterwards df1 is entirely numeric\ndf1[\"survey_month\"] = df1[\"survey_time\"].transform(lambda line:line.split(\" \")[0].split(\"/\")[1]).astype(\"int64\") # survey month: split date and time on the space; item 1 of the date is the month\ndf1[\"survey_day\"] = df1[\"survey_time\"].transform(lambda line:line.split(\" \")[0].split(\"/\")[2]).astype(\"int64\") # survey day\ndf1[\"survey_hour\"] = df1[\"survey_time\"].transform(lambda line:line.split(\" \")[1].split(\":\")[0]).astype(\"int64\") # survey hour\ndf1=df1.drop(columns='survey_time')\n\n\n\ndef temp1(a):\n if a not in enc1.categories_[0]:\n return 0\n else:\n return a\ndf1['edu_other']=enc1.transform(df1['edu_other'].transform(temp1).transform(lambda x:str(x)).values.reshape(-1,1))\n\ndef temp2(a):\n if a not in enc2.categories_[0]:\n return 0\n else:\n return a\ndf1['property_other']=enc2.transform(df1['property_other'].transform(temp2).transform(lambda x:str(x)).values.reshape(-1,1))\n\ndef temp3(a):\n if a not in enc3.categories_[0]:\n return 0\n else:\n return a\ndf1['invest_other']=enc3.transform(df1['invest_other'].transform(temp3).transform(lambda x:str(x)).values.reshape(-1,1)) # temp3 guards against categories unseen by enc3\n\n\n\n# build X_test\nX_test=df1.fillna(0)\nimport xgboost\n# result 1\ny_test=xgboost.XGBRegressor().fit(X,Y).predict(X_test)\ndf1_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')\ndf1_final.to_csv(r'df1_final.csv')\n# result 1, rounded to the nearest integer\ndf1_final_round=pd.DataFrame({'id':X_test.index,'happiness':np.round(y_test)}).set_index('id')\ndf1_final_round.to_csv(r'df1_final.csv')\n", "D:\\Python\\Anaconda3\\lib\\site-packages\\xgboost\\core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\nD:\\Python\\Anaconda3\\lib\\site-packages\\xgboost\\core.py:588: FutureWarning: Series.base is deprecated and will be removed in a future version\n data.base is not None and isinstance(data, np.ndarray) \\\n" ], [ "# result 2\n\nfrom sklearn import metrics\nimport xgboost\nfrom sklearn import model_selection\nfrom sklearn.externals import joblib\n#=============\n# xgboost_modified\n#=============\n\nclf_xgboost_modified=xgboost.XGBRegressor(max_depth=4,min_child_weight=5,gamma=0,subsample=0.8,colsample_bytree=0.75,reg_alpha=5,reg_lambda=0.1) \n# cross-validate accuracy by hand: the predictions will be rounded, so the built-in CV routine is not used\nmes=[]\ni=0\n\nkf=model_selection.KFold(10,shuffle=True)\nfor train,test in kf.split(X):\n X_train = X.iloc[train]\n y_train = Y.iloc[train]\n X_test1 = X.iloc[test]\n y_test1 = Y.iloc[test]\n \n clf_xgboost_modified.fit(X_train,y_train)\n y_pred=clf_xgboost_modified.predict(X_test1)\n e1=metrics.mean_squared_error(y_pred,y_test1)\n mes.append(e1)\n joblib.dump(clf_xgboost_modified,filename='xgboost_%d.pkl'%i)\n \n y_test=clf_xgboost_modified.predict(X_test)\n\n df2_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')\n# df2_final.to_csv('df2_xgboost_%d.csv'%i)\n \n i+=1\nprint('clf_xgboost_modified:')\nprint(mes)\nprint(np.mean(mes))\nprint()\nprint()", "D:\\Python\\Anaconda3\\lib\\site-packages\\xgboost\\core.py:587: FutureWarning: Series.base is deprecated and will be removed in a future version\n if getattr(data, 'base', None) is not None and \\\n" ], [ "# tune the final model\nclf10=lightgbm.LGBMRegressor(metric='l2') # the default metric is l2 for regression\n\nparam_test = {\n'max_depth':np.array([9]),\n'min_child_weight':np.array([0.0001]),\n'min_split_gain':np.array([0.4]),\n'subsample':np.array([0.5]),\n'colsample_bytree':np.array([1]), \n'reg_alpha':np.array([1e-05]),\n'reg_lambda':np.array([0.0001]) ,\n'learning_rate':np.array([0.1]),\n}\n\nclf=model_selection.GridSearchCV(clf10,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')\nclf.fit(X_train,y_train)\njoblib.dump(clf_xgboost_modified,filename='xgboost_%d.pkl'%i)\nprint(\"clf.cv_results_['mean_test_score']:=%s\"%clf.cv_results_['mean_test_score'])\nprint(clf.best_score_)\nprint(clf.best_params_)\n\n# conclusion: {'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}", "clf.cv_results_['mean_test_score']:=[-0.47885624]\n-0.47885624435852536\n{'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}\n" ], [ "# tune the final model\nclf8=ensemble.GradientBoostingRegressor(loss='ls') \n\nparam_test = { \n 'max_depth':np.array([2]),\n 'min_weight_fraction_leaf':np.array([0.002]), \n 'min_impurity_split':np.array([0.0001]),\n 'subsample':np.array([0.96]),\n 'max_features':np.array([0.88]),\n 'n_estimators':np.array([80]), \n 'learning_rate':np.array([0.2]), \n\n}\n\nclf=model_selection.GridSearchCV(clf8,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')\nclf.fit(X_train,y_train)\n\nprint(\"clf.cv_results_['mean_test_score']:=%s\"%clf.cv_results_['mean_test_score'])\nprint(clf.best_score_)\nprint(clf.best_params_)\n\n# conclusion: {'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}", "D:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. 
Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. 
Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. 
Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. 
Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\nD:\\Python\\Anaconda3\\lib\\site-packages\\sklearn\\tree\\tree.py:283: DeprecationWarning: The min_impurity_split parameter is deprecated. Its default value will change from 1e-7 to 0 in version 0.23, and it will be removed in 0.25. Use the min_impurity_decrease parameter instead.\n DeprecationWarning)\n" ], [ "# tune the final model\n\nparam_test = { \n 'min_samples_split':np.array([4]), \n 'min_weight_fraction_leaf':np.array([0.01]), \n 'min_impurity_decrease':np.array([0]), \n 'n_estimators':[150], \n 'max_features':[0.8], \n}\n\nclf=model_selection.GridSearchCV(clf7,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')\nclf.fit(X_train,y_train)\n\nprint(\"clf.cv_results_['mean_test_score']:=%s\"%clf.cv_results_['mean_test_score'])\nprint(clf.best_score_)\nprint(clf.best_params_)\n\n# conclusion: {'max_features': 0.8, 'min_impurity_decrease': 0, 'min_samples_split': 4, 'min_weight_fraction_leaf': 0.01, 'n_estimators': 150}", "clf.cv_results_['mean_test_score']:=[-0.49734107]\n-0.49734106661994726\n{'max_features': 0.8, 'min_impurity_decrease': 0, 'min_samples_split': 4, 'min_weight_fraction_leaf': 0.01, 'n_estimators': 150}\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0703218fc5afc3fffe680cbce4c2d836a1b445b
5,986
ipynb
Jupyter Notebook
src/convertor.ipynb
liruilong940607/inpainting.siggraph2017.tf
53d3991a91e1e70739e0b002b5873af11650e530
[ "MIT" ]
1
2019-11-10T09:21:44.000Z
2019-11-10T09:21:44.000Z
src/convertor.ipynb
liruilong940607/inpainting.siggraph2017.tf
53d3991a91e1e70739e0b002b5873af11650e530
[ "MIT" ]
null
null
null
src/convertor.ipynb
liruilong940607/inpainting.siggraph2017.tf
53d3991a91e1e70739e0b002b5873af11650e530
[ "MIT" ]
null
null
null
30.385787
122
0.540762
[ [ [ "import numpy as np\nfrom torch.utils.serialization import load_lua\n\n\n# load Completion Network\nmodel_path = './completionnet_places2.t7'\ndata = load_lua(model_path)\nmodel = data.model\nmodel.evaluate()\ndatamean = data.mean\nprint (datamean)", "tensor([ 0.4560, 0.4472, 0.4155])\n" ], [ "i = 48\nvalue = model.modules[i].weight.numpy()\nprint (i, model.modules[i], np.max(value), np.min(value), np.mean(value))", "48 nn.SpatialConvolution(32 -> 3, 3x3, 1, 1, 1, 1) 1.2813634 -1.2358687 -0.0033915918\n" ], [ "print (model.modules[0].weight.numpy().shape)\n#print (model.modules)\nfor i in range(len(model.modules)):\n #value = model.modules[i].weight.numpy()\n print (i, model.modules[i])#, np.max(value), np.min(value), np.mean(value))\n# model.modules[i].weight.numpy()\n#print (model.modules[1].weight, model.modules[1].bias, model.modules[1].running_var, model.modules[1].running_mean)", "(64, 4, 5, 5)\n0 nn.SpatialConvolution(4 -> 64, 5x5, 1, 1, 2, 2)\n1 nn.SpatialBatchNormalization\n2 nn.ReLU\n3 nn.SpatialConvolution(64 -> 128, 3x3, 2, 2, 1, 1)\n4 nn.SpatialBatchNormalization\n5 nn.ReLU\n6 nn.SpatialConvolution(128 -> 128, 3x3, 1, 1, 1, 1)\n7 nn.SpatialBatchNormalization\n8 nn.ReLU\n9 nn.SpatialConvolution(128 -> 256, 3x3, 2, 2, 1, 1)\n10 nn.SpatialBatchNormalization\n11 nn.ReLU\n12 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 1, 1, 1, 1)\n13 nn.SpatialBatchNormalization\n14 nn.ReLU\n15 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 1, 1, 1, 1)\n16 nn.SpatialBatchNormalization\n17 nn.ReLU\n18 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 2, 2, 2, 2)\n19 nn.SpatialBatchNormalization\n20 nn.ReLU\n21 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 4, 4, 4, 4)\n22 nn.SpatialBatchNormalization\n23 nn.ReLU\n24 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 8, 8, 8, 8)\n25 nn.SpatialBatchNormalization\n26 nn.ReLU\n27 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 16, 16, 16, 16)\n28 nn.SpatialBatchNormalization\n29 nn.ReLU\n30 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 1, 1, 1, 1)\n31 nn.SpatialBatchNormalization\n32 nn.ReLU\n33 nn.SpatialDilatedConvolution(256 -> 256, 3x3, 1, 1, 1, 1, 1, 1)\n34 nn.SpatialBatchNormalization\n35 nn.ReLU\n36 nn.SpatialFullConvolution(256 -> 128, 4x4, 2, 2, 1, 1)\n37 nn.SpatialBatchNormalization\n38 nn.ReLU\n39 nn.SpatialConvolution(128 -> 128, 3x3, 1, 1, 1, 1)\n40 nn.SpatialBatchNormalization\n41 nn.ReLU\n42 nn.SpatialFullConvolution(128 -> 64, 4x4, 2, 2, 1, 1)\n43 nn.SpatialBatchNormalization\n44 nn.ReLU\n45 nn.SpatialConvolution(64 -> 32, 3x3, 1, 1, 1, 1)\n46 nn.SpatialBatchNormalization\n47 nn.ReLU\n48 nn.SpatialConvolution(32 -> 3, 3x3, 1, 1, 1, 1)\n49 nn.Sigmoid\n" ], [ "from layer import *\nimport tensorflow as tf\n", "_____no_output_____" ], [ "is_training = tf.placeholder(tf.bool, [])\nx = tf.placeholder(tf.float32, [8, 256, 256, 4])\nwith tf.variable_scope('generator2'):\n with tf.variable_scope('conv1'):\n x = conv_layer(x, [5, 5, 4, 64], 1, \n initializer=tf.constant_initializer(model.modules[0].weight.numpy()))\n x = batch_normalize(x, is_training,\n initializer_beta=tf.constant_initializer(model.modules[1].bias.numpy()),\n initializer_scale=tf.constant_initializer(model.modules[1].weight.numpy()), \n initializer_mean=tf.constant_initializer(model.modules[1].running_mean.numpy()), \n initializer_var=tf.constant_initializer(model.modules[1].running_var.numpy())\n )\n x = tf.nn.relu(x)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0703d473ffdc046d36c09a69bfdd8e4b4cd4fb1
4,230
ipynb
Jupyter Notebook
Clase01.ipynb
dvsivle/sitio-web
ca34a869c8e1b0d4380bec26c9f16b230712b684
[ "MIT" ]
null
null
null
Clase01.ipynb
dvsivle/sitio-web
ca34a869c8e1b0d4380bec26c9f16b230712b684
[ "MIT" ]
null
null
null
Clase01.ipynb
dvsivle/sitio-web
ca34a869c8e1b0d4380bec26c9f16b230712b684
[ "MIT" ]
null
null
null
20.634146
224
0.418676
[ [ [ "<a href=\"https://colab.research.google.com/github/dvsivle/sitio-web/blob/develop/Clase01.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "a = [1,2,3,4,5] #lista\nb = (1,2,3,4,5) #tuplas", "_____no_output_____" ], [ "print(a)\nprint(b)", "_____no_output_____" ], [ "a[0] = 99\nprint(a)", "_____no_output_____" ], [ "b[0] = 99\nprint(b)", "_____no_output_____" ], [ "import numpy as np\nimport skimage #scikit-image\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "im = skimage.data.astronaut()", "_____no_output_____" ], [ "im.shape", "_____no_output_____" ], [ "plt.imshow(im)", "_____no_output_____" ], [ "plt.title(\"Red\")\nplt.imshow(im[:,:,0], cmap='gray')\nplt.colorbar()", "_____no_output_____" ], [ "plt.title(\"Green\")\nplt.imshow(im[:,:,1], cmap='gray')", "_____no_output_____" ], [ "plt.title(\"Blue\")\nplt.imshow(im[:,:,2], cmap='gray')", "_____no_output_____" ], [ "plt.imshow(im[256:,256:,0], cmap='gray')", "_____no_output_____" ], [ "plt.imshow(np.random.randn(100,100), cmap='gray')\nplt.colorbar()", "_____no_output_____" ], [ "plt.imshow(np.random.randn(100,100), cmap='inferno')\nplt.colorbar()\n", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0703f0ee3b81491db1233399c16c65191da4f2b
6,823
ipynb
Jupyter Notebook
Week 5/afae22-exercise-05.ipynb
carlomarxdk/algorithmic-fairness
34db31915b9d2e13f86a68ac1f69e558467b88c1
[ "MIT" ]
null
null
null
Week 5/afae22-exercise-05.ipynb
carlomarxdk/algorithmic-fairness
34db31915b9d2e13f86a68ac1f69e558467b88c1
[ "MIT" ]
null
null
null
Week 5/afae22-exercise-05.ipynb
carlomarxdk/algorithmic-fairness
34db31915b9d2e13f86a68ac1f69e558467b88c1
[ "MIT" ]
null
null
null
50.169118
561
0.625971
[ [ [ "# Algorithmic Fairness, Accountability, and Ethics, Spring 2022\n# Exercise 5", "_____no_output_____" ], [ "## Task 0 (Setup)\n\nWe use the same dataset as in week 3 and 4. If you missed to install the module, please carry out the installation tasks at <https://github.com/zykls/folktables#basic-installation-instructions>.\n\nAfter successful installation, you should be able to run the following code to generate a prediction task.\nTo make your life easier, we made the `BasicProblem`-magic from the `folktables` package (see exercises of week 3) explicit in this task.\nThis way, you can get access to different encodings of the data. \n\n**Note**: Some Windows users could not run the line `acs_data = data_source.get_data(states=[\"CA\"], download=True)`. The dataset is available as a zip file on learnIT under week 3. The direct link is <https://learnit.itu.dk/mod/resource/view.php?id=155305>. Unzip it in the notebook's location, and set `download` to `False` in the code below.", "_____no_output_____" ] ], [ [ "from folktables.acs import adult_filter\nfrom folktables import ACSDataSource\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndata_source = ACSDataSource(survey_year='2018', horizon='1-Year', survey='person')\nacs_data = data_source.get_data(states=[\"CA\"], download=True)\n\nfeature_names = ['AGEP', # Age\n \"CIT\", # Citizenship status\n 'COW', # Class of worker\n \"ENG\", # Ability to speak English\n 'SCHL', # Educational attainment\n 'MAR', # Marital status\n \"HINS1\", # Insurance through a current or former employer or union\n \"HINS2\", # Insurance purchased directly from an insurance company\n \"HINS4\", # Medicaid\n \"RAC1P\", # Recoded detailed race code\n 'SEX']\n\ntarget_name = \"PINCP\" # Total person's income\n\ndef data_processing(data, features, target_name:str, threshold: float = 35000):\n df = data\n ### Adult Filter (STARTS) (from Foltktables)\n df = df[~df[\"SEX\"].isnull()]\n df = df[~df[\"RAC1P\"].isnull()]\n df = df[df['AGEP'] > 16]\n df = df[df['PINCP'] > 100]\n df = df[df['WKHP'] > 0]\n df = df[df['PWGTP'] >= 1]\n ### Adult Filter (ENDS)\n ### Groups of interest\n sex = df[\"SEX\"].values\n ### Target\n df[\"target\"] = df[target_name] > threshold\n target = df[\"target\"].values\n df = df[features + [\"target\", target_name]] ##we want to keep df before one_hot encoding to make Bias Analysis\n df_processed = df[features].copy()\n cols = [ \"HINS1\", \"HINS2\", \"HINS4\", \"CIT\", \"COW\", \"SCHL\", \"MAR\", \"SEX\", \"RAC1P\"]\n df_processed = pd.get_dummies(df_processed, prefix=None, prefix_sep='_', dummy_na=False, columns=cols, drop_first=True)\n df_processed = pd.get_dummies(df_processed, prefix=None, prefix_sep='_', dummy_na=True, columns=[\"ENG\"], drop_first=True)\n return df_processed, df, target, sex\n\ndata, data_original, target, group = data_processing(acs_data, feature_names, target_name)\n\nX_train, X_test, y_train, y_test, group_train, group_test = train_test_split(\n data, target, group, test_size=0.2, random_state=0)", "_____no_output_____" ] ], [ [ "# Task 1 (Decision tree)\n\n1. Train a decision tree classifier on the training dataset. (You can work on the original dataset or on the one-hot encoded one.) The following parameter choices worked well in our setup: `(DecisionTreeClassifier(min_samples_split = 0.01, min_samples_leaf= 0.01, max_features=\"auto\", max_depth = 15, criterion = \"gini\", random_state = 0))` Report on its accuracy. 
Visualize the tree using `plot_tree` from `sklearn`. Which parameters can you change to the adapt the size of the tree? Try to find parameters that make the tree easier to understand.\n2. For two training examples, explain their classification given the decision tree.\n3. Compute feature importance as shown in the lecture. Which features are most important?\n4. Compute permuted feature importance using sklearn as shown in the lecture. How does feature importance change?\n5. Provide a counterfactual for a feature vector that is predicted negatively. Compare to the counterfactual for logistic regression (last week's exercises). Is it a counterfactual in both models?\n", "_____no_output_____" ], [ "# Task 2 (Black-box model)\n\n1. Train a black-box model classifier (for example, use a random forest, a gradient-boosted decision tree, an SVM, or a Neural Network). Report on its accuracy. If you have used a tree data structure such as RF or gradient-boosted decision trees, report on the feature importance as in Task 1.\n2. Both for the decision tree and the black-box classifier, use the `shap` module to explain predictions. Contrast the two models to each other: What are similarities, how do they differ? As shown in the lecture, provide a summary plot, a dependence plot, a force plot for a negatively/positively predicted feature vector, and summary plot on the interaction values.\n3. Reflect on the explanations: How does the _decision tree_'s black-box explanation relate to its white-box explanation? Which classifier would you prefer when deploying a model as part of the machine learning pipeline? ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0703fecc9bbf486afa980456f0889e576b6f0b7
42,016
ipynb
Jupyter Notebook
solutions/mid1/submissions/khandurbhanuprakashrahul_142394_6241866_PortfolioTheory_Mid Term.ipynb
tulyu96/finm-portfolio-2021
6b26e235323064f2bf0a5bbc81f922b2150b75ef
[ "MIT" ]
null
null
null
solutions/mid1/submissions/khandurbhanuprakashrahul_142394_6241866_PortfolioTheory_Mid Term.ipynb
tulyu96/finm-portfolio-2021
6b26e235323064f2bf0a5bbc81f922b2150b75ef
[ "MIT" ]
null
null
null
solutions/mid1/submissions/khandurbhanuprakashrahul_142394_6241866_PortfolioTheory_Mid Term.ipynb
tulyu96/finm-portfolio-2021
6b26e235323064f2bf0a5bbc81f922b2150b75ef
[ "MIT" ]
null
null
null
34.839138
630
0.462062
[ [ [ "1 Short Answer (25pts)\n\n1. (5pts) True or False. (And explain your reason.)Mean-variance optimization goes long the highest Sharpe-Ratio assets and shorts the lowestSharpe-ratio assets.\n\nA. False: Not necessarily. MV optimization optimizes for the total portfolio return vs the portfolio variance. The optimization is driven by the marginal correlation of a given asset with the rest of the assets and individual security level sharpe is not of high importance. If the asset with the lowest Sharpe were to bring in a lot of diversification benefit - the MV portfolio could possibly go long on ssuch an asset\n\n2. (5pts) True or False. (And explain your reason.)Investing in an LETF makes more sense for a long-term horizon than a short-term horizon.\n\nA. False: Investing in Leverage ETF makes more sense for short term instead of long term. While LETF can replicate quite closely for a day to day basis - Over the long term it is prone to calculation errors (due to the effect of compounding) and will not replicate the index over a longer time period \n\nexample [as seen in lec notes] : Both of the below are not equivalent\n\nL return = w (1+r1)(1+r2)......-w\nLETF = (1+wr1)(1+wr2).......-1\n\n\n\n3. (5pts) This week ProShares launches BITO on the NYSE. The ETF holds Bitcoin futures con-tracts. Suppose in a year from now, we want to try to replicate BITO using SPY and IEF asregressors in a LFD. Because BITO will only have a year of data, we do not trust that we willhave a good estimate of the mean return.Do you suggest that we (in a year) estimate the regression with an intercept or without anintercept? Why?\n\nA. We should estimate the regression with an intercept. Given we do not have enough data and while we only have the two regressors available for testing (SPY, IEF) - it is quite possible that the variations in BITO arent explained by these two. Using an intercept will allow for the regression to capture a constant that will help in explaining the variation. If it were so that the two regressors explain the variation of BITO well, we should eventually end up with a small (near zero ) intercept in which case the result will be nearer to a regression without an intercept\n\n4. (5pts) Is HDG effective at tracking HFRI in-sample? And out of sample?\n\nA. HDG is only effective at tracking HFRI in-sample as the estimators were derived from the LFD of the in-sample data space. On applying the same estimators to data OOS, the constant estimators do not replicate the movements well. This results in using a rolling window estimation\n\n5. (5pts) A hedge fund claims to beat the market by having a very high alpha. After regressingthe hedge fund returns on the 6 Merrill-Lynch style factors, you find the alpha to be negative.Explain why this discrepancy can happen.\n\nA. This can happen if one of the regressors used has a strong correlation to the constant (Something like a constant return, ultra low volatility - for example UST maybe). The alpha of the HF will be captured in the beta of this regressor which could result in the negative alpha \n", "_____no_output_____" ], [ "2 Allocation (25 pts)\n\nConsider the Merrill-Lynch Style Factors found in “prosharesanalysisdata.xlsx”, sheet “merrillfactors”.We will use “USGG3M Index” as the risk-free rate. Subtract it from the other 5 columns, and proceedwith those 5 risky assets.\n\n1. (5pts) What are the weights of the tangency portfolio,wtan?\n\n2. 
(5pts) What are the weights of the optimal portfolio,w∗, with a targeted excess mean return of.02 per month?Is the optimal portfolio,w∗, invested in the risk-free rate?\n\n3. (5pts) Report the mean, volatility, and Sharpe ratio of the optimized portfolio. Annualize allthree statistics.\n\n4. (5pts) Re-calculate the optimal portfolio,w∗with target excess mean of .02 per month. Butthis time only use data through 2018 in doing the calculation. Calculate the return in 2019-2021based on those optimal weights.Report the mean, volatility, and Sharpe ratio of the 2019-2021 performance.\n\n5. (5pts) Suppose that instead of optimizing these 5 risky assets, we optimized 5 commodity futures:oil, coffee, cocoa, lumber, cattle, and gold.Do you think the out-of-sample fragility problem would be better or worse than what we haveseen optimizing equities?No calculation is needed for this question–we just want a conceptual (though specific) answer.\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom statsmodels.regression.rolling import RollingOLS\nfrom sklearn.linear_model import LinearRegression\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "# reading data\n\nfactor_data = pd.read_excel('/Users/rkb/Desktop/PortfolioT/data/proshares_analysis_data.xlsx', sheet_name = 'merrill_factors')\nfactor_data = factor_data.set_index('date')\nfactor_data.head()\n", "_____no_output_____" ], [ "risky_data = factor_data.subtract(factor_data[\"USGG3M Index\"], axis=0)\nrisky_data = risky_data.drop(columns=[\"USGG3M Index\"])\nrisky_data.head()", "_____no_output_____" ], [ "#calc tangency weights\n\ndef tangency_weights(returns,dropna=True,scale_cov=1):\n if dropna:\n returns = returns.dropna()\n\n covmat_full = returns.cov()\n covmat_diag = np.diag(np.diag(covmat_full))\n covmat = scale_cov * covmat_full + (1-scale_cov) * covmat_diag\n\n weights = np.linalg.solve(covmat,returns.mean())\n weights = weights / weights.sum()\n\n return pd.DataFrame(weights, index=returns.columns)", "_____no_output_____" ], [ "#1. (5pts) What are the weights of the tangency portfolio,wtan?\n# assumption = sum of weights = 1, all of the portfolio invested in risky assets and nothing in risk free\n\nweight_v = tangency_weights(risky_data)\nweight_v", "_____no_output_____" ], [ "#2. 
(5pts) What are the weights of the optimal portfolio,w∗, with a targeted excess mean return of.02 per month?\n#Is the optimal portfolio,w∗, invested in the risk-free rate?\n\ndef compute_tangency(df_tilde, diagonalize_Sigma=False):\n\n \"\"\"Compute tangency portfolio given a set of excess returns.\n \n\n Also, for convenience, this returns the associated vector of average\n\n returns and the variance-covariance matrix.\n\n\n Parameters\n\n ----------\n\n diagonalize_Sigma: bool\n\n When `True`, set the off diagonal elements of the variance-covariance\n\n matrix to zero.\n\n \"\"\"\n\n Sigma = df_tilde.cov()\n\n # N is the number of assets\n\n N = Sigma.shape[0]\n\n Sigma_adj = Sigma.copy()\n\n if diagonalize_Sigma:\n\n Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj))\n\n\n\n mu_tilde = df_tilde.mean()\n\n Sigma_inv = np.linalg.inv(Sigma_adj)\n\n weights = Sigma_inv @ mu_tilde / (np.ones(N) @ Sigma_inv @ mu_tilde)\n\n # For convenience, I'll wrap the solution back into a pandas.Series object.\n\n omega_tangency = pd.Series(weights, index=mu_tilde.index)\n\n return omega_tangency, mu_tilde, Sigma_adj\n\n\ndef target_mv_portfolio(df_tilde, target_return=0.02, diagonalize_Sigma=False):\n\n \"\"\"Compute MV optimal portfolio, given target return and set of excess returns.\n\n Parameters\n\n ----------\n\n diagonalize_Sigma: bool\n\n When `True`, set the off diagonal elements of the variance-covariance\n\n matrix to zero.\n\n \"\"\"\n\n omega_tangency, mu_tilde, Sigma = compute_tangency(df_tilde, diagonalize_Sigma=diagonalize_Sigma)\n\n Sigma_adj = Sigma.copy()\n\n if diagonalize_Sigma:\n\n Sigma_adj.loc[:,:] = np.diag(np.diag(Sigma_adj))\n\n Sigma_inv = np.linalg.inv(Sigma_adj)\n\n N = Sigma_adj.shape[0]\n\n delta_tilde = ((np.ones(N) @ Sigma_inv @ mu_tilde)/(mu_tilde @ Sigma_inv @ mu_tilde)) * target_return\n\n omega_star = delta_tilde * omega_tangency\n\n return omega_star, mu_tilde, Sigma_adj\n\nomega_star, mu_tilde, Sigma = target_mv_portfolio(risky_data)\n\nomega_star_df = omega_star.to_frame('MV Portfolio Weights')\n\nomega_star_df\n\n\n\n#A. Weights below. optimal portfolio will short the risk free rate. \n#As the excess return for the tangential portfolio (estimated previously is <2%)and sum(MV weights) >1", "_____no_output_____" ], [ "mu_tilde.transpose()*omega_star", "_____no_output_____" ], [ "#3. (5pts) Report the mean, volatility, and Sharpe ratio of the optimized portfolio. \n#Annualize allthree statistics.\n\ndef portfolio_stats(omega, mu_tilde, Sigma, annualize_fac):\n # Mean\n mean = (mu_tilde @ omega) * annualize_fac\n\n # Volatility\n vol = np.sqrt(omega @ Sigma @ omega) * np.sqrt(annualize_fac)\n\n\n # Sharpe ratio\n sharpe_ratio = mean / vol\n\n return round(pd.DataFrame(data = [mean, vol, sharpe_ratio], \n index = ['Mean', 'Volatility', 'Sharpe'], \n columns = ['Portfolio Stats']), 4)\n\nportfolio_stats(omega_star, mu_tilde, Sigma, 12)", "_____no_output_____" ], [ "#4. (5pts) Re-calculate the optimal portfolio,w∗with target excess mean of .02 per month. \n#Butthis time only use data through 2018 in doing the calculation. 
\n#Calculate the return in 2019-2021based on those optimal weights.Report the mean, volatility, and Sharpe ratio of the 2019-2021 performance.\n\nr_data = pd.read_excel('/Users/rkb/Desktop/PortfolioT/data/proshares_analysis_data.xlsx', sheet_name = 'merrill_factors')\npd.to_datetime(r_data['date'])\nr_data.set_index('date',inplace=True)\nr_data.head()", "_____no_output_____" ], [ "t_data = r_data.loc('year'<=2018)\nt_data.tail()", "_____no_output_____" ] ], [ [ "#5. (5pts) Suppose that instead of optimizing these 5 risky assets, \nwe optimized 5 commodity futures:oil, coffee, cocoa, lumber, cattle, and gold.\nDo you think the out-of-sample fragility problem would be better or worse than what we haveseen optimizing equities?\nNo calculation is needed for this question–we just want a conceptual (though specific) answer.\n\n#A. The correlation seen in equities is larger than the correlation observed in commodities in general (from classroom discussions). With lower correlation, we achieve higher diversification for the optimized portfolio and hence this should be better off than optimizing equities \n", "_____no_output_____" ], [ "3 Hedging & Replication (20pts)\n\nContinue to use the same data file from the previous problem.2\nSuppose we want to invest in EEM, but hedge out SPY. Do this by estimating a regression of EEMon SPY.•Do NOT include an intercept.•\n\nUse the full sample of data.\n\n1. (5pts) What is the optimal hedge ratio over the full sample of data? That is, for every dollarinvested in EEM, what would you invest in SPY?\n\n2. (5pts) What is the mean, volatility, and Sharpe ratio of the hedged position, had we appliedthat hedge throughout the full sample? Annualize the statistics.\n\n3. (5pts) Does it have the same mean as EEM? Why or why not?\n\n4. (5pts) Suppose we estimated a multifactor regression where in addition to SPY, we had IWMas a regressor. Why might this regression be difficult to use for attribution or even hedging?", "_____no_output_____" ] ], [ [ "hedge_data = pd.read_excel('/Users/rkb/Desktop/PortfolioT/data/proshares_analysis_data.xlsx', sheet_name = 'merrill_factors')\nhedge_data.set_index('date',inplace=True)\nhedge_data.head()", "_____no_output_____" ], [ "X = risky_data['SPY US Equity']\n#X = hedge_data['EEM US Equity']\ny = risky_data['EEM US Equity']\nstatic_model_noint = sm.OLS(y,X).fit()\nstatic_model_noint.params\n\n#1 Optimal Hedge ratio = Beta of reg. For every 1$ in SPY, corresponding in EEM = 0.92566", "_____no_output_____" ], [ "def summary_stats(df, annual_fac):\n report = pd.DataFrame()\n report['Mean'] = df.mean() * annual_fac\n report['Vol'] = df.std() * np.sqrt(annual_fac)\n report['Sharpe'] = report['Mean'] / report['Vol']\n return round(report, 4)\n\nsummary_stats(risky_data[['SPY US Equity']]*0.92566,12)\n\n#2 A: Stats below", "_____no_output_____" ], [ "summary_stats(risky_data[['EEM US Equity','SPY US Equity']],12)\n\n#It does not have the same mean as EEM", "_____no_output_____" ] ], [ [ "4 Modeling Risk (20pts)Continue to use the same data file used in the previous problem. But for this problem use the totalreturns of SPY and EFA. That is, use the returns as given in the spreadsheet–without subtractingUSGG3M Index.\n\n1. (10pts) SPY and EFA are highly correlated, yet SPY has had a much higher return. 
Howconfident are we that SPY will overperform EFA over the next 10 years?To answer the question,•use statistical estimates of the total returns of SPY and EFA over the full sample.•Assume that log returns for both assets are normally distributed.\n\n2. (10pts) Calculate the 60-month rolling volatility of EFA.Use the latest estimate of the volatility (Sep 2021), along with the normality formula, to calculatea Sep 2021 estimate of the 1-month, 1% VaR. In using the VaR formula, assume that the meanis zero.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
d07041df5a64c788a4e2d332dc64f8c3874bf778
12,966
ipynb
Jupyter Notebook
notebooks/Fraktur_OCR.ipynb
tukl-msd/LSTM-PYNQ
dcdf79f247d7695964eb74d1202457ef1a37e35e
[ "BSD-3-Clause" ]
26
2018-02-21T11:28:20.000Z
2022-02-15T06:07:15.000Z
notebooks/Fraktur_OCR.ipynb
xupsh/LSTM-PYNQ
dcdf79f247d7695964eb74d1202457ef1a37e35e
[ "BSD-3-Clause" ]
1
2018-06-22T07:45:32.000Z
2018-06-22T07:45:32.000Z
notebooks/Fraktur_OCR.ipynb
tukl-msd/LSTM-PYNQ
dcdf79f247d7695964eb74d1202457ef1a37e35e
[ "BSD-3-Clause" ]
5
2018-02-26T00:46:07.000Z
2019-03-11T00:58:24.000Z
68.242105
8,128
0.814901
[ [ [ "# LSTM on Pynq\nThis notebook covers how to use the FPGA-accelerated BiLSTM implementation on PYNQ to perform optical character recognition (OCR) on Fraktur text.\n![alt text](Fraktur_images/LSTM_Topology.png \"BiLSTM for OCR\")", "_____no_output_____" ], [ "## 1. Instantiate the OCR model\n\nInstantiating the OCR model will automatically download the appropriate bitstream onto the device and load the weights trained on the Fraktur dataset. ", "_____no_output_____" ] ], [ [ "import lstm\n\nhw_ocr = lstm.PynqFrakturOCR(lstm.RUNTIME_HW)", "_____no_output_____" ] ], [ [ "## 2. Load a line of text\nLoad a line of Fraktur text that has been extracted from the Fraktur text \"Wanderungen durch die Mark Brandenburg\" by Theodor Fontane.", "_____no_output_____" ] ], [ [ "from PIL import Image\nim = Image.open('/home/xilinx/jupyter_notebooks/lstm/Fraktur_images/010006.raw.lnrm.png').convert('L')\nim", "_____no_output_____" ] ], [ [ "## 3. Perform OCR in hardware\nPerform the OCR task on the input image using the hardware-accelerated OCR engine.", "_____no_output_____" ] ], [ [ "import numpy as np\n\nhw_result = hw_ocr.inference(np.array(im))\nhw_mops_per_s, hw_ms_inference_time, hw_recognized_text = hw_result\n\nprint(\"HW OCRed text: {}\".format(hw_recognized_text))\nprint(\"HW MOps/s: {}\".format(hw_mops_per_s))\nprint(\"HW inference time [ms]: {}\".format(hw_ms_inference_time))", "HW OCRed text: Für Andre hier gethan, sei stumme Gabe, -\nHW MOps/s: 54833.779575112705\nHW inference time [ms]: 1.7328369617462158\n" ] ], [ [ "## 4. Perform OCR in software\nFor the sake of evaluating the performances of the hardware-accelerated OCR engine, perform the same OCR task using the Arm CPU.", "_____no_output_____" ] ], [ [ "sw_ocr = lstm.PynqFrakturOCR(lstm.RUNTIME_SW)\n\nsw_result = sw_ocr.inference(np.array(im))\nsw_mops_per_s, sw_ms_inference_time, sw_recognized_text = sw_result\n\nprint(\"SW OCRed text: {}\".format(sw_recognized_text))\nprint(\"SW MOps/s: {}\".format(sw_mops_per_s))\nprint(\"SW inference time [ms]: {}\".format(sw_ms_inference_time))", "SW OCRed text: Für Andre hier gethan, sei stumme Gabe, -\nSW MOps/s: 1.2310819261405748\nSW inference time [ms]: 77182.515625\n" ] ], [ [ "## 5. Reset the hardware accelerator", "_____no_output_____" ] ], [ [ "hw_ocr.cleanup()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d070470030d8e001ad5cd99b28868578113ac9e5
8,374
ipynb
Jupyter Notebook
ECS129 Knot or Not_v1.ipynb
mmaliu97/ECS-129
80d1d35dfb7c9ebcfdc4a95718146851f33a6166
[ "MIT" ]
null
null
null
ECS129 Knot or Not_v1.ipynb
mmaliu97/ECS-129
80d1d35dfb7c9ebcfdc4a95718146851f33a6166
[ "MIT" ]
null
null
null
ECS129 Knot or Not_v1.ipynb
mmaliu97/ECS-129
80d1d35dfb7c9ebcfdc4a95718146851f33a6166
[ "MIT" ]
null
null
null
28.006689
88
0.478744
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport csv", "_____no_output_____" ], [ "dataVals = np.genfromtxt(r'1zncA.txt', delimiter='', dtype=float)\navgCoords = []\nx = 0", "_____no_output_____" ], [ "def ray_dir(E,F):\n d = E-F", "_____no_output_____" ], [ "def intersect_line_triangle(q1, q2, p1, p2, p3):\n def signed_tetra_volume(a, b, c, d):\n return np.sign(np.dot(np.cross(b - a, c - a), d - a) / 6.0)\n\n numknots = 0\n s1 = signed_tetra_volume(p1, p2, p3, q1)\n s2 = signed_tetra_volume(p1, p2, p3, q2)\n\n if s1 != s2:\n s3 = signed_tetra_volume(p1, p2, q1, q2)\n s4 = signed_tetra_volume(p2, p3, q1, q2)\n s5 = signed_tetra_volume(p3, p1, q1, q2)\n if s3==s4 and s4==s5:\n# n = np.cross(p2 - p1, p3 - p1)\n# t = np.dot(p1 - q1, n) / np.dot(q2 - q1, n)\n numknots = numknots + 1\n # return None\n return numknots", "_____no_output_____" ] ], [ [ "# Strategy 1: Once threshold is reach, remove the point", "_____no_output_____" ] ], [ [ "def lineseg_dist(p, a, b):\n\n # normalized tangent vector\n d = np.divide(b - a, np.linalg.norm(b - a))\n\n # signed parallel distance components\n s = np.dot(a - p, d)\n t = np.dot(p - b, d)\n\n # clamped parallel distance\n h = np.maximum.reduce([s, t, 0])\n\n # perpendicular distance component\n c = np.cross(p - a, d)\n\n return np.hypot(h, np.linalg.norm(c))", "_____no_output_____" ] ], [ [ "# Run Code", "_____no_output_____" ] ], [ [ "for k in range(0, 50):\n nproblem = 0\n for i in range(0, len(dataVals) - 2):\n xCoord = (dataVals[i][0] + dataVals[i + 1][0] + dataVals[i + 2][0]) / 3\n yCoord = (dataVals[i][1] + dataVals[i + 1][1] + dataVals[i + 2][1]) / 3\n zCoord = (dataVals[i][2] + dataVals[i + 1][2] + dataVals[i + 2][2]) / 3\n \n avgCoords=[xCoord, yCoord, zCoord];\n \n A = dataVals[i]\n B = dataVals[i + 1]\n C = avgCoords\n\n nk=0\n\n for j in range(0, i-2):\n \n E = dataVals[j]\n F = dataVals[j + 1]\n nk += intersect_line_triangle(E, F, A, B, C)\n\n for j in range(i + 2, len(dataVals)-1):\n E = dataVals[j]\n F = dataVals[j + 1]\n nk += intersect_line_triangle(E, F, A, B, C)\n\n A = dataVals[i + 1]\n B = avgCoords\n C = dataVals[i + 2]\n\n for j in range(0, i-1):\n E = dataVals[j]\n F = dataVals[j + 1]\n nk += intersect_line_triangle(E, F, A, B, C)\n\n for j in range(i + 3, len(dataVals)-1):\n E = dataVals[j]\n F = dataVals[j + 1]\n nk += intersect_line_triangle(E, F, A, B, C)\n \n if nk==0:\n dataVals[i + 1] = avgCoords\n nproblem += nk\n \n # Check if distance is short enough\n distance = lineseg_dist(avgCoords, dataVals[i], dataVals[i+2])\n if distance < 0.01:\n np.delete(dataVals, i,0)\n\n print(\"On iteration:\", k)\n print(\"curr possible numknot:\", nproblem)\n", "On iteration: 0\ncurr possible numknot: 0\nOn iteration: 1\ncurr possible numknot: 0\nOn iteration: 2\ncurr possible numknot: 0\nOn iteration: 3\ncurr possible numknot: 0\nOn iteration: 4\ncurr possible numknot: 0\nOn iteration: 5\ncurr possible numknot: 0\nOn iteration: 6\ncurr possible numknot: 0\nOn iteration: 7\ncurr possible numknot: 0\nOn iteration: 8\ncurr possible numknot: 0\nOn iteration: 9\ncurr possible numknot: 0\nOn iteration: 10\ncurr possible numknot: 0\nOn iteration: 11\ncurr possible numknot: 0\nOn iteration: 12\ncurr possible numknot: 1\nOn iteration: 13\ncurr possible numknot: 1\nOn iteration: 14\ncurr possible numknot: 1\nOn iteration: 15\ncurr possible numknot: 1\nOn iteration: 16\ncurr possible numknot: 2\nOn iteration: 17\ncurr possible numknot: 3\nOn iteration: 18\ncurr possible numknot: 3\nOn iteration: 19\ncurr possible numknot: 4\nOn 
iteration: 20\ncurr possible numknot: 4\nOn iteration: 21\ncurr possible numknot: 4\nOn iteration: 22\ncurr possible numknot: 4\nOn iteration: 23\ncurr possible numknot: 5\nOn iteration: 24\ncurr possible numknot: 6\nOn iteration: 25\ncurr possible numknot: 6\nOn iteration: 26\ncurr possible numknot: 6\nOn iteration: 27\ncurr possible numknot: 6\nOn iteration: 28\ncurr possible numknot: 5\nOn iteration: 29\ncurr possible numknot: 5\nOn iteration: 30\ncurr possible numknot: 5\nOn iteration: 31\ncurr possible numknot: 5\nOn iteration: 32\ncurr possible numknot: 5\nOn iteration: 33\ncurr possible numknot: 5\nOn iteration: 34\ncurr possible numknot: 5\nOn iteration: 35\ncurr possible numknot: 5\nOn iteration: 36\ncurr possible numknot: 5\nOn iteration: 37\ncurr possible numknot: 5\nOn iteration: 38\ncurr possible numknot: 5\nOn iteration: 39\ncurr possible numknot: 5\nOn iteration: 40\ncurr possible numknot: 5\nOn iteration: 41\ncurr possible numknot: 5\nOn iteration: 42\ncurr possible numknot: 5\nOn iteration: 43\ncurr possible numknot: 5\nOn iteration: 44\ncurr possible numknot: 6\nOn iteration: 45\ncurr possible numknot: 5\nOn iteration: 46\ncurr possible numknot: 5\nOn iteration: 47\ncurr possible numknot: 5\nOn iteration: 48\ncurr possible numknot: 5\nOn iteration: 49\ncurr possible numknot: 5\n" ], [ "dd", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d07065f8ec7e88c322b4a224f2a5edd302d1dc22
57,971
ipynb
Jupyter Notebook
Classification/Histogram-Based Gradient Boosting Trees/HistGradientBoostingClassifier_MaxAbsScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
53
2021-08-28T07:41:49.000Z
2022-03-09T02:20:17.000Z
Classification/Histogram-Based Gradient Boosting Trees/HistGradientBoostingClassifier_MaxAbsScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
142
2021-07-27T07:23:10.000Z
2021-08-25T14:57:24.000Z
Classification/Histogram-Based Gradient Boosting Trees/HistGradientBoostingClassifier_MaxAbsScaler.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
38
2021-07-27T04:54:08.000Z
2021-08-23T02:27:20.000Z
84.877013
23,998
0.810698
[ [ [ "# HistGradientBoostingClassifier with MaxAbsScaler", "_____no_output_____" ], [ "This code template is for classification analysis using a HistGradientBoostingClassifier and the feature rescaling technique called MaxAbsScaler ", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "import warnings \r\nimport numpy as np \r\nimport pandas as pd \r\nimport matplotlib.pyplot as plt \r\nimport seaborn as se \r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.experimental import enable_hist_gradient_boosting\r\nfrom sklearn.ensemble import HistGradientBoostingClassifier\r\nfrom sklearn.metrics import classification_report,plot_confusion_matrix\r\nfrom sklearn.preprocessing import MaxAbsScaler\r\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\n\nFilepath of CSV file", "_____no_output_____" ] ], [ [ "#filepath\r\nfile_path=\"\"", "_____no_output_____" ] ], [ [ "List of features which are required for model training .", "_____no_output_____" ] ], [ [ "#x_values\r\nfeatures = []", "_____no_output_____" ] ], [ [ "Target feature for prediction.", "_____no_output_____" ] ], [ [ "#y_value\ntarget='' ", "_____no_output_____" ] ], [ [ "### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ] ], [ [ "### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.", "_____no_output_____" ] ], [ [ "X = df[features]\nY = df[target]", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)\ndef EncodeY(df):\n if len(df.unique())<=2:\n return df\n else:\n un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')\n df=LabelEncoder().fit_transform(df)\n EncodedT=[xi for xi in range(len(un_EncodedT))]\n print(\"Encoded Target: {} to {}\".format(un_EncodedT,EncodedT))\n return df", "_____no_output_____" ], [ "x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i]) \nX=EncodeX(X)\nY=EncodeY(NullClearner(Y))\nX.head()", "Encoded Target: ['Iris-setosa' 'Iris-versicolor' 'Iris-virginica'] to [0, 1, 2]\n" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. 
It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "f,ax = plt.subplots(figsize=(18, 18))\nmatrix = np.triu(X.corr())\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Distribution Of Target Variable", "_____no_output_____" ] ], [ [ "plt.figure(figsize = (10,6))\nse.countplot(Y)", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.", "_____no_output_____" ] ], [ [ "x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)", "_____no_output_____" ] ], [ [ "### Data Rescaling\n", "_____no_output_____" ], [ "sklearn.preprocessing.MaxAbsScaler is used\n\nScale each feature by its maximum absolute value.\n\nRead more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html)", "_____no_output_____" ] ], [ [ "Scaler=MaxAbsScaler() \nx_train=Scaler.fit_transform(x_train) \nx_test=Scaler.transform(x_test)", "_____no_output_____" ] ], [ [ "### Model\n\nHistogram-based Gradient Boosting Classification Tree.This estimator is much faster than GradientBoostingClassifier for big datasets (n_samples >= 10 000).This estimator has native support for missing values (NaNs). \n\n[Reference](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html#sklearn.ensemble.HistGradientBoostingClassifier)\n\n> **loss**: The loss function to use in the boosting process. ‘binary_crossentropy’ (also known as logistic loss) is used for binary classification and generalizes to ‘categorical_crossentropy’ for multiclass classification. ‘auto’ will automatically choose either loss depending on the nature of the problem.\n\n> **learning_rate**: The learning rate, also known as shrinkage. This is used as a multiplicative factor for the leaves values. Use 1 for no shrinkage.\n\n> **max_iter**: The maximum number of iterations of the boosting process, i.e. the maximum number of trees.\n\n> **max_depth**: The maximum depth of each tree. The depth of a tree is the number of edges to go from the root to the deepest leaf. Depth isn’t constrained by default.\n\n> **l2_regularization**: The L2 regularization parameter. Use 0 for no regularization (default).\n\n> **early_stopping**: If ‘auto’, early stopping is enabled if the sample size is larger than 10000. If True, early stopping is enabled, otherwise early stopping is disabled.\n\n> **n_iter_no_change**: Used to determine when to “early stop”. The fitting process is stopped when none of the last n_iter_no_change scores are better than the n_iter_no_change - 1 -th-to-last one, up to some tolerance. Only used if early stopping is performed.\n\n> **tol**: The absolute tolerance to use when comparing scores during early stopping. The higher the tolerance, the more likely we are to early stop: higher tolerance means that it will be harder for subsequent iterations to be considered an improvement upon the reference score.\n\n> **scoring**: Scoring parameter to use for early stopping. 
", "_____no_output_____" ] ], [ [ "model = HistGradientBoostingClassifier(random_state = 123)\nmodel.fit(x_train, y_train)", "_____no_output_____" ] ], [ [ "#### Model Accuracy\n\nscore() method return the mean accuracy on the given test data and labels.\n\nIn multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.", "_____no_output_____" ] ], [ [ "print(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))", "Accuracy score 90.00 %\n\n" ] ], [ [ "#### Confusion Matrix\n\nA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.", "_____no_output_____" ] ], [ [ "plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)", "_____no_output_____" ] ], [ [ "#### Classification Report\nA Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.\n\n* **where**:\n - Precision:- Accuracy of positive predictions.\n - Recall:- Fraction of positives that were correctly identified.\n - f1-score:- percent of positive predictions were correct\n - support:- Support is the number of actual occurrences of the class in the specified dataset.", "_____no_output_____" ] ], [ [ "print(classification_report(y_test,model.predict(x_test)))", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 13\n 1 0.71 0.83 0.77 6\n 2 0.90 0.82 0.86 11\n\n accuracy 0.90 30\n macro avg 0.87 0.88 0.88 30\nweighted avg 0.91 0.90 0.90 30\n\n" ] ], [ [ "#### Creator: Snehaan Bhawal , Github: [Profile](https://github.com/Sbhawal)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d070683dafdae31791b681520d79396de9e53132
9,727
ipynb
Jupyter Notebook
datasets/winning-args-corpus/cmv_tester.ipynb
calebchiam/cs6742-fork
14dc879849ada4c08059c21f8b9721c0c3dbcebf
[ "MIT" ]
null
null
null
datasets/winning-args-corpus/cmv_tester.ipynb
calebchiam/cs6742-fork
14dc879849ada4c08059c21f8b9721c0c3dbcebf
[ "MIT" ]
null
null
null
datasets/winning-args-corpus/cmv_tester.ipynb
calebchiam/cs6742-fork
14dc879849ada4c08059c21f8b9721c0c3dbcebf
[ "MIT" ]
1
2020-01-17T17:27:16.000Z
2020-01-17T17:27:16.000Z
31.996711
2,356
0.574278
[ [ [ "from convokit import Corpus", "_____no_output_____" ], [ "corpus = Corpus(filename='change-my-view-corpus')", "_____no_output_____" ], [ "corpus", "_____no_output_____" ], [ "for convo in corpus.iter_conversations():\n conv = convo\n break", "_____no_output_____" ], [ "conv.meta", "_____no_output_____" ], [ "conv._usernames", "_____no_output_____" ], [ "for utt in conv.iter_utterances():\n uttz = utt\n if len(uttz.meta['replies']) > 0:\n break", "_____no_output_____" ], [ "uttz.reply_to", "_____no_output_____" ], [ "uttz", "_____no_output_____" ], [ "[utt.id for utt in corpus.get_conversation('t3_2ro9ux').iter_utterances()]", "_____no_output_____" ], [ "uttz.meta['replies']", "_____no_output_____" ], [ "for user in corpus.iter_users():\n us = user\n break", "_____no_output_____" ], [ "us.meta", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0706e5c6be6acfd2da08857abcf7912e210a2f7
178,250
ipynb
Jupyter Notebook
03-machine-learning-tabular-crossection/10 - Parametrização/GridSearch.ipynb
sn3fru/datascience_course
ee0a505134383034e09020d9b1de18904d9b2665
[ "MIT" ]
331
2019-01-26T21:11:45.000Z
2022-03-02T11:35:16.000Z
03-machine-learning-tabular-crossection/10 - Parametrização/GridSearch.ipynb
sn3fru/datascience_course
ee0a505134383034e09020d9b1de18904d9b2665
[ "MIT" ]
2
2019-11-02T22:32:13.000Z
2020-04-13T10:31:11.000Z
03-machine-learning-tabular-crossection/10 - Parametrização/GridSearch.ipynb
sn3fru/datascience_course
ee0a505134383034e09020d9b1de18904d9b2665
[ "MIT" ]
88
2019-01-25T16:53:47.000Z
2022-03-03T00:05:08.000Z
47.968245
15,520
0.45623
[ [ [ "# Prática Guiada: Demonstração de `GridSearchCV`", "_____no_output_____" ], [ "Vamos usar o conjunto de dados iris... que já conhecemos bem.\n\nVeremos como usar `GridSearchCV` para otimizar o hiperparâmetro `k` do algoritmo de vizinhos mais próximos.\n\n[aqui](http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf) há um link para o paper de Ronald Fisher, que usou este conjunto de dados em 1936.", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score, train_test_split\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "df = load_iris()\nX = df.data\ny = df.target", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state=98)\nlen(X_train), len(X_test), len(y_train), len(y_test)", "_____no_output_____" ] ], [ [ "## 1. Escrevendo os parâmetros à mão\n\nÉ claro que, dependendo do modelo, os hiperparâmetros podem ter um efeito considerável na qualidade da previsão. \nVamos ver como a precisão varia na hora de prever a espécie das flores para diferentes valores de K.", "_____no_output_____" ] ], [ [ "k_range = list(range(1, 100))\nk_scores = []\nfor k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n scores = cross_val_score(knn, X_train, y_train, cv=10, scoring='accuracy')\n k_scores.append(scores.mean())\n \nk_scores", "_____no_output_____" ], [ "plt.plot(k_range, k_scores)\nplt.xlabel('Value of K for KNN')\nplt.ylabel('Cross-Validated Accuracy');", "_____no_output_____" ] ], [ [ "Como sempre, observamos que o desempenho muda para diferentes valores do hiperparâmetro. <br />\nComo podemos sistematizar essa pesquisa e adicionar mais hiperparâmetros à exploração? ", "_____no_output_____" ], [ "## 2. Usando `GridSearch`\n", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import GridSearchCV", "_____no_output_____" ] ], [ [ "É definida uma lista de parâmetros a serem testados.", "_____no_output_____" ] ], [ [ "k_range = list(range(1, 31))\nknn = KNeighborsClassifier()", "_____no_output_____" ], [ "range(1, 31)", "_____no_output_____" ], [ "param_grid = dict(n_neighbors=range(1, 31))\nprint(param_grid)", "{'n_neighbors': range(1, 31)}\n" ] ], [ [ "Instanciar o método `GridSearchCV`", "_____no_output_____" ] ], [ [ "grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy', n_jobs=-1)", "_____no_output_____" ] ], [ [ "Fazer o ajuste", "_____no_output_____" ] ], [ [ "grid.fit(X_train, y_train)", "/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:841: DeprecationWarning: The default of the `iid` parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.\n DeprecationWarning)\n" ] ], [ [ "`GridSeachCV` retorna um dict com muitas informações. Do momento da configuração de cada parâmetro até os scores médios (via validação cruzada). Ele também fornece os scores em cada conjunto de treino e teste da Validação Cruzada K-Fold. ", "_____no_output_____" ] ], [ [ "grid.cv_results_.keys()", "_____no_output_____" ], [ "pd.DataFrame(grid.cv_results_).columns", "/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split0_train_score'), which will not be available by default any more in 0.21. 
If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split1_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split2_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split3_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split4_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split5_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split6_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split7_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split8_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split9_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('mean_train_score'), which will not be available by default any more in 0.21. 
If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('std_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n" ], [ "pd.DataFrame(grid.cv_results_)", "_____no_output_____" ] ], [ [ "Vamos ver o melhor modelo:", "_____no_output_____" ] ], [ [ "grid.best_params_", "_____no_output_____" ], [ "grid.best_estimator_, grid.best_score_, grid.best_params_", "_____no_output_____" ] ], [ [ "### 2.1 Adicionando outros parâmetros para ajustar", "_____no_output_____" ], [ "Vamos adicionar o parâmetro binário de peso do algoritmo knn que determina se alguns vizinhos terão mais peso do que outros no momento da classificação. O valor de distância indica que o peso é inversamente proporcional à distância\n\nGridSearchCV exige que a grade de parâmetros a serem verificados venha em um dicionário com os nomes dos parâmetros e a lista dos valores possíveis. \n\nObserve que o GridSearchCV possui todos os métodos que a API sklearn oferece para modelos preditivos: fit, predict, predict_proba, etc.", "_____no_output_____" ] ], [ [ "k_range = list(range(1, 31))\nweight_options = ['uniform', 'distance']", "_____no_output_____" ] ], [ [ "Agora a otimização será feita iterando e alternando `weights` e `k` (número de vizinhos próximos).", "_____no_output_____" ] ], [ [ "param_grid = dict(n_neighbors=k_range, weights=weight_options)\nprint(param_grid)", "{'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30], 'weights': ['uniform', 'distance']}\n" ] ], [ [ "**Verificar:** \n 1. Como o processo de busca será realizado? \n 2. Quantas vezes o algoritmo terá que ser iterado?", "_____no_output_____" ], [ "Ajustar os modelos", "_____no_output_____" ] ], [ [ "grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')\ngrid.fit(X_train, y_train)", "/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/model_selection/_search.py:841: DeprecationWarning: The default of the `iid` parameter will change from True to False in version 0.22 and will be removed in 0.24. This will change numeric results when test-set sizes are unequal.\n DeprecationWarning)\n" ], [ "pd.DataFrame(grid.cv_results_)", "/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split0_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split1_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split2_train_score'), which will not be available by default any more in 0.21. 
If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split3_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split4_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split5_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split6_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split7_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split8_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('split9_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('mean_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n/home/marcossilva/anaconda3/lib/python3.7/site-packages/sklearn/utils/deprecation.py:125: FutureWarning: You are accessing a training score ('std_train_score'), which will not be available by default any more in 0.21. If you need training scores, please set return_train_score=True\n warnings.warn(*warn_args, **warn_kwargs)\n" ] ], [ [ "Escolher o melhor modelo", "_____no_output_____" ] ], [ [ "print (grid.best_estimator_)\nprint(grid.best_score_)\nprint(grid.best_params_)", "KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',\n metric_params=None, n_jobs=None, n_neighbors=8, p=2,\n weights='uniform')\n0.9833333333333333\n{'n_neighbors': 8, 'weights': 'uniform'}\n" ] ], [ [ "## 3. 
Usar os melhores modelos para executar as previsões", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors=8, weights='uniform')\nknn.fit(X_train, y_train)\ny_pred = knn.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import classification_report, confusion_matrix\nimport seaborn as sns\n\nprint (classification_report(y_test, y_pred))\nsns.heatmap(confusion_matrix(y_test, y_pred),annot=True)", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 8\n 1 1.00 0.81 0.90 16\n 2 0.67 1.00 0.80 6\n\n micro avg 0.90 0.90 0.90 30\n macro avg 0.89 0.94 0.90 30\nweighted avg 0.93 0.90 0.90 30\n\n" ] ], [ [ "Podemos usar o atalho que `GridSeachCV` possui: usando o método` predict` sobre o objeto `grid`. ", "_____no_output_____" ] ], [ [ "grid.predict(X_test)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d0707a170eef38d1aa8d4a6a19c6e4c5644f2002
39,536
ipynb
Jupyter Notebook
Evaluating Performance.ipynb
sgf-afk/Class_Assignments
8852a825975e4d5040d28ebe705ff7198d59a1c8
[ "MIT" ]
null
null
null
Evaluating Performance.ipynb
sgf-afk/Class_Assignments
8852a825975e4d5040d28ebe705ff7198d59a1c8
[ "MIT" ]
null
null
null
Evaluating Performance.ipynb
sgf-afk/Class_Assignments
8852a825975e4d5040d28ebe705ff7198d59a1c8
[ "MIT" ]
null
null
null
50.492976
274
0.38188
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sqlalchemy import create_engine\nimport statsmodels.api as sm\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\npostgres_user = 'dsbc_student'\npostgres_pw = '7*.8G9QH21'\npostgres_host = '142.93.121.174'\npostgres_port = '5432'\npostgres_db = 'weatherinszeged'", "_____no_output_____" ], [ "engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(\n postgres_user, postgres_pw, postgres_host, postgres_port, postgres_db))\nweather_df = pd.read_sql_query('select * from weatherinszeged',con=engine)\n\n# no need for an open connection, as we're only doing a single query\nengine.dispose()", "_____no_output_____" ], [ "# Y is the target variable\nY = weather_df['apparenttemperature'] - weather_df['temperature']\n# X is the feature set\nX = weather_df[['humidity','windspeed']]\n\nX = sm.add_constant(X)\n\nresults = sm.OLS(Y, X).fit()\n\nresults.summary()", "_____no_output_____" ], [ "\nweather_df['humidity_windspeed_interaction'] = weather_df.humidity * weather_df.windspeed\n\n# Y is the target variable\nY = weather_df['apparenttemperature'] - weather_df['temperature']\n# X is the feature set\nX = weather_df[['humidity','windspeed', 'humidity_windspeed_interaction']]\n\nX = sm.add_constant(X)\n\nresults = sm.OLS(Y, X).fit()\n\nresults.summary()", "_____no_output_____" ], [ "# Y is the target variable\nY = weather_df['apparenttemperature'] - weather_df['temperature']\n# X is the feature set\nX = weather_df[['humidity','windspeed', 'visibility']]\n\nX = sm.add_constant(X)\n\nresults = sm.OLS(Y, X).fit()\n\nresults.summary()", "_____no_output_____" ], [ "\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sqlalchemy import create_engine\nimport statsmodels.api as sm\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\npostgres_user = 'dsbc_student'\npostgres_pw = '7*.8G9QH21'\npostgres_host = '142.93.121.174'\npostgres_port = '5432'\npostgres_db = 'houseprices'", "_____no_output_____" ], [ "engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(\n postgres_user, postgres_pw, postgres_host, postgres_port, postgres_db))\nhouse_prices_df = pd.read_sql_query('select * from houseprices',con=engine)\n\n# no need for an open connection, as we're only doing a single query\nengine.dispose()", "_____no_output_____" ], [ "house_prices_df = pd.concat([house_prices_df,pd.get_dummies(house_prices_df.mszoning, prefix=\"mszoning\", drop_first=True)], axis=1)\nhouse_prices_df = pd.concat([house_prices_df,pd.get_dummies(house_prices_df.street, prefix=\"street\", drop_first=True)], axis=1)\ndummy_column_names = list(pd.get_dummies(house_prices_df.mszoning, prefix=\"mszoning\", drop_first=True).columns)\ndummy_column_names = dummy_column_names + list(pd.get_dummies(house_prices_df.street, prefix=\"street\", drop_first=True).columns)", "_____no_output_____" ], [ "# Y is the target variable\nY = house_prices_df['saleprice']\n# X is the feature set\nX = house_prices_df[['overallqual', 'grlivarea', 'garagecars', 'garagearea', 'totalbsmtsf'] + dummy_column_names]\n\nX = sm.add_constant(X)\n\nresults = sm.OLS(Y, X).fit()\n\nresults.summary()", "_____no_output_____" ], [ "house_prices_df['totalsf'] = house_prices_df['totalbsmtsf'] + house_prices_df['firstflrsf'] + house_prices_df['secondflrsf']\n\nhouse_prices_df['int_over_sf'] = house_prices_df['totalsf'] * house_prices_df['overallqual']\n\n# Y is the target variable\nY = np.log1p(house_prices_df['saleprice'])\n# X is the feature set\nX = 
house_prices_df[['overallqual', 'grlivarea', 'garagecars', 'garagearea', 'totalsf', 'int_over_sf'] + dummy_column_names]\n\nX = sm.add_constant(X)\n\nresults = sm.OLS(Y, X).fit()\n\nresults.summary()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0708a053abd71282b4191f1de69cceaa8dfd6b1
137,442
ipynb
Jupyter Notebook
Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb
junelsolis/ZeroCostDL4Mic
d0435e9f3591ed2680b8c947e14ceba89c11d189
[ "MIT" ]
321
2020-03-20T16:33:37.000Z
2022-03-29T20:43:41.000Z
Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb
junelsolis/ZeroCostDL4Mic
d0435e9f3591ed2680b8c947e14ceba89c11d189
[ "MIT" ]
144
2020-03-21T07:11:29.000Z
2022-03-31T15:05:44.000Z
Colab_notebooks/Deep-STORM_2D_ZeroCostDL4Mic.ipynb
kapoorlab/ZeroCostDL4Mic
6e6affa0b6194176b59374230e2c9f102cc30942
[ "MIT" ]
81
2020-03-20T21:49:01.000Z
2022-03-10T13:52:25.000Z
137,442
137,442
0.653396
[ [ [ "# **Deep-STORM (2D)**\n\n---\n\n<font size = 4>Deep-STORM is a neural network capable of image reconstruction from high-density single-molecule localization microscopy (SMLM), first published in 2018 by [Nehme *et al.* in Optica](https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458). The architecture used here is a U-Net based network without skip connections. This network allows image reconstruction of 2D super-resolution images, in a supervised training manner. The network is trained using simulated high-density SMLM data for which the ground-truth is available. These simulations are obtained from random distribution of single molecules in a field-of-view and therefore do not imprint structural priors during training. The network output a super-resolution image with increased pixel density (typically upsampling factor of 8 in each dimension).\n\nDeep-STORM has **two key advantages**:\n- SMLM reconstruction at high density of emitters\n- fast prediction (reconstruction) once the model is trained appropriately, compared to more common multi-emitter fitting processes.\n\n\n---\n\n<font size = 4>*Disclaimer*:\n\n<font size = 4>This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.\n\n<font size = 4>This notebook is based on the following paper: \n\n<font size = 4>**Deep-STORM: super-resolution single-molecule microscopy by deep learning**, Optica (2018) by *Elias Nehme, Lucien E. Weiss, Tomer Michaeli, and Yoav Shechtman* (https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458)\n\n<font size = 4>And source code found in: https://github.com/EliasNehme/Deep-STORM\n\n\n<font size = 4>**Please also cite this original paper when using or developing this notebook.**", "_____no_output_____" ], [ "# **How to use this notebook?**\n\n---\n\n<font size = 4>Video describing how to use our notebooks are available on youtube:\n - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook\n - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook\n\n\n---\n###**Structure of a notebook**\n\n<font size = 4>The notebook contains two types of cell: \n\n<font size = 4>**Text cells** provide information and can be modified by douple-clicking the cell. You are currently reading the text cell. You can create a new text by clicking `+ Text`.\n\n<font size = 4>**Code cells** contain code and the code can be modfied by selecting the cell. To execute the cell, move your cursor on the `[ ]`-mark on the left side of the cell (play button appears). Click to execute the cell. After execution is done the animation of play button stops. You can create a new coding cell by clicking `+ Code`.\n\n---\n###**Table of contents, Code snippets** and **Files**\n\n<font size = 4>On the top left side of the notebook you find three tabs which contain from top to bottom:\n\n<font size = 4>*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.\n\n<font size = 4>*Code snippets* = contain examples how to code certain tasks. 
You can ignore this when using this notebook.\n\n<font size = 4>*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. \n\n<font size = 4>**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.\n\n<font size = 4>**Note:** The \"sample data\" in \"Files\" contains default files. Do not upload anything here!\n\n---\n###**Making changes to the notebook**\n\n<font size = 4>**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.\n\n<font size = 4>To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).\nYou can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment.", "_____no_output_____" ], [ "#**0. Before getting started**\n---\n<font size = 4> Deep-STORM is able to train on simulated datasets of SMLM data (see https://www.osapublishing.org/optica/abstract.cfm?uri=optica-5-4-458 for more info). Here, we provide a simulator that will generate a training dataset (section 3.1.b). A few parameters will allow you to match the simulation to your experimental data. Similarly to what is described in the paper, simulations obtained from ThunderSTORM can also be loaded here (section 3.1.a).\n\n---\n<font size = 4>**Important note**\n\n<font size = 4>- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.\n\n<font size = 4>- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.\n\n<font size = 4>- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.\n---", "_____no_output_____" ], [ "# **1. 
Install Deep-STORM and dependencies**\n---\n", "_____no_output_____" ] ], [ [ "Notebook_version = '1.13'\nNetwork = 'Deep-STORM'\n\n\n\nfrom builtins import any as b_any\n\ndef get_requirements_path():\n # Store requirements file in 'contents' directory \n current_dir = os.getcwd()\n dir_count = current_dir.count('/') - 1\n path = '../' * (dir_count) + 'requirements.txt'\n return path\n\ndef filter_files(file_list, filter_list):\n filtered_list = []\n for fname in file_list:\n if b_any(fname.split('==')[0] in s for s in filter_list):\n filtered_list.append(fname)\n return filtered_list\n\ndef build_requirements_file(before, after):\n path = get_requirements_path()\n\n # Exporting requirements.txt for local run\n !pip freeze > $path\n\n # Get minimum requirements file\n df = pd.read_csv(path, delimiter = \"\\n\")\n mod_list = [m.split('.')[0] for m in after if not m in before]\n req_list_temp = df.values.tolist()\n req_list = [x[0] for x in req_list_temp]\n\n # Replace with package name and handle cases where import name is different to module name\n mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]\n mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list] \n filtered_list = filter_files(req_list, mod_replace_list)\n\n file=open(path,'w')\n for item in filtered_list:\n file.writelines(item + '\\n')\n\n file.close()\n\nimport sys\nbefore = [str(m) for m in sys.modules]\n\n#@markdown ##Install Deep-STORM and dependencies\n# %% Model definition + helper functions\n\n!pip install fpdf\n# Import keras modules and libraries\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Activation, UpSampling2D, Convolution2D, MaxPooling2D, BatchNormalization, Layer\nfrom tensorflow.keras.callbacks import Callback\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras import optimizers, losses\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\nfrom skimage.transform import warp\nfrom skimage.transform import SimilarityTransform\nfrom skimage.metrics import structural_similarity\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom scipy.signal import fftconvolve\n\n# Import common libraries\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy.io as sio\nfrom os.path import abspath\nfrom sklearn.model_selection import train_test_split\nfrom skimage import io\nimport time\nimport os\nimport shutil\nimport csv\nfrom PIL import Image \nfrom PIL.TiffTags import TAGS\nfrom scipy.ndimage import gaussian_filter\nimport math\nfrom astropy.visualization import simple_norm\nfrom sys import getsizeof\nfrom fpdf import FPDF, HTMLMixin\nfrom pip._internal.operations.freeze import freeze\nimport subprocess\nfrom datetime import datetime\n\n# For sliders and dropdown menu, progress bar\nfrom ipywidgets import interact\nimport ipywidgets as widgets\nfrom tqdm import tqdm\n\n# For Multi-threading in simulation\nfrom numba import njit, prange\n\n\n\n# define a function that projects and rescales an image to the range [0,1]\ndef project_01(im):\n im = np.squeeze(im)\n min_val = im.min()\n max_val = im.max()\n return (im - min_val)/(max_val - min_val)\n\n# normalize image given mean and std\ndef normalize_im(im, dmean, dstd):\n im = np.squeeze(im)\n 
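# z-score style normalization using precomputed dataset-level mean and std\n 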
im_norm = np.zeros(im.shape,dtype=np.float32)\n im_norm = (im - dmean)/dstd\n return im_norm\n\n# Define the loss history recorder\nclass LossHistory(Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n\n def on_batch_end(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n \n# Define a matlab like gaussian 2D filter\ndef matlab_style_gauss2D(shape=(7,7),sigma=1):\n \"\"\" \n 2D gaussian filter - should give the same result as:\n MATLAB's fspecial('gaussian',[shape],[sigma]) \n \"\"\"\n m,n = [(ss-1.)/2. for ss in shape]\n y,x = np.ogrid[-m:m+1,-n:n+1]\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\n h.astype(dtype=K.floatx())\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\n sumh = h.sum()\n if sumh != 0:\n h /= sumh\n h = h*2.0\n h = h.astype('float32')\n return h\n\n# Expand the filter dimensions\npsf_heatmap = matlab_style_gauss2D(shape = (7,7),sigma=1)\ngfilter = tf.reshape(psf_heatmap, [7, 7, 1, 1])\n\n# Combined MSE + L1 loss\ndef L1L2loss(input_shape):\n def bump_mse(heatmap_true, spikes_pred):\n\n # generate the heatmap corresponding to the predicted spikes\n heatmap_pred = K.conv2d(spikes_pred, gfilter, strides=(1, 1), padding='same')\n\n # heatmaps MSE\n loss_heatmaps = losses.mean_squared_error(heatmap_true,heatmap_pred)\n\n # l1 on the predicted spikes\n loss_spikes = losses.mean_absolute_error(spikes_pred,tf.zeros(input_shape))\n return loss_heatmaps + loss_spikes\n return bump_mse\n\n# Define the concatenated conv2, batch normalization, and relu block\ndef conv_bn_relu(nb_filter, rk, ck, name):\n def f(input):\n conv = Convolution2D(nb_filter, kernel_size=(rk, ck), strides=(1,1),\\\n padding=\"same\", use_bias=False,\\\n kernel_initializer=\"Orthogonal\",name='conv-'+name)(input)\n conv_norm = BatchNormalization(name='BN-'+name)(conv)\n conv_norm_relu = Activation(activation = \"relu\",name='Relu-'+name)(conv_norm)\n return conv_norm_relu\n return f\n\n# Define the model architechture\ndef CNN(input,names):\n Features1 = conv_bn_relu(32,3,3,names+'F1')(input)\n pool1 = MaxPooling2D(pool_size=(2,2),name=names+'Pool1')(Features1)\n Features2 = conv_bn_relu(64,3,3,names+'F2')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2),name=names+'Pool2')(Features2)\n Features3 = conv_bn_relu(128,3,3,names+'F3')(pool2)\n pool3 = MaxPooling2D(pool_size=(2, 2),name=names+'Pool3')(Features3)\n Features4 = conv_bn_relu(512,3,3,names+'F4')(pool3)\n up5 = UpSampling2D(size=(2, 2),name=names+'Upsample1')(Features4)\n Features5 = conv_bn_relu(128,3,3,names+'F5')(up5)\n up6 = UpSampling2D(size=(2, 2),name=names+'Upsample2')(Features5)\n Features6 = conv_bn_relu(64,3,3,names+'F6')(up6)\n up7 = UpSampling2D(size=(2, 2),name=names+'Upsample3')(Features6)\n Features7 = conv_bn_relu(32,3,3,names+'F7')(up7)\n return Features7\n\n# Define the Model building for an arbitrary input size\ndef buildModel(input_dim, initial_learning_rate = 0.001):\n input_ = Input (shape = (input_dim))\n act_ = CNN (input_,'CNN')\n density_pred = Convolution2D(1, kernel_size=(1, 1), strides=(1, 1), padding=\"same\",\\\n activation=\"linear\", use_bias = False,\\\n kernel_initializer=\"Orthogonal\",name='Prediction')(act_)\n model = Model (inputs= input_, outputs=density_pred)\n opt = optimizers.Adam(lr = initial_learning_rate)\n model.compile(optimizer=opt, loss = L1L2loss(input_dim))\n return model\n\n\n# define a function that trains a model for a given data SNR and density\ndef train_model(patches, heatmaps, modelPath, epochs, steps_per_epoch, batch_size, upsampling_factor=8, validation_split = 0.3, 
initial_learning_rate = 0.001, pretrained_model_path = '', L2_weighting_factor = 100):\n \n \"\"\"\n This function trains a CNN model on the desired training set, given the \n upsampled training images and labels generated in MATLAB.\n \n # Inputs\n # TO UPDATE ----------\n\n # Outputs\n function saves the weights of the trained model to a hdf5, and the \n normalization factors to a mat file. These will be loaded later for testing \n the model in test_model. \n \"\"\"\n \n # for reproducibility\n np.random.seed(123)\n\n X_train, X_test, y_train, y_test = train_test_split(patches, heatmaps, test_size = validation_split, random_state=42)\n print('Number of training examples: %d' % X_train.shape[0])\n print('Number of validation examples: %d' % X_test.shape[0])\n \n # Setting type\n X_train = X_train.astype('float32')\n X_test = X_test.astype('float32')\n y_train = y_train.astype('float32')\n y_test = y_test.astype('float32')\n\n \n #===================== Training set normalization ==========================\n # normalize training images to be in the range [0,1] and calculate the \n # training set mean and std\n mean_train = np.zeros(X_train.shape[0],dtype=np.float32)\n std_train = np.zeros(X_train.shape[0], dtype=np.float32)\n for i in range(X_train.shape[0]):\n X_train[i, :, :] = project_01(X_train[i, :, :])\n mean_train[i] = X_train[i, :, :].mean()\n std_train[i] = X_train[i, :, :].std()\n\n # resulting normalized training images\n mean_val_train = mean_train.mean()\n std_val_train = std_train.mean()\n X_train_norm = np.zeros(X_train.shape, dtype=np.float32)\n for i in range(X_train.shape[0]):\n X_train_norm[i, :, :] = normalize_im(X_train[i, :, :], mean_val_train, std_val_train)\n \n # patch size\n psize = X_train_norm.shape[1]\n\n # Reshaping\n X_train_norm = X_train_norm.reshape(X_train.shape[0], psize, psize, 1)\n\n # ===================== Test set normalization ==========================\n # normalize test images to be in the range [0,1] and calculate the test set \n # mean and std\n mean_test = np.zeros(X_test.shape[0],dtype=np.float32)\n std_test = np.zeros(X_test.shape[0], dtype=np.float32)\n for i in range(X_test.shape[0]):\n X_test[i, :, :] = project_01(X_test[i, :, :])\n mean_test[i] = X_test[i, :, :].mean()\n std_test[i] = X_test[i, :, :].std()\n\n # resulting normalized test images\n mean_val_test = mean_test.mean()\n std_val_test = std_test.mean()\n X_test_norm = np.zeros(X_test.shape, dtype=np.float32)\n for i in range(X_test.shape[0]):\n X_test_norm[i, :, :] = normalize_im(X_test[i, :, :], mean_val_test, std_val_test)\n \n # Reshaping\n X_test_norm = X_test_norm.reshape(X_test.shape[0], psize, psize, 1)\n\n # Reshaping labels\n Y_train = y_train.reshape(y_train.shape[0], psize, psize, 1)\n Y_test = y_test.reshape(y_test.shape[0], psize, psize, 1)\n\n # Save datasets to a matfile to open later in matlab\n mdict = {\"mean_test\": mean_val_test, \"std_test\": std_val_test, \"upsampling_factor\": upsampling_factor, \"Normalization factor\": L2_weighting_factor}\n sio.savemat(os.path.join(modelPath,\"model_metadata.mat\"), mdict)\n\n\n # Set the dimensions ordering according to tensorflow consensous\n # K.set_image_dim_ordering('tf')\n K.set_image_data_format('channels_last')\n\n # Save the model weights after each epoch if the validation loss decreased\n checkpointer = ModelCheckpoint(filepath=os.path.join(modelPath,\"weights_best.hdf5\"), verbose=1,\n save_best_only=True)\n\n # Change learning when loss reaches a plataeu\n change_lr = ReduceLROnPlateau(monitor='val_loss', 
factor=0.1, patience=5, min_lr=0.00005)\n \n # Model building and complitation\n model = buildModel((psize, psize, 1), initial_learning_rate = initial_learning_rate)\n model.summary()\n\n # Load pretrained model\n if not pretrained_model_path:\n print('Using random initial model weights.')\n else:\n print('Loading model weights from '+pretrained_model_path)\n model.load_weights(pretrained_model_path)\n \n # Create an image data generator for real time data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0., # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0., # randomly shift images horizontally (fraction of total width)\n height_shift_range=0., # randomly shift images vertically (fraction of total height)\n zoom_range=0.,\n shear_range=0.,\n horizontal_flip=False, # randomly flip images\n vertical_flip=False, # randomly flip images\n fill_mode='constant',\n data_format=K.image_data_format())\n\n # Fit the image generator on the training data\n datagen.fit(X_train_norm)\n \n # loss history recorder\n history = LossHistory()\n\n # Inform user training begun\n print('-------------------------------')\n print('Training model...')\n\n # Fit model on the batches generated by datagen.flow()\n train_history = model.fit_generator(datagen.flow(X_train_norm, Y_train, batch_size=batch_size), \n steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=1, \n validation_data=(X_test_norm, Y_test), \n callbacks=[history, checkpointer, change_lr]) \n\n # Inform user training ended\n print('-------------------------------')\n print('Training Complete!')\n \n # Save the last model\n model.save(os.path.join(modelPath, 'weights_last.hdf5'))\n\n # convert the history.history dict to a pandas DataFrame: \n lossData = pd.DataFrame(train_history.history) \n\n if os.path.exists(os.path.join(modelPath,\"Quality Control\")):\n shutil.rmtree(os.path.join(modelPath,\"Quality Control\"))\n\n os.makedirs(os.path.join(modelPath,\"Quality Control\"))\n\n # The training evaluation.csv is saved (overwrites the Files if needed). 
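\n # The file has one row per epoch with three columns: loss, val_loss and learning rate.\n # Illustrative sketch (not executed as part of training): the history can be reloaded\n # later for custom plots, e.g. pd.read_csv(lossDataCSVpath)[['loss', 'val_loss']].plot()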
\n lossDataCSVpath = os.path.join(modelPath,\"Quality Control/training_evaluation.csv\")\n with open(lossDataCSVpath, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['loss','val_loss','learning rate'])\n for i in range(len(train_history.history['loss'])):\n writer.writerow([train_history.history['loss'][i], train_history.history['val_loss'][i], train_history.history['lr'][i]])\n\n return\n\n\n# Normalization functions from Martin Weigert used in CARE\ndef normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):\n \"\"\"This function is adapted from Martin Weigert\"\"\"\n \"\"\"Percentile-based image normalization.\"\"\"\n\n mi = np.percentile(x,pmin,axis=axis,keepdims=True)\n ma = np.percentile(x,pmax,axis=axis,keepdims=True)\n return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)\n\n\ndef normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32\n \"\"\"This function is adapted from Martin Weigert\"\"\"\n if dtype is not None:\n x = x.astype(dtype,copy=False)\n mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)\n ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)\n eps = dtype(eps)\n\n try:\n import numexpr\n x = numexpr.evaluate(\"(x - mi) / ( ma - mi + eps )\")\n except ImportError:\n x = (x - mi) / ( ma - mi + eps )\n\n if clip:\n x = np.clip(x,0,1)\n\n return x\n\ndef norm_minmse(gt, x, normalize_gt=True):\n \"\"\"This function is adapted from Martin Weigert\"\"\"\n\n \"\"\"\n normalizes and affinely scales an image pair such that the MSE is minimized \n \n Parameters\n ----------\n gt: ndarray\n the ground truth image \n x: ndarray\n the image that will be affinely scaled \n normalize_gt: bool\n set to True of gt image should be normalized (default)\n Returns\n -------\n gt_scaled, x_scaled \n \"\"\"\n if normalize_gt:\n gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)\n x = x.astype(np.float32, copy=False) - np.mean(x)\n #x = x - np.mean(x)\n gt = gt.astype(np.float32, copy=False) - np.mean(gt)\n #gt = gt - np.mean(gt)\n scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())\n return gt, scale * x\n\n\n# Multi-threaded Erf-based image construction\n@njit(parallel=True)\ndef FromLoc2Image_Erf(xc_array, yc_array, photon_array, sigma_array, image_size = (64,64), pixel_size = 100):\n w = image_size[0]\n h = image_size[1]\n erfImage = np.zeros((w, h))\n for ij in prange(w*h):\n j = int(ij/w)\n i = ij - j*w\n for (xc, yc, photon, sigma) in zip(xc_array, yc_array, photon_array, sigma_array):\n # Don't bother if the emitter has photons <= 0 or if Sigma <= 0\n if (sigma > 0) and (photon > 0):\n S = sigma*math.sqrt(2)\n x = i*pixel_size - xc\n y = j*pixel_size - yc\n # Don't bother if the emitter is further than 4 sigma from the centre of the pixel\n if (x+pixel_size/2)**2 + (y+pixel_size/2)**2 < 16*sigma**2:\n ErfX = math.erf((x+pixel_size)/S) - math.erf(x/S)\n ErfY = math.erf((y+pixel_size)/S) - math.erf(y/S)\n erfImage[j][i] += 0.25*photon*ErfX*ErfY\n return erfImage\n\n\n@njit(parallel=True)\ndef FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = (64,64), pixel_size = 100):\n w = image_size[0]\n h = image_size[1]\n locImage = np.zeros((image_size[0],image_size[1]) )\n n_locs = len(xc_array)\n\n for e in prange(n_locs):\n locImage[int(max(min(round(yc_array[e]/pixel_size),w-1),0))][int(max(min(round(xc_array[e]/pixel_size),h-1),0))] += 1\n\n return locImage\n\n\n\ndef getPixelSizeTIFFmetadata(TIFFpath, display=False):\n with 
Image.open(TIFFpath) as img:\n meta_dict = {TAGS[key] : img.tag[key] for key in img.tag.keys()}\n\n\n # TIFF tags\n # https://www.loc.gov/preservation/digital/formats/content/tiff_tags.shtml\n # https://www.awaresystems.be/imaging/tiff/tifftags/resolutionunit.html\n ResolutionUnit = meta_dict['ResolutionUnit'][0] # unit of resolution\n width = meta_dict['ImageWidth'][0]\n height = meta_dict['ImageLength'][0]\n\n xResolution = meta_dict['XResolution'][0] # number of pixels / ResolutionUnit\n\n if len(xResolution) == 1:\n xResolution = xResolution[0]\n elif len(xResolution) == 2:\n xResolution = xResolution[0]/xResolution[1]\n else:\n print('Image resolution not defined.')\n xResolution = 1\n\n if ResolutionUnit == 2:\n # Units given are in inches\n pixel_size = 0.025*1e9/xResolution\n elif ResolutionUnit == 3:\n # Units given are in cm\n pixel_size = 0.01*1e9/xResolution\n else: \n # ResolutionUnit is therefore 1\n print('Resolution unit not defined. Assuming: um')\n pixel_size = 1e3/xResolution\n\n if display:\n print('Pixel size obtained from metadata: '+str(pixel_size)+' nm')\n print('Image size: '+str(width)+'x'+str(height))\n \n return (pixel_size, width, height)\n\n\ndef saveAsTIF(path, filename, array, pixel_size):\n \"\"\"\n Image saving using PIL to save as .tif format\n # Input \n path - path where it will be saved\n filename - name of the file to save (no extension)\n array - numpy array conatining the data at the required format\n pixel_size - physical size of pixels in nanometers (identical for x and y)\n \"\"\"\n\n # print('Data type: '+str(array.dtype))\n if (array.dtype == np.uint16):\n mode = 'I;16'\n elif (array.dtype == np.uint32):\n mode = 'I'\n else:\n mode = 'F'\n\n # Rounding the pixel size to the nearest number that divides exactly 1cm.\n # Resolution needs to be a rational number --> see TIFF format\n # pixel_size = 10000/(round(10000/pixel_size))\n\n if len(array.shape) == 2:\n im = Image.fromarray(array)\n im.save(os.path.join(path, filename+'.tif'),\n mode = mode, \n resolution_unit = 3,\n resolution = 0.01*1e9/pixel_size)\n\n\n elif len(array.shape) == 3:\n imlist = []\n for frame in array:\n imlist.append(Image.fromarray(frame))\n\n imlist[0].save(os.path.join(path, filename+'.tif'), save_all=True,\n append_images=imlist[1:],\n mode = mode, \n resolution_unit = 3,\n resolution = 0.01*1e9/pixel_size)\n\n return\n\n\n\n\nclass Maximafinder(Layer):\n def __init__(self, thresh, neighborhood_size, use_local_avg, **kwargs):\n super(Maximafinder, self).__init__(**kwargs)\n self.thresh = tf.constant(thresh, dtype=tf.float32)\n self.nhood = neighborhood_size\n self.use_local_avg = use_local_avg\n\n def build(self, input_shape):\n if self.use_local_avg is True:\n self.kernel_x = tf.reshape(tf.constant([[-1,0,1],[-1,0,1],[-1,0,1]], dtype=tf.float32), [3, 3, 1, 1])\n self.kernel_y = tf.reshape(tf.constant([[-1,-1,-1],[0,0,0],[1,1,1]], dtype=tf.float32), [3, 3, 1, 1])\n self.kernel_sum = tf.reshape(tf.constant([[1,1,1],[1,1,1],[1,1,1]], dtype=tf.float32), [3, 3, 1, 1])\n\n def call(self, inputs):\n\n # local maxima positions\n max_pool_image = MaxPooling2D(pool_size=(self.nhood,self.nhood), strides=(1,1), padding='same')(inputs)\n cond = tf.math.greater(max_pool_image, self.thresh) & tf.math.equal(max_pool_image, inputs)\n indices = tf.where(cond)\n bind, xind, yind = indices[:, 0], indices[:, 2], indices[:, 1]\n confidence = tf.gather_nd(inputs, indices)\n\n # local CoG estimator\n if self.use_local_avg:\n x_image = K.conv2d(inputs, self.kernel_x, padding='same')\n y_image 
= K.conv2d(inputs, self.kernel_y, padding='same')\n sum_image = K.conv2d(inputs, self.kernel_sum, padding='same')\n confidence = tf.cast(tf.gather_nd(sum_image, indices), dtype=tf.float32)\n x_local = tf.math.divide(tf.gather_nd(x_image, indices),tf.gather_nd(sum_image, indices))\n y_local = tf.math.divide(tf.gather_nd(y_image, indices),tf.gather_nd(sum_image, indices))\n xind = tf.cast(xind, dtype=tf.float32) + tf.cast(x_local, dtype=tf.float32)\n yind = tf.cast(yind, dtype=tf.float32) + tf.cast(y_local, dtype=tf.float32)\n else:\n xind = tf.cast(xind, dtype=tf.float32)\n yind = tf.cast(yind, dtype=tf.float32)\n \n return bind, xind, yind, confidence\n\n def get_config(self):\n\n # Implement get_config to enable serialization. This is optional.\n base_config = super(Maximafinder, self).get_config()\n config = {}\n return dict(list(base_config.items()) + list(config.items()))\n\n\n\n# ------------------------------- Prediction with postprocessing function-------------------------------\ndef batchFramePredictionLocalization(dataPath, filename, modelPath, savePath, batch_size=1, thresh=0.1, neighborhood_size=3, use_local_avg = False, pixel_size = None):\n \"\"\"\n This function tests a trained model on the desired test set, given the \n tiff stack of test images, learned weights, and normalization factors.\n \n # Inputs\n dataPath - the path to the folder containing the tiff stack(s) to run prediction on \n filename - the name of the file to process\n modelPath - the path to the folder containing the weights file and the mean and standard deviation file generated in train_model\n savePath - the path to the folder where to save the prediction\n batch_size. - the number of frames to predict on for each iteration\n thresh - threshoold percentage from the maximum of the gaussian scaling\n neighborhood_size - the size of the neighborhood for local maxima finding\n use_local_average - Boolean whether to perform local averaging or not\n \"\"\"\n \n # load mean and std\n matfile = sio.loadmat(os.path.join(modelPath,'model_metadata.mat'))\n test_mean = np.array(matfile['mean_test'])\n test_std = np.array(matfile['std_test']) \n upsampling_factor = np.array(matfile['upsampling_factor'])\n upsampling_factor = upsampling_factor.item() # convert to scalar\n L2_weighting_factor = np.array(matfile['Normalization factor'])\n L2_weighting_factor = L2_weighting_factor.item() # convert to scalar\n\n # Read in the raw file\n Images = io.imread(os.path.join(dataPath, filename))\n if pixel_size == None:\n pixel_size, _, _ = getPixelSizeTIFFmetadata(os.path.join(dataPath, filename), display=True)\n pixel_size_hr = pixel_size/upsampling_factor\n\n # get dataset dimensions\n (nFrames, M, N) = Images.shape\n print('Input image is '+str(N)+'x'+str(M)+' with '+str(nFrames)+' frames.')\n\n # Build the model for a bigger image\n model = buildModel((upsampling_factor*M, upsampling_factor*N, 1))\n\n # Load the trained weights\n model.load_weights(os.path.join(modelPath,'weights_best.hdf5'))\n\n # add a post-processing module\n max_layer = Maximafinder(thresh*L2_weighting_factor, neighborhood_size, use_local_avg)\n\n # Initialise the results: lists will be used to collect all the localizations\n frame_number_list, x_nm_list, y_nm_list, confidence_au_list = [], [], [], []\n\n # Initialise the results\n Prediction = np.zeros((M*upsampling_factor, N*upsampling_factor), dtype=np.float32)\n Widefield = np.zeros((M, N), dtype=np.float32)\n\n # run model in batches\n n_batches = math.ceil(nFrames/batch_size)\n for b in 
tqdm(range(n_batches)):\n\n nF = min(batch_size, nFrames - b*batch_size)\n Images_norm = np.zeros((nF, M, N),dtype=np.float32)\n Images_upsampled = np.zeros((nF, M*upsampling_factor, N*upsampling_factor), dtype=np.float32)\n\n # Upsampling using a simple nearest neighbor interp and calculating - MULTI-THREAD this?\n for f in range(nF):\n Images_norm[f,:,:] = project_01(Images[b*batch_size+f,:,:])\n Images_norm[f,:,:] = normalize_im(Images_norm[f,:,:], test_mean, test_std)\n Images_upsampled[f,:,:] = np.kron(Images_norm[f,:,:], np.ones((upsampling_factor,upsampling_factor)))\n Widefield += Images[b*batch_size+f,:,:]\n\n # Reshaping\n Images_upsampled = np.expand_dims(Images_upsampled,axis=3)\n\n # Run prediction and local amxima finding\n predicted_density = model.predict_on_batch(Images_upsampled)\n predicted_density[predicted_density < 0] = 0\n Prediction += predicted_density.sum(axis = 3).sum(axis = 0)\n\n bind, xind, yind, confidence = max_layer(predicted_density)\n \n # normalizing the confidence by the L2_weighting_factor\n confidence /= L2_weighting_factor \n\n # turn indices to nms and append to the results\n xind, yind = xind*pixel_size_hr, yind*pixel_size_hr\n frmind = (bind.numpy() + b*batch_size + 1).tolist()\n xind = xind.numpy().tolist()\n yind = yind.numpy().tolist()\n confidence = confidence.numpy().tolist()\n frame_number_list += frmind\n x_nm_list += xind\n y_nm_list += yind\n confidence_au_list += confidence\n\n # Open and create the csv file that will contain all the localizations\n if use_local_avg:\n ext = '_avg'\n else:\n ext = '_max'\n with open(os.path.join(savePath, 'Localizations_' + os.path.splitext(filename)[0] + ext + '.csv'), \"w\", newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['frame', 'x [nm]', 'y [nm]', 'confidence [a.u]'])\n locs = list(zip(frame_number_list, x_nm_list, y_nm_list, confidence_au_list))\n writer.writerows(locs)\n\n # Save the prediction and widefield image\n Widefield = np.kron(Widefield, np.ones((upsampling_factor,upsampling_factor)))\n Widefield = np.float32(Widefield)\n\n # io.imsave(os.path.join(savePath, 'Predicted_'+os.path.splitext(filename)[0]+'.tif'), Prediction)\n # io.imsave(os.path.join(savePath, 'Widefield_'+os.path.splitext(filename)[0]+'.tif'), Widefield)\n\n saveAsTIF(savePath, 'Predicted_'+os.path.splitext(filename)[0], Prediction, pixel_size_hr)\n saveAsTIF(savePath, 'Widefield_'+os.path.splitext(filename)[0], Widefield, pixel_size_hr)\n\n\n return\n\n\n# Colors for the warning messages\nclass bcolors:\n WARNING = '\\033[31m'\n NORMAL = '\\033[0m' # white (normal)\n\n\n\ndef list_files(directory, extension):\n return (f for f in os.listdir(directory) if f.endswith('.' 
+ extension))\n\n\n# @njit(parallel=True)\ndef subPixelMaxLocalization(array, method = 'CoM', patch_size = 3):\n xMaxInd, yMaxInd = np.unravel_index(array.argmax(), array.shape, order='C')\n centralPatch = XC[(xMaxInd-patch_size):(xMaxInd+patch_size+1),(yMaxInd-patch_size):(yMaxInd+patch_size+1)]\n\n if (method == 'MAX'):\n x0 = xMaxInd\n y0 = yMaxInd\n\n elif (method == 'CoM'):\n x0 = 0\n y0 = 0\n S = 0\n for xy in range(patch_size*patch_size):\n y = math.floor(xy/patch_size)\n x = xy - y*patch_size\n x0 += x*array[x,y]\n y0 += y*array[x,y]\n S = array[x,y]\n \n x0 = x0/S - patch_size/2 + xMaxInd\n y0 = y0/S - patch_size/2 + yMaxInd\n \n elif (method == 'Radiality'):\n # Not implemented yet\n x0 = xMaxInd\n y0 = yMaxInd\n \n return (x0, y0)\n\n\n@njit(parallel=True)\ndef correctDriftLocalization(xc_array, yc_array, frames, xDrift, yDrift):\n n_locs = xc_array.shape[0]\n xc_array_Corr = np.empty(n_locs)\n yc_array_Corr = np.empty(n_locs)\n \n for loc in prange(n_locs):\n xc_array_Corr[loc] = xc_array[loc] - xDrift[frames[loc]]\n yc_array_Corr[loc] = yc_array[loc] - yDrift[frames[loc]]\n\n return (xc_array_Corr, yc_array_Corr)\n\n\nprint('--------------------------------')\nprint('DeepSTORM installation complete.')\n\n# Check if this is the latest version of the notebook\n\nAll_notebook_versions = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv\", dtype=str)\nprint('Notebook version: '+Notebook_version)\n\nLatest_Notebook_version = All_notebook_versions[All_notebook_versions[\"Notebook\"] == Network]['Version'].iloc[0]\nprint('Latest notebook version: '+Latest_Notebook_version)\n\n\nif Notebook_version == Latest_Notebook_version:\n print(\"This notebook is up-to-date.\")\nelse:\n print(bcolors.WARNING +\"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n\n\n# Latest_notebook_version = pd.read_csv(\"https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv\")\n\n# if Notebook_version == list(Latest_notebook_version.columns):\n# print(\"This notebook is up-to-date.\")\n\n# if not Notebook_version == list(Latest_notebook_version.columns):\n# print(bcolors.WARNING +\"A new version of this notebook has been released. 
We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki\")\n\ndef pdf_export(trained = False, raw_data = False, pretrained_model = False):\n class MyFPDF(FPDF, HTMLMixin):\n pass\n\n pdf = MyFPDF()\n pdf.add_page()\n pdf.set_right_margin(-1)\n pdf.set_font(\"Arial\", size = 11, style='B') \n\n \n #model_name = 'little_CARE_test'\n day = datetime.now()\n datetime_str = str(day)[0:10]\n\n Header = 'Training report for '+Network+' model ('+model_name+')\\nDate: '+datetime_str\n pdf.multi_cell(180, 5, txt = Header, align = 'L') \n \n # add another cell \n if trained:\n training_time = \"Training time: \"+str(hours)+ \"hour(s) \"+str(minutes)+\"min(s) \"+str(round(seconds))+\"sec(s)\"\n pdf.cell(190, 5, txt = training_time, ln = 1, align='L')\n pdf.ln(1)\n\n Header_2 = 'Information for your materials and method:'\n pdf.cell(190, 5, txt=Header_2, ln=1, align='L')\n\n all_packages = ''\n for requirement in freeze(local_only=True):\n all_packages = all_packages+requirement+', '\n #print(all_packages)\n\n #Main Packages\n main_packages = ''\n version_numbers = []\n for name in ['tensorflow','numpy','Keras']:\n find_name=all_packages.find(name)\n main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '\n #Version numbers only here:\n version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])\n\n cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)\n cuda_version = cuda_version.stdout.decode('utf-8')\n cuda_version = cuda_version[cuda_version.find(', V')+3:-1]\n gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)\n gpu_name = gpu_name.stdout.decode('utf-8')\n gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]\n #print(cuda_version[cuda_version.find(', V')+3:-1])\n #print(gpu_name)\n if raw_data == True:\n shape = (M,N)\n else:\n shape = (int(FOV_size/pixel_size),int(FOV_size/pixel_size))\n #dataset_size = len(os.listdir(Training_source))\n\n text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(n_patches)+' paired image patches (image dimensions: '+str(patch_size)+', patch size (upsampled): ('+str(int(patch_size))+','+str(int(patch_size))+') with a batch size of '+str(batch_size)+', using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Losses were calculated using MSE for the heatmaps and L1 loss for the spike prediction. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), Keras (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+' GPU.'\n\n if pretrained_model:\n text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(n_patches)+' paired image patches (image dimensions: '+str(patch_size)+', patch size (upsampled): ('+str(int(patch_size))+','+str(int(patch_size))+') with a batch size of '+str(batch_size)+', using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Losses were calculated using MSE for the heatmaps and L1 loss for the spike prediction. The models was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), Keras (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). 
The training was accelerated using a '+gpu_name+' GPU.'\n\n pdf.set_font('')\n pdf.set_font_size(10.)\n pdf.multi_cell(180, 5, txt = text, align='L')\n pdf.ln(1)\n pdf.set_font('')\n pdf.set_font(\"Arial\", size = 11, style='B')\n pdf.ln(1)\n pdf.cell(190, 5, txt = 'Training dataset', align='L', ln=1)\n pdf.set_font('')\n pdf.set_font_size(10.)\n if raw_data==False:\n simul_text = 'The training dataset was created in the notebook using the following simulation settings:'\n pdf.cell(200, 5, txt=simul_text, align='L')\n pdf.ln(1)\n html = \"\"\" \n <table width=60% style=\"margin-left:0px;\">\n <tr>\n <th width = 50% align=\"left\">Setting</th>\n <th width = 50% align=\"left\">Simulated Value</th>\n </tr>\n <tr>\n <td width = 50%>FOV_size</td>\n <td width = 50%>{0}</td>\n </tr>\n <tr>\n <td width = 50%>pixel_size</td>\n <td width = 50%>{1}</td>\n </tr>\n <tr>\n <td width = 50%>ADC_per_photon_conversion</td>\n <td width = 50%>{2}</td>\n </tr>\n <tr>\n <td width = 50%>ReadOutNoise_ADC</td>\n <td width = 50%>{3}</td>\n </tr>\n <tr>\n <td width = 50%>ADC_offset</td>\n <td width = 50%>{4}</td>\n </tr>\n <tr>\n <td width = 50%>emitter_density</td>\n <td width = 50%>{5}</td>\n </tr>\n <tr>\n <td width = 50%>emitter_density_std</td>\n <td width = 50%>{6}</td>\n </tr>\n <tr>\n <td width = 50%>number_of_frames</td>\n <td width = 50%>{7}</td>\n </tr> \n <tr>\n <td width = 50%>sigma</td>\n <td width = 50%>{8}</td>\n </tr>\n <tr>\n <td width = 50%>sigma_std</td>\n <td width = 50%>{9}</td>\n </tr>\n <tr>\n <td width = 50%>n_photons</td>\n <td width = 50%>{10}</td>\n </tr>\n <tr>\n <td width = 50%>n_photons_std</td>\n <td width = 50%>{11}</td>\n </tr> \n </table>\n \"\"\".format(FOV_size, pixel_size, ADC_per_photon_conversion, ReadOutNoise_ADC, ADC_offset, emitter_density, emitter_density_std, number_of_frames, sigma, sigma_std, n_photons, n_photons_std)\n pdf.write_html(html)\n else:\n simul_text = 'The training dataset was simulated using ThunderSTORM and loaded into the notebook.'\n pdf.multi_cell(190, 5, txt=simul_text, align='L')\n pdf.set_font(\"Arial\", size = 11, style='B')\n #pdf.ln(1)\n #pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)\n pdf.set_font('')\n pdf.set_font('Arial', size = 10, style = 'B')\n pdf.cell(29, 5, txt= 'ImageData_path', align = 'L', ln=0)\n pdf.set_font('')\n pdf.multi_cell(170, 5, txt = ImageData_path, align = 'L')\n pdf.set_font('')\n pdf.set_font('Arial', size = 10, style = 'B')\n pdf.cell(28, 5, txt= 'LocalizationData_path:', align = 'L', ln=0)\n pdf.set_font('')\n pdf.multi_cell(170, 5, txt = LocalizationData_path, align = 'L')\n pdf.set_font('Arial', size = 10, style = 'B')\n pdf.cell(28, 5, txt= 'pixel_size:', align = 'L', ln=0)\n pdf.set_font('')\n pdf.multi_cell(170, 5, txt = str(pixel_size), align = 'L')\n #pdf.cell(190, 5, txt=aug_text, align='L', ln=1)\n pdf.set_font('Arial', size = 11, style = 'B')\n pdf.ln(1)\n pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)\n pdf.set_font('')\n pdf.set_font_size(10.)\n # if Use_Default_Advanced_Parameters:\n # pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')\n pdf.cell(200, 5, txt='The following parameters were used to generate patches:')\n pdf.ln(1)\n html = \"\"\"\n <table width=70% style=\"margin-left:0px;\">\n <tr>\n <th width = 50% align=\"left\">Patch Parameter</th>\n <th width = 50% align=\"left\">Value</th>\n </tr>\n <tr>\n <td width = 50%>patch_size</td>\n <td width = 50%>{0}</td>\n </tr>\n <tr>\n <td width = 50%>upsampling_factor</td>\n <td width = 50%>{1}</td>\n </tr>\n <tr>\n <td 
width = 50%>num_patches_per_frame</td>\n <td width = 50%>{2}</td>\n </tr>\n <tr>\n <td width = 50%>min_number_of_emitters_per_patch</td>\n <td width = 50%>{3}</td>\n </tr>\n <tr>\n <td width = 50%>max_num_patches</td>\n <td width = 50%>{4}</td>\n </tr>\n <tr>\n <td width = 50%>gaussian_sigma</td>\n <td width = 50%>{5}</td>\n </tr>\n <tr>\n <td width = 50%>Automatic_normalization</td>\n <td width = 50%>{6}</td>\n </tr>\n <tr>\n <td width = 50%>L2_weighting_factor</td>\n <td width = 50%>{7}</td>\n </tr>\n \"\"\".format(str(patch_size)+'x'+str(patch_size), upsampling_factor, num_patches_per_frame, min_number_of_emitters_per_patch, max_num_patches, gaussian_sigma, Automatic_normalization, L2_weighting_factor)\n pdf.write_html(html)\n pdf.ln(3)\n pdf.set_font('Arial', size=10)\n pdf.cell(200, 5, txt='The following parameters were used for training:')\n pdf.ln(1)\n html = \"\"\" \n <table width=70% style=\"margin-left:0px;\">\n <tr>\n <th width = 50% align=\"left\">Training Parameter</th>\n <th width = 50% align=\"left\">Value</th>\n </tr>\n <tr>\n <td width = 50%>number_of_epochs</td>\n <td width = 50%>{0}</td>\n </tr>\n <tr>\n <td width = 50%>batch_size</td>\n <td width = 50%>{1}</td>\n </tr>\n <tr>\n <td width = 50%>number_of_steps</td>\n <td width = 50%>{2}</td>\n </tr>\n <tr>\n <td width = 50%>percentage_validation</td>\n <td width = 50%>{3}</td>\n </tr>\n <tr>\n <td width = 50%>initial_learning_rate</td>\n <td width = 50%>{4}</td>\n </tr>\n </table>\n \"\"\".format(number_of_epochs,batch_size,number_of_steps,percentage_validation,initial_learning_rate)\n pdf.write_html(html)\n\n pdf.ln(1)\n # pdf.set_font('')\n pdf.set_font('Arial', size = 10, style = 'B')\n pdf.cell(21, 5, txt= 'Model Path:', align = 'L', ln=0)\n pdf.set_font('')\n pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')\n\n pdf.ln(1)\n pdf.cell(60, 5, txt = 'Example Training Images', ln=1)\n pdf.ln(1)\n exp_size = io.imread('/content/TrainingDataExample_DeepSTORM2D.png').shape\n pdf.image('/content/TrainingDataExample_DeepSTORM2D.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n pdf.ln(1)\n ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n pdf.multi_cell(190, 5, txt = ref_1, align='L')\n ref_2 = '- Deep-STORM: Nehme, Elias, et al. \"Deep-STORM: super-resolution single-molecule microscopy by deep learning.\" Optica 5.4 (2018): 458-464.'\n pdf.multi_cell(190, 5, txt = ref_2, align='L')\n # if Use_Data_augmentation:\n # ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. 
\"Augmentor: an image augmentation library for machine learning.\" arXiv preprint arXiv:1708.04680 (2017).'\n # pdf.multi_cell(190, 5, txt = ref_3, align='L')\n pdf.ln(3)\n reminder = 'Important:\\nRemember to perform the quality control step on all newly trained models\\nPlease consider depositing your training dataset on Zenodo'\n pdf.set_font('Arial', size = 11, style='B')\n pdf.multi_cell(190, 5, txt=reminder, align='C')\n\n pdf.output(model_path+'/'+model_name+'/'+model_name+'_training_report.pdf')\n print('------------------------------')\n print('PDF report exported in '+model_path+'/'+model_name+'/')\n\ndef qc_pdf_export():\n class MyFPDF(FPDF, HTMLMixin):\n pass\n\n pdf = MyFPDF()\n pdf.add_page()\n pdf.set_right_margin(-1)\n pdf.set_font(\"Arial\", size = 11, style='B') \n\n Network = 'Deep-STORM'\n #model_name = os.path.basename(full_QC_model_path)\n day = datetime.now()\n datetime_str = str(day)[0:10]\n\n Header = 'Quality Control report for '+Network+' model ('+os.path.basename(QC_model_path)+')\\nDate: '+datetime_str\n pdf.multi_cell(180, 5, txt = Header, align = 'L') \n\n all_packages = ''\n for requirement in freeze(local_only=True):\n all_packages = all_packages+requirement+', '\n\n pdf.set_font('')\n pdf.set_font('Arial', size = 11, style = 'B')\n pdf.ln(2)\n pdf.cell(190, 5, txt = 'Loss curves', ln=1, align='L')\n pdf.ln(1)\n if os.path.exists(savePath+'/lossCurvePlots.png'):\n exp_size = io.imread(savePath+'/lossCurvePlots.png').shape\n pdf.image(savePath+'/lossCurvePlots.png', x = 11, y = None, w = round(exp_size[1]/10), h = round(exp_size[0]/10))\n else:\n pdf.set_font('')\n pdf.set_font('Arial', size=10)\n pdf.cell(190, 5, txt='If you would like to see the evolution of the loss function during training please play the first cell of the QC section in the notebook.')\n pdf.ln(2)\n pdf.set_font('')\n pdf.set_font('Arial', size = 10, style = 'B')\n pdf.ln(3)\n pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)\n pdf.ln(1)\n exp_size = io.imread(savePath+'/QC_example_data.png').shape\n pdf.image(savePath+'/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))\n pdf.ln(1)\n pdf.set_font('')\n pdf.set_font('Arial', size = 11, style = 'B')\n pdf.ln(1)\n pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)\n pdf.set_font('')\n pdf.set_font_size(10.)\n\n pdf.ln(1)\n html = \"\"\"\n <body>\n <font size=\"7\" face=\"Courier New\" >\n <table width=94% style=\"margin-left:0px;\">\"\"\"\n with open(savePath+'/'+os.path.basename(QC_model_path)+'_QC_metrics.csv', 'r') as csvfile:\n metrics = csv.reader(csvfile)\n header = next(metrics)\n image = header[0]\n mSSIM_PvsGT = header[1]\n mSSIM_SvsGT = header[2]\n NRMSE_PvsGT = header[3]\n NRMSE_SvsGT = header[4]\n PSNR_PvsGT = header[5]\n PSNR_SvsGT = header[6]\n header = \"\"\"\n <tr>\n <th width = 10% align=\"left\">{0}</th>\n <th width = 15% align=\"left\">{1}</th>\n <th width = 15% align=\"center\">{2}</th>\n <th width = 15% align=\"left\">{3}</th>\n <th width = 15% align=\"center\">{4}</th>\n <th width = 15% align=\"left\">{5}</th>\n <th width = 15% align=\"center\">{6}</th>\n </tr>\"\"\".format(image,mSSIM_PvsGT,mSSIM_SvsGT,NRMSE_PvsGT,NRMSE_SvsGT,PSNR_PvsGT,PSNR_SvsGT)\n html = html+header\n for row in metrics:\n image = row[0]\n mSSIM_PvsGT = row[1]\n mSSIM_SvsGT = row[2]\n NRMSE_PvsGT = row[3]\n NRMSE_SvsGT = row[4]\n PSNR_PvsGT = row[5]\n PSNR_SvsGT = row[6]\n cells = \"\"\"\n <tr>\n <td width = 10% align=\"left\">{0}</td>\n <td width = 15% 
align=\"center\">{1}</td>\n <td width = 15% align=\"center\">{2}</td>\n <td width = 15% align=\"center\">{3}</td>\n <td width = 15% align=\"center\">{4}</td>\n <td width = 15% align=\"center\">{5}</td>\n <td width = 15% align=\"center\">{6}</td>\n </tr>\"\"\".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)),str(round(float(NRMSE_PvsGT),3)),str(round(float(NRMSE_SvsGT),3)),str(round(float(PSNR_PvsGT),3)),str(round(float(PSNR_SvsGT),3)))\n html = html+cells\n html = html+\"\"\"</body></table>\"\"\"\n \n pdf.write_html(html)\n\n pdf.ln(1)\n pdf.set_font('')\n pdf.set_font_size(10.)\n ref_1 = 'References:\\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. \"Democratising deep learning for microscopy with ZeroCostDL4Mic.\" Nature Communications (2021).'\n pdf.multi_cell(190, 5, txt = ref_1, align='L')\n ref_2 = '- Deep-STORM: Nehme, Elias, et al. \"Deep-STORM: super-resolution single-molecule microscopy by deep learning.\" Optica 5.4 (2018): 458-464.'\n pdf.multi_cell(190, 5, txt = ref_2, align='L')\n\n pdf.ln(3)\n reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'\n\n pdf.set_font('Arial', size = 11, style='B')\n pdf.multi_cell(190, 5, txt=reminder, align='C')\n\n pdf.output(savePath+'/'+os.path.basename(QC_model_path)+'_QC_report.pdf')\n\n\n print('------------------------------')\n print('QC PDF report exported as '+savePath+'/'+os.path.basename(QC_model_path)+'_QC_report.pdf')\n\n# Build requirements file for local run\nafter = [str(m) for m in sys.modules]\nbuild_requirements_file(before, after)", "_____no_output_____" ] ], [ [ "# **2. Complete the Colab session**\n---", "_____no_output_____" ], [ "\n## **2.1. Check for GPU access**\n---\n\nBy default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:\n\n<font size = 4>Go to **Runtime -> Change the Runtime type**\n\n<font size = 4>**Runtime type: Python 3** *(Python 3 is programming language in which this program is written)*\n\n<font size = 4>**Accelerator: GPU** *(Graphics processing unit)*\n", "_____no_output_____" ] ], [ [ "#@markdown ##Run this cell to check if you have GPU access\n# %tensorflow_version 1.x\n\nimport tensorflow as tf\n# if tf.__version__ != '2.2.0':\n# !pip install tensorflow==2.2.0\n\nif tf.test.gpu_device_name()=='':\n print('You do not have GPU access.') \n print('Did you change your runtime ?') \n print('If the runtime settings are correct then Google did not allocate GPU to your session')\n print('Expect slow performance. To access GPU try reconnecting later')\n\nelse:\n print('You have GPU access')\n !nvidia-smi\n\n# from tensorflow.python.client import device_lib \n# device_lib.list_local_devices()\n\n# print the tensorflow version\nprint('Tensorflow version is ' + str(tf.__version__))\n", "_____no_output_____" ] ], [ [ "## **2.2. Mount your Google Drive**\n---\n<font size = 4> To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.\n\n<font size = 4> Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. 
\n\n<font size = 4> Once this is done, your data are available in the **Files** tab on the top left of notebook.", "_____no_output_____" ] ], [ [ "#@markdown ##Run this cell to connect your Google Drive to Colab\n\n#@markdown * Click on the URL. \n\n#@markdown * Sign in your Google Account. \n\n#@markdown * Copy the authorization code. \n\n#@markdown * Enter the authorization code. \n\n#@markdown * Click on \"Files\" site on the right. Refresh the site. Your Google Drive folder should now be available here as \"drive\". \n\n#mounts user's Google Drive to Google Colab.\n\nfrom google.colab import drive\ndrive.mount('/content/gdrive')", "_____no_output_____" ] ], [ [ "\n# **3. Generate patches for training**\n---\n\nFor Deep-STORM the training data can be obtained in two ways:\n* Simulated using ThunderSTORM or other simulation tool and loaded here (**using Section 3.1.a**)\n* Directly simulated in this notebook (**using Section 3.1.b**)\n", "_____no_output_____" ], [ "## **3.1.a Load training data**\n---\n\nHere you can load your simulated data along with its corresponding localization file.\n* The `pixel_size` is defined in nanometer (nm). ", "_____no_output_____" ] ], [ [ "#@markdown ##Load raw data\n\nload_raw_data = True\n\n# Get user input\nImageData_path = \"\" #@param {type:\"string\"}\nLocalizationData_path = \"\" #@param {type: \"string\"}\n#@markdown Get pixel size from file?\nget_pixel_size_from_file = True #@param {type:\"boolean\"}\n#@markdown Otherwise, use this value:\npixel_size = 100 #@param {type:\"number\"}\n\nif get_pixel_size_from_file:\n pixel_size,_,_ = getPixelSizeTIFFmetadata(ImageData_path, True)\n\n# load the tiff data\nImages = io.imread(ImageData_path)\n# get dataset dimensions\nif len(Images.shape) == 3:\n (number_of_frames, M, N) = Images.shape\nelif len(Images.shape) == 2:\n (M, N) = Images.shape\n number_of_frames = 1\nprint('Loaded images: '+str(M)+'x'+str(N)+' with '+str(number_of_frames)+' frames')\n\n# Interactive display of the stack\ndef scroll_in_time(frame):\n f=plt.figure(figsize=(6,6))\n plt.imshow(Images[frame-1], interpolation='nearest', cmap = 'gray')\n plt.title('Training source at frame = ' + str(frame))\n plt.axis('off');\n\nif number_of_frames > 1:\n interact(scroll_in_time, frame=widgets.IntSlider(min=1, max=Images.shape[0], step=1, value=0, continuous_update=False));\nelse:\n f=plt.figure(figsize=(6,6))\n plt.imshow(Images, interpolation='nearest', cmap = 'gray')\n plt.title('Training source')\n plt.axis('off');\n\n# Load the localization file and display the first\nLocData = pd.read_csv(LocalizationData_path, index_col=0)\nLocData.tail()\n\n", "_____no_output_____" ] ], [ [ "## **3.1.b Simulate training data**\n---\nThis simulation tool allows you to generate SMLM data of randomly distrubuted emitters in a field-of-view. \nThe assumptions are as follows:\n\n* Gaussian Point Spread Function (PSF) with standard deviation defined by `Sigma`. The nominal value of `sigma` can be evaluated using `sigma = 0.21 x Lambda / NA`. (from [Zhang *et al.*, Applied Optics 2007](https://doi.org/10.1364/AO.46.001819))\n* Each emitter will emit `n_photons` per frame, and generate their equivalent Poisson noise.\n* The camera will contribute Gaussian noise to the signal with a standard deviation defined by `ReadOutNoise_ADC` in ADC\n* The `emitter_density` is defined as the number of emitters / um^2 on any given frame. Variability in the emitter density can be applied by adjusting `emitter_density_std`. 
The latter parameter represents the standard deviation of the normal distribution that the density is drawn from for each individual frame. `emitter_density` **is defined in number of emitters / um^2**.\n* The `n_photons` and `sigma` can additionally include some Gaussian variability by setting `n_photons_std` and `sigma_std`.\n\nImportant note:\n- All dimensions are in nanometer (e.g. `FOV_size` = 6400 represents a field of view of 6.4 um x 6.4 um).\n\n", "_____no_output_____" ] ], [ [ "load_raw_data = False\n\n# ---------------------------- User input ----------------------------\n#@markdown Run the simulation\n#@markdown --- \n#@markdown Camera settings: \nFOV_size = 6400#@param {type:\"number\"}\npixel_size = 100#@param {type:\"number\"}\nADC_per_photon_conversion = 1 #@param {type:\"number\"}\nReadOutNoise_ADC = 4.5#@param {type:\"number\"}\nADC_offset = 50#@param {type:\"number\"}\n\n#@markdown Acquisition settings: \nemitter_density = 6#@param {type:\"number\"}\nemitter_density_std = 0#@param {type:\"number\"}\n\nnumber_of_frames = 20#@param {type:\"integer\"}\n\nsigma = 110 #@param {type:\"number\"}\nsigma_std = 5 #@param {type:\"number\"}\n# NA = 1.1 #@param {type:\"number\"}\n# wavelength = 800#@param {type:\"number\"}\n# wavelength_std = 150#@param {type:\"number\"}\nn_photons = 2250#@param {type:\"number\"}\nn_photons_std = 250#@param {type:\"number\"}\n\n\n# ---------------------------- Variable initialisation ----------------------------\n# Start the clock to measure how long it takes\nstart = time.time()\n\nprint('-----------------------------------------------------------')\nn_molecules = emitter_density*FOV_size*FOV_size/10**6\nn_molecules_std = emitter_density_std*FOV_size*FOV_size/10**6\nprint('Number of molecules / FOV: '+str(round(n_molecules,2))+' +/- '+str((round(n_molecules_std,2))))\n\n# sigma = 0.21*wavelength/NA\n# sigma_std = 0.21*wavelength_std/NA\n# print('Gaussian PSF sigma: '+str(round(sigma,2))+' +/- '+str(round(sigma_std,2))+' nm')\n\nM = N = round(FOV_size/pixel_size)\nFOV_size = M*pixel_size\nprint('Final image size: '+str(M)+'x'+str(M)+' ('+str(round(FOV_size/1000, 3))+'um x'+str(round(FOV_size/1000,3))+' um)')\n\nnp.random.seed(1)\ndisplay_upsampling = 8 # used to display the loc map here\nNoiseFreeImages = np.zeros((number_of_frames, M, M))\nlocImage = np.zeros((number_of_frames, display_upsampling*M, display_upsampling*N))\n\nframes = []\nall_xloc = []\nall_yloc = []\nall_photons = []\nall_sigmas = []\n\n# ---------------------------- Main simulation loop ----------------------------\nprint('-----------------------------------------------------------')\nfor f in tqdm(range(number_of_frames)):\n \n # Define the coordinates of emitters by randomly distributing them across the FOV\n n_mol = int(max(round(np.random.normal(n_molecules, n_molecules_std, size=1)[0]), 0))\n x_c = np.random.uniform(low=0.0, high=FOV_size, size=n_mol)\n y_c = np.random.uniform(low=0.0, high=FOV_size, size=n_mol)\n photon_array = np.random.normal(n_photons, n_photons_std, size=n_mol)\n sigma_array = np.random.normal(sigma, sigma_std, size=n_mol)\n # x_c = np.linspace(0,3000,5)\n # y_c = np.linspace(0,3000,5)\n\n all_xloc += x_c.tolist()\n all_yloc += y_c.tolist()\n frames += ((f+1)*np.ones(x_c.shape[0])).tolist()\n all_photons += photon_array.tolist()\n all_sigmas += sigma_array.tolist()\n\n locImage[f] = FromLoc2Image_SimpleHistogram(x_c, y_c, image_size = (N*display_upsampling, M*display_upsampling), pixel_size = pixel_size/display_upsampling)\n\n # # Get the approximated 
locations according to the grid pixel size\n # Chr_emitters = [int(max(min(round(display_upsampling*x_c[i]/pixel_size),N*display_upsampling-1),0)) for i in range(len(x_c))]\n # Rhr_emitters = [int(max(min(round(display_upsampling*y_c[i]/pixel_size),M*display_upsampling-1),0)) for i in range(len(y_c))]\n\n # # Build Localization image\n # for (r,c) in zip(Rhr_emitters, Chr_emitters):\n # locImage[f][r][c] += 1\n\n NoiseFreeImages[f] = FromLoc2Image_Erf(x_c, y_c, photon_array, sigma_array, image_size = (M,M), pixel_size = pixel_size)\n\n\n# ---------------------------- Create DataFrame fof localization file ----------------------------\n# Table with localization info as dataframe output\nLocData = pd.DataFrame()\nLocData[\"frame\"] = frames\nLocData[\"x [nm]\"] = all_xloc\nLocData[\"y [nm]\"] = all_yloc\nLocData[\"Photon #\"] = all_photons\nLocData[\"Sigma [nm]\"] = all_sigmas\nLocData.index += 1 # set indices to start at 1 and not 0 (same as ThunderSTORM)\n\n\n# ---------------------------- Estimation of SNR ----------------------------\nn_frames_for_SNR = 100\nM_SNR = 10\nx_c = np.random.uniform(low=0.0, high=pixel_size*M_SNR, size=n_frames_for_SNR)\ny_c = np.random.uniform(low=0.0, high=pixel_size*M_SNR, size=n_frames_for_SNR)\nphoton_array = np.random.normal(n_photons, n_photons_std, size=n_frames_for_SNR)\nsigma_array = np.random.normal(sigma, sigma_std, size=n_frames_for_SNR)\n\nSNR = np.zeros(n_frames_for_SNR)\nfor i in range(n_frames_for_SNR):\n SingleEmitterImage = FromLoc2Image_Erf(np.array([x_c[i]]), np.array([x_c[i]]), np.array([photon_array[i]]), np.array([sigma_array[i]]), (M_SNR, M_SNR), pixel_size)\n Signal_photon = np.max(SingleEmitterImage)\n Noise_photon = math.sqrt((ReadOutNoise_ADC/ADC_per_photon_conversion)**2 + Signal_photon)\n SNR[i] = Signal_photon/Noise_photon\n\nprint('SNR: '+str(round(np.mean(SNR),2))+' +/- '+str(round(np.std(SNR),2)))\n# ---------------------------- ----------------------------\n\n\n# Table with info\nsimParameters = pd.DataFrame()\nsimParameters[\"FOV size (nm)\"] = [FOV_size]\nsimParameters[\"Pixel size (nm)\"] = [pixel_size]\nsimParameters[\"ADC/photon\"] = [ADC_per_photon_conversion]\nsimParameters[\"Read-out noise (ADC)\"] = [ReadOutNoise_ADC]\nsimParameters[\"Constant offset (ADC)\"] = [ADC_offset]\n\nsimParameters[\"Emitter density (emitters/um^2)\"] = [emitter_density]\nsimParameters[\"STD of emitter density (emitters/um^2)\"] = [emitter_density_std]\nsimParameters[\"Number of frames\"] = [number_of_frames]\n# simParameters[\"NA\"] = [NA]\n# simParameters[\"Wavelength (nm)\"] = [wavelength]\n# simParameters[\"STD of wavelength (nm)\"] = [wavelength_std]\nsimParameters[\"Sigma (nm))\"] = [sigma]\nsimParameters[\"STD of Sigma (nm))\"] = [sigma_std]\nsimParameters[\"Number of photons\"] = [n_photons]\nsimParameters[\"STD of number of photons\"] = [n_photons_std]\nsimParameters[\"SNR\"] = [np.mean(SNR)]\nsimParameters[\"STD of SNR\"] = [np.std(SNR)]\n\n\n# ---------------------------- Finish simulation ----------------------------\n# Calculating the noisy image\nImages = ADC_per_photon_conversion * np.random.poisson(NoiseFreeImages) + ReadOutNoise_ADC * np.random.normal(size = (number_of_frames, M, N)) + ADC_offset\nImages[Images <= 0] = 0\n\n# Convert to 16-bit or 32-bits integers\nif Images.max() < (2**16-1):\n Images = Images.astype(np.uint16)\nelse:\n Images = Images.astype(np.uint32)\n\n\n# ---------------------------- Display ----------------------------\n# Displaying the time elapsed for simulation\ndt = time.time() - start\nminutes, 
seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds,1),\"sec(s)\")\n\n\n# Interactively display the results using Widgets\ndef scroll_in_time(frame):\n f = plt.figure(figsize=(18,6))\n plt.subplot(1,3,1)\n plt.imshow(locImage[frame-1], interpolation='bilinear', vmin = 0, vmax=0.1)\n plt.title('Localization image')\n plt.axis('off');\n\n plt.subplot(1,3,2)\n plt.imshow(NoiseFreeImages[frame-1], interpolation='nearest', cmap='gray')\n plt.title('Noise-free simulation')\n plt.axis('off');\n\n plt.subplot(1,3,3)\n plt.imshow(Images[frame-1], interpolation='nearest', cmap='gray')\n plt.title('Noisy simulation')\n plt.axis('off');\n\ninteract(scroll_in_time, frame=widgets.IntSlider(min=1, max=Images.shape[0], step=1, value=0, continuous_update=False));\n\n# Display the tail of the dataframe with localizations\nLocData.tail()\n", "_____no_output_____" ], [ "#@markdown ---\n#@markdown ##Play this cell to save the simulated stack\n#@markdown Please select a path to the folder where the simulated data should be saved. It is not necessary to save the data to run the training, but keeping the simulated data for your own record can be useful to check its validity.\nSave_path = \"\" #@param {type:\"string\"}\n\nif not os.path.exists(Save_path):\n os.makedirs(Save_path)\n print('Folder created.')\nelse:\n print('Training data already exists in folder: Data overwritten.')\n\nsaveAsTIF(Save_path, 'SimulatedDataset', Images, pixel_size)\n# io.imsave(os.path.join(Save_path, 'SimulatedDataset.tif'),Images)\nLocData.to_csv(os.path.join(Save_path, 'SimulatedDataset.csv'))\nsimParameters.to_csv(os.path.join(Save_path, 'SimulatedParameters.csv'))\nprint('Training dataset saved.')", "_____no_output_____" ] ], [ [ "## **3.2. Generate training patches**\n---\n\nTraining patches need to be created from the training data generated above. \n* The `patch_size` needs to give sufficient contextual information and for most cases a `patch_size` of 26 (corresponding to patches of 26x26 pixels) works fine. **DEFAULT: 26**\n* The `upsampling_factor` defines the effective magnification of the final super-resolved image compared to the input image (this is called magnification in ThunderSTORM). This is used to generate the super-resolved patches as target dataset. Using an `upsampling_factor` of 16 will require the use of more memory and it may be necessary to decrease the `patch_size` to 16 for example. **DEFAULT: 8**\n* The `num_patches_per_frame` defines the number of patches extracted from each frame generated in section 3.1. **DEFAULT: 500**\n* The `min_number_of_emitters_per_patch` defines the minimum number of emitters that need to be present in the patch to be a valid patch. An empty patch does not contain useful information for the network to learn from. **DEFAULT: 7**\n* The `max_num_patches` defines the maximum number of patches to generate. Fewer may be generated depending on how many patches are rejected and how many frames are available. **DEFAULT: 10000**\n* The `gaussian_sigma` defines the Gaussian standard deviation (in magnified pixels) applied to generate the super-resolved target image. **DEFAULT: 1**\n* The `L2_weighting_factor` is a normalization factor used in the loss function. It helps balance the loss from the L2 norm. When using higher densities, this factor should be decreased and vice-versa. This factor can be automatically calculated using an empirical formula; an illustrative sketch follows. 
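\n\nAs an illustration only (the constant 20.28 and the variable names mirror the patch-generation cell below; the expected localization count per patch is a hypothetical value):\n\n```python\nimport math\n\n# Assumed values for illustration; in the notebook they come from the patch settings above\nupsampling_factor = 8\nmin_number_of_emitters_per_patch = 7\nn_locs_per_patch = 12.0  # hypothetical expected localization count per patch\n\n# Same empirical balancing as used in the patch-generation cell below\nL2_weighting_factor = 100/math.sqrt(min(n_locs_per_patch, min_number_of_emitters_per_patch)*8**2/(upsampling_factor**2*20.28))\nprint(round(L2_weighting_factor, 2))  # ~170 for these assumed values\n```\n\n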
**DEFAULT: 100**\n\n", "_____no_output_____" ] ], [ [ "#@markdown ## **Provide patch parameters**\n\n\n# -------------------- User input --------------------\npatch_size = 26 #@param {type:\"integer\"}\nupsampling_factor = 8 #@param [\"4\", \"8\", \"16\"] {type:\"raw\"}\nnum_patches_per_frame = 500#@param {type:\"integer\"}\nmin_number_of_emitters_per_patch = 7#@param {type:\"integer\"}\nmax_num_patches = 10000#@param {type:\"integer\"}\ngaussian_sigma = 1#@param {type:\"integer\"}\n\n#@markdown Estimate the optimal normalization factor automatically?\nAutomatic_normalization = True #@param {type:\"boolean\"}\n#@markdown Otherwise, it will use the following value:\nL2_weighting_factor = 100 #@param {type:\"number\"}\n\n\n# -------------------- Prepare variables --------------------\n# Start the clock to measure how long it takes\nstart = time.time()\n\n# Initialize some parameters\npixel_size_hr = pixel_size/upsampling_factor # in nm\nn_patches = min(number_of_frames*num_patches_per_frame, max_num_patches)\npatch_size = patch_size*upsampling_factor\n\n# Dimensions of the high-res grid\nMhr = upsampling_factor*M # in pixels\nNhr = upsampling_factor*N # in pixels\n\n# Initialize the training patches and labels\npatches = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\nspikes = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\nheatmaps = np.zeros((n_patches, patch_size, patch_size), dtype = np.float32)\n\n# Run over all frames and construct the training examples\nk = 1 # current patch count\nskip_counter = 0 # number of dataset skipped due to low density\nid_start = 0 # id position in LocData for current frame\nprint('Generating '+str(n_patches)+' patches of '+str(patch_size)+'x'+str(patch_size))\n\nn_locs = len(LocData.index)\nprint('Total number of localizations: '+str(n_locs))\ndensity = n_locs/(M*N*number_of_frames*(0.001*pixel_size)**2)\nprint('Density: '+str(round(density,2))+' locs/um^2')\nn_locs_per_patch = patch_size**2*density\n\nif Automatic_normalization:\n # This empirical formulae attempts to balance the loss L2 function between the background and the bright spikes\n # A value of 100 was originally chosen to balance L2 for a patch size of 2.6x2.6^2 0.1um pixel size and density of 3 (hence the 20.28), at upsampling_factor = 8\n L2_weighting_factor = 100/math.sqrt(min(n_locs_per_patch, min_number_of_emitters_per_patch)*8**2/(upsampling_factor**2*20.28))\n print('Normalization factor: '+str(round(L2_weighting_factor,2)))\n\n# -------------------- Patch generation loop --------------------\n\nprint('-----------------------------------------------------------')\nfor (f, thisFrame) in enumerate(tqdm(Images)):\n\n # Upsample the frame\n upsampledFrame = np.kron(thisFrame, np.ones((upsampling_factor,upsampling_factor)))\n # Read all the provided high-resolution locations for current frame\n DataFrame = LocData[LocData['frame'] == f+1].copy()\n\n # Get the approximated locations according to the high-res grid pixel size\n Chr_emitters = [int(max(min(round(DataFrame['x [nm]'][i]/pixel_size_hr),Nhr-1),0)) for i in range(id_start+1,id_start+1+len(DataFrame.index))]\n Rhr_emitters = [int(max(min(round(DataFrame['y [nm]'][i]/pixel_size_hr),Mhr-1),0)) for i in range(id_start+1,id_start+1+len(DataFrame.index))]\n id_start += len(DataFrame.index)\n\n # Build Localization image\n LocImage = np.zeros((Mhr,Nhr))\n LocImage[(Rhr_emitters, Chr_emitters)] = 1\n\n # Here, there's a choice between the original Gaussian (classification approach) and using the erf 
function\n HeatMapImage = L2_weighting_factor*gaussian_filter(LocImage, gaussian_sigma) \n # HeatMapImage = L2_weighting_factor*FromLoc2Image_MultiThreaded(np.array(list(DataFrame['x [nm]'])), np.array(list(DataFrame['y [nm]'])), \n # np.ones(len(DataFrame.index)), pixel_size_hr*gaussian_sigma*np.ones(len(DataFrame.index)), \n # Mhr, pixel_size_hr)\n \n\n # Generate random position for the top left corner of the patch\n xc = np.random.randint(0, Mhr-patch_size, size=num_patches_per_frame)\n yc = np.random.randint(0, Nhr-patch_size, size=num_patches_per_frame)\n\n for c in range(len(xc)):\n if LocImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size].sum() < min_number_of_emitters_per_patch:\n skip_counter += 1\n continue\n \n else:\n # Limit maximal number of training examples to 15k\n if k > max_num_patches:\n break\n else:\n # Assign the patches to the right part of the images\n patches[k-1] = upsampledFrame[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n spikes[k-1] = LocImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n heatmaps[k-1] = HeatMapImage[xc[c]:xc[c]+patch_size, yc[c]:yc[c]+patch_size]\n k += 1 # increment current patch count\n\n# Remove the empty data\npatches = patches[:k-1]\nspikes = spikes[:k-1]\nheatmaps = heatmaps[:k-1]\nn_patches = k-1\n\n# -------------------- Failsafe --------------------\n# Check if the size of the training set is smaller than 5k to notify user to simulate more images using ThunderSTORM\nif ((k-1) < 5000):\n # W = '\\033[0m' # white (normal)\n # R = '\\033[31m' # red\n print(bcolors.WARNING+'!! WARNING: Training set size is below 5K - Consider simulating more images in ThunderSTORM. !!'+bcolors.NORMAL)\n\n\n\n# -------------------- Displays --------------------\nprint('Number of patches skipped due to low density: '+str(skip_counter))\n# dataSize = int((getsizeof(patches)+getsizeof(heatmaps)+getsizeof(spikes))/(1024*1024)) #rounded in MB\n# print('Size of patches: '+str(dataSize)+' MB')\nprint(str(n_patches)+' patches were generated.')\n\n# Displaying the time elapsed for training\ndt = time.time() - start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\n# Display patches interactively with a slider\ndef scroll_patches(patch):\n f = plt.figure(figsize=(16,6))\n plt.subplot(1,3,1)\n plt.imshow(patches[patch-1], interpolation='nearest', cmap='gray')\n plt.title('Raw data (frame #'+str(patch)+')')\n plt.axis('off');\n\n plt.subplot(1,3,2)\n plt.imshow(heatmaps[patch-1], interpolation='nearest')\n plt.title('Heat map')\n plt.axis('off');\n\n plt.subplot(1,3,3)\n plt.imshow(spikes[patch-1], interpolation='nearest')\n plt.title('Localization map')\n plt.axis('off');\n \n plt.savefig('/content/TrainingDataExample_DeepSTORM2D.png',bbox_inches='tight',pad_inches=0)\n\n\ninteract(scroll_patches, patch=widgets.IntSlider(min=1, max=patches.shape[0], step=1, value=0, continuous_update=False));\n\n\n", "_____no_output_____" ] ], [ [ "# **4. Train the network**\n---", "_____no_output_____" ], [ "## **4.1. Select your paths and parameters**\n\n---\n\n<font size = 4>**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).\n\n<font size = 4>**`model_name`:** Use only my_model -style, not my-model (Use \"_\" not \"-\"). Do not use spaces in the name. 
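\n\nFor instance (illustrative only, not part of the notebook), a quick check of the name could look like:\n\n```python\nimport re\n\ndef is_valid_model_name(name):\n    # Only letters, digits and underscores -- no spaces and no hyphens\n    return re.fullmatch(r'[A-Za-z0-9_]+', name) is not None\n\nprint(is_valid_model_name('my_model'))  # True\nprint(is_valid_model_name('my-model'))  # False\nprint(is_valid_model_name('my model'))  # False\n```\n\n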
Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.\n\n\n<font size = 5>**Training parameters**\n\n<font size = 4>**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10-30) epochs, but a full training should run for ~100 epochs. Evaluate the performance after training (see 5). **Default value: 80**\n\n<font size =4>**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 16**\n\n<font size = 4>**`number_of_steps`:** Define the number of training steps per epoch. **If this value is set to 0**, by default this parameter is calculated so that each patch is seen at least once per epoch. **Default value: Number of patches / batch_size**\n\n<font size = 4>**`percentage_validation`:** Input the percentage of your training dataset you want to use to validate the network during training. **Default value: 30** \n\n<font size = 4>**`initial_learning_rate`:** This parameter represents the initial value to be used as learning rate in the optimizer. **Default value: 0.001**", "_____no_output_____" ] ], [ [ "#@markdown ###Path to training images and parameters\n\nmodel_path = \"\" #@param {type: \"string\"} \nmodel_name = \"\" #@param {type: \"string\"} \nnumber_of_epochs = 80#@param {type:\"integer\"}\nbatch_size = 16#@param {type:\"integer\"}\n\nnumber_of_steps = 0#@param {type:\"integer\"}\npercentage_validation = 30 #@param {type:\"number\"}\ninitial_learning_rate = 0.001 #@param {type:\"number\"}\n\n\npercentage_validation /= 100\nif number_of_steps == 0: \n number_of_steps = int((1-percentage_validation)*n_patches/batch_size)\n print('Number of steps: '+str(number_of_steps))\n\n# Pretrained model path initialised here so next cell does not need to be run\nh5_file_path = ''\nUse_pretrained_model = False\n\nif not ('patches' in locals()):\n print(bcolors.WARNING+'!! WARNING: No patches were found in memory currently. !!'+bcolors.NORMAL)\n\nSave_path = os.path.join(model_path, model_name)\nif os.path.exists(Save_path):\n print(bcolors.WARNING+'The model folder already exists and will be overwritten.'+bcolors.NORMAL)\n\nprint('-----------------------------')\nprint('Training parameters set.')\n", "_____no_output_____" ] ], [ [ "\n## **4.2. Using weights from a pre-trained model as initial weights**\n---\n<font size = 4> Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a Deep-STORM 2D model**. \n\n<font size = 4> This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.\n\n<font size = 4> In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used. 
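\n\nAs a minimal sketch of where that learning rate is stored (the model folder path is hypothetical; the CSV name and columns match the ones written by this notebook):\n\n```python\nimport os\nimport pandas as pd\n\npretrained_model_path = '/content/my_pretrained_model'  # hypothetical path\ncsv_path = os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')\n\nhistory = pd.read_csv(csv_path)\nlast_lr = history['learning rate'].iloc[-1]  # rate at the final epoch ('last' weights)\nbest_lr = history.loc[history['val_loss'].idxmin(), 'learning rate']  # rate at the lowest val_loss ('best' weights)\nprint('last:', last_lr, 'best:', best_lr)\n```\n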
", "_____no_output_____" ] ], [ [ "# @markdown ##Loading weights from a pre-trained network\n\nUse_pretrained_model = False #@param {type:\"boolean\"}\npretrained_model_choice = \"Model_from_file\" #@param [\"Model_from_file\"]\nWeights_choice = \"best\" #@param [\"last\", \"best\"]\n\n#@markdown ###If you chose \"Model_from_file\", please provide the path to the model folder:\npretrained_model_path = \"\" #@param {type:\"string\"}\n\n# --------------------- Check if we load a previously trained model ------------------------\nif Use_pretrained_model:\n\n# --------------------- Load the model from the choosen path ------------------------\n if pretrained_model_choice == \"Model_from_file\":\n h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n\n# --------------------- Download the a model provided in the XXX ------------------------\n\n if pretrained_model_choice == \"Model_name\":\n pretrained_model_name = \"Model_name\"\n pretrained_model_path = \"/content/\"+pretrained_model_name\n print(\"Downloading the 2D_Demo_Model_from_Stardist_2D_paper\")\n if os.path.exists(pretrained_model_path):\n shutil.rmtree(pretrained_model_path)\n os.makedirs(pretrained_model_path)\n wget.download(\"\", pretrained_model_path)\n wget.download(\"\", pretrained_model_path)\n wget.download(\"\", pretrained_model_path) \n wget.download(\"\", pretrained_model_path)\n h5_file_path = os.path.join(pretrained_model_path, \"weights_\"+Weights_choice+\".hdf5\")\n\n# --------------------- Add additional pre-trained models here ------------------------\n\n\n\n# --------------------- Check the model exist ------------------------\n# If the model path chosen does not contain a pretrain model then use_pretrained_model is disabled, \n if not os.path.exists(h5_file_path):\n print(bcolors.WARNING+'WARNING: weights_'+Weights_choice+'.hdf5 pretrained model does not exist'+bcolors.NORMAL)\n Use_pretrained_model = False\n\n \n# If the model path contains a pretrain model, we load the training rate, \n if os.path.exists(h5_file_path):\n#Here we check if the learning rate can be loaded from the quality control folder\n if os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n with open(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv'),'r') as csvfile:\n csvRead = pd.read_csv(csvfile, sep=',')\n #print(csvRead)\n if \"learning rate\" in csvRead.columns: #Here we check that the learning rate column exist (compatibility with model trained un ZeroCostDL4Mic bellow 1.4)\n print(\"pretrained network learning rate found\")\n #find the last learning rate\n lastLearningRate = csvRead[\"learning rate\"].iloc[-1]\n #Find the learning rate corresponding to the lowest validation loss\n min_val_loss = csvRead[csvRead['val_loss'] == min(csvRead['val_loss'])]\n #print(min_val_loss)\n bestLearningRate = min_val_loss['learning rate'].iloc[-1]\n if Weights_choice == \"last\":\n print('Last learning rate: '+str(lastLearningRate))\n if Weights_choice == \"best\":\n print('Learning rate of best validation loss: '+str(bestLearningRate))\n if not \"learning rate\" in csvRead.columns: #if the column does not exist, then initial learning rate is used instead\n bestLearningRate = initial_learning_rate\n lastLearningRate = initial_learning_rate\n print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. 
Default learning rate of '+str(bestLearningRate)+' will be used instead.'+bcolors.NORMAL)\n\n#Compatibility with models trained outside ZeroCostDL4Mic but default learning rate will be used\n if not os.path.exists(os.path.join(pretrained_model_path, 'Quality Control', 'training_evaluation.csv')):\n print(bcolors.WARNING+'WARNING: The learning rate cannot be identified from the pretrained network. Default learning rate of '+str(initial_learning_rate)+' will be used instead'+bcolors.NORMAL)\n bestLearningRate = initial_learning_rate\n lastLearningRate = initial_learning_rate\n\n\n# Display info about the pretrained model to be loaded (or not)\nif Use_pretrained_model:\n print('Weights found in:')\n print(h5_file_path)\n print('will be loaded prior to training.')\n\nelse:\n print('No pretrained network will be used.')\n h5_file_path = ''\n\n", "_____no_output_____" ] ], [ [ "## **4.4. Start Training**\n---\n<font size = 4>When playing the cell below you should see updates after each epoch (round). Network training can take some time.\n\n<font size = 4>* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches.\n\n<font size = 4>Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder from Google Drive as all data can be erased at the next training if using the same folder.", "_____no_output_____" ] ], [ [ "#@markdown ##Start training\n\n# Start the clock to measure how long it takes\nstart = time.time()\n\n# --------------------- Using pretrained model ------------------------\n#Here we ensure that the learning rate set correctly when using pre-trained models\nif Use_pretrained_model:\n if Weights_choice == \"last\":\n initial_learning_rate = lastLearningRate\n\n if Weights_choice == \"best\": \n initial_learning_rate = bestLearningRate\n# --------------------- ---------------------- ------------------------\n\n\n#here we check that no model with the same name already exist, if so delete\nif os.path.exists(Save_path):\n shutil.rmtree(Save_path)\n\n# Create the model folder!\nos.makedirs(Save_path)\n\n# Export pdf summary \npdf_export(raw_data = load_raw_data, pretrained_model = Use_pretrained_model)\n\n# Let's go !\ntrain_model(patches, heatmaps, Save_path, \n steps_per_epoch=number_of_steps, epochs=number_of_epochs, batch_size=batch_size,\n upsampling_factor = upsampling_factor,\n validation_split = percentage_validation,\n initial_learning_rate = initial_learning_rate, \n pretrained_model_path = h5_file_path,\n L2_weighting_factor = L2_weighting_factor)\n\n# # Show info about the GPU memory useage\n# !nvidia-smi\n\n# Displaying the time elapsed for training\ndt = time.time() - start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\n# export pdf after training to update the existing document\npdf_export(trained = True, raw_data = load_raw_data, pretrained_model = Use_pretrained_model)\n", "_____no_output_____" ] ], [ [ "# **5. Evaluate your model**\n---\n\n<font size = 4>This section allows the user to perform important quality checks on the validity and generalisability of the trained model. 
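\n\nAs a quick pre-flight check before running the QC cells (illustrative only; the model folder path is hypothetical, but the file names match the ones this notebook writes):\n\n```python\nimport os\n\nmodel_folder = '/content/my_deepstorm_model'  # hypothetical path\nexpected = [\n    'weights_best.hdf5',\n    'weights_last.hdf5',\n    os.path.join('Quality Control', 'training_evaluation.csv'),\n]\nfor rel in expected:\n    status = 'found' if os.path.exists(os.path.join(model_folder, rel)) else 'MISSING'\n    print(status + ': ' + rel)\n```\n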
\n\n<font size = 4>**We highly recommend performing quality control on all newly trained models.**", "_____no_output_____" ] ], [ [ "# model name and path\n#@markdown ###Do you want to assess the model you just trained?\nUse_the_current_trained_model = True #@param {type:\"boolean\"}\n\n#@markdown ###If not, please provide the path to the model folder:\n#@markdown #####During training, the model files are automatically saved inside a folder named after the parameter `model_name` (see section 4.1). Provide the name of this folder as `QC_model_path`. \n\nQC_model_path = \"\" #@param {type:\"string\"}\n\nif (Use_the_current_trained_model): \n QC_model_path = os.path.join(model_path, model_name)\n\nif os.path.exists(QC_model_path):\n print(\"The \"+os.path.basename(QC_model_path)+\" model will be evaluated\")\nelse:\n print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!'+bcolors.NORMAL)\n print('Please make sure you provide a valid model path before proceeding further.')\n", "_____no_output_____" ] ], [ [ "## **5.1. Inspection of the loss function**\n---\n\n<font size = 4>First, it is good practice to evaluate the training progress by comparing the training loss with the validation loss. The latter is a metric which shows how well the network performs on a subset of unseen data which is set aside from the training dataset. For more information on this, see for example [this review](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6381354/) by Nichols *et al.*\n\n<font size = 4>**Training loss** describes an error value after each epoch for the difference between the model's prediction and its ground-truth target.\n\n<font size = 4>**Validation loss** describes the same error value between the model's prediction on a validation image and its target.\n\n<font size = 4>During training both values should decrease before reaching a minimal value which does not decrease further even after more training. Comparing the development of the validation loss with the training loss can give insights into the model's performance.\n\n<font size = 4>Decreasing **Training loss** and **Validation loss** indicates that training is still necessary and increasing the `number_of_epochs` is recommended. Note that the curves can look flat towards the right side, just because of the y-axis scaling. The network has reached convergence once the curves flatten out. After this point no further training is required. If the **Validation loss** suddenly increases again and the **Training loss** simultaneously goes towards zero, it means that the network is overfitting to the training data. In other words, the network is remembering the exact patterns from the training data and no longer generalizes well to unseen data. In this case the training dataset has to be increased.", "_____no_output_____" ] ], [ [ "#@markdown ##Play the cell to show a plot of training errors vs. epoch number\n\n\nlossDataFromCSV = []\nvallossDataFromCSV = []\n\nwith open(os.path.join(QC_model_path,'Quality Control/training_evaluation.csv'),'r') as csvfile:\n csvRead = csv.reader(csvfile, delimiter=',')\n next(csvRead)\n for row in csvRead:\n if row:\n lossDataFromCSV.append(float(row[0]))\n vallossDataFromCSV.append(float(row[1]))\n\nepochNumber = range(len(lossDataFromCSV))\nplt.figure(figsize=(15,10))\n\nplt.subplot(2,1,1)\nplt.plot(epochNumber,lossDataFromCSV, label='Training loss')\nplt.plot(epochNumber,vallossDataFromCSV, label='Validation loss')\nplt.title('Training loss and validation loss vs. epoch number (linear scale)')\nplt.ylabel('Loss')\nplt.xlabel('Epoch number')\nplt.legend()\n\nplt.subplot(2,1,2)\nplt.semilogy(epochNumber,lossDataFromCSV, label='Training loss')\nplt.semilogy(epochNumber,vallossDataFromCSV, label='Validation loss')\nplt.title('Training loss and validation loss vs. epoch number (log scale)')\nplt.ylabel('Loss')\nplt.xlabel('Epoch number')\nplt.legend()\nplt.savefig(os.path.join(QC_model_path,'Quality Control/lossCurvePlots.png'), bbox_inches='tight', pad_inches=0)\nplt.show()\n\n", "_____no_output_____" ] ], [ [ "## **5.2. Error mapping and quality metrics estimation**\n---\n\n<font size = 4>This section will display SSIM maps and RSE maps as well as calculate total SSIM, NRMSE and PSNR metrics for all the images provided in the \"QC_image_folder\", using the corresponding localization data contained in \"QC_loc_folder\".\n\n<font size = 4>**1. The SSIM (structural similarity) map** \n\n<font size = 4>The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). \n\n<font size=4>**mSSIM** is the SSIM value calculated across the entire window of both images.\n\n<font size=4>**The output below shows the SSIM maps with the mSSIM**\n\n<font size = 4>**2. The RSE (Root Squared Error) map** \n\n<font size = 4>This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).\n\n\n<font size =4>**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.\n\n<font size = 4>**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. 
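\n\nAs a compact sketch of how these scalar scores are computed on normalized images (mirroring the formulas used in the QC cell below; the toy arrays are placeholders):\n\n```python\nimport numpy as np\n\nrng = np.random.default_rng(0)\ngt = rng.random((64, 64))  # stand-in for the normalized ground-truth image\npred = np.clip(gt + 0.05*rng.standard_normal((64, 64)), 0, 1)  # stand-in prediction\n\nrse_map = np.sqrt(np.square(gt - pred))  # pixel-wise root squared error (RSE map)\nnrmse = np.sqrt(np.mean(rse_map))  # scalar summary used by this notebook\nmse = np.mean(np.square(gt - pred))\npsnr_db = 10*np.log10(1.0**2/mse)  # PSNR with data_range = 1.0\nprint(round(nrmse, 3), round(psnr_db, 2))\n```\n\n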
The higher the score the better the agreement.\n\n<font size=4>**The output below shows the RSE maps with the NRMSE and PSNR values.**\n\n\n\n", "_____no_output_____" ] ], [ [ "\n# ------------------------ User input ------------------------\n#@markdown ##Choose the folders that contain your Quality Control dataset\nQC_image_folder = \"\" #@param{type:\"string\"}\nQC_loc_folder = \"\" #@param{type:\"string\"}\n#@markdown Get pixel size from file?\nget_pixel_size_from_file = True #@param {type:\"boolean\"}\n#@markdown Otherwise, use this value:\npixel_size = 100 #@param {type:\"number\"}\n\nif get_pixel_size_from_file:\n pixel_size_INPUT = None\nelse:\n pixel_size_INPUT = pixel_size\n\n\n# ------------------------ QC analysis loop over provided dataset ------------------------\n\nsavePath = os.path.join(QC_model_path, 'Quality Control')\n\n# Open and create the csv file that will contain all the QC metrics\nwith open(os.path.join(savePath, os.path.basename(QC_model_path)+\"_QC_metrics.csv\"), \"w\", newline='') as file:\n writer = csv.writer(file)\n\n # Write the header in the csv file\n writer.writerow([\"image #\",\"Prediction v. GT mSSIM\",\"WF v. GT mSSIM\", \"Prediction v. GT NRMSE\",\"WF v. GT NRMSE\", \"Prediction v. GT PSNR\", \"WF v. GT PSNR\"])\n\n # These lists will be used to collect all the metrics values per slice\n file_name_list = []\n slice_number_list = []\n mSSIM_GvP_list = []\n mSSIM_GvWF_list = []\n NRMSE_GvP_list = []\n NRMSE_GvWF_list = []\n PSNR_GvP_list = []\n PSNR_GvWF_list = []\n\n # Let's loop through the provided dataset in the QC folders\n\n for (imageFilename, locFilename) in zip(list_files(QC_image_folder, 'tif'), list_files(QC_loc_folder, 'csv')):\n print('--------------')\n print(imageFilename)\n print(locFilename)\n\n # Get the prediction\n batchFramePredictionLocalization(QC_image_folder, imageFilename, QC_model_path, savePath, pixel_size = pixel_size_INPUT)\n\n # test_model(QC_image_folder, imageFilename, QC_model_path, savePath, display=False);\n thisPrediction = io.imread(os.path.join(savePath, 'Predicted_'+imageFilename))\n thisWidefield = io.imread(os.path.join(savePath, 'Widefield_'+imageFilename))\n\n Mhr = thisPrediction.shape[0]\n Nhr = thisPrediction.shape[1]\n\n if pixel_size_INPUT == None:\n pixel_size, N, M = getPixelSizeTIFFmetadata(os.path.join(QC_image_folder,imageFilename))\n\n upsampling_factor = int(Mhr/M)\n print('Upsampling factor: '+str(upsampling_factor))\n pixel_size_hr = pixel_size/upsampling_factor # in nm\n\n # Load the localization file and display the first\n LocData = pd.read_csv(os.path.join(QC_loc_folder,locFilename), index_col=0)\n\n x = np.array(list(LocData['x [nm]']))\n y = np.array(list(LocData['y [nm]']))\n locImage = FromLoc2Image_SimpleHistogram(x, y, image_size = (Mhr,Nhr), pixel_size = pixel_size_hr)\n\n # Remove extension from filename\n imageFilename_no_extension = os.path.splitext(imageFilename)[0]\n\n # io.imsave(os.path.join(savePath, 'GT_image_'+imageFilename), locImage)\n saveAsTIF(savePath, 'GT_image_'+imageFilename_no_extension, locImage, pixel_size_hr)\n\n # Normalize the images wrt each other by minimizing the MSE between GT and prediction\n test_GT_norm, test_prediction_norm = norm_minmse(locImage, thisPrediction, normalize_gt=True)\n # Normalize the images wrt each other by minimizing the MSE between GT and Source image\n test_GT_norm, test_wf_norm = norm_minmse(locImage, thisWidefield, normalize_gt=True)\n\n # -------------------------------- Calculate the metric maps and save them 
--------------------------------\n\n # Calculate the SSIM maps\n index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = structural_similarity(test_GT_norm, test_prediction_norm, data_range=1., full=True)\n index_SSIM_GTvsWF, img_SSIM_GTvsWF = structural_similarity(test_GT_norm, test_wf_norm, data_range=1., full=True)\n\n\n # Save ssim_maps\n img_SSIM_GTvsPrediction_32bit = np.float32(img_SSIM_GTvsPrediction)\n # io.imsave(os.path.join(savePath,'SSIM_GTvsPrediction_'+imageFilename),img_SSIM_GTvsPrediction_32bit)\n saveAsTIF(savePath,'SSIM_GTvsPrediction_'+imageFilename_no_extension, img_SSIM_GTvsPrediction_32bit, pixel_size_hr)\n\n\n img_SSIM_GTvsWF_32bit = np.float32(img_SSIM_GTvsWF)\n # io.imsave(os.path.join(savePath,'SSIM_GTvsWF_'+imageFilename),img_SSIM_GTvsWF_32bit)\n saveAsTIF(savePath,'SSIM_GTvsWF_'+imageFilename_no_extension, img_SSIM_GTvsWF_32bit, pixel_size_hr)\n\n \n # Calculate the Root Squared Error (RSE) maps\n img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))\n img_RSE_GTvsWF = np.sqrt(np.square(test_GT_norm - test_wf_norm))\n\n # Save SE maps\n img_RSE_GTvsPrediction_32bit = np.float32(img_RSE_GTvsPrediction)\n # io.imsave(os.path.join(savePath,'RSE_GTvsPrediction_'+imageFilename),img_RSE_GTvsPrediction_32bit)\n saveAsTIF(savePath,'RSE_GTvsPrediction_'+imageFilename_no_extension, img_RSE_GTvsPrediction_32bit, pixel_size_hr)\n\n img_RSE_GTvsWF_32bit = np.float32(img_RSE_GTvsWF)\n # io.imsave(os.path.join(savePath,'RSE_GTvsWF_'+imageFilename),img_RSE_GTvsWF_32bit)\n saveAsTIF(savePath,'RSE_GTvsWF_'+imageFilename_no_extension, img_RSE_GTvsWF_32bit, pixel_size_hr)\n\n\n # -------------------------------- Calculate the RSE metrics and save them --------------------------------\n\n # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)\n NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))\n NRMSE_GTvsWF = np.sqrt(np.mean(img_RSE_GTvsWF))\n \n # We can also measure the peak signal to noise ratio between the images\n PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)\n PSNR_GTvsWF = psnr(test_GT_norm,test_wf_norm,data_range=1.0)\n\n writer.writerow([imageFilename,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsWF),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsWF),str(PSNR_GTvsPrediction), str(PSNR_GTvsWF)])\n\n # Collect values to display in dataframe output\n file_name_list.append(imageFilename)\n mSSIM_GvP_list.append(index_SSIM_GTvsPrediction)\n mSSIM_GvWF_list.append(index_SSIM_GTvsWF)\n NRMSE_GvP_list.append(NRMSE_GTvsPrediction)\n NRMSE_GvWF_list.append(NRMSE_GTvsWF)\n PSNR_GvP_list.append(PSNR_GTvsPrediction)\n PSNR_GvWF_list.append(PSNR_GTvsWF)\n\n\n# Table with metrics as dataframe output\npdResults = pd.DataFrame(index = file_name_list)\npdResults[\"Prediction v. GT mSSIM\"] = mSSIM_GvP_list\npdResults[\"Wide-field v. GT mSSIM\"] = mSSIM_GvWF_list\npdResults[\"Prediction v. GT NRMSE\"] = NRMSE_GvP_list\npdResults[\"Wide-field v. GT NRMSE\"] = NRMSE_GvWF_list\npdResults[\"Prediction v. GT PSNR\"] = PSNR_GvP_list\npdResults[\"Wide-field v. 
GT PSNR\"] = PSNR_GvWF_list\n\n\n# ------------------------ Display ------------------------\n\nprint('--------------------------------------------')\n@interact\ndef show_QC_results(file = list_files(QC_image_folder, 'tif')):\n\n plt.figure(figsize=(15,15))\n # Target (Ground-truth)\n plt.subplot(3,3,1)\n plt.axis('off')\n img_GT = io.imread(os.path.join(savePath, 'GT_image_'+file))\n plt.imshow(img_GT, norm = simple_norm(img_GT, percent = 99.5))\n plt.title('Target',fontsize=15)\n\n # Wide-field\n plt.subplot(3,3,2)\n plt.axis('off')\n img_Source = io.imread(os.path.join(savePath, 'Widefield_'+file))\n plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n plt.title('Widefield',fontsize=15)\n\n #Prediction\n plt.subplot(3,3,3)\n plt.axis('off')\n img_Prediction = io.imread(os.path.join(savePath, 'Predicted_'+file))\n plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n plt.title('Prediction',fontsize=15)\n\n #Setting up colours\n cmap = plt.cm.CMRmap\n\n #SSIM between GT and Source\n plt.subplot(3,3,5)\n #plt.axis('off')\n plt.tick_params(\n axis='both', # changes apply to the x-axis and y-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n left=False, # ticks along the left edge are off\n right=False, # ticks along the right edge are off\n labelbottom=False,\n labelleft=False)\n img_SSIM_GTvsWF = io.imread(os.path.join(savePath, 'SSIM_GTvsWF_'+file))\n imSSIM_GTvsWF = plt.imshow(img_SSIM_GTvsWF, cmap = cmap, vmin=0, vmax=1)\n plt.colorbar(imSSIM_GTvsWF,fraction=0.046, pad=0.04)\n plt.title('Target vs. Widefield',fontsize=15)\n plt.xlabel('mSSIM: '+str(round(pdResults.loc[file][\"Wide-field v. GT mSSIM\"],3)),fontsize=14)\n plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)\n\n #SSIM between GT and Prediction\n plt.subplot(3,3,6)\n #plt.axis('off')\n plt.tick_params(\n axis='both', # changes apply to the x-axis and y-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n left=False, # ticks along the left edge are off\n right=False, # ticks along the right edge are off\n labelbottom=False,\n labelleft=False)\n img_SSIM_GTvsPrediction = io.imread(os.path.join(savePath, 'SSIM_GTvsPrediction_'+file))\n imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)\n plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)\n plt.title('Target vs. Prediction',fontsize=15)\n plt.xlabel('mSSIM: '+str(round(pdResults.loc[file][\"Prediction v. GT mSSIM\"],3)),fontsize=14)\n\n #Root Squared Error between GT and Source\n plt.subplot(3,3,8)\n #plt.axis('off')\n plt.tick_params(\n axis='both', # changes apply to the x-axis and y-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n left=False, # ticks along the left edge are off\n right=False, # ticks along the right edge are off\n labelbottom=False,\n labelleft=False)\n img_RSE_GTvsWF = io.imread(os.path.join(savePath, 'RSE_GTvsWF_'+file))\n imRSE_GTvsWF = plt.imshow(img_RSE_GTvsWF, cmap = cmap, vmin=0, vmax = 1)\n plt.colorbar(imRSE_GTvsWF,fraction=0.046,pad=0.04)\n plt.title('Target vs. Widefield',fontsize=15)\n plt.xlabel('NRMSE: '+str(round(pdResults.loc[file][\"Wide-field v. 
GT NRMSE\"],3))+', PSNR: '+str(round(pdResults.loc[file][\"Wide-field v. GT PSNR\"],3)),fontsize=14)\n plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)\n\n #Root Squared Error between GT and Prediction\n plt.subplot(3,3,9)\n #plt.axis('off')\n plt.tick_params(\n axis='both', # changes apply to the x-axis and y-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n left=False, # ticks along the left edge are off\n right=False, # ticks along the right edge are off\n labelbottom=False,\n labelleft=False)\n img_RSE_GTvsPrediction = io.imread(os.path.join(savePath, 'RSE_GTvsPrediction_'+file))\n imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)\n plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)\n plt.title('Target vs. Prediction',fontsize=15)\n plt.xlabel('NRMSE: '+str(round(pdResults.loc[file][\"Prediction v. GT NRMSE\"],3))+', PSNR: '+str(round(pdResults.loc[file][\"Prediction v. GT PSNR\"],3)),fontsize=14)\n plt.savefig(QC_model_path+'/Quality Control/QC_example_data.png', bbox_inches='tight', pad_inches=0)\nprint('--------------------------------------------')\npdResults.head()\n\n# Export pdf wth summary of QC results\nqc_pdf_export()", "_____no_output_____" ] ], [ [ "# **6. Using the trained model**\n\n---\n\n<font size = 4>In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive.", "_____no_output_____" ], [ "## **6.1 Generate image prediction and localizations from unseen dataset**\n---\n\n<font size = 4>The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as restored image stacks (ImageJ-compatible TIFF images).\n\n<font size = 4>**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.\n\n<font size = 4>**`Result_folder`:** This folder will contain the found localizations csv.\n\n<font size = 4>**`batch_size`:** This paramter determines how many frames are processed by any single pass on the GPU. A higher `batch_size` will make the prediction faster but will use more GPU memory. If an OutOfMemory (OOM) error occurs, decrease the `batch_size`. **DEFAULT: 4**\n\n<font size = 4>**`threshold`:** This paramter determines threshold for local maxima finding. The value is expected to reside in the range **[0,1]**. A higher `threshold` will result in less localizations. **DEFAULT: 0.1**\n\n<font size = 4>**`neighborhood_size`:** This paramter determines size of the neighborhood within which the prediction needs to be a local maxima in recovery pixels (CCD pixel/upsampling_factor). A high `neighborhood_size` will make the prediction slower and potentially discard nearby localizations. **DEFAULT: 3**\n\n<font size = 4>**`use_local_average`:** This paramter determines whether to locally average the prediction in a 3x3 neighborhood to get the final localizations. If set to **True** it will make inference slightly slower depending on the size of the FOV. 
**DEFAULT: True**\n", "_____no_output_____" ] ], [ [ "\n# ------------------------------- User input -------------------------------\n#@markdown ### Data parameters\nData_folder = \"\" #@param {type:\"string\"}\nResult_folder = \"\" #@param {type:\"string\"}\n#@markdown Get pixel size from file?\nget_pixel_size_from_file = True #@param {type:\"boolean\"}\n#@markdown Otherwise, use this value (in nm):\npixel_size = 100 #@param {type:\"number\"}\n\n#@markdown ### Model parameters\n#@markdown Do you want to use the model you just trained?\nUse_the_current_trained_model = True #@param {type:\"boolean\"}\n#@markdown Otherwise, please provide path to the model folder below\nprediction_model_path = \"\" #@param {type:\"string\"}\n\n#@markdown ### Prediction parameters\nbatch_size = 4#@param {type:\"integer\"}\n\n#@markdown ### Post processing parameters\nthreshold = 0.1#@param {type:\"number\"}\nneighborhood_size = 3#@param {type:\"integer\"}\n#@markdown Do you want to locally average the model output with CoG estimator ?\nuse_local_average = True #@param {type:\"boolean\"}\n\n\nif get_pixel_size_from_file:\n pixel_size = None\n\nif (Use_the_current_trained_model): \n prediction_model_path = os.path.join(model_path, model_name)\n\nif os.path.exists(prediction_model_path):\n print(\"The \"+os.path.basename(prediction_model_path)+\" model will be used.\")\nelse:\n print(bcolors.WARNING+'!! WARNING: The chosen model does not exist !!'+bcolors.NORMAL)\n print('Please make sure you provide a valid model path before proceeding further.')\n\n# inform user whether local averaging is being used\nif use_local_average == True: \n print('Using local averaging')\n\nif not os.path.exists(Result_folder):\n print('Result folder was created.')\n os.makedirs(Result_folder)\n\n\n# ------------------------------- Run predictions -------------------------------\n\nstart = time.time()\n#%% This script tests the trained fully convolutional network based on the \n# saved training weights, and normalization created using train_model.\n\nif os.path.isdir(Data_folder): \n for filename in list_files(Data_folder, 'tif'):\n # run the testing/reconstruction process\n print(\"------------------------------------\")\n print(\"Running prediction on: \"+ filename)\n batchFramePredictionLocalization(Data_folder, filename, prediction_model_path, Result_folder, \n batch_size, \n threshold, \n neighborhood_size, \n use_local_average,\n pixel_size = pixel_size)\n\nelif os.path.isfile(Data_folder):\n batchFramePredictionLocalization(os.path.dirname(Data_folder), os.path.basename(Data_folder), prediction_model_path, Result_folder, \n batch_size, \n threshold, \n neighborhood_size, \n use_local_average, \n pixel_size = pixel_size)\n\n\n\nprint('--------------------------------------------------------------------')\n# Displaying the time elapsed for training\ndt = time.time() - start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\n\n# ------------------------------- Interactive display -------------------------------\n\nprint('--------------------------------------------------------------------')\nprint('---------------------------- Previews ------------------------------')\nprint('--------------------------------------------------------------------')\n\nif os.path.isdir(Data_folder): \n @interact\n def show_QC_results(file = list_files(Data_folder, 'tif')):\n\n plt.figure(figsize=(15,7.5))\n # Wide-field\n 
plt.subplot(1,2,1)\n    plt.axis('off')\n    img_Source = io.imread(os.path.join(Result_folder, 'Widefield_'+file))\n    plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n    plt.title('Widefield', fontsize=15)\n    # Prediction\n    plt.subplot(1,2,2)\n    plt.axis('off')\n    img_Prediction = io.imread(os.path.join(Result_folder, 'Predicted_'+file))\n    plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n    plt.title('Predicted',fontsize=15)\n\nif os.path.isfile(Data_folder):\n\n  plt.figure(figsize=(15,7.5))\n  # Wide-field\n  plt.subplot(1,2,1)\n  plt.axis('off')\n  img_Source = io.imread(os.path.join(Result_folder, 'Widefield_'+os.path.basename(Data_folder)))\n  plt.imshow(img_Source, norm = simple_norm(img_Source, percent = 99.5))\n  plt.title('Widefield', fontsize=15)\n  # Prediction\n  plt.subplot(1,2,2)\n  plt.axis('off')\n  img_Prediction = io.imread(os.path.join(Result_folder, 'Predicted_'+os.path.basename(Data_folder)))\n  plt.imshow(img_Prediction, norm = simple_norm(img_Prediction, percent = 99.5))\n  plt.title('Predicted',fontsize=15)\n\n", "_____no_output_____" ] ], [ [ "## **6.2 Drift correction**\n---\n\n<font size = 4>The visualization above is the raw output of the network, displayed at the `upsampling_factor` chosen during model training. The display is a preview without any drift correction applied. This section performs drift correction using cross-correlation between time bins to estimate the drift.\n\n<font size = 4>**`Loc_file_path`:** is the path to the localization file to use for visualization.\n\n<font size = 4>**`original_image_path`:** is the path to the original image. This only serves to extract the original image size and pixel size to shape the visualization properly.\n\n<font size = 4>**`visualization_pixel_size`:** This parameter corresponds to the pixel size to use for the image reconstructions used for the drift correction estimation (in **nm**). A smaller pixel size will be more precise but will take longer to compute. **DEFAULT: 20**\n\n<font size = 4>**`number_of_bins`:** This parameter defines how many temporal bins are used across the full dataset. All localizations in each bin are used to build an image. This image is used to find the drift with respect to the image obtained from the very first bin. A typical value would correspond to about 500 frames per bin. **DEFAULT: Total number of frames / 500**\n\n<font size = 4>**`polynomial_fit_degree`:** The drift obtained for each temporal bin needs to be interpolated to every single frame. This is performed by a polynomial fit, the degree of which is defined here. **DEFAULT: 4**\n\n<font size = 4> The drift-corrected localization data is automatically saved in the `save_path` folder.", "_____no_output_____" ] ], [ [ "# @markdown ##Data parameters\nLoc_file_path = \"\" #@param {type:\"string\"}\n# @markdown Provide information about original data. 
Get the info automatically from the raw data?\nGet_info_from_file = True #@param {type:\"boolean\"}\n# Loc_file_path = \"/content/gdrive/My Drive/Colab notebooks testing/DeepSTORM/Glia data from CL/Results from prediction/20200615-M6 with CoM localizations/Localizations_glia_actin_2D - 1-500fr_avg.csv\" #@param {type:\"string\"}\noriginal_image_path = \"\" #@param {type:\"string\"}\n# @markdown Otherwise, please provide image width, height (in pixels) and pixel size (in nm)\nimage_width = 256#@param {type:\"integer\"}\nimage_height = 256#@param {type:\"integer\"}\npixel_size = 100 #@param {type:\"number\"}\n\n# @markdown ##Drift correction parameters\nvisualization_pixel_size = 20#@param {type:\"number\"}\nnumber_of_bins = 50#@param {type:\"integer\"}\npolynomial_fit_degree = 4#@param {type:\"integer\"}\n\n# @markdown ##Saving parameters\nsave_path = '' #@param {type:\"string\"}\n\n\n# Let's go !\nstart = time.time()\n\n# Get info from the raw file if selected\nif Get_info_from_file:\n pixel_size, image_width, image_height = getPixelSizeTIFFmetadata(original_image_path, display=True)\n\n# Read the localizations in\nLocData = pd.read_csv(Loc_file_path)\n\n# Calculate a few variables \nMhr = int(math.ceil(image_height*pixel_size/visualization_pixel_size))\nNhr = int(math.ceil(image_width*pixel_size/visualization_pixel_size))\nnFrames = max(LocData['frame'])\nx_max = max(LocData['x [nm]'])\ny_max = max(LocData['y [nm]'])\nimage_size = (Mhr, Nhr)\nn_locs = len(LocData.index)\n\nprint('Image size: '+str(image_size))\nprint('Number of frames in data: '+str(nFrames))\nprint('Number of localizations in data: '+str(n_locs))\n\nblocksize = math.ceil(nFrames/number_of_bins)\nprint('Number of frames per block: '+str(blocksize))\n\nblockDataFrame = LocData[(LocData['frame'] < blocksize)].copy()\nxc_array = blockDataFrame['x [nm]'].to_numpy(dtype=np.float32)\nyc_array = blockDataFrame['y [nm]'].to_numpy(dtype=np.float32)\n\n# Preparing the Reference image\nphoton_array = np.ones(yc_array.shape[0])\nsigma_array = np.ones(yc_array.shape[0])\nImageRef = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\nImagesRef = np.rot90(ImageRef, k=2)\n\nxDrift = np.zeros(number_of_bins)\nyDrift = np.zeros(number_of_bins)\n\nfilename_no_extension = os.path.splitext(os.path.basename(Loc_file_path))[0]\n\nwith open(os.path.join(save_path, filename_no_extension+\"_DriftCorrectionData.csv\"), \"w\", newline='') as file:\n writer = csv.writer(file)\n\n # Write the header in the csv file\n writer.writerow([\"Block #\", \"x-drift [nm]\",\"y-drift [nm]\"])\n\n for b in tqdm(range(number_of_bins)):\n\n blockDataFrame = LocData[(LocData['frame'] >= (b*blocksize)) & (LocData['frame'] < ((b+1)*blocksize))].copy()\n xc_array = blockDataFrame['x [nm]'].to_numpy(dtype=np.float32)\n yc_array = blockDataFrame['y [nm]'].to_numpy(dtype=np.float32)\n\n photon_array = np.ones(yc_array.shape[0])\n sigma_array = np.ones(yc_array.shape[0])\n ImageBlock = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\n\n XC = fftconvolve(ImagesRef, ImageBlock, mode = 'same')\n yDrift[b], xDrift[b] = subPixelMaxLocalization(XC, method = 'CoM')\n\n # saveAsTIF(save_path, 'ImageBlock'+str(b), ImageBlock, visualization_pixel_size)\n # saveAsTIF(save_path, 'XCBlock'+str(b), XC, visualization_pixel_size)\n writer.writerow([str(b), str((xDrift[b]-xDrift[0])*visualization_pixel_size), 
str((yDrift[b]-yDrift[0])*visualization_pixel_size)])\n\n\nprint('--------------------------------------------------------------------')\n# Displaying the time elapsed for training\ndt = time.time() - start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\nprint('Fitting drift data...')\nbin_number = np.arange(number_of_bins)*blocksize + blocksize/2\nxDrift = (xDrift-xDrift[0])*visualization_pixel_size\nyDrift = (yDrift-yDrift[0])*visualization_pixel_size\n\nxDriftCoeff = np.polyfit(bin_number, xDrift, polynomial_fit_degree)\nyDriftCoeff = np.polyfit(bin_number, yDrift, polynomial_fit_degree)\n\nxDriftFit = np.poly1d(xDriftCoeff)\nyDriftFit = np.poly1d(yDriftCoeff)\nbins = np.arange(nFrames)\nxDriftInterpolated = xDriftFit(bins)\nyDriftInterpolated = yDriftFit(bins)\n\n\n# ------------------ Displaying the image results ------------------\n\nplt.figure(figsize=(15,10))\nplt.plot(bin_number,xDrift, 'r+', label='x-drift')\nplt.plot(bin_number,yDrift, 'b+', label='y-drift')\nplt.plot(bins,xDriftInterpolated, 'r-', label='x-drift (fit)')\nplt.plot(bins,yDriftInterpolated, 'b-', label='y-drift (fit)')\nplt.title('Cross-correlation estimated drift')\nplt.ylabel('Drift [nm]')\nplt.xlabel('Bin number')\nplt.legend();\n\ndt = time.time() - start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\", hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\n\n# ------------------ Actual drift correction -------------------\n\nprint('Correcting localization data...')\nxc_array = LocData['x [nm]'].to_numpy(dtype=np.float32)\nyc_array = LocData['y [nm]'].to_numpy(dtype=np.float32)\nframes = LocData['frame'].to_numpy(dtype=np.int32)\n\n\nxc_array_Corr, yc_array_Corr = correctDriftLocalization(xc_array, yc_array, frames, xDriftInterpolated, yDriftInterpolated)\nImageRaw = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\nImageCorr = FromLoc2Image_SimpleHistogram(xc_array_Corr, yc_array_Corr, image_size = image_size, pixel_size = visualization_pixel_size)\n\n\n# ------------------ Displaying the image results ------------------\nplt.figure(figsize=(15,7.5))\n# Raw\nplt.subplot(1,2,1)\nplt.axis('off')\nplt.imshow(ImageRaw, norm = simple_norm(ImageRaw, percent = 99.5))\nplt.title('Raw', fontsize=15);\n# Corrected\nplt.subplot(1,2,2)\nplt.axis('off')\nplt.imshow(ImageCorr, norm = simple_norm(ImageCorr, percent = 99.5))\nplt.title('Corrected',fontsize=15);\n\n\n# ------------------ Table with info -------------------\ndriftCorrectedLocData = pd.DataFrame()\ndriftCorrectedLocData['frame'] = frames\ndriftCorrectedLocData['x [nm]'] = xc_array_Corr\ndriftCorrectedLocData['y [nm]'] = yc_array_Corr\ndriftCorrectedLocData['confidence [a.u]'] = LocData['confidence [a.u]']\n\ndriftCorrectedLocData.to_csv(os.path.join(save_path, filename_no_extension+'_DriftCorrected.csv'))\nprint('-------------------------------')\nprint('Corrected localizations saved.')\n", "_____no_output_____" ] ], [ [ "## **6.3 Visualization of the localizations**\n---\n\n\n<font size = 4>The visualization in section 6.1 is the raw output of the network, displayed at the `upsampling_factor` chosen during model training. 
This section performs visualization of the result by plotting the localizations as a simple histogram.\n\n<font size = 4>**`Loc_file_path`:** is the path to the localization file to use for visualization.\n\n<font size = 4>**`original_image_path`:** is the path to the original image. This only serves to extract the original image size and pixel size to shape the visualization properly.\n\n<font size = 4>**`visualization_pixel_size`:** This parameter corresponds to the pixel size to use for the final image reconstruction (in **nm**). **DEFAULT: 10**\n\n<font size = 4>**`visualization_mode`:** This parameter defines what visualization method is used to visualize the final image. NOTES: The Integrated Gaussian can be quite slow. **DEFAULT: Simple histogram.**\n\n\n\n", "_____no_output_____" ] ], [ [ "# @markdown ##Data parameters\nUse_current_drift_corrected_localizations = True #@param {type:\"boolean\"}\n# @markdown Otherwise provide a localization file path\nLoc_file_path = \"\" #@param {type:\"string\"}\n# @markdown Provide information about original data. Get the info automatically from the raw data?\nGet_info_from_file = True #@param {type:\"boolean\"}\n# Loc_file_path = \"/content/gdrive/My Drive/Colab notebooks testing/DeepSTORM/Glia data from CL/Results from prediction/20200615-M6 with CoM localizations/Localizations_glia_actin_2D - 1-500fr_avg.csv\" #@param {type:\"string\"}\noriginal_image_path = \"\" #@param {type:\"string\"}\n# @markdown Otherwise, please provide image width, height (in pixels) and pixel size (in nm)\nimage_width = 256#@param {type:\"integer\"}\nimage_height = 256#@param {type:\"integer\"}\npixel_size = 100#@param {type:\"number\"}\n\n# @markdown ##Visualization parameters\nvisualization_pixel_size = 10#@param {type:\"number\"}\nvisualization_mode = \"Simple histogram\" #@param [\"Simple histogram\", \"Integrated Gaussian (SLOW!)\"]\n\nif not Use_current_drift_corrected_localizations:\n filename_no_extension = os.path.splitext(os.path.basename(Loc_file_path))[0]\n\n\nif Get_info_from_file:\n pixel_size, image_width, image_height = getPixelSizeTIFFmetadata(original_image_path, display=True)\n\nif Use_current_drift_corrected_localizations:\n LocData = driftCorrectedLocData\nelse:\n LocData = pd.read_csv(Loc_file_path)\n\nMhr = int(math.ceil(image_height*pixel_size/visualization_pixel_size))\nNhr = int(math.ceil(image_width*pixel_size/visualization_pixel_size))\n\n\nnFrames = max(LocData['frame'])\nx_max = max(LocData['x [nm]'])\ny_max = max(LocData['y [nm]'])\nimage_size = (Mhr, Nhr)\n\nprint('Image size: '+str(image_size))\nprint('Number of frames in data: '+str(nFrames))\nprint('Number of localizations in data: '+str(len(LocData.index)))\n\nxc_array = LocData['x [nm]'].to_numpy()\nyc_array = LocData['y [nm]'].to_numpy()\nif (visualization_mode == 'Simple histogram'):\n locImage = FromLoc2Image_SimpleHistogram(xc_array, yc_array, image_size = image_size, pixel_size = visualization_pixel_size)\nelif (visualization_mode == 'Shifted histogram'):\n print(bcolors.WARNING+'Method not implemented yet!'+bcolors.NORMAL)\n locImage = np.zeros(image_size)\nelif (visualization_mode == 'Integrated Gaussian (SLOW!)'):\n photon_array = np.ones(xc_array.shape)\n sigma_array = np.ones(xc_array.shape)\n locImage = FromLoc2Image_Erf(xc_array, yc_array, photon_array, sigma_array, image_size = image_size, pixel_size = visualization_pixel_size)\n\nprint('--------------------------------------------------------------------')\n# Displaying the time elapsed for training\ndt = time.time() 
- start\nminutes, seconds = divmod(dt, 60) \nhours, minutes = divmod(minutes, 60) \nprint(\"Time elapsed:\",hours, \"hour(s)\",minutes,\"min(s)\",round(seconds),\"sec(s)\")\n\n# Display\nplt.figure(figsize=(20,10))\nplt.axis('off')\n# plt.imshow(locImage, cmap='gray');\nplt.imshow(locImage, norm = simple_norm(locImage, percent = 99.5));\n\n\nLocData.head()\n\n", "_____no_output_____" ], [ "# @markdown ---\n\n# @markdown #Play this cell to save the visualization\n\n# @markdown ####Please select a path to the folder where to save the visualization.\nsave_path = \"\" #@param {type:\"string\"}\n\nif not os.path.exists(save_path):\n os.makedirs(save_path)\n print('Folder created.')\n\nsaveAsTIF(save_path, filename_no_extension+'_Visualization', locImage, visualization_pixel_size)\nprint('Image saved.')", "_____no_output_____" ] ], [ [ "## **6.4. Download your predictions**\n---\n\n<font size = 4>**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name.", "_____no_output_____" ], [ "# **7. Version log**\n\n---\n<font size = 4>**v1.13**: \n* The section 1 and 2 are now swapped for better export of *requirements.txt*. \n* This version also now includes built-in version check and the version log that you're reading now.\n\n---", "_____no_output_____" ], [ "\n#**Thank you for using Deep-STORM 2D!**", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
d0708d32daac934b93d0e8b4920aaff362d40c3f
468,318
ipynb
Jupyter Notebook
examples/Microstructures_Classification_GPU.ipynb
manassharma07/cysx_nn
3781c3d27682094aafd10f8eb6544616ff9499c2
[ "MIT" ]
20
2021-12-23T03:39:30.000Z
2022-02-28T04:28:44.000Z
examples/Microstructures_Classification_GPU.ipynb
manassharma07/cysx_nn
3781c3d27682094aafd10f8eb6544616ff9499c2
[ "MIT" ]
4
2021-12-23T14:42:45.000Z
2022-01-15T02:28:56.000Z
examples/Microstructures_Classification_GPU.ipynb
manassharma07/cysx_nn
3781c3d27682094aafd10f8eb6544616ff9499c2
[ "MIT" ]
1
2021-12-29T00:38:14.000Z
2021-12-29T00:38:14.000Z
443.063387
167,500
0.942695
[ [ [ "# Microstructure classification using Neural Networks", "_____no_output_____" ], [ "In this example, we will generate microstructures of 4 different types with different grain sizes. \nThen we will split the dataset into training and testing set.\n\nFinally we will trian the neural network using CrysX-NN to make predictions.", "_____no_output_____" ], [ "## Run the following cell for Google colab \nthen restart runtime", "_____no_output_____" ] ], [ [ "! pip install --upgrade --no-cache-dir https://github.com/manassharma07/crysx_nn/tarball/main\n! pip install pymks\n! pip install IPython==7.7.0\n! pip install fsspec>=0.3.3", "_____no_output_____" ] ], [ [ "## Import necessary libraries\nWe will use PyMKS for generation artificial microstructures.", "_____no_output_____" ] ], [ [ "from pymks import (\n generate_multiphase,\n plot_microstructures,\n# PrimitiveTransformer,\n# TwoPointCorrelation,\n# FlattenTransformer,\n# GenericTransformer\n)\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# For GPU\nimport cupy as cp", "_____no_output_____" ] ], [ [ "## Define some parameters\nlike number of samples per type, the width and height of a microstructure image in pixels.\n[For Google Colab, generating 10,000 samples of each type results in out of memory error. 8000 seems to work fine.]", "_____no_output_____" ] ], [ [ "nSamples_per_type = 10000\nwidth = 100\nheight = 100", "_____no_output_____" ] ], [ [ "## Generate microstructures\nThe following code will generate microstructures of 4 different types.\n\nThe first type has 6 times more grain boundaries along the x-axis than the y-axis.\n\nThe second type has 4 times more grain boundaries along the y-axis than the x-axis.\n\nThe third type has same number of grain boundaries along the x-axis as well as the y-axis.\n\nThe fourth type has 6 times more grain boundaries along the y-axis than the x-axis.", "_____no_output_____" ] ], [ [ "grain_sizes = [(30, 5), (10, 40), (15, 15), (5, 30)]\nseeds = [10, 99, 4, 36]\n\ndata_synth = np.concatenate([\n generate_multiphase(shape=(nSamples_per_type, width, height), grain_size=grain_size,\n volume_fraction=(0.5, 0.5),\n percent_variance=0.2,\n seed=seed\n )\n for grain_size, seed in zip(grain_sizes, seeds)\n])\n\n", "C:\\Users\\manas\\anaconda3\\envs\\crysx_nn\\lib\\site-packages\\toolz\\functoolz.py:306: FutureWarning: arrays to stack must be passed as a \"sequence\" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future.\n return self._partial(*args, **kwargs)\nC:\\Users\\manas\\anaconda3\\envs\\crysx_nn\\lib\\site-packages\\toolz\\functoolz.py:306: FutureWarning: arrays to stack must be passed as a \"sequence\" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future.\n return self._partial(*args, **kwargs)\nC:\\Users\\manas\\anaconda3\\envs\\crysx_nn\\lib\\site-packages\\toolz\\functoolz.py:306: FutureWarning: arrays to stack must be passed as a \"sequence\" type such as list or tuple. Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future.\n return self._partial(*args, **kwargs)\nC:\\Users\\manas\\anaconda3\\envs\\crysx_nn\\lib\\site-packages\\toolz\\functoolz.py:306: FutureWarning: arrays to stack must be passed as a \"sequence\" type such as list or tuple. 
Support for non-sequence iterables such as generators is deprecated as of NumPy 1.16 and will raise an error in the future.\n return self._partial(*args, **kwargs)\n" ] ], [ [ "## Plot a microstructure of each type ", "_____no_output_____" ] ], [ [ "plot_microstructures(*data_synth[::nSamples_per_type+0], colorbar=True)\n# plot_microstructures(*data_synth[::nSamples_per_type+1], colorbar=True)\n# plot_microstructures(*data_synth[::nSamples_per_type+2], colorbar=True)\n# plot_microstructures(*data_synth[::nSamples_per_type+3], colorbar=True)\n#plt.savefig(\"Microstructures.png\",dpi=600,transparent=True)\nplt.show()", "_____no_output_____" ] ], [ [ "## Check the shape of the data generated\nThe first dimension corresponds to the total number of samples, the second and third axes are for width and height.", "_____no_output_____" ] ], [ [ "# Print shape of the array\nprint(data_synth.shape)\nprint(type(data_synth))", "(40000, 100, 100)\n<class 'numpy.ndarray'>\n" ] ], [ [ "## Rename the generated data --> `X_data` as it is the input data", "_____no_output_____" ] ], [ [ "X_data = np.array(data_synth)\nprint(X_data.shape)", "(40000, 100, 100)\n" ] ], [ [ "## Create the target/true labels for the data\nThe microstructure data we have generated is such that the samples of different types are grouped together. Furthermore, their order is the same as the one we provided when generating the data. \n\nTherefore, we can generate the true labels quite easily by making a numpy array whose first `nSamples_per_type` elements correspond to type 0, and so on upto type 3.", "_____no_output_____" ] ], [ [ "Y_data = np.concatenate([np.ones(nSamples_per_type)*0,np.ones(nSamples_per_type)*1,np.ones(nSamples_per_type)*2,np.ones(nSamples_per_type)*3])", "_____no_output_____" ], [ "print(Y_data)\nprint(Y_data.shape)", "[0. 0. 0. ... 3. 3. 
3.]\n(40000,)\n" ] ], [ [ "## Plot some samples taken from the data randomly as well as their labels that we created for confirmation", "_____no_output_____" ] ], [ [ "rng = np.random.default_rng()\n### Plot examples\nfig, axes = plt.subplots(nrows=2, ncols=6, figsize=(15., 6.))\nfor axes_row in axes:\n for ax in axes_row:\n test_index = rng.integers(0, len(Y_data))\n image = X_data[test_index]\n orig_label = Y_data[test_index]\n ax.set_axis_off()\n ax.imshow(image)\n ax.set_title('True: %i' % orig_label)", "_____no_output_____" ] ], [ [ "## Use sklearn to split the data into train and test set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# Split into train and test\nX_train_orig, X_test_orig, Y_train_orig, Y_test_orig = train_test_split(X_data, Y_data, test_size=0.20, random_state=1)", "_____no_output_____" ] ], [ [ "## Some statistics of the training data", "_____no_output_____" ] ], [ [ "print('Training data MIN',X_train_orig.min())\nprint('Training data MAX',X_train_orig.max())\nprint('Training data MEAN',X_train_orig.mean())\nprint('Training data STD',X_train_orig.std())", "Training data MIN 0\nTraining data MAX 1\nTraining data MEAN 0.49983095625\nTraining data STD 0.49999997142421193\n" ] ], [ [ "## Check some shapes", "_____no_output_____" ] ], [ [ "print(X_train_orig.shape)\nprint(Y_train_orig.shape)\nprint(X_test_orig.shape)\nprint(Y_test_orig.shape)", "(32000, 100, 100)\n(32000,)\n(8000, 100, 100)\n(8000,)\n" ] ], [ [ "## Flatten the input pixel data for each sample by reshaping the 2d array of size `100,100`, for each sample to a 1d array of size `100*100`", "_____no_output_____" ] ], [ [ "X_train = X_train_orig.reshape(X_train_orig.shape[0], width*height)\nX_test = X_test_orig.reshape(X_test_orig.shape[0], width*height)", "_____no_output_____" ] ], [ [ "## Check the shapes", "_____no_output_____" ] ], [ [ "print(X_train.shape)\nprint(X_test.shape)", "(32000, 10000)\n(8000, 10000)\n" ] ], [ [ "## Use a utility from CrysX-NN to one-hot encode the target/true labels \n\nThis means that a sample with type 3 will be represented as an array [0,0,0,1]", "_____no_output_____" ] ], [ [ "from crysx_nn import mnist_utils as mu\nY_train = mu.one_hot_encode(Y_train_orig, 4)\nY_test = mu.one_hot_encode(Y_test_orig, 4)", "_____no_output_____" ], [ "print(Y_train.shape)\nprint(Y_test.shape)", "(32000, 4)\n(8000, 4)\n" ] ], [ [ "## Standardize the training and testing input data using the mean and standard deviation of the training data", "_____no_output_____" ] ], [ [ "X_train = (X_train - np.mean(X_train_orig)) / np.std(X_train_orig)\nX_test = (X_test - np.mean(X_train_orig)) / np.std(X_train_orig)\n\n# Some statistics after standardization\nprint('Training data MIN',X_train.min())\nprint('Training data MAX',X_train.max())\nprint('Training data MEAN',X_train.mean())\nprint('Training data STD',X_train.std())\nprint('Testing data MIN',X_test.min())\nprint('Testing data MAX',X_test.max())\nprint('Testing data MEAN',X_test.mean())\nprint('Testing data STD',X_test.std())", "Training data MIN -0.9996619696322572\nTraining data MAX 1.0003381446709014\nTraining data MEAN -1.0667022820598504e-16\nTraining data STD 0.9999999999999942\nTesting data MIN -0.9996619696322572\nTesting data MAX 1.0003381446709014\nTesting data MEAN 0.0016794125959809832\nTesting data STD 0.999999157574746\n" ] ], [ [ "## Finally we will begin creating a neural network\n\nSet some important parameters for the Neural Network.\n\n**Note**: In some cases I 
got NAN values while training. The issue could be circumvented by choosing a different batch size.", "_____no_output_____" ] ], [ [ "nInputs = width*height # No. of nodes in the input layer\nneurons_per_layer = [500, 4] # Neurons per layer (excluding the input layer)\nactivation_func_names = ['ReLU', 'Softmax']\nnLayers = len(neurons_per_layer)\nnEpochs = 4\nbatchSize = 32 # No. of input samples to process at a time for optimization", "_____no_output_____" ] ], [ [ "## Create the neural network model\nUse the parameters define above to create the model", "_____no_output_____" ] ], [ [ "from crysx_nn import network\nmodel = network.nn_model(nInputs=nInputs, neurons_per_layer=neurons_per_layer, activation_func_names=activation_func_names, batch_size=batchSize, device='GPU', init_method='Xavier') \n\nmodel.lr = 0.02", "C:\\Users\\manas\\anaconda3\\envs\\crysx_nn\\lib\\site-packages\\crysx_nn\\network.py:13: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n from tqdm.autonotebook import tqdm\n" ] ], [ [ "## Check the details of the Neural Network", "_____no_output_____" ] ], [ [ "model.details()", "----------------------------------------------------------------------------------\n****Neural Network Model Details****\n----------------------------------------------------------------------------------\nNumber of input nodes: 10000\nNumber of layers (hidden+output): 2\nNumber of nodes in each layer (hidden & output): [500, 4]\nActivation function for each layer (hidden & output): ['ReLU', 'Softmax']\nMethod used for weights and biases initialization: Xavier\nBatch Size: 32\nDevice: GPU\nOptimization method: SGD\nLearning rate: 0.02\n----------------------------------------------------------------------------------\n" ] ], [ [ "## Visualize the neural network", "_____no_output_____" ] ], [ [ "model.visualize()", "_____no_output_____" ] ], [ [ "## Begin optimization/training\n\nWe will use `float32` precision, so convert the input and output arrays.\n\nWe will use Categorical Cross Entropy for the loss function.", "_____no_output_____" ] ], [ [ "inputs = cp.array(X_train.astype(np.float32))\noutputs = cp.array(Y_train.astype(np.float32))\n# Run optimization\n# model.optimize(inputs, outputs, lr=0.02,nEpochs=nEpochs,loss_func_name='CCE', miniterEpoch=1, batchProgressBar=True, miniterBatch=100)\n# To get accuracies at each epoch\nmodel.optimize(inputs, outputs, lr=0.02,nEpochs=nEpochs,loss_func_name='CCE', miniterEpoch=1, batchProgressBar=True, miniterBatch=100, get_accuracy=True)\n", "_____no_output_____" ] ], [ [ "## Error at each epoch", "_____no_output_____" ] ], [ [ "print(model.errors)", "[array(0.80813736, dtype=float32), array(0.11121845, dtype=float32), array(0.01747202, dtype=float32), array(0.00730846, dtype=float32)]\n" ] ], [ [ "## Accuracy at each epoch", "_____no_output_____" ] ], [ [ "print(model.accuracy)", "[array(0.66996875), array(0.9783125), array(0.99965625), array(1.)]\n" ] ], [ [ "## Save model weights and biases", "_____no_output_____" ] ], [ [ "# Save weights\nmodel.save_model_weights('NN_crysx_microstructure_96_weights_cupy')\n# Save biases\nmodel.save_model_biases('NN_crysx_microstructure_96_biases_cupy')", "_____no_output_____" ] ], [ [ "## Load model weights and biases from files", "_____no_output_____" ] ], [ [ "model.load_model_weights('NN_crysx_microstructure_96_weights_cupy')\nmodel.load_model_biases('NN_crysx_microstructure_96_biases_cupy')", "_____no_output_____" ] ], 
[ [ "## Performance on Test data", "_____no_output_____" ] ], [ [ "## Convert to float32 arrays\ninputs = cp.array(X_test.astype(np.float32))\noutputs = cp.array(Y_test.astype(np.float32))\n# predictions, error = model.predict(inputs, outputs, loss_func_name='BCE')\n# print('Error:',error)\n# print(predictions)\npredictions, error, accuracy = model.predict(inputs, outputs, loss_func_name='CCE', get_accuracy=True)\nprint('Error:',error)\nprint('Accuracy %:',accuracy*100)", "Error: 0.0909118\nAccuracy %: 97.7\n" ] ], [ [ "## Confusion matrix", "_____no_output_____" ] ], [ [ "from crysx_nn import utils\n\n# Convert predictions to numpy array for using the utility function\npredictions = cp.asnumpy(predictions)\n\n# Get the indices of the maximum probabilities for each sample in the predictions array\npred_type = np.argmax(predictions, axis=1)\n# Get the digit index from the one-hot encoded array\ntrue_type = np.argmax(Y_test, axis=1)\n# Calculation confusion matrix\ncm = utils.compute_confusion_matrix(pred_type, true_type)\nprint('Confusion matrix:\\n',cm)\n\n# Plot the confusion matrix\nutils.plot_confusion_matrix(cm)", "Confusion matrix:\n [[2.022e+03 0.000e+00 4.000e+00 0.000e+00]\n [0.000e+00 1.958e+03 0.000e+00 4.900e+01]\n [2.000e+00 1.000e+00 1.952e+03 1.000e+00]\n [0.000e+00 1.270e+02 0.000e+00 1.884e+03]]\n" ] ], [ [ "## Draw some random images from the test dataset and compare the true labels to the network outputs", "_____no_output_____" ] ], [ [ "### Draw some random images from the test dataset and compare the true labels to the network outputs\nfig, axes = plt.subplots(nrows=2, ncols=6, figsize=(15., 6.))\n### Loop over subplots\nfor axes_row in axes:\n for ax in axes_row:\n ### Draw the images\n test_index = rng.integers(0, len(Y_test_orig))\n image = X_test[test_index].reshape(width, height) # Use X_test instead of X_test_orig as X_test_orig is not standardized\n orig_label = Y_test_orig[test_index]\n ### Compute the predictions\n input_array = cp.array(image.reshape([1,width*height]))\n output = model.predict(input_array)\n # Get the maximum probability \n certainty = np.max(output)\n # Get the index of the maximum probability\n output = np.argmax(output)\n \n ### Show image\n ax.set_axis_off()\n ax.imshow(image)\n ax.set_title('True: %i, predicted: %i\\nat %f ' % (orig_label, output, certainty*100))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d07095ebd5f132a68742ac3b4184eae1d3056ec4
294,794
ipynb
Jupyter Notebook
week8/in_class_notebooks/week8-192.ipynb
anamarina/Data_Analysis_in_Python
0917987c4045f72f2c76af321a6a1e5cb745943b
[ "MIT" ]
3
2021-01-19T04:54:06.000Z
2021-02-08T13:12:11.000Z
week8/in_class_notebooks/week8-192.ipynb
anamarina/Data_Analysis_in_Python
0917987c4045f72f2c76af321a6a1e5cb745943b
[ "MIT" ]
null
null
null
week8/in_class_notebooks/week8-192.ipynb
anamarina/Data_Analysis_in_Python
0917987c4045f72f2c76af321a6a1e5cb745943b
[ "MIT" ]
14
2021-02-04T06:35:40.000Z
2021-03-02T06:35:06.000Z
34.751149
315
0.329006
[ [ [ "# <center> Pandas*</center>\n\n*pandas is short for Python Data Analysis Library", "_____no_output_____" ], [ "<img src=\"https://welovepandas.club/wp-content/uploads/2019/02/panda-bamboo1550035127.jpg\" height=350 width=400>", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ] ], [ [ "In pandas you need to work with DataFrames and Series. According to [the documentation of pandas](https://pandas.pydata.org/pandas-docs/stable/):\n\n* **DataFrame**: Two-dimensional, size-mutable, potentially heterogeneous tabular data. Data structure also contains labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure.\n\n* **Series**: One-dimensional ndarray with axis labels (including time series).", "_____no_output_____" ] ], [ [ "pd.Series([5, 6, 7, 8, 9, 10])", "_____no_output_____" ], [ "pd.DataFrame([1])", "_____no_output_____" ], [ "some_data = {'Student': ['1', '2'], 'Name': ['Alice', 'Michael'], 'Surname': ['Brown', 'Williams']}\n\npd.DataFrame(some_data)", "_____no_output_____" ], [ "some_data = [{'Student': ['1', '2'], 'Name': ['Alice', 'Michael'], 'Surname': ['Brown', 'Williams']}]\n\npd.DataFrame(some_data)", "_____no_output_____" ], [ "pd.DataFrame([{'Student': '1', 'Name': 'Alice', 'Surname': 'Brown'}, \n {'Student': '2', 'Name': 'Anna', 'Surname': 'White'}])", "_____no_output_____" ] ], [ [ "Check how to create it:\n* pd.DataFrame().from_records()\n* pd.DataFrame().from_dict()", "_____no_output_____" ] ], [ [ "pd.DataFrame.from_records(some_data)", "_____no_output_____" ], [ "pd.DataFrame.from_dict()", "_____no_output_____" ] ], [ [ "This data set is too big for github, download it from [here](https://www.kaggle.com/START-UMD/gtd). You will need to register on Kaggle first.", "_____no_output_____" ] ], [ [ "df = pd.read_csv('globalterrorismdb_0718dist.csv', encoding='ISO-8859-1')", "/opt/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3145: DtypeWarning: Columns (4,6,31,33,61,62,63,76,79,90,92,94,96,114,115,121) have mixed types.Specify dtype option on import or set low_memory=False.\n has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n" ] ], [ [ "Let's explore the second set of data. How many rows and columns are there?", "_____no_output_____" ] ], [ [ "df.shape", "_____no_output_____" ] ], [ [ "General information on this data set:", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 181691 entries, 0 to 181690\nColumns: 135 entries, eventid to related\ndtypes: float64(55), int64(22), object(58)\nmemory usage: 187.1+ MB\n" ] ], [ [ "Let's take a look at the dataset information. 
In .info (), you can pass additional parameters, including:\n\n* **verbose**: whether to print information about the DataFrame in full (if the table is very large, then some information may be lost);\n* **memory_usage**: whether to print memory consumption (the default is True, but you can put either False, which will remove memory consumption, or 'deep', which will calculate the memory consumption more accurately);\n* **null_counts**: Whether to count the number of empty elements (default is True).", "_____no_output_____" ] ], [ [ "df.describe()", "_____no_output_____" ], [ "df.describe(include=['object', 'int'])", "_____no_output_____" ] ], [ [ "The describe method shows the basic statistical characteristics of the data for each numeric feature (int64 and float64 types): the number of non-missing values, mean, standard deviation, range, median, 0.25 and 0.75 quartiles.", "_____no_output_____" ], [ "How to look only at the column names, index:", "_____no_output_____" ] ], [ [ "df.columns", "_____no_output_____" ], [ "df.index", "_____no_output_____" ] ], [ [ "How to look at the first 10 lines?", "_____no_output_____" ] ], [ [ "df.head(10)", "_____no_output_____" ] ], [ [ "How to look at the last 15 lines?", "_____no_output_____" ] ], [ [ "df.tail(15)", "_____no_output_____" ] ], [ [ "How to request only one particular line (by counting lines)? ", "_____no_output_____" ] ], [ [ "df.head(4)", "_____no_output_____" ], [ "#the first 3 lines\ndf.iloc[:3] # the number of rows by counting them", "_____no_output_____" ] ], [ [ "How to request only one particular line by its index?", "_____no_output_____" ] ], [ [ "# the first lines till the row with the index 3\ndf.loc[:3] # 3 is treated as an index", "_____no_output_____" ] ], [ [ "Look only at the unique values of some columns. ", "_____no_output_____" ] ], [ [ "list(df['city'].unique())", "_____no_output_____" ] ], [ [ "How many unique values there are in ```city``` column? 
= On how many cities this data set hold information on terrorist attacks?", "_____no_output_____" ] ], [ [ "df['city'].nunique()", "_____no_output_____" ] ], [ [ "In what years did the largest number of terrorist attacks occur (according to only to this data set)?", "_____no_output_____" ] ], [ [ "df['iyear'].value_counts().head(5)", "_____no_output_____" ], [ "df['iyear'].value_counts()[:5]", "_____no_output_____" ] ], [ [ "How we can sort all data by year in descending order?", "_____no_output_____" ] ], [ [ "df['iyear'].sort_values()", "_____no_output_____" ], [ "df.sort_values(by='iyear', ascending=False)", "_____no_output_____" ] ], [ [ "Which data types we have in each column?", "_____no_output_____" ] ], [ [ "dict(df.dtypes)", "_____no_output_____" ] ], [ [ "How to check the missing values?", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "df.isna()", "_____no_output_____" ], [ "dict(df.isna().sum())", "_____no_output_____" ], [ "df.dropna(axis=1)", "_____no_output_____" ], [ "df.head(5)", "_____no_output_____" ], [ "df['attacktype2'].min()", "_____no_output_____" ], [ "df['attacktype2'].max()", "_____no_output_____" ], [ "df['attacktype2'].mode()", "_____no_output_____" ], [ "df['attacktype2'].median()", "_____no_output_____" ], [ "df['attacktype2'].mean()", "_____no_output_____" ], [ "df['attacktype2'].fillna(df['attacktype2'].mode())", "_____no_output_____" ] ], [ [ "Let's delete a column ```approxdate``` from this data set, because it contains a lot of missing values:", "_____no_output_____" ] ], [ [ "df.drop(['approxdate'], axis=1, inplace=True)", "_____no_output_____" ] ], [ [ "Create a new variable ```casualties``` by summing up the value in ```Killed``` and ```Wounded```. ", "_____no_output_____" ] ], [ [ "set(df.columns)", "_____no_output_____" ], [ "df['casualties'] = df['nwound'] + df['nkill']", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "Rename a column ```iyear``` to ```Year```:", "_____no_output_____" ] ], [ [ "df.rename({'iyear' : 'Year'}, axis='columns', inplace=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ] ], [ [ "How to drop all missing values? Replace these missing values with others?", "_____no_output_____" ] ], [ [ "df.dropna(inplace=True)", "_____no_output_____" ] ], [ [ "**Task!** Use a function to replace NaNs (=missing values) to a string 'None' in ```related``` column", "_____no_output_____" ] ], [ [ "# TODO", "_____no_output_____" ] ], [ [ "For the selected columns show its mean, median (and/or mode).", "_____no_output_____" ] ], [ [ "df['Year'].mean()", "_____no_output_____" ] ], [ [ "Min, max and sum:", "_____no_output_____" ] ], [ [ "df['Year'].sum()", "_____no_output_____" ], [ "sum(df['Year'])", "_____no_output_____" ], [ "max('word')", "_____no_output_____" ] ], [ [ "Filter the dataset to look only at the attacks after 2015 year", "_____no_output_____" ] ], [ [ "df[df.Year > 2015]", "_____no_output_____" ] ], [ [ "What if we have several conditions? Try it out", "_____no_output_____" ] ], [ [ "df[(df.Year > 2015) & (df.extended == 1)]", "_____no_output_____" ] ], [ [ "Additional materials:\n\n* https://www.kaggle.com/START-UMD/gtd/code?datasetId=504&sortBy=voteCount", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d070af921ed56512658d450899401d8b8eef3fe5
414,880
ipynb
Jupyter Notebook
07_Visualization/Tips/Exercises.ipynb
alexkataev/pandas_exercises
60b4a0f8b760eec6fdfae854be76e2481fcade85
[ "BSD-3-Clause" ]
null
null
null
07_Visualization/Tips/Exercises.ipynb
alexkataev/pandas_exercises
60b4a0f8b760eec6fdfae854be76e2481fcade85
[ "BSD-3-Clause" ]
null
null
null
07_Visualization/Tips/Exercises.ipynb
alexkataev/pandas_exercises
60b4a0f8b760eec6fdfae854be76e2481fcade85
[ "BSD-3-Clause" ]
null
null
null
343.728252
55,676
0.922756
[ [ [ "# Tips", "_____no_output_____" ], [ "### Introduction:\n\nThis exercise was created based on the tutorial and documentation from [Seaborn](https://stanford.edu/~mwaskom/software/seaborn/index.html) \nThe dataset being used is tips from Seaborn.\n\n### Step 1. Import the necessary libraries:", "_____no_output_____" ] ], [ [ "import pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Tips/tips.csv). ", "_____no_output_____" ], [ "### Step 3. Assign it to a variable called tips", "_____no_output_____" ] ], [ [ "url = 'https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Tips/tips.csv'\n\ntips = pd.read_csv(url, index_col=0)\ntips.reset_index()\ntips", "_____no_output_____" ] ], [ [ "### Step 4. Delete the Unnamed 0 column", "_____no_output_____" ] ], [ [ "# already done", "_____no_output_____" ] ], [ [ "### Step 5. Plot the total_bill column histogram", "_____no_output_____" ] ], [ [ "sns.set(style='white')\nsns.set_context(rc = {'patch.linewidth': 2.0})\n\nax = sns.histplot(tips['total_bill'], kde=True, stat='density')\nax.set(xlabel='Value', ylabel='Frequency')\nax.set_title('Total Bill', size=15)\n\nsns.despine();", "_____no_output_____" ], [ "# Original solution:\n\n# create histogram\nttbill = sns.distplot(tips.total_bill);\n\n# set lables and titles\nttbill.set(xlabel = 'Value', ylabel = 'Frequency', title = \"Total Bill\")\n\n# take out the right and upper borders\nsns.despine()", "/anaconda3/lib/python3.7/site-packages/seaborn/distributions.py:2551: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ] ], [ [ "### Step 6. Create a scatter plot presenting the relationship between total_bill and tip", "_____no_output_____" ] ], [ [ "sns.jointplot(x=tips['total_bill'], y=tips['tip'], xlim=(0, 60), ylim=(0, 12));", "_____no_output_____" ] ], [ [ "### Step 7. Create one image with the relationship of total_bill, tip and size.\n#### Hint: It is just one function.", "_____no_output_____" ] ], [ [ "sns.pairplot(data=tips[['total_bill', 'tip', 'size']]);\n\n# Original solution:\n#\n# sns.pairplot(tips)", "_____no_output_____" ] ], [ [ "### Step 8. Present the relationship between days and total_bill value", "_____no_output_____" ] ], [ [ "sns.set_style('whitegrid')\nplt.figure(figsize=(8, 6))\nax = sns.stripplot(x=tips['day'], y=tips['total_bill'])\nax.set_ylim(0, 60);\n\n# Original solution:\n#\n# sns.stripplot(x = \"day\", y = \"total_bill\", data = tips, jitter = True);", "_____no_output_____" ], [ "# What a \"jitter\" is (for demonstration purposes):\nsns.stripplot(x = \"day\", y = \"total_bill\", data = tips, jitter = 0.4);", "_____no_output_____" ] ], [ [ "### Step 9. 
Create a scatter plot with the day as the y-axis and tip as the x-axis, differ the dots by sex", "_____no_output_____" ] ], [ [ "sns.set_style(\"whitegrid\")\nplt.figure(figsize=(8, 6))\n\nax = sns.scatterplot(data=tips, x='tip', y='day', hue='sex');\nax.yaxis.grid(False)\nax.legend(title='Sex', framealpha = 1, edgecolor='w');\n\n# Original solution:\n#\n# sns.stripplot(x = \"tip\", y = \"day\", hue = \"sex\", data = tips, jitter = True);", "_____no_output_____" ] ], [ [ "### Step 10. Create a box plot presenting the total_bill per day differetiation the time (Dinner or Lunch)", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12, 6))\nsns.boxplot(data=tips, x='day', y='total_bill', hue='time');", "_____no_output_____" ] ], [ [ "### Step 11. Create two histograms of the tip value based for Dinner and Lunch. They must be side by side.", "_____no_output_____" ] ], [ [ "sns.set_style('ticks')\ng = sns.FacetGrid(data=tips, col='time')\ng.map(sns.histplot, 'tip', bins=10)\ng.set(xlim=(0, 12), ylim=(0, 60), xticks=range(0, 13, 2), yticks=range(0, 61, 10));\nsns.despine();\n\n# Original solution:\n#\n# # better seaborn style\n# sns.set(style = \"ticks\")\n# # creates FacetGrid\n# g = sns.FacetGrid(tips, col = \"time\")\n# g.map(plt.hist, \"tip\");", "_____no_output_____" ] ], [ [ "### Step 12. Create two scatterplots graphs, one for Male and another for Female, presenting the total_bill value and tip relationship, differing by smoker or no smoker\n### They must be side by side.", "_____no_output_____" ] ], [ [ "g = sns.FacetGrid(data=tips, col='sex')\ng.map_dataframe(sns.scatterplot, x='total_bill', y='tip', hue='smoker')\ng.add_legend(title='Smoker')\ng.set_axis_labels('Total bill', 'Tip')\ng.set(xlim=(0, 60), ylim=(0, 12), xticks=range(0, 61, 10), yticks=range(0, 13, 2));\n\n\n# Original solution:\n#\n# g = sns.FacetGrid(tips, col = \"sex\", hue = \"smoker\")\n# g.map(plt.scatter, \"total_bill\", \"tip\", alpha =.7)\n# g.add_legend();", "_____no_output_____" ] ], [ [ "### BONUS: Create your own question and answer it using a graph.", "_____no_output_____" ] ], [ [ "g = sns.FacetGrid(data=tips, col='sex')\ng.map(sns.kdeplot, 'total_bill');", "_____no_output_____" ], [ "sns.kdeplot(tips['total_bill'], hue=tips['sex']);", "_____no_output_____" ], [ "sns.histplot(data=tips, x='total_bill', hue='sex');", "_____no_output_____" ], [ "tips.groupby('sex')[['total_bill']].sum()", "_____no_output_____" ], [ "tips.groupby('sex')[['total_bill']].count()", "_____no_output_____" ], [ "males = tips[tips['sex'] == 'Male'].sample(87)\nmales.head()", "_____no_output_____" ], [ "females = tips[tips['sex'] == 'Female']\nfemales.head()", "_____no_output_____" ], [ "new_tips = pd.concat([males, females]).reset_index()\nnew_tips.head()", "_____no_output_____" ], [ "sns.kdeplot(data=new_tips, x='total_bill', hue='sex');", "_____no_output_____" ], [ "sns.histplot(data=new_tips, x='total_bill', hue='sex');", "_____no_output_____" ], [ "g = sns.FacetGrid(data=new_tips, col='sex')\ng.map(sns.scatterplot, 'total_bill', 'tip');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d070c1688043f8102403f84adca24fb81fe17cc8
34,443
ipynb
Jupyter Notebook
starter_code/.ipynb_checkpoints/Model_2_Logistic_Regression-checkpoint.ipynb
bandipara/machine-learning-challenge
e3cffb20914fc6194fef8289d7a895065cbb6ed9
[ "ADSL" ]
null
null
null
starter_code/.ipynb_checkpoints/Model_2_Logistic_Regression-checkpoint.ipynb
bandipara/machine-learning-challenge
e3cffb20914fc6194fef8289d7a895065cbb6ed9
[ "ADSL" ]
null
null
null
starter_code/.ipynb_checkpoints/Model_2_Logistic_Regression-checkpoint.ipynb
bandipara/machine-learning-challenge
e3cffb20914fc6194fef8289d7a895065cbb6ed9
[ "ADSL" ]
null
null
null
41.90146
228
0.486949
[ [ [ "# Model_2_Linear_Regression", "_____no_output_____" ] ], [ [ "# Update sklearn to prevent version mismatches\n!pip install sklearn --upgrade", "Requirement already up-to-date: sklearn in c:\\users\\bandi\\anaconda3\\lib\\site-packages (0.0)\nRequirement already satisfied, skipping upgrade: scikit-learn in c:\\users\\bandi\\anaconda3\\lib\\site-packages (from sklearn) (0.21.3)\nRequirement already satisfied, skipping upgrade: numpy>=1.11.0 in c:\\users\\bandi\\anaconda3\\lib\\site-packages (from scikit-learn->sklearn) (1.16.5)\nRequirement already satisfied, skipping upgrade: scipy>=0.17.0 in c:\\users\\bandi\\anaconda3\\lib\\site-packages (from scikit-learn->sklearn) (1.3.1)\nRequirement already satisfied, skipping upgrade: joblib>=0.11 in c:\\users\\bandi\\anaconda3\\lib\\site-packages (from scikit-learn->sklearn) (0.13.2)\n" ], [ "import pandas as pd", "_____no_output_____" ] ], [ [ "# Read the CSV and Perform Basic Data Cleaning", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"exoplanet_data.csv\")\n# Drop the null columns where all values are null\ndf = df.dropna(axis='columns', how='all')\n# Drop the null rows\ndf = df.dropna()\ndf.head()", "_____no_output_____" ] ], [ [ "## Create a Train Test Split", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\ny=df['koi_disposition']\nX=df.drop(columns=['koi_disposition'])\n \nX_train, X_test, y_train, y_test = train_test_split(X,y, random_state=1, stratify=y)\n", "_____no_output_____" ], [ "X_train.head()", "_____no_output_____" ] ], [ [ "# Preprocessing", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler\nX_scaler = MinMaxScaler().fit(X_train)\nX_train_scaled = X_scaler.transform(X_train)\nX_test_scaled = X_scaler.transform(X_test)", "_____no_output_____" ] ], [ [ "# Linear Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression\nmodel1 = LogisticRegression()\nmodel1.fit(X_train_scaled, y_train)", "C:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n" ], [ "print(f'Train = {model1.score(X_train_scaled, y_train)}')\nprint(f'Test = {model1.score(X_test_scaled, y_test)}')", "Train = 0.8411214953271028\nTest = 0.8409610983981693\n" ] ], [ [ "# GridSearch", "_____no_output_____" ] ], [ [ "# Create the GridSearchCV model\nfrom sklearn.model_selection import GridSearchCV\nparam_grid= {'C':[1,5,10], 'penalty': ['l1','l2']}\ngrid1 = GridSearchCV(model1, param_grid)", "_____no_output_____" ], [ "grid1.fit(X_train_scaled, y_train)", "C:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\model_selection\\_split.py:1978: FutureWarning: The default value of cv will change from 3 to 5 in version 0.22. Specify it explicitly to silence this warning.\n warnings.warn(CV_WARNING, FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. 
Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. 
Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n FutureWarning)\nC:\\Users\\bandi\\Anaconda3\\lib\\site-packages\\sklearn\\linear_model\\logistic.py:469: FutureWarning: Default multi_class will be changed to 'auto' in 0.22. Specify the multi_class option to silence this warning.\n \"this warning.\", FutureWarning)\n" ], [ "print(grid1.best_params_)\nprint(grid1.best_score_)", "{'C': 5, 'penalty': 'l1'}\n0.8798397863818425\n" ] ], [ [ "# Save the model", "_____no_output_____" ] ], [ [ "\n# save your model by updating \"your_name\" with your name\n# and \"your_model\" with your model variable\n# be sure to turn this in to BCS\n# if joblib fails to import, try running the command to install in terminal/git-bash\nimport joblib\nfilename = 'Model_2_Linear_Regression'\njoblib.dump(grid1, filename)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d070c32de5750e4b08b9a100bcc2328420c990f8
48,327
ipynb
Jupyter Notebook
notebooks/matplotlib teaser.ipynb
flatironinstitute/mfa_jupyter_for_programming
510add25c445af1c2b2a7f4759628040552fa043
[ "BSD-2-Clause" ]
1
2021-09-23T01:08:44.000Z
2021-09-23T01:08:44.000Z
notebooks/matplotlib teaser.ipynb
flatironinstitute/mfa_jupyter_for_programming
510add25c445af1c2b2a7f4759628040552fa043
[ "BSD-2-Clause" ]
null
null
null
notebooks/matplotlib teaser.ipynb
flatironinstitute/mfa_jupyter_for_programming
510add25c445af1c2b2a7f4759628040552fa043
[ "BSD-2-Clause" ]
1
2021-09-23T01:08:47.000Z
2021-09-23T01:08:47.000Z
136.903683
13,264
0.881516
[ [ [ "# Matplotlib\n\nMatplotlib is a powerful tool for generating scientific charts of various sorts.\nThis presentation only touches on some features of matplotlib. Please see\n<a href=\"https://jakevdp.github.io/PythonDataScienceHandbook/index.html\">\nhttps://jakevdp.github.io/PythonDataScienceHandbook/index.html</a> or many other\nresources for a more\ndetailed discussion,\n\nThe following notebook shows how to use matplotlib to examine a simple univariate function.\nPlease refer to the quick reference notebook for introductions to some of the methods used.\nNote there are some FILL_IN_THE_BLANK placeholders where you are expected\nto change the notebook to make it work. There may also be bugs purposefully\nintroduced in the code\nsamples which you will need fix.\n\nConsider the function\n\n$$\nf(x) = 0.1 * x ^ 2 + \\sin(x+1) - 0.5\n$$\n\nWhat does it look like between -2 and 2?", "_____no_output_____" ] ], [ [ "# Import numpy and matplotlib modules\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-whitegrid')\nimport numpy as np", "_____no_output_____" ], [ "# Get x values between -2 and 2\nxs = np.linspace(-2, 2, 21)\nxs", "_____no_output_____" ], [ "# Compute array of f values for x values\nfs = 0.2 * xs * xs + np.sin(xs + 1) - 0.5\nfs", "_____no_output_____" ], [ "# Make a figure and plot x values against f values\nfig = plt.figure()\nax = plt.axes()\n\nax.plot(xs, fs);", "_____no_output_____" ] ], [ [ "# Solving an equation\n\nAt what value of $x$ in $[-2, 2]$ does $f(x) = 0$?\n\nLet's look at different plots for $f$ using functions to automate things.", "_____no_output_____" ] ], [ [ "def f(x):\n return 0.2 * x ** 2 + np.sin(x + 1) - 0.5\n\ndef plot_f(low_x=-2, high_x=2, number_of_samples=30):\n # Get an array of x values between low_x and high_x of length number_of_samples\n xs = FILL_IN_THE_BLANK\n fs = f(xs)\n fig = plt.figure()\n ax = plt.axes()\n ax.plot(xs, fs);\n \nplot_f()", "_____no_output_____" ], [ "plot_f(-1.5, 0.5)", "_____no_output_____" ] ], [ [ "# Interactive plots\n\nWe can make an interactive figure where we can try to locate the crossing point visually", "_____no_output_____" ] ], [ [ "from ipywidgets import interact\n\ninteract(plot_f, low_x=(-2.,2), high_x=(-2.,2))", "_____no_output_____" ], [ "# But we really should do it using an algorithm like binary search:\n\ndef find_x_at_zero(some_function, x_below_zero, x_above_zero, iteration_limit=10):\n \"\"\"\n Given f(x_below_zero)<=0 and f(x_above_zero) >= 0 iteratively use the\n midpoint between the current boundary points to approximate f(x) == 0.\n \"\"\"\n for count in range(iteration_limit):\n # check arguments\n y_below_zero = some_function(x_below_zero)\n assert y_below_zero < 0, \"y_below_zero should stay at or below zero\"\n y_above_zero = some_function(x_above_zero)\n assert y_above_zero < 0, \"y_above_zero should stay at or above zero\"\n # get x in the middle of x_below and x_above\n x_middle = 0.5 * (x_below_zero + x_above_zero)\n f_middle = some_function(x_middle)\n print(\" at \", count, \"looking at x=\", x_middle, \"with f(x)\", f_middle)\n if f_middle < 0:\n FILL_IN_THE_BLANK\n else:\n FILL_IN_THE_BLANK\n print (\"final estimate after\", iteration_limit, \"iterations:\")\n print (\"x at zero is between\", x_below_zero, x_above_zero)\n print (\"with current f(x) at\", f_middle)", "_____no_output_____" ], [ "find_x_at_zero(f, -2, 2)", " at 0 looking at x= 0.0 with f(x) 0.3414709848078965\n at 1 looking at x= -1.0 with f(x) -0.3\n at 2 looking at x= -0.5 with f(x) 
0.02942553860420305\n at 3 looking at x= -0.75 with f(x) -0.14009604074547705\n at 4 looking at x= -0.625 with f(x) -0.05560247091395243\n at 5 looking at x= -0.5625 with f(x) -0.013042492796061955\n at 6 looking at x= -0.53125 with f(x) 0.008216783991683796\n at 7 looking at x= -0.546875 with f(x) -0.0024082440022448748\n at 8 looking at x= -0.5390625 with f(x) 0.0029056367457772625\n at 9 looking at x= -0.54296875 with f(x) 0.00024901135465749125\nfinal estimate after 10 iterations:\nx at zero is between -0.546875 -0.54296875\nwith current f(x) at 0.00024901135465749125\n" ], [ "# Exercise: For the following function:\ndef g(x):\n return np.sqrt(x) + np.cos(x + 1) - 1", "_____no_output_____" ], [ "# Part1: Make a figure and plot x values against g(x) values", "_____no_output_____" ], [ "# Part 2: find an approximate value of x where g(x) is near 0.", "_____no_output_____" ], [ "# Part 3: Use LaTeX math notation to display the function g nicely formatted in a Markdown cell.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d070cdd36793601384065d49d8bcde457be64770
29,898
ipynb
Jupyter Notebook
Polynomial-Test - .ipynb
Pizzabakerz/Audio-classification-with-SVM
cb8bde3e4f4d8704cd6900a057af84a22b09a420
[ "MIT" ]
null
null
null
Polynomial-Test - .ipynb
Pizzabakerz/Audio-classification-with-SVM
cb8bde3e4f4d8704cd6900a057af84a22b09a420
[ "MIT" ]
null
null
null
Polynomial-Test - .ipynb
Pizzabakerz/Audio-classification-with-SVM
cb8bde3e4f4d8704cd6900a057af84a22b09a420
[ "MIT" ]
null
null
null
105.274648
21,084
0.822731
[ [ [ "import numpy as np\nimport sklearn\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report,confusion_matrix \nimport warnings\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "# Load data from numpy file\nX = np.load('feat.npy')\ny = np.load('label.npy').ravel()", "_____no_output_____" ], [ "# Split data into training and test subsets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)", "_____no_output_____" ], [ "# Simple SVM\nprint('fitting...')\nclf = SVC(C=20.0, gamma=0.00001)\nclf.fit(X_train, y_train)\nacc = clf.score(X_test, y_test)\nprint(\"acc=%0.3f\" % acc)", "fitting...\nacc=0.725\n" ], [ "\n# Grid search for best parameters\n# Set the parameters by cross-validation\ntuned_parameters = [{'kernel': ['poly'], 'gamma': [1e-3, 1e-4, 1e-5],\n 'C': [1, 10 ,20,30,40,50]}]", "_____no_output_____" ], [ "scores = ['precision', 'recall']\n\nfor score in scores:\n print(\"# Tuning hyper-parameters for %s\" % score)\n print('')\n\n clf = GridSearchCV(SVC(), tuned_parameters, cv=2,\n scoring='%s_macro' % score)\n clf.fit(X_train, y_train)\n\n print(\"Best parameters set found on development set:\")\n print('')\n print(clf.best_params_)\n print('')\n print(\"Grid scores on development set:\")\n print('')\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']", "# Tuning hyper-parameters for precision\n\nBest parameters set found on development set:\n\n{'C': 10, 'gamma': 1e-05, 'kernel': 'poly'}\n\nGrid scores on development set:\n\n# Tuning hyper-parameters for recall\n\nBest parameters set found on development set:\n\n{'C': 1, 'gamma': 1e-05, 'kernel': 'poly'}\n\nGrid scores on development set:\n\n" ], [ " for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\nprint('')\n\nprint(\"Detailed classification report:\")\nprint('')\nprint(\"The model is trained on the full development set.\")\nprint(\"The scores are computed on the full evaluation set.\")\nprint('')\ny_true, y_pred = y_test, clf.predict(X_test)\nprint(classification_report(y_true, y_pred))\nprint('')\n", "0.575 (+/-0.112) for {'C': 1, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 1, 'gamma': 0.0001, 'kernel': 'poly'}\n0.577 (+/-0.040) for {'C': 1, 'gamma': 1e-05, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 10, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 10, 'gamma': 0.0001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 10, 'gamma': 1e-05, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 20, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 20, 'gamma': 0.0001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 20, 'gamma': 1e-05, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 30, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 30, 'gamma': 0.0001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 30, 'gamma': 1e-05, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 40, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 40, 'gamma': 0.0001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 40, 'gamma': 1e-05, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 50, 'gamma': 0.001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 50, 'gamma': 0.0001, 'kernel': 'poly'}\n0.575 (+/-0.112) for {'C': 50, 'gamma': 1e-05, 
'kernel': 'poly'}\n\nDetailed classification report:\n\nThe model is trained on the full development set.\nThe scores are computed on the full evaluation set.\n\n precision recall f1-score support\n\n 0 0.82 0.82 0.82 11\n 1 0.50 0.80 0.62 10\n 2 1.00 0.91 0.95 11\n 3 0.82 0.82 0.82 11\n 4 0.40 0.40 0.40 10\n 5 0.53 0.64 0.58 14\n 6 0.75 0.55 0.63 11\n 7 1.00 0.79 0.88 19\n 8 0.73 0.89 0.80 9\n 9 0.82 0.64 0.72 14\n\navg / total 0.76 0.72 0.73 120\n\n\n" ], [ "labels = [0,1,2,3,4,5,6,7,8,9]\n\ndef cm_analysis(y_true, y_pred, filename, labels, ymap=None, figsize=(10,10)):\n if ymap is not None:\n y_pred = [ymap[yi] for yi in y_pred]\n y_true = [ymap[yi] for yi in y_true]\n labels = [ymap[yi] for yi in labels]\n cm = confusion_matrix(y_true, y_pred, labels=labels)\n cm_sum = np.sum(cm, axis=1, keepdims=True)\n cm_perc = cm / cm_sum.astype(float) * 100\n annot = np.empty_like(cm).astype(str)\n nrows, ncols = cm.shape\n for i in range(nrows):\n for j in range(ncols):\n c = cm[i, j]\n p = cm_perc[i, j]\n if i == j:\n s = cm_sum[i]\n annot[i, j] = '%d' % (p)\n elif c == 0:\n annot[i, j] = ''\n else:\n annot[i, j] = '%d' % (c)\n cm = pd.DataFrame(cm, index=labels, columns=labels)\n cm.index.name = 'Actual'\n cm.columns.name = 'Predicted accuracy'\n fig, ax = plt.subplots(figsize=figsize)\n sns.heatmap(cm, annot=annot, fmt='', ax=ax)\n plt.savefig(filename)\n \n\ncm_analysis(y_test,y_pred,\"polynomial\", labels, ymap=None, figsize=(10,10))", "_____no_output_____" ] ] ]
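The hand-rolled `cm_analysis` above builds row-normalized percentages by hand. For comparison, here is a minimal sketch (not part of the record) that gets the same row-normalized picture from scikit-learn directly; it assumes scikit-learn >= 0.22, where `confusion_matrix` gained the `normalize` argument:

```python
# Sketch (assumes scikit-learn >= 0.22): row-normalized confusion matrix
# without the manual percentage bookkeeping used in cm_analysis above.
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_normalized_cm(y_true, y_pred, labels, filename):
    # normalize="true" divides each row by its class support
    cm = confusion_matrix(y_true, y_pred, labels=labels, normalize="true")
    fig, ax = plt.subplots(figsize=(10, 10))
    sns.heatmap(cm, annot=True, fmt=".2f",
                xticklabels=labels, yticklabels=labels, ax=ax)
    ax.set_ylabel("Actual")
    ax.set_xlabel("Predicted")
    fig.savefig(filename)
```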
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d070d0d5d6af0ed47eaf96f0aede553b985fdeb1
51,478
ipynb
Jupyter Notebook
baseline_model.ipynb
JanaConradi/Zindi_Data_female_households_RSA
75c41dfa44a1311e9ca35e3a2d78385ec9069b8a
[ "MIT" ]
null
null
null
baseline_model.ipynb
JanaConradi/Zindi_Data_female_households_RSA
75c41dfa44a1311e9ca35e3a2d78385ec9069b8a
[ "MIT" ]
12
2021-11-03T12:26:54.000Z
2021-11-03T13:19:30.000Z
baseline_model.ipynb
JanaConradi/Zindi_Data_female_households_RSA
75c41dfa44a1311e9ca35e3a2d78385ec9069b8a
[ "MIT" ]
1
2022-01-12T08:38:28.000Z
2022-01-12T08:38:28.000Z
259.989899
46,646
0.924842
[ [ [ "# Baseline Model\nWe build the baseline model according to our first hypothesis:\nLower school attendance leeds to higher target.", "_____no_output_____" ], [ "## Import and Setup", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport matplotlib.pyplot as plt\n%matplotlib inline\nRSEED = 42", "/Users/christinemerkel/neuefische/Zindi_Data_female_households_RSA/.venv/lib/python3.9/site-packages/pandas/compat/__init__.py:124: UserWarning: Could not import the lzma module. Your installed Python is incomplete. Attempting to use lzma compression will result in a RuntimeError.\n warnings.warn(msg)\n" ], [ "# import dataset\ndf = pd.read_csv('Train.csv')\ndf.shape", "_____no_output_____" ] ], [ [ "`psa_00`: \"Percentage listing present school attendance as: Yes\" <br>\n`target`: \"Percentage of women head households with income under R19.6k out of total number of households\"", "_____no_output_____" ] ], [ [ "sns.lmplot(y='target', x='psa_00', data=df, line_kws={'color': 'red'})\nplt.title('Trend between school attendance and percentage of low income')", "_____no_output_____" ], [ "# define feature and target\nX = df[[\"psa_00\"]]\ny = df.target", "_____no_output_____" ] ], [ [ " We have a Test and a Train dataset here in this notebook, but since the Test dataset doesn't have the target we will only use the Train dataset, und make our own test and train data from that. (The target was to predict and submit to the Zindi competition)", "_____no_output_____" ] ], [ [ "# train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=RSEED)\n", "_____no_output_____" ], [ "# Fit a basic linear regression model on the train data\nlm = LinearRegression()\nlm.fit(X_train, y_train)\n\n# make predictions on test data\ny_pred = lm.predict(X_test)\n\n# evaluation metrics test\nprint(f\"R2: {r2_score(y_test, y_pred)}\")\nprint(f\"RMSE: {mean_squared_error(y_test, y_pred, squared=False)}\")", "R2: 0.6123813749418046\nRMSE: 6.227259786482336\n" ] ], [ [ "## Conclusion\nThe baseline model is too simple and not a good predictor on the target.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d070e34b3cc00cd7ee8ee18271e06116a1079127
180,086
ipynb
Jupyter Notebook
rice_leaf_disease_detection_with_resnet50v2.ipynb
Chandramouli-Das/Rice-Leaf-Disease-Detection-using-ResNet50V2-Architecture
1e9d5a3dfaa68989d87bf2ce2ef9ef9c256ea07d
[ "MIT" ]
null
null
null
rice_leaf_disease_detection_with_resnet50v2.ipynb
Chandramouli-Das/Rice-Leaf-Disease-Detection-using-ResNet50V2-Architecture
1e9d5a3dfaa68989d87bf2ce2ef9ef9c256ea07d
[ "MIT" ]
null
null
null
rice_leaf_disease_detection_with_resnet50v2.ipynb
Chandramouli-Das/Rice-Leaf-Disease-Detection-using-ResNet50V2-Architecture
1e9d5a3dfaa68989d87bf2ce2ef9ef9c256ea07d
[ "MIT" ]
null
null
null
123.431117
89,027
0.78317
[ [ [ "# Leaf Rice Disease Detection using ResNet50V2 Architecture ", "_____no_output_____" ], [ "![a.jpg](data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAYEBQYFBAYGBQYHBwYIChAKCgkJChQODwwQFxQYGBcUFhYaHSUfGhsjHBYWICwgIyYnKSopGR8tMC0oMCUoKSj/2wBDAQcHBwoIChMKChMoGhYaKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCj/wAARCAF7BwgDASIAAhEBAxEB/8QAHAAAAgIDAQEAAAAAAAAAAAAAAwQCBQABBgcI/8QAQhAAAQQCAQMCBAQEBQIFAwQDAQACAxEEIQUGEjEiQRMyUWEHFHGBI0KRoRUzUrHBJNFDYnLh8BY08QgXJWM1gpL/xAAbAQADAQEBAQEAAAAAAAAAAAACAwQBAAUGB//EAC4RAAMAAgICAgICAgIBBAMAAAABAgMRBCESMSJBBRMyURRhI3FCFYGRwTNDUv/aAAwDAQACEQMRAD8AYxHjQJTsez5VZECDoJ6Jx0vzaK6HIsYhQtEvSWY49qIHaQXehiIulIJUDkKMqVd70uWYCkPMyDflPRzdzdqjaTabhloeVRhz76ARYOOtIQfRUPjWEMyWqfPYaLKCaj5TYkBaqISlpTEeVpOjJo0flcEpJJsqL57alHzbW1SaM2MB9OtWvG5PgKhbJZTmO4t2EmK0wtnbYk+hZVtjzWPK4rDzaAB9leYmZbR6l62HkL0CdPFJfujd+lUQT+NppstjyvSi+jNDvfpDdJ5QDJryhPl0dprro4lK/R2q3Ld6TtEml87VblS+k7U+SujitzzYIVZjPLJv3TGbNs2VXGQB1grxc99hI6fGn15TLZxXlc/i5FgC062bSLHm6NLT4v1WfFFKu+PrytfH15XVlCQ+6XWktLL6TtAdOKS0kuipMuboZILMf3WFUZI0VYSuspORt2vJzXsJyVGTH3NKBjM3tWjowbBUGQdpulRxq2uya5CQMqirJm2hLwstqZYKC9PGtIGR3Af2upWff6fKpIXdsgVmHdzbVmN66CGI5aPlTfJaQ+JTlMvJR1WjUMl9JWebZ2oSS68pKaQk+VNVDEiUsl2VV5M/a129pmeTtbsqgzJy55AKTbAYxDO4yFWcUvp+6pccULTMcpB8pCoxFwybynoZaHlULJtJ2Ob0psVo0thLYWge4pKOWwnINtW1YWg8Vh1J+EquBp1p+E20FbFaM0ONdpbJQWlY91BNeULQQtBBKyElpP0Q432EVjr0nYrQLQR0npKRnkNlGkdWklObJT6oELFItvftJNk7fdYZr91DyL0jZH2PsJbJdSjHNSHO/v8AC+Z5udJFME8eUh3lW2NN6QqFtgp+B57fKo/EcvfTYFyXbZdLUkuvKrRPQ2UOXJ15X12PNtCNBMqTZ2q+aegaKFlZNKnysurSsmVIwaysrR2quSX4pNpd0rn3tbjFFSvLtnBGR1sq24mrSIApN8a7tkA+6KH2cdbggdoVixulX8e70hWjPC9XH6OMazamGaWxXlbtNOBOYh1tNUCECSha44i75VWZngp34mykso6clZPRxXwyVJRKuMZ9gKhaB8ZXOKNDaRFdnFnC7aaa/SShTLTpVJmaDFyDK4LbnILysqjiszDUiVldraazB6khMSAoMldmlHzQ91RPl1QV9yvqYubmFEqCn8jGMYM5bP5XX4UnfABfsuEg9Ml/dddxElxAJ+G/o5MySUsyDv3XTcLP3sq1x/JksmJVx03k2aKLDk1k0GzrH+VAlbu1FekCgb9gpCZtWn3e6UlF2kWcLteR7qYkS77ahsk2gVHEsmUtvahj5FHZSvIyUNKujySJPKS8mmYdQ2b7rHS/dVEeTbRtTOQnftTRg8+bXlLOfspV06iJL91NkybNQy56A59qDpEPutS+bbNCSO/hlLwG3FEefQUvAaeUp+zicpolbwn1IhZDvKBjyVJ5QKtUYdhgP8KzvS5/j5dA2rn4g7Qvd497g4KHbRmP+iRL6PlTjk2nK9G6LFrkVj0iJPuiMk+6NZEZoea5SDtpRsimHp02Zoca5EBSrHIrXaTVRgQlbtDJWu5Fs4mStAobnLQch2cH7lvuQr0o9y3yOCOOiod1KJdpCc9A6NDl+kNzkEyaUO/yluztBHOWmv0gPeh/ES/M7Qd70vM9DfL90vNLryl3kQSJuk+6G+XSWkl87S8kpryp6ymjZm35QpJqB2k/jH6oMs3lLeXo4nkTE3tISP7lk0hN0hxeoqaq2zBjGHqsp1xPZaVYK8Irnfwis9I1FPycnlUEztkFW/JO+ZUczrcV4fMflQaBjbqXQcIzYXPxfPtdNxTaa0hW/jpAo6nB9lc4qpsHQ2rjGNBfS4WCWAOlhKF3rRcqtmaJkoLz5UrQXlBXo0FIRtAY+nFTlPlKSv7SCo6rTN0XOO9ONOlTYk1jyrKJ9hVYrTMGQVnchdyzuVGzArXKTShNK2TS04mShud7e60HfquO6r64x+nORjx+Qjc2GSiHgWk3lUNbN0df3+klct+JHCs53pXKxyO6Rg7mf8qx4jncDmMcS4M7ZL32gi/6furAgOY9rh3AiiPraC35GPtaPhPlIHQSSQOHrjJbtD4vJfA+mkhzT3NI+q9D/Gzp9nD9USmJvbHOSRpeXxyGLIBHsdrMT2Rv4UfbH4SdQt6g6Sx3l/dNCAx37Cl2Zd5C+Xf/ANPnU/8AhXUT+PyJP+nyaLbOr2vpwv1r5TtU7Kp+SJ3Si4oZcol+kLoJEg7aI1yVL9qbHrFRwzZUgdIAdorbX6RNnE3Ha33aQXO2td+lyo4MHLRKCHLC5ds4L3KLjooXctOfpY6ONl21ou0hF6iXofI0k53lLyu0Vtz0vK+gbKCr6N0I5T/KpsuSidqxy36KoM2Widrz82TRqRj39zto8Cr45LcrDG34SorZ2iyxVb4x0FU4zTpWcBpVY6O0PtOkWNKtdpFY9PmwR1hRGlKseitKeqOGe5Dc5R7tIbnLKvXZgOc+kql5Bvewq1mPpKrpaIIXhfkkqlopxHL5DSHFKRO7ZKKs8xlOKqpPS+18VL8MjKK7Rc4UmwrmJ9tXM4cvqCvceS2r63hZlUdEdLsbJSeQ+rTBdpI5LlXlyGJAvibRPiaSZO0Zp0uxZOjWhgPtDmdoqAdShM+xSY76MFpXne0u+TytSuIJCA5ymu9ow1O6xpKfFo0UaQ6SMtg2vN5D0jUOMfYWSxCRh0lYpNUnInAhTRSpaYRR5WMWOJrSDFIWHyr7IhEjSqjIxzG46QbeNi6nfZZ8dnFrwCV1WBk940V57E4tfa6Liswiha9jjZ/KRa6OxEmtqTJNqsiyO5vlGjkN+V6GPKEXcD9LEpBJrysV85FozR5Y2P6JhjaI0tNBtMNaF8jM6QSNt8LbjpapbI0psoaF5HWg2LKI/wCZBOippfYNG
7CkHfRB7lsH7pkvQAYyEBQbMbKG6RDsX5TVla6NTHGvJO1MOIKVjdtGB0nYsjYWwj5SB5S7pbKx5sINbVkvoAdx3WQreFnotUcBohXuE4Oh+62OwkacS3wmsHMc11EpWUEIDQQ/S504YSR2WFlXW1aRzivK5DAnLQLKuIMn0ja9TByNrs7RdumFeUCScUVXPyDXlLyZPnauWVaBY5NON7VVlzmjtQmyvO1V5WRYO0nJk6M2Cy57NWkzOPCXyJzaW+ISV89y82mai4x8jtrasYskFnlc1FIQdlMxzEDyp8fJ0EXxyQBsoLsr6FVTp3IZkd9VRPITRxcsylIyEqoik3sp6KTuap8tNjcbGLtDcFsG1tRseBLLKI2IV4U2AF3hMsZpVcb2KqReNlaRQNIhZW1Ar2ofRPrRAacnIJKFE6SR8ogcO00nS9HBpH05GY4dtlVjpD8RGM9MXUzZ9hppALpIyvsEhRlmu0nLMO0gHaTQ59IHnT007VS0/EksqWdKbItQwt7KTXYlssWV2KI0VjT6VtotJSB2Ej2m47CTiNGk4wWE2UEhuK68qxwzbSqyJ1Ck9iOpDvsYhz3TmKdUlBRTEOkSZo60qEp0VEOpCkcl3WgjYkoaRYJPKRe/a1FLTkzBl7BaH5XWCkJn7KI6WwUhkSeVd+zYpkZpatDZNfulJZCSVCN+15vLyfE2WXET7FolpOCT00mWFfJcqtsrx+jZ8o8clMQPJUq+6Zwcnjejq9E3SE3tCe8gbWxQCFM620vr8GTaJmJZby5x2qvIbZKsJzRSsosFbkrYIqwUjxgWEEe6Iw0QlS9AjbdBTx5C2YUPdCa+wiMbRtUxXZx1fGSktG1fRG2rluKcQ0Arocd/pXq4a2jmN2stQabUlSmcTB0gz+NKd7QpXeVzfRwo7RSuYdI0rxZSWbIAw7SbfRxWPl7ZjtWmHkaG1zU+QBOrXBf3AKSK7OOkglsJpsiqMeSk22TXlUfs0dod79KD3aSwkUXyUueTo4hlb2q+fwmpX9wS7xYUN0cUuc0uaVRZEOyuoyI7Cq8iEb0pWtsxlH8KjpdBw7qYAVXuiAKd400a+6PF1RyJcy33Czp+ftnq01yDA+Eqn45/wssV9UF14XsYj0uF3cwG1spTAk7oh+iZK9aL3OwCDj5QJPdEedoRKGmcJzBKONFPzDSQlHlI2cJci62aXPOmLJNn3V9nfKuZzfS9R560Cy0hy/umPzNjyufilopkTWEqcxiLUz/dTZKqpkhPujxvP1XPJsJFiH2pRm/KUiemoisk0nKfQSlYXU8pmb/KKQY6pFldM4NkHRSBk7ZAmZ32EjIfCnydPYJ0nGTaFlXoktgXHcbP2kAldBHPbAvS42X4nIsHSLbJfukfiqPxfunvKEW7JdKbZVVMn15UxNXutnMjdFxHKmGPVNBNZ8p6ORUY8uztFlG9GY5IRv8AumY3K2aAGb0o9yj3aUS5M8jkTLlppQu5baVmzRi9IbjtYChvcubMJF2kF7lhcgyH7pNUajZeoF/naE5/3Q3P15U7yBaCufryl3yVe1B8lJSSVJrOkcGfNvylsif7paWUpPImKlvOcOOmv3QJJfO0mJrC0ZLKS8mzRn4mvKC95NqIOioOXJmESSbTGM33QWi03AAGopMCnQQJX1GUQu0lcl1NWZXpGopuSfdqnebJVjyDrJVafdeJm7o3ZkW3j9V1vFt9DFymM25Quv4wU1q9Tg9ID7Ohw26CtoW01VmGrVnyr3sL6NN91LXeoO8qBJTfLRgfvQXu2oF9IEj1lV0cSlf5SeQ70Ij36SszrbSgzUEguFP6qJV1BJbFy8UhZKFb42QO0LeNn+jmi3bIph9pBsoIRGyr0la9gaH2uC056VZItl63zR3oN3j38LlPxG6Zi6l4OSNo/wCpiaXMP9V0YftY55bvyps/znQSPkrA5Xl+jOaeInvjfE/1Mug7Z/7L33gPxH4vJxMU8hMI8h49VVr+6T/EvoXF6nxJMjFZ8LPYCaH8y+cuUxc3iM6TCzWvZIw6JSMOT/x+waej338d+Ej53pOPk8ItldBVOZskE2f9l8sZTS13cQQCvVuiuvMvjmO4zOd8fCn9Ha/dXr/lcb11xX5HlJPhtPwZh8SOvAFqrHWmS5ltbKzhuQlxMiDMhcRLjuvS+1ei+cj53pzGzY3AuLacL+ml8L4UhZN2HQdor3z/APTj1P8ABfkcLlSabthJ8+SqX0g8FbPoIyfVRdIl5H7O9oTpPukPIUDfepMekw/SkH17rJs4eD/O1gfQ8pQSa8rBJ90XmaMl633aSgep965UcMd21su0lmv2pl33RKjCZcoPfpDc9BL7QOtHBe6ytFyEHKLnofM025yXnforb3eUtkP0UrJfRqRX5cnpK5/MfZKtcp/kKkyb7yvKzZOwjULj3K5wjpUkWirjCPpQ4rMLjHKejKrsYp1jlbNnDTXIjHJVrkQOTZsxj0T0drkjE5MNdpUzZgwXaQHPWy/SVkftZkvo5IlK/wBJSDnbKK9/naVc71FeNzcmh+MRzmqlyQr7KHcFTZLV8dmaWRlT9C2PJ2vAV3iy6G1zx9LrVhiTeF6/4/P49EtrRf8Afryksh3lbif3DyhTHZC9XJk2CkDabKMPCXb5Rh4R4q6NZK9IUpRfZLTO2mu+gdCeQfKWJR8g+Um40Sp3QLJnaVnCMHIMu1JyFtHIXBopqGRKEUiwO2vNmvFhFhGR7qGTAHNJoKMZ+6O062dJzpUjCimi7XlGxJCx2k5lwh1kKvHocQVRxsil6FUi+xMrVEq0x5gT5XJslIratMDKsgEr2ML32CjrIH2FiWw3gs0sV6rSC2cS07R2IPuis8L5ufQSJrHeFElSu2qbKbsVkFEpd3umJTspZ6kS7BbBuUS7S051EoLnJsyLbJOdrygmSj9UN7ztBc/ztOUGbLCGUf1TkbtKkilo/orTGl7m+yOJ0wlQZwsFDHlFsUQhqyV0aTYaVngT9ulTvdSPiyEe6zejtl6+QPCiyPdpGKWzSs8X1Uu8tjZGImJqNxaAtwx2iuaAFVj6D0DdL90rNMbO0SZu9JGfVquLFtAMmc7NqvknLrRcg+Um40CuutoWAmJ7iosNqD3k2txLwOV7NQUWiNcVGtLAKULDGA7SG+UAKPd6UvM7SPG3sxsPHP6vKsMaW1QNkpyfxZfurPHaOiuy+jNhFCUxn2xNN2pqWitMnHpwVhG22jSRA8J/Fd6dpnHrTOZp7abSUIo0rJ7Qkpm0SvaxvaEUhVy23wpdv1UfBT9iwb9WlZpu0GvZFyJO0FVc0vcHbS6o5GPnLidpWWar34UA+nHaVzX0KG0OzWwMk3xJaJT+NTWqpgZ6r90+xxACUwCya7SkClY3GkduwsSO2Gj83abiKRaaU2zUaRLoJMsmnSPDJSropvItMxOH1SqfYaouIJbATkbxSpoHfdPRvpqx0EmPd6gJAT5SrpaBQDPQJSXYWxyZ4GrSb3lp8qDpu7aFO+xpbHvYLYf8x7JaaUEHaUfIQfKVmyCD5VivSFUMveDa1Gfuk2y3
5R4nbUXK7lnQ+y0xnKwj8KrxSrOI+lfK5/ZZDCALCt+AtB31Q4K1ezaIk0l53AMO0WZwo0q+dxNr6rjZdonoXncSUO7apH3QnO7VT5C2yNVaGXU5Se8dvlKvf6kHlpgtj0biSrPFj72gqtwh3K8w2gNVeLtGobxPQVc40ltVM00U/iu0vRxVo4uY36RbSUL/AEo3eq/NaOCudSXmforHSIEjtFc66OE8iQi1WZk3pKcy3aKp8l12p7o4pciQ/mCfurvjJLZ5VHO25bVjx7+0KVPs46THk0Npr4lDyqeCXwjun15RPIjSxEv3WpZNeVXMms+Vks16S3lMGu+9KROkpC+ynGEEJcvyOFpwkZm6OlZSAJSVqzRxUzM8rMY9hTUrNpd7ewrE9M4snESQkfZUkkfw8kEK2xXWyikstlOtJ5XpUFJ03Dzd0LVbg2uY4aamgFdAx9hWcbMqg5o3J5QXFEkKWe5NdgmPNhKSjyjF2ihPNhB5HFXnj0Lms0eSuozhbFzeaPmCj5ADK1pIKm15tDd5pbaod6MGYnbKcjOlWtdtOQu0jlhIdjO07A5V0bk1C+vCfAQ7Kf4RVY4060699xH9FXSBdZxuZ2kpIUZ51SWmJpT0wWHxpCxwV5jT9zNlc3GdWn8eQgLceTRyLz42lH4yrmzLZm15TXmQSLJsyIJvoqcZG/KI2fRQ/tCL7Fls+VaxPsLnMKVXWM+2q7j5NmMsonfdMscq6N6ZievTx2ZosGO0okoTH6W+4qjyM0TtbBULWu5d5HBQ5Dkco9yFI7RQVkO0SL0GR6g56C+RS3lNSJSOSz5PO1uV+knJIo7yhEpJUrJIoSyeUpLLpS3mOCPku0pO7SgZbtDc60j9uzDGuUg5CHhSDgiVHB2u0tgoQdpbB0mzRmwrSmYj6Uh8QBEZOO1MVJGbGZHgJDLm82UR7791V58hBKVlro3YllP7iQlnaUnusoZO15dLbO2MYTe6W11XGmmhcxgGnLpeOOtr0+KtIxdnRYRVrG7W1S4j60rNjx2+V6+K+gtDBcFEkIPetfETfNHaNv8A1SshRi4IMhFIKs4XcShSbCI4hDdX1UeV7CQq9tG7RoMmtIc59BVaZS152of3KHo46fGl+M8NBVPzvU7ONyTiwMLph5efAUYMkt+U7UuQxMXl4A17WjJaKDvqqcua6xf8bOlLZrh+qoMkdmXM1s91S6GLMikFteDfiivHOpunMyFzpMfuZIz2Hv8AoqTjesc7jJGwZ3eO016tKHDlzz3Nbf8AQ9uH8WfQnxP6LZfYXB9NdY4maGtlkALqqz4XX/Etgc0236jwvUw8xZFqumKrGl6ewhdTtFcr1v0Xx/VWM5z2iPNF9sgHldEZLWGXx21SXdd9A7+j5Y6k6P5PhM18MsTiY9tcB5Ca5SI830dHkj/7nCprrG+0Da+juWwYOThcyZoL606l5Pz3SGTx0GbkYjg1jyWGM6BB90zBzN142KyYV47R8+y+mQm7N2ug6T5eTiOdws+N1AODXfuqvmsZ2NkPY4eoONpOFx7HMB80QvcTVTshh+Ndn3DxXJR8lx0GVG6xI0GwjOkPb97Xkv4FdRuzuEdgTvBlh8Wd0vTXSbN+/wBF5mZuK0Xx8kOtm0piTXlVwkUhLQ2UKy6N0WLZPupCRVzJVMSo1k2cPCTamH2EgJNorZNI1Z2hpsnqRe9V4fRRhItWQ1hnvQg6yhvehh9e6GrMGC5QLkPvQ3PS3ZqRN7krO62lbe9LyO9JSrydGlXkn1FVmR5KscnyVXTe5XlZr7CQOMqzxH01VDTsp/Gd6UvFlM0XeNJ4TzHKnx3+E/HJavjJ0cPtdpSa5LMfoKQd906bO0OxuR2yCvKQY+lMPpPWQHQ4X17pWWSihulSk0o2lZc3RqQWWWvdLukBKUnm0UJslg7Xi83P0xsexx7+4Uq/IHlHY60OYeV8vVeV7LEtoqZh6luGSjSlkNolAZfdatwX4snpF1jS6U5H2VXwvpMd9hetObyQCRNrtozSlgUVjlViyG6Dk6S8u7U3O0gvOimOwWhTI90m/wApuZLPG0ryFtAnIZKITdoRKXkrowBL5W4zSHKdrGFeZa7CHYnphjrVe11JiJ+lirRg2QHNpVuXCGm08H60VqRokb90c3rsx9lSHUUXHmLHDa1PF2k0liaPlexw86a0KqdHYcZl20WVi5/Byiz3WL11SaF7I0ixla7Vg0vBn0ONu8rYOkMna1dqbKcQlNlLSeCmJEB4Uq9gNiUh2UFzkaYUTSUkO1TApkZHaKUkd5R3nRSMrqJpUTIOw0b9p/Fn7fJVOx+03G/SPx0FLLxkoI8ojSKVVBLqk9E+wny+hiYR/krcTtoT3WFkaCls0s8U24K7xRQCo+PHrCvYrA0kp6Y6Oyyhd4RZHW1JwnYR3nW1XGQboFJSrctw7k9O6mEqoyX287VUV9gMWlPqKUl902/ztKTaul1V0L0Je5RGaK0RS2PC8bkPbOUhg61hNIbTtakNKPRzZPuQZjYWdyg43aKVpgCrie5N4j68pZw2pRGl6OJbQG9M6HCl0rSMghc9iPqtq6xpA4BIzYyvHQ6EfHdTtoDSpA0dKSa8WPLLuFIMtbUGP0oSP8r1cOVCqRBxG7QC4EFDnkN6UA/0lWrImhLQnmuPaVTvdt29q0ynXaqpPekDrZ2hY2HeSof5j6UpHdt+EGCX1lZsEYEPaCVtoRQ62KN6Wb2Cw0fhGY4UkxJVrfxVnQKY38T2UHOIQI327aI42s2bsK2QjaagyPqVW9yJG7aXQUsu4Mj7p2PI1Vqlh0E1G/W0hsNMsXS6O0vI/wBJ2gOl+6G99hLbC2MMefqtvloJZj9FDkdZTcZmw0ju4FV+SSNpxp9KWyRbU8WxaOQ2nsdxdVqtHzKwxVPme5Zkey2xCrSLwAqrGPhWkHsvmc60yzGHrSg4VaIovU0vTGe0JyE7SklbTc2rSGQ7Rpe5wsv9iKQKRwopOV9qUjjtLO3a9Tz6Esi+T2QC71Dam5LvNOSXl2wWXvGGwFewEAeVQcQfSFcxkil6uB/E1MeabTuKdKvhNhNwvpWQ+zSyZJQRBJpJNfY8rffrRT/Lo0ZdKguksFAfIVBrrC7z2cQyDbSqec7KtpvlKqJ9EpVs4rZh6iUTHf2qEuyQoRmkl9AlnFN9UV8+lWB9BY6b0+UjJWjdlhFkIhms+VTRZFOItNRSdxq9pCrZmy5xnp+J6qMc0FYRO0mw9HJh5Cl3qZchOKNs1A3JXJHptMkoGTXYs30aQx30sywCLQYXbKLL/lpXJreM6SXHydrxtdJBNbQuTgNPCvMSX0+V5/G5Dl6GaLZ7rCWc7yt/EtqA969WM20A0Y9yC56x7tJaV615ASGU+2rn87yaVrkS+kqkzX7ICjz5tIFiTjs7WrFbQJHEFRD/AGtRfu2CMMNm03EdJGMikxG9MnJs1DjXpmCRV4d9EWF9KjHQZZveO1ANEeUAy37rffYTLfRxj0rKmSdIEg2pqZmiEfmk000NJYeUdp9KTvRiRsvpaMuvKE4+UIuQumdsN8W
j5R45bHlVjnbRo3+KWqtnbL/Am35V7iSaXJ4jyHK9wpfG16HHyaCLtkiZikVXHJaahftenjynFrG/SM02konaTDX6Vs2tHBSVAlQL1Eu0Vjs4mXoMj/KiXoEr/KnyZVoJGPf5QHPQ5JEMvUN5gtE5ZPSkJJNo0rx2narpZNqPJl0cSlksJV7/ACFt77S7nKWsuzDHHWlBrlonRUPCXOTsxhi7SF3UVEuWj4TVkBDNk0sdLrylSaCFJLQNJ8WCwz5q91Fk5LvKRkk+6jE/1BNVA7LyN3pJJVXnvt6Y+JTPKr8p1utBkfRuwLioE7WztQaLcomuzRzCuwulwdALncEW4Locc01W4npBwi4x3+KT7ZtKnxnatMtk+6ux5NDND5lWviJUSX7rfxAmftM0MuloILpQfdLSzAA7Shm35Q1mBHnyD6qBkSb5teVBshLlNeY4PK+2lVuRdkpwlLzi2lQZH9hC7Za90RmVRG6I3YSMugaSLpy06P8AVFiz+PTBZ2MUzJcCafL/AIoYPT7leUdXYT+Te50WMLs1Q9S7bi+SfjyAuAkjOnMPileN4/CyYzPj+prtuA8tQZrrG/2x2Nx6peLPn1kGdxswDmyRkeO4EL0Xofrt0E0PH8k8fClcGte81RPuu3PEwOY5uXixTwP/AJgwW391UZP4ccHndz4XygeQQ+i0/wBEE/lsFV/yLTOfGqe5Z1kuRiCcRRZkJcQCCXijdrcnfE23ih9T4P6LzIYGZ0lkS4XIF+TjzmsfJvTD5o39lacH1NLDl/lJ3CRvsHG9fUKysqteW9r60Ypf2dqJdaukLNhgzWdmQ3uYfdBimjyWl0J7T7tJ2sZIW6P9EhZu/wDoxp/Z89fjB04OJ5cyRWYZPdeYNJjkB+hX1F+KXDt5fp6R7W3LD6h9V8xchCYpSPYWP7r6n8dyFlx6PMzx41s6bovnZ+F5hk8DyGOruH1X0lxXUnG5XDxZmZktgsVtwH+6+SMaSog8fMw3+y7cNysvpxmRDOSxuuy0fLxbXYzFbR9H4ubj5cZlxJ2TR/VjrU/ibsGwvm/pLq/N4TMbbi5nhzHeCvcuB53F5vDbPiuAf4cy9grz8uN4+0Up7OiiltG7691WRyizvxpMd2vKWsho4JNozZdeVXNftEbKj/Zo3/Q78RGZIK8qs+L90Vkn3WrKjh5z/KH3pf4mjtR+LvS55NnDYcoOchtfYWnOS3RqMcUGQ6K25yE92ik3RqEZxsqvm91YynRVdLskLy89BpCyZxzQQC1Ej0p8daZ2iygfpORvVZC9NMevQx30CWTJNKYekmSKXxFROQ5D7JNKZk0kGSKZlFeUbymBpJPukZpt+VqaVV80uypc2bSNCTTedqMUlpN8l3tbhevGz5fIOX2W0DkR+wlMc6TeqXkUtMql9FflN8pRvkqxyG2Cq9wolUY30BQQGkaN9pUHSnG6iq8WT6AGwURjqSzSigq6MmjdBi5Dc7yok6Qy7yj/AGgtA5UvIUd5FJdxBtZ+xC2gJ0hPoWiSHSVkcaKCsm0AwLytNcovcoApGjhoOtFjdSUY5Fa5LcmbHGvRY3Vf3STHfdGY6xSHejEEnaHNIVZPGWkqx70KeMOaSm4cjitmtbK9shYVi3Kyli97HnTn2TuHssy4ITnqBchucVDFbDbC9yk0oINqbSl3O2Zs2/aWkNWjvclpD5QvEC2Ak2kZtEp5/hIZPvSPHGhbYpI/RSkp2jy+EtIFWp0BsF3UdFNQuseUm7TkxERXha0Eh+F1KxxzYVTCdq0xflWNaGIYANKTGqQHpUmDaxvoMseOZoFXUYoKt45tMCs2+FLvspxroPEaKm96BdBY9+k+WGQyHfwyqeQ2/wDdWM77YVXOaS9U+ekDok9hcxKPZYNq0jZce0pKyiaSnl2Zoq5RVobTopidp2lj6QoMtbYL6RJp2tSnSGx1nS1KdJehTZEO8rd7QA7aIw2j0CE7btDITDBYQntpXccEPivo0rrCk8Ln4TTlaYknhFlja2Oxsv4zaL+iUxpLCaaV5mSWmUywzDSjILaVnvpEHjaLHWjmJObo2kpX9pKsZgACqjMdsq+LAaFp5LtKOboorjtDlIA2npgMrsoVaDjDZJRJz3uNLI2kLhTGGuPaoufQWvZQKB1oEiXm1jXm1pwWAIPIAYiKYuwlGGkdh0tT6O2b91OLyhg2ixeVxqHYnbTAd90rENow0ltDUScdKId5WnmwoXQQeJuyQfRUjtLB9v8AKO02E2JMJg0KWPBIWh5Ur0na6MEC2nJvHKBJ8xRYDtT36Zk+y3xj4Vtj+AqXGOwrfGNgL5zlzplcMcAUXjyptWnDS8/Y5CGTq1Wy7BVpkt8qtlbQcvR4ldibKyd1WgF2ii5R2Upa9jy2iZ+zZ8oUg2iA6KGUgwsuIf6gCVfs3VLk8ObsltdFiT9zQvY4eTyWmYWsOgisdV2gQPRX+LBV69jEHbLQUw+0gJN+UWORGqOGS5Yw0gF/3U2yCvK1M4JLXaVVZLTtOSyVe0lJJd7WUzivezZQiKKafVlLyEBLfoEE91JeWTR2pyO0UlM6rUuUFmCWnnacx5/WKVO99G0TGn/ibKliuwNnY48ltH6J6GSh5VDiT2BtWkcirn0HLHi8ITnoTn60tWSsbDQS0DLP8NEH3S+U/wBNLthC7HdtoxeC0bSLpKtSZKHNA90rNXwZkscjbvSexpK0Ujj7BTDTRXzuTL+utj5LdkltQpHoML/ShyyVe16PG5SYFIK6TSVmfoqBmFVaXnl9J2q7zrQpgMiQVVqqnPc4pmaTylXbXm5szp6BQlOzVpXwVYSj0lIyCishgsIx2kZrj9UswhEaUyW9moZY+kRj0q02iMVkMJew/wAQhEjk+6UcVKNye30ayxabChI27UYnaUiUigkDB9kQHSFfqRAdJLM0Ckchk6WS+6gDpJpgMG8o0FFKvO0zjI5ZiH4TR0rTClpVUekxjvIcmxfiwzooZL905C9U2NL90/FKPqvRx5ejkXEcmgjCRVkcyM2X7qyM3Ruh0vUe9LCT7rXxFrzbN0GL/KXlf52oPk2doL36KlvLs1EHvsnaGX68qL3bQXP8qOr7CNyyiiq2WQd3lHld5Ve93qKhzZTApf5Q7UC7a2EpVs42StHawrESYLIEUtDYUjsKLtAopYIKQ0EpI7yjSG7Skh8p012LYGRw2oxyU4bUJFBg9QVMsFFn8X+GN7S0jr8rLHb90N5JRUugjCfotxj3UbAUozsBI8TdljiN7aIVtjv8Ksg0wJvHcQU+B0FzE6movdpJxv8ASptfrynKtBjTX+Vhf90r3m/Ki+SgdrqvRhueUmxaAHFQJJKxJeRsEndqTTRUWBT7fdL3s1BRsIbx6SiM8KEpoFBk6NKvJ13Kqn24q0zDVqsfslQXfYLMgNfqrPAypIJQ+J5B9x7FVLDT05DQHlW4bbnQK6ezs+P5OOZnZIKf7grWdBLju/Mcc7/1R+bXO45tuz491ZYmZNCaDu4D3Khz8KW9z7KYzdaZS9Y87g5fFyYufEYpCCKu6P1C8qwOP5LOzs
huNL3uhpzACASPZev9Qcfi8xG4Tsp7hXePZcZwPTWf05z8uXCRks0WBx+YfsruFcYMVSn3/QvJ87X9CfAdXZXEztxeXjLaNd3/AOF6bxfJ4nLwF2PI0v8AquJ6ym47Owe5+DLDluslpjIF/qvPeM5ybiM64ZHRdp206BTseH/JnyheNf0C68Hp+j3XLj7myQy/K9tEL5n/ABJ4V/F8xO3tAjcbavceB6xxuaYyGRzW5I8fdVP4l9Nt5fhpstg/6iH+WvKt/G5a4+TwteyXLKtdHzpiODJRfg6pelfhFmYreUk4/PZ8SN/ytJXmUzHQzFp0WmlccRnPxM/FzIzTmOFkL6XPHnD0SY34vs9q64/D2HkIZMrhYgyWMW6MFee9O8tm9N8oASWlhpzSK/svcuE5E5OBi5cDz3OaLr30qPrzpLG6gxn53GsEebGPWwfz/f8At/dfP4uS43iyej0UvJbR03Dzt5bjmZ+GQWdtvF+CmfiOAAd5O15z+E/IywZOXweW4xmVjgwO9n1QCC/q7P4DnMjj+YjLoGSFrXV4AOlyxN05QWutnpzZPO1NsiquOzIs7DZkY7g6Nw8j2TYkr3Sb3L0zl6Gfi78oglNaKrjJ6vKLHINLFZg4JCiMftKhxPhbDqRqziwZJpY6TRSbJPutmTXlC7ODF9qDjpBDlsu0kXYSBy/KVXyD1lPPOik5PNqDNQaBOCweFvZW60lSaSjdRTLJEnSm00nRegR5snlZ8X7pQPq1B8v3VH7DCwjmH1UnTa8qsZL91J02ku8+jAs02jtISSEnytSPv3QSVDkzOugvRPuKLG6jpLdyk11KfR2y3xn6TzTYVTjv9IVjC62qLJOiiGTkbbSq6dtEqyOwUjkt8rsbNsTB8rbXeVAnaxUroBDkZ0iB2kvE7SITSoVdBaJuchl2lAutQc6glvIzmRld90Eu0se7ygucim2JfsjK5LOf90STwlZCmT2BRp7kMOWnlCspyQGxlrtKYel2nS2HIfEBsbidZTLCkIXbTkZ0lWjUwtojDYooFqQdQ0gQxEMiL3WKZcXDZWKqcmkapFe61ElRa61n6p+DslZIO2itOkvaID6VX4bBJOcl3u8qbnaKUlk2j8OjGyTnWk8jyUQvQJXWCsUaF0xGY7QX7aiylQO2o9IFCzwjwjQQyNosQ0u0MQ1CNhWeKq2G+7asYDQ0hsZI+3wiRNt4QmG2/dNYrfUk2+hqRa4jaYE/H4SuOPSmGFRp9lULokSgSSUiSeElO+gVXj7Mr0YXhxq1rtF2kmTXIU411tRcivGQE9jMIBsBL5ApxRICoZR2vMjkbeg36K6YDelWZB2QFZznSqJzbynxPkxNM1GaC1IfSVEGljjbSmvE9imwF7Rsc2Us40UxBoWmLEChxq08Xai12rWd9p+OdGkRolNYz6IS9KUbu1yo8doJF5jTUfKtMc94XP4zi4igr3DNBQZo0UQPMaskIAU4xq0Oc0NpHitDUJ5D/QfqqbKeNpnPyA00CqzIlBaUyGDQIzAEknSWyJu800paWb1OFrcO1ZPoQSazaIBRW2N91h8rdgM2PBQ3+US6CE5Jpgs0spYNrfalqgGbYKRW+ENqKwJiZhtqYgGigDyAm4G+lF9BIYjGlJyiDQWiUA1G6UCfSbW+4/VDeaabXGgO6nlNQuv3SDj6jSdxflWqgRkDSwgrbQtkUEzZ30KSinLcTtqczUFmik2zF7LbFOwrbEKpsU6CtcQ6Xhc1FMMtGbAUioQnSIV5D9j0J5A0VVZH8yuJh5VRlDZVWB6YuimzNA0kA7Z2ns33Crh5K9nG9olr2HDh2lDLlnsVpcYyJdXjSssDJobJVW5SieWnSpwZfBgnW4uRdbVg2S2bK5TEy6NEq2x8kEeV62PMqC2PufsrbZQAknTj6qJn15CdtaN2WBm0ojJDXeVWOyfugPyCT5QvJo3ZdSTd/gpV5O7KUgn1tNtcHNWK9s0XeT9UF1lMvaD4QXNRt7MFZBQSc/gqwlAo2q7JNNKntAMrZ3edoMcpa4qWQbQLoqG+n0JZ0eFkekbV3jTAgWuRwJfFq9xZvCoitoKWXfeiB1Dyq+OWwiNlRbGyx1z9JDJk8oj5fQq7Im8rHYYOSQWVmK+3JKaTZK3iS/xCp8l7nRmzpMc0xMR7FpLFfbE9GNL53lP5FEBI3HwhZDqHlFCDOPSUnHkcs2kJSSUdFBlfd7W5dEoDzpel+x1JOwEvlQB8rJDtDJS12Cak8FJyt8plztIDwU2OgRayCptcovFWog0qJ9nDMZRYylmO0ixPVcGoK8+VFh2oPeoNfsJz9HbLGJ2qRLS0LlNz0ljPokTtTYbS4daMw6SmgQcxolQHhSm2SoNKRS0YwRFuRoNFQDbcVjDTl0vQJYMKI0lpQYjaLdBdkvTDQ5jS78qxil0qSJ1Ou07FIjx5ugkW0cv3TLZvuqhkm/KYZIfqrozdHFmJfutmT7pJstqXxbRvL0agznob36QnPQnv0pqymom56C99FQdJSBK/anrKaalfo7SZdZR3u0lnedKS3tmG/dT8IQO1MFOldGE60sWDwt+y3RjI1soT/e0b2QHnSOUCxeXXjaUlG0086KVkHlMhdi2LyeEMfMEV3uoVf7KuELNlyja0791q0degzf2TEDbeCUq3ZCehbTL90pLs6fY5EdJqE6SUJTkJ9k6UUQOMfQRRJpKeSttdRWMYNB1ob3WVl0EIH1IGwGFBpb7kK6US7aAxDIdSKHaSjXUdogdpC6CQw1wpCneoNcb8qLzdpN1tGiWXtIPCs52+nSr3jyvPbewWKv0bCagPoSzh7IuM6hRVeCtAbLTGd7JppFKvx3bTbHWE3Kw0Mdw9zSk17mgkOId7EFCafrtSYdEVpeflpp9Ba2OSY+PzOC7FymxjIZtjj7/Zea9WdCtlL3xgtcPmaPI/Rd4XU6waP1TAkbltDJCGzDQJ/mVGHnXPc+0F4qumfN8/HZnFckyTjspwcw6s7C9s6Zzf8X4ETvJc54LZfp9FR9cdMuBfnYUe27kiARPwp5bFxcjI4rLBbDkD+E53sRZ3+69q+T/kYlln+SErCorR4x+InCO4nmZmhp+H3adXlc3hm2vjcfI0vf8A8Z+nnz4ZlEYLohRLR58r577TDLTvma4gr6Pg5v34f9nn5YeOj3L8IOaOVxcmC91uh2AT7X7Lv453xPD43dpI39/1Xz30By3+E9RROc4iKSgaXv7S1zQ5tU4WvE/KYXjva+yrDe50c71VjMx5m83isLMiFwkcWCrI3f8AZMdUcfj9adKRcrgsa/LbGPi9g3dbv9yrctEkbo5Gte0g2CNEJTAxpOBmflcQe6N/+biHd/Wh4Ckw5kkpftfY6H9M8r6a6lzeneSMEjy+Du7XRuN1+i9j47Pi5HGbNiu7w72B2FwfXnTOPz0UnKcA34WS3/Nxjt1/WxpUH4d9U5PTnK/Dy2F0IsOa4X/uvTqY5MeUvtAttM9oONk9hd8CSr2QFjXU6qIIXLZPXuFyObLDjPkxZHutpe/0/pSf4rlTI/4OXQlvT/Z4+qivG
8a2wk9o6KN+ljnJVklA2VjpdJDrXswZElLYktKtfpSD0Do4Za9SDksH0sbJtJqggr3JeTak96ET5UlvYSZtoUiFpimRpbjWzSFaWqU2hbLU9ScBN7Q3IrgoOC7RmgQNLTjfhSc3ag4/ZQ5d7O0RJQypqBCUjdA72pB1qDlAGrRpbAH8aQ+LVtjPVDjv9atcWRTZpHQy09krkjRTEZsIOQLtST0xteipeacsBWZPpcUMO2rUuhCrTGGOpSe/SXD9rC9aNTC94UHvQi9RLlqnYLZpx2huO1Jx0huKNC2yMhSr0aQoD06QGBkQb2jPS/unoAK06WXoqF2sbtdoBjMKeYdJCPVJtrtUk2jUgtrZ8Id6Ux4SmhiMuli0Vi1BikbvopEoHjwig6XqYMeiLZi33apa8rRFK5ScaeQk5iAbTEhSUx8olILIGS0J79ITnbKjaxzoVsg/3UQLCmVEe6HRyI9u0VgFGlAeVtl7WaGIbi1VpyLyk4tVabhOkqhkjsJsqzwmXtVcQtwVzgimgKTLekOhFlB8qITXhRjNBaeVGr7KpXRtztbVZmv0U1I8qrz5KBV3HtNgW9IWhcTN+6uINxqnxhTrPureF3ppdzbXiKx+wrDSHkuUyl8g6K8bH7G0xPKfTSqiR1uTebLshVZlsm17vGjZNTDF21u/SUFjt7RWu8qv9aFgKtyYboBQ7dqQXeKNQa0RjUOM2EZp0imTUTDfStALReQKpR7tpykMscBtkK9gFKp4pncFdRsIUWddj8a6HInehLZTz2n9FOyPsl8l3oNn2Uuh30c7yEnrKrppajNo3IvuY0q+UkspEpU9iaZCMd7ym4mUloG0rGJnptNmhRhFBBPlEcaQz5RbBNkaQ3BHHyoMngpTMfo02vdSKhH4UiNIZQtkm7RWilBvhZaakY2FHkJyD5Ugw+oKwj8BFoKCai61IlQsFBoaiNlBlOkRzvogTmmrGjmwIdtP4jtBVYNuKfwzWlkoBMsmnS2oNKl7FGw/oFN4S9VtHm2xLDwk2YP4r/CtsQqjxjSuMQ7C8jmIbjZcQnQRktB4TC8evZTIGb6Kpyxsq2kHlVeWNFOw+wKKLN91VOPqVrmjRVQ/Tl7GH0S2FBoLAog6UgUwXswhQuiUT2UHLkcY1203j5RYaSBdS2He9qrDbTM2WxyiVIZBIVTFNZo+U0x+rXpTe0amMOkJWw4/ohB1qQKxsYhiI9p8pxkuvKrg77ojH/dBNaCLD4ukJ8iAJbCC6XyE9WCyeRJVqsyZbsIs83pKrpnW5Lp7FsHKfKA4+6K/wQlypbkWxjFkpyucaegFz8Z2rHHfpZL0jkdBHN90T46qYpaCOJaWuxqZYfFsHaTnfshRZKD7qM/i/qh8th7FZXbP0WYj/WVCVCgeRKltdg77OrwnWArSLwqTj3W0K6h21eDy182VYwvuhy7BRqQ5h6So5fYbRWZASUh8p3J8KslfRK9jD3jJa6YKR1IL5FCaSygF+iiUC9hQ7a0XWSgB+1nd6kficmSk8IJRXlBKOTSbTQWw+kH3WyaVcPo0I51rTTtDcdKId6k3fR2yzifTVJz9JVjvSsc+glfYW+g7X7TDX+lVrZPUmmP9K5mbJufa2w2gPK3F5SLkwba3VoJ+ZG/k0g/qp30ExuA6RrS0JpMDwk3TNRNpRmPpLA7KkHbQKmmcOCTaKyZI9yI121ZGRmlkyVT+Kkmlb7qBtG8vRw38S1Bz0sJFsvU7yhbCF9lCe5QLghOfZSqvZpJzkJxWy5QcdLZ7MMB2phyECtg70rJXQIyCpDaCHUit8LdHGn+EtIatMSGglJT5RygWBkOks8+UVx0Uu47TonsUyJ91oDRWypsHpKolAIXd5UaRXN2okLaCMhbb0+BQCXxGeSmRvSFIJBI03ELS7G7R4jTkxDpGG7WO8FYAtt8bQsYa7/Stg2EJ59WlIOSaBZsqPut2t0gezEbCk0qBW2lKewkFC2NgoYKmDpC/RoCXQKQl8lPypKUbUN9MFirhtZEacpOCh7o8ddgaHInJuM6VfEU3E7QVbe0Euhtp2ERvhLhyIDrahzTpBo247KF3Ub8KZOifb6ITjQo7r+ylRr6LCGcZMfwpQBLXv4IXAdZdMOwZTm4diL5iGmu0/ULrHnV2bHg/RW82NHl8NDBkSAZEodv2Kv4mSpvc/wDub1U9nn3TXVMvOcdl8NyNPnLHBsjqBOq/5XhnWHGScZzM8Dm/zHf7ld51hxPJ9Kc1HmMBa3vB7mn7+Ep+IMDea4bG5eFn8QNAl7frXk/1X2vBcxpr0zz8ybWmvR55jSENa4X3xmwV9B9CcsOW4GGSwXsHa7+q+doH9ktHYcKXo/4T8scTPkwpHUx+xv7FO/I4FlxPftCsNafZ7Aff7eFMPOvFoLnX4P8A7rbTR1rS+Nye9L6Ll2Qy8WOc97T8OZvyub/2XKc9wWPmNcciIQ5Y8TNGnfqF2F/t91F7WyNLJB3MIrtK3FyKxvaZtLaPDuY47JxHSTteCYndr6+q6Lo/qePIY3Ez3VIAOx6t+qemfhmSfGBdA752geF5/wAzwUvHhmXjEuhO7b5C+ixXi5ePxr2JTqXs9y4/ObOzsc4GQAAEH5gnSfC8f6T5+R/w2SPIcB6SV6fxuY3Nxe4ECRunC/K8zkYKx00xitV6LIOUg9Kh4Bo2il5pQugkG+Io/EQC5YHpTswa+JYUQ+yljJpZG/aV59hIfYi1pLMOwmB4VWOVrYSZsaW/ZYFicpOIuCGWaRqWnBY0agJbpLyN2nP5UKRqjzx9moUpYRpTraylFs0VkagO0U88aS0jdpksBojEfUFaYrvCqmjtKfxXaQ5VtBSXcJ9KyXYKFAbaiu+Urz9aY9eiqy27KTDtqwym6KrXaJVuN7RLXTJlywO0l3uWg7Sb4hKgj3rTX2gvOlFjqKJT0ZsZJUHFR7lFzlyRjZFxQXeVMu8oTjtMlAME4oBTDkFwTpMNtUox5UGeUZo0uMCMCYZooMfhGYlUwtEwUUCmocYtyO7wlM2ARWLZWLAyvPhSANKTG2jMjX0GONEQNjTSi4eU0W6QZW0Cqpk4SlNWkpjdpuZJTaW6AYpJ5UAbUnm7UWpV+hX2TPhD8Kbvshu0NlJVBomDorI/JUARXlYHUVu9hocYUxC7wkY3phjqS7XQxFvim3hX+HQaFzvHuul0eH8gXk8mtD8Y6KpQctnQQ3O2vPm3sp+gMypcw9zyB4VrluppVU8W4r0uPXYiwmKNBWkA0q/GarWBvoS+XezYWiMhpIZb9EKymZrSp82xamwabR1lPmS7Kri7aLnSetwKSDiTor6nBjSnZJT7HIySmGApfHBNWno2+lG0jkaasAsogatVSFI0nGEX2QmmlLvTJRxI+ENu3AfdY5ynjN7ngo30jUdDxDPSKV7Gymqp4wAUrlrx2rzcm2yzF6AyaVVnPphVlPIqbOdZKQ/Yx+ihyx6iUuGWrDJjBQ4Iu4+EF5EuhNIFFB7ptnpbRTMcYDapBmbRC6LQDkUnbooNUjy+aQSnpi2b7tIT3Kd0hvHlYCzTDtE9
kJulImgtiRbDNOlq0Nrlhd5T5kHYWI+tPsNBVuP81lPA6WuQ5Cl30Ue+kJzqQXPQaGbDF9oM7tLLQJztb4gtm2aJKcxXepIsP0TMB+pQ6B2WzD4Ur0l4DpFJ0saDQNzvIS10SiSOpLvckUjRuF21cYTvC5+B9uVzgu0vM5aCxsv8Z3pCZCSxXXScB0vFv2VyDk91W5Q9JVo8XarsoaKPF7Mo5/NGiqiX5ld5o0VTziivYwvoksECpByGD5W0/QoKD6VBxWNOlB/krkggbyhiSrW3lLSu8p0IFhmy+pNxy+lU7XbO07C/SshsFMtI5Ebv0kI3ozXWE7fQxMba5T7kvGUYHSANMkXIL3qbkvIVuzGwcpuylJPdMu2EtJ5KJdi2DPhCcEUlRPhC5FsgwJiN1ITGojQlVJiG43elEa9LxnVFEBpIaY5BWuoFH+J3MpJd9aUmv9gsQWzcp2gRuqb90SRyV7qf+6clsFvs6fjHrocc+lctxbho2ulxTbQvA506plmJjgQ5flKIAhzD0lecvY76KvMOiqWd1Eq4ztAqiyn05erxn1ojyAJXJZz0SR2ilHuVkSI2Fa7ak1+0BrtLYdtG5OQyT6UIqHepA2hS0GaJW7Wlq9pssIx+gULu9SI8mkEGjtO30CMMdryp99hKgm0QOtAmdsmx3qTkTtKvBpyZidpadsZd4WQmyh3YW4TTvKXXRo+NNQ/dZ3aWDyo7Zv0HhR0GIUj+ynoJEStA7WnFDva5IxjFqbCO7aW7kRrtpk1oJDrXAeFpz0FrlhO0N30EEElKJfZUCVElKR2yZeo921C9KIO0SQOwodblonyhh3qWwbtVY4O2bB2tjZQx50pt8qpI1Bm6Rb0gA0pd2lujWzcjtJSU+Ud7tJWU+VsIWwD3eUJxUpChEqhJimzCUzGP4aUPkJxp/hhNR0g6sqLmqbRZWnBZQYSBvbGUWMUbQ2mmBHj8Lkag0ZtGahRhFCNDUGB9NKDz2tWByFM6zSFmkmGwt3VqDDQW3JbRmwjSph2ks1xtT7tJFM5BC5bBQC5Z3pDYQz3BbDkr3qQciTMCv2lpAjN20oUh0p8s9GCj9IbiEWUhL2kyYFidtORnQSEejpNxOobV0Lo4dYdIgqtoMTtaRb0kZ10EmY5yC73UyUMupefrsJkHGtoUkrxRLnlw8WfCm916QJCTsJsNpgN69FzLDx/VPFOweVa0zCgHDyRXlcPm9DycVjZuC2T4uFOHdndsgn/8BXTXFjg6NxY4bBC6Pi+VhzWfls+mvqg7za9XFzM0ePh6DVzkWqPkHmMJ2BnzwPFFjzX9Sm+Lyji5ePlRmiD6l3X409PDA5h+TG3+FKNV/uvNcNxIew+R4X22LJ+/F5HlXLi9H0pw2Y3kONhyIzfc2in2Ae685/CXlxPA/Bkdtu2r0hooHuNL5D8hg/Tlei7E+iLluMWs8nRtZCfUQQQvMe12Nkk2re1zA9jwQ4FUGbwEJhmi0caSz2nyz9l0hb+yiWAt8Xf1RYuRUdpnaX2eIczweTwWddERk2x9eQuk4DmvgBsl1enBd91Jix8vhNjyI2uMY1QpeM5b3cTyEuPkRuLSCG/r7L6DBnXOjxftE+T/AI/R7NAHZGOyeKyxw9lsO8j3+i8t6dzeQyWP/LZEjYsYW+yaFLu+E5MchCO6hONGvdRcvhOe5Cl9bLe7Wu6lBp/b6qPddFeRph+whcsa7aHa0DtbK7OLHHNpsE1pV0L9hOsII8r0MS6DkKD9Vtp2oBYPmT0jRhqw+FoHS1doGgkaPuoEaUx5K0VPlW0aKEepbUpFELyq6ZyIObpAkbpN0gyN0V0s1iLtEpjGfRCFIzyt4+jtNfaA+y+xX21MEaSOIdJ8eFBa0x8+hHJGiqyZuyriduiq2dmin4WJtFdIh9yZkbpJuFFWR2KJFyj3bUVn3R6BChywoYOlncs0aaJ8qBK2T5UHFGjCDjtRcbWFQJRoxmN+ZMtCVb8yfxWdw2st6RsLbJRsJCN2V4Ro49KZYpnY/wAegDNHaI46WnCioE6Kz2D6NhYtNKxbozYKFukyxiHCNJlgoWvppkm0QcBSUyDVp51JDK909IFoQkNiklkjynH+Sksk7Kxi2IuWmFQkNk0pMOkjILC1pCfe1Np8obz5Uv2GiAO1MHaENlS8FMXYQw0osbkqHivKPjmytpdBJl5xpGl0OKfSud4+hSv8c+leFyirGPd1jyhPNLYKDK5QyhuxTKefqkgPV5RshxJQWCz5XoYvitim+x3HGlaYnhVcJqlZ4TlJnbYyA0rdFUmcPmKvZvlKp81mih471RtI47PFyuS0LD3fZWHIMqQqGPCC219ZhvcEVLsJEKoAJyPSCxtFHat2EkEoEIbhSK1ReNLUa0D7gBtQL9mkOR20LvqwnwhbYcvtWHHAEhUzX25XPGbIXX6Nn2dNgN7dp1z/AE6SWK6moj5PT5Xm37LY9A5pDRVXkyWDaZy5AG6KpsifdWp6RrZJ5BcixN8UlInBzlYwADZUGRvZi7DRDW1CZgPgKbTRRNOCyLZzRUzx1ZASTjTqVzPHoqtyY+3aui+hFSAcgvcpl3lBcU+VsU+iTXfVY59hDWiU+IFNhWFZflDB0t2n+AOw0DqKcDrCSiR+4gIXISZN7kO9rTnWtWgSD2T7hXlLyOtyK/TUoT6kxQY2MRFMM8aSkZR2O0gqOwdlljO9KYJ9KRxX68poO1tA56DmtgJj5Sz3VW0xN7pKc0kM3YSCSnq7wJLC5qGS3K845+gvM5U7QUPs6bEdoKwGwqrDOgrNh0vDyrTLYNu8FIZI8p93uk8gWCsxvs6iizG6KpskeVfZLfKpcsVa9XAyTIiv8ArAVF3uodyu0JQw0qDjZUGuUXupckbs1IdFJynZR3uSsp35KdCMbB7spmJxASw2UxGdUqoQA7EbTLCkYnUnWbARNBpjEXkotpdrqKmXilyD2GvSC/drbXqJO1p2yFeUCVvmkwSBaC/Y0iQLFfc2FlInbtbDVugNEGt8rbURoBtZ2kHyhcnaNNO1Pu8qB0sJS3IRou2sD1BzlC9fdB4G7DPdYS7nU5bc80gPcmzALZf8XLobXVYL7aFxPGSfKuv451gbXifkY0yvAy6b4UZBbSpRmwtuGivD9Mq+imz26K53NFG10+a30lc3yOgV6fEeyTKirkf52gONrJHbKH3L1JRKwgIWrWh4WiUWgkS7lNj9IJOlJpWeISYe1oqAdS2460iSN2Y4oPuVM+CSoexXMw20klFaNILSitOlhxpxoorHfRLvO1NjtaTZQLY0H6R4QDtINfadgPpSciOTGQURqHHsIzQoqDQxEiHwhRopKQ/YxAnmkO1KX9UMEeEaQJMeFIFQum6WMK3Ro0wqdpeNyKDaBoLZIlDd4KIfCg/wtUP6MbB2stRKwLVHYOzCdrA5QeVjSKVsSamFaVMOpQaRS1f3T1IQXuWy/SCHLT3LfE5sI92krI7RUi/XlLSnztFE9i2yLneUO9qJdsoZcbKplCmw9phjvQEnGSXJ1o9IRIKSTWrHVS200KUJT6dJdez
WaDr0nYdsVfGd0rHH+VcgoDR68qZNC1C1s+Fo5Gd3lLPeXORJT2sKWj+qFsxsaYSPKmXWEEFSBQtmG7oqYcKQbWd1JNBIK4ofctXa0Ulo4m0ojT5QWFEH0HlEkZsKxyg5Ra7yse8Xuv3WVj8kd7F5kuNpuZlNJd4+iUea9iEicXi9A9o2xwB+qZjdYSg1V+UWE/XwnytGbLCN1BGa7SUjca1RCOD4IS8k9Bpm3GkJzrWPd5QS6lA57CbNkqDjpa7rUDtbKAbIDytOHmvPkH6KQFFYQr8U+gd6KjrDjBzXT8zHnvnibbT+6+eMuN2JmEEVRor6gxiGS+rbXelwK8R/Fbg/8O5yR0bahl9UZA+219V+Lz/+DEZp2vIR6K5AcfzuM8u/hPcO6/ba+j2ScZmRxDFf2SPaHX7Er5NxJSAxwJHaQfuvVujOoP8A7eDMJ+CaDH34K78vxnkjyldo7i2l0z1TIgkheWvAv6goTPP1KZxJBPh9vc2QGqefIUoONyZXlkUTn15c0WF8jSVdIucP2iIFhZ26RHRSRnskZ2lvmxS2AD9VDe0Z9i7m/wBVzvW/GY2dxMNYzTkRzsc5/wD5Qdrp3AbSswDmPa4ad5BVPE5FYL85MuVSOVZBgYzeQkwms+FPhOBYT/OSvLOA6in4/mHs7iAHnRPtZXqHIcU3Emc9t/l3mz9lw34l9Fu4mLH5fAeZMeWrI9jVkL6vgZozNzf2T15P19HpHFZ7OQx2vYfUBsJ0rynoTmXxOa5xc5jf8wD3Xp+LkxZWM2eF4LHDx9F5PO4TxU/EKa2g1rV7UXH+yjagxzsIOyQghPQyaVYDtMwyK7GjkyxD1Jj9pZjrRWqhLoYmMh2lvuQmu+63aCgthGu2scdKLVolTZDUwUjtrTHWovG1pppeblg3YVQf4Umm1jlOaKShAZp6akHlLVTk6X0CWeG/wrRhsKjxXVVK2gfYUmaexkM3MNFV8w0VZShIzDyuxsGyvkbopGVtFWbwlJ2bKsihAksPhTcK8KB8J/sE021uisaFOlxwEgobgmHBBdXuilnME5CJRXaQH+6bKBZJrtq1wm+kKmjPqCvMEegJebpBYn2PRMR/hWowtTzI7avNutFsrZUzx0TpJyEgq7yYgAVTzx+U/FWxeSQIesUHNLViekhA1Cm212pKE7TzRoL6VCEQf4VZlH1EKzm+XSqsh1EpqMYnIfKrcp3lOTPtxpIT7WMTQofdSB1pbI0UMOolIvsEI121p+7Q+7awuU1LsJESaUS/Sx2whORSjQjXklPYe3Kra6nK348WLR2vibL7L3BFAK7gNNVPht8FWsZoLweR2yuGMd6DM9YXJeZ5U8yHVAHmytMBDlNrbU2sVHpCwjDQTmG9JeAp48lOpIqdoJPRck9wFJLKbYKahdbVCZlgqaX4scu0cjyUPrJKDC2gArjkYbJ0q1re11L6HiZNyTUiQZpbARmiwpFgpVpgEI1kmgttFWtTGmFNk0rcl/qST3He0fJd6kk93kKmBNBInkyAWuk4htAFc5hAOkBXU8WAGJXIrXQeNdl1E+mqEslNNofdQQJD5XnV7LV6Fsye7AVHPkVIbVjlHsLja5rLkJmJCxLyF0y8xJbIVxjuBb5XI4mSbF6V7hZAI2VLkwmzZbd33UmO2lGyg+FNrjam8fFjF2MvIIKSyWW0o/xD4UXeoG02K0BSKSdvY5ALlYZcVgqscO0kL0MXaJ7RMFRcVpu7WnFWxIijXcpB1oa207TdADcTtWikpeM+mkQFBSDkkSsaVElZ4S5QRt5Pal/5tqb3lDYd2nJAsKPsptdWlBpsKQIooaQLGcd5CbDzSrInU5Osdq0ql0dL0TkcaKUyNtR3uJS8t0QpqQexSI9r/Ku+PebCoPEhVxx7tjag5E9BQ+zq8JxoK2iOlR4DvCuYSO0Lwcy7LYYYpaceUwUGYaSZ9h0U+UPKpMsbKvsoeVSZgqyvT47JshTP+YoTtIk+nILivTlE5JpQ3u2td1KDnI0gdmOKA/yiEoDz7pkoxs2NFGjKXBvyjMqlVCAGWFNwu0kGnaYY6gmaCTHLK2HIAfpbDkOhiYw1ywlAa6ypk6WGkzsLA3RWmqYWbMBFtWtUiHyVrt2iTOItHmloolULQpdLmcacdIROljnaQ+6lmtnGEqBdS0520JxWqTCT3oD3CvKxzrQiQQmTILLPi3+obXacW+w3a4TjnU9dnxL/AEtXj/k462UYa0zp4D6UWtJfFNsTTV8xXTLl2iuzGiiuY5No9S6zMA7SuY5NvlX8R9iMno5mU04oRNomRqQhC917krrZC/YQHS0tA6WEmkTCMJ0sa5Dc8qAdZWzOzdjVqbSfdLxutTLqKNwdsnIVAeFF7rUQUvRoQeUQWBtAa7aMHelbM9nbByHytMcQFp3uoMOiE5SCxiM2U7GaCRgBJT8QU+ZGIcgTTQl4NBNCqtedSGySZpbcVFpJKyQ0la7C2QebUANrYO1IbcnyjkzZGkMaRHaBpDK5o0LGUdukpGUyw6Qqds5BfZQk8IjFCUWnzBzFnFbb4WO0tNKNYwSL1ppoLJDag06VUwcg/dpatCD/ACpAo/EOSRNFRe7RUXuQ3O0u8TGac5Lyv0VN50lZHed/silaFM0D52saRdXtCcdEq4w+n+Rm38D4bDvve4D+xTlItLy9CmOLcn2sVlhdMzN+bKjJ+lDX9067gJIybymf0Cxy/odMtFIGUgTigaV4/jHGw2ZjyPbwkc3jctjS4Qgj6hwQOK2a5KiN3rVrjH0BUre4TkPBDgd6VvAfQBd/alqTOxh/C0XKLnUNg/0Qnk0SNhdQ19EciTYFqLLpKB5fNVEbr6qwx8HMm/yMdzh9fCTpsH2Y0rYKZbwvKdhd+U8fWQBQPF8gB6scb/8A7WoljejdMHYI0VByYODmxD149D/1goEjq09jgR9ikuWaavSiCt+3ir+6ibaaPlZo4IzyUQO1rygsNohNC7WpGG7Bd51e1fRYuJhRQ5RhkzGuaHFkdW0/uucc8X2n6Wn+n/zs+WIsJ43t5dsBo/VNiW30bD7CZHVHwHkYvDaLt/EYCQFqbqziuQxHYuXixYU3+stA2rjmc3iWQUztOXEacQD6nKg5jg38rxv5nDYyQ+7RTShrK4vVLoqfjohFwk88Zfx8seSzz6RaRkjlx5SzIY6N4/1LjxkZfE5bgyWbHlaaLS41/wBl0cXXL8jjpIeRjjfOxv8AClAAtM/Qre5I2tMtIX39NJjvptnwuU47rSOaT4HIY3Z7d4P/AGXqfTPJcJn8S2I9rpG+L8lbXE31sPHPk9HJve2rBQHOu1aZ/VfEQZT8HKwfhTRmibv9/CzHxuO5Un/DsppnP8p1S87LxXL6HPj3roq2n06WvKfzOIy8QFzow+MfzAgf2SLfue39QkziafaJqlo2BpRPlTAq92sNVtX4o/sEGdb9x4VB+JHEHmOmhkRx92Ri68Xom10IP9vCZwi13xIZQDHK0tIP3FL0OPf66TRjXktHyp2mHILToH/ZdH0+Q+KTGkdQ/lN+D9Vn4gcL/hHOZE
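The remainder of this record is truncated here; its title names ResNet50V2. As a hypothetical sketch only — input size, class count, and the classification head are assumptions, not the notebook's actual code — a frozen ResNet50V2 feature extractor in Keras looks like this:

```python
# Hypothetical sketch of a ResNet50V2 transfer-learning classifier.
# NUM_CLASSES, input size, and the head are assumptions, not the record's code.
import tensorflow as tf

NUM_CLASSES = 3  # assumed number of rice-leaf disease classes

base = tf.keras.applications.ResNet50V2(include_top=False,
                                        weights="imagenet",
                                        input_shape=(224, 224, 3))
base.trainable = False  # freeze the pretrained convolutional backbone

model = tf.keras.Sequential([
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(NUM_CLASSES, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
```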
LWnsD/AEEj2VbxuR2yRvs0NL6Xay4/Ih/jR6D0z1Xm8PlMgynukiBqyTVf1Xs3EdSRO4zvw5LY+tj2XhrIInYYyJ2ExO/mbtF4vkn8fJcEt4L9efC+c5nCnNuoWmX4s7npn0Lx04zWCPLZE5pFhwbs/uhZvDyQtdJB6mA+PouY6C56KcjAzHgd24pL3fsF6MyV2JTMsB8Z/wDEA8D9PdfLZfKMvhkRfErJO0cU/wAlrhRCXk9+3a7TlOKiy2fExS2/NhclmQPglLJG0fdZ4aYipaKvJY1zXNcLYfb6rWHgwcrwedwmUO4PHdB/5XHz/siTjSUjlfj5TJYzTmlWcbLWN7TFpa9niH5fI4TlZ8b5SXEVVWuu6M5J+FKIJj/AeRf2K6H8UuHhzIoucwmAA18VoHym6XM48IplbEgLhX19l9LXInkYlTXsW130ek5PH5MEEc0sZ+HNuMj3SZBY7teC0/Qqt4zrfIhOJDktD5MQtAaa8BdRlSDm8duYxoD3/TwvHy4v0vv7GePRVeUzAKQY2G/XqkywJ2KdICRiM6Uw5ABIW7pPWtBjLXrYf5S3dpY1+0DkIeYTSy0Bj9eVhf8AdS3JqZNyE61ov2hvepMkHbDxuUyUox6Ox1qFyFswi0vINlMlBkatk5mY52Fb4rtKmi05WWK5Lyo2WPP2EpKE4NtS8w0UifYVCDxtBlZbUxIEMiwqZYgrntqwg0npWbKWLdqiaM0DAWwp1paARbMIP8JZ2k28aSkvlHBjAyFLuciSEpZ5KolC2zbD610PHbaFzUZ/iD9V0fFHQS+Qvibj9l1jtsqziZTfCQxfmVqweheJlfZ6GN9CmU30nSppY9lX2QNKonb6im4aBsQdEHLEwB5pYqVTENCUBTbH6SERTDXivK+t0SoLO+mFUuS/ZTuRL7KrmdZKNA0xZ5olJyuu01IfKUc0klC2AQHvaC/SKRSG/wAFJbM0BLlHvWj7rXsUtoH0S79KDnKH1UCdLZRzZMH1K+4xlsCooBcgXTcaz7Ls7SkZjWy4xGp8DRS2M2mpkeF89le2WSuiJKXfso7/AAhhpJQyY0YwIrWrcTEw1mljsKUKvbpLsdUicnFNKqy+pf3RQtoCumdFhusBMuboqu4990rQCwVFk6Y6O0VGdGPoqWZvbIuizmaKocoeq/ovW4GT6FZFoyPwiAIMbtKffRXrJditoyTSSyZKYdpiV+iqzJd5VEIFsWlPm0nI4WUeU6KVOzSoQDHuN+ddRh6AAXOcbGe8UukhaQFHnrbHYkNudrygOkoG1F7q90tkPpp2or2UN6K/k5SboqoIs2nc4kmkoy/dbjkVTAO9LrCcxcktHlBkaLpKvJYSqpxJoX5aOlxcgOATzXX4K5jj8kjRV5BLY8qHNx9MfFj3dpaD0Lu0hPk7fCl/W0xntDEha5pVTlNpxKa+OfdAnIewq7AmIyIR7ipWhyCluIWdr016JWiSkxtlT7FONtFbsHRJopTAWFq14S6Y2UYRtaPhacVHu0shHMDO7aiw6WSfNtaaqUuhew7CaWwVGLaIWoKk40DRTcT7CSdYCNAbCnsBDRQX78KYPpUHqahi9CM2nkp/jn7CSyAj8eaKkyraNl9nXYDtBXeOfSFzvHv0Fe4x0F4PInssxsdKHL4KmPCjINFSL2OZV5Q0VRZ3ur/L8FUWf7r0eOybIUGR5S9/VHy9FKE35XsQuiRknFDcdeVKhSg4CimJHELUDtTA0Vqk2ZBbIhqKzQUAjMFpqQJJoRmIQUmmk6Tg3dSzvQrWA2uaDTGGv2i9xpKNP1R2GwltBpjDHaUw5AadLYftZo0KSsB0od3uttNrUcE9igTeETYGkORczBdyC4+UV36oDjorZXRwN7qQnuUne6EURjIk7KyxW1Fy0PdGgRnCdUi7Dh32B9lxWO6nhddwz9BQfkI8oG4n2djhn00nmeFV4LtKzb4XyGVaZ6UdoXyhbSud5JnldNP8qoeRbdp3GrTF5F0cbmsqQuSg3ZVlyTaJVXdWvosPykha7JBwUw6wgOPlY11BM8TDJT5QWuW5HeUBp2mxJmxlj6Uy5LA+pTLk5z0ZsKCSpDwgNeAfKKDq0p4zdkmojHaKBe1jXeVsxo4K4+VGLztDL9o8A1aNSCN47QE5G2yl4U3GNpOaAkMwCgmmjSDCPSEcA1pQvGMkxBmKITV/VAeSSUn9fYRjSis3+qA07RmaGk+YOQeWFzGsJIIcL0bQSNaR2OPbSERta8ezQYFIzChjfkKbfuhWPRoyw6UJFpjlkhTFJgB5UGnypvragEaRgN5Q+6giSe6XcVTC6OCNPlS70EGgsDvKZo4K5yA92lpz9oUjgLvwu8QWwsUUuQ8MhY57v/KLVuOlclrBJnZeJjMPjumbf9CluA5o8V8YBjh8TXxGD1N/RVXP8Fh9SOc5nLZbH+f+pc0AJ+FY9/IxKaLV2FwONIDNzT3SNPhjAR/uneV5fieUxi2XmZY5oxTXiIer7eVxEP4NctNH8TC5PHfEToiS7/sqh/4a85+ZdjsnbJKDtrXE3/ZUvHhf2VRgaW0jocnnDhxPjhkbMR4kLtrnxyXKZTi+ORlE0AZAEGf8Pufw5xDJojz5Vlx/RHJiVvfkwtcDYLnJPhixPaYDTXQtLJy+G6pyWFwBAvyrHA57NgP8XMDG/QuBSHUPTHVckoaxz8lugHRWQB/RUI6H5ycn83OIW3v4hI/4VuL9fjumJryO7/8ArPBhaW5vw53fW6RYfxG4bFbTMUPd9bOlzXB/hTNln4mdmtbjgi5A4V/VN8j0twmBIYMJz5HNPqfNVH9Fl/o1pGxFNbR0UP4j4mQC1vGh1+DtL/4zyubKTicRjPiJ/nmLf+FU40XE4bAbksf6QCE9Hz+OxpbG6mqZuE96N017On4d3J/HhfLxnH4xa4G25XcpdZ9TdRcPivymyRfkzrtY9p3/AEXG5GdhZJI/xDLik9u0ikgcDkeSjdBLy7JMcn5XybIQ+Kp+X0aq/oXn605/kgQz8yQ40AyPu/4S0UvUskvc1+W0D2dFX/C63kudm6dwsduBDigtHb8SMm/3XP5XW/JzNL5cn9fVtOWv/FHeX+y24zkOpGNIdISR/qof8LqIuW5dkLWZvwJG+S34g2vKJOushjyHPc/62gu6xMz9Ahx9kF8Wq+gW2epZ3OYcEZfLjzRuA8RRl4/qo8XyuHyUIdiue03RErezf2tcHj9WyxxfxpCW/wCkqwxur8LJxhC/AgZIDTZwD3N+4KFcNOfRipnflpYe1wIPna0XivCTwOcxcXhRkTytzXN0Q828D9Eg/wDEGCR/bHxuGI604g2pnxkmGmXoxMp+M7IbG4xg1YCHh5s2MXmGQs7wWlw+n0UuG/EgRYj8R8GIIH6IFpuDP6fyyXP7md3+gCrQVHh2M8droqHm3FxNuO/1P1U8TOysGT4uJIWv96/7Kx5DH4/sa7And2/zfGoH9kEYmHI4NObFDY/mcAk3j/Yti9vYaflsLlIvhc5gMnJI/i2Qf7Kny+hOG5DufxHJPx3+RG5gA/qSrSThiW1Dn
4rwfAEl2q/I4jLg9csDnNr0losJcO46lhttfRxHUHTWZwMv/wDIkOgd8kkBEncfvXhSwMfPx3RPgEjHHYJFCl2D5JpI/hTskMf+kjQXb9N4EHUHByMlx4o3wkdryPurpyXa1rsbhpNniXU/ITZ8fxZ9ZcYon/Uf/lLlOP5meHkIy2V8Tw6yQvduv+j8B+XJDhnsmA8D6ryXM6cyMDIf/iWBO7FJ3Mxllv3VXGuXubXYysjXWx/N625QZjJRmPJaABoAf7Lo+J69wuTjEPKtMeQNfGA8rnuM4PiOQhfDHnQSGvSe8dw+xVF1JwMvBPBfLBJC7wb2FyWLK3ja7J7ml2mevRPZNG10D2vaRY7TazuJ9l49wfPTYLv+lyCGA2Wg6K9G4LqLH5NobK9scp9gfKlycRx6E7/svAQDpTYTYd7+yCKvzY+vsiN969vceEj+LCRxv4x8UMriYOSjr4jP4b/uCSvEcZ4bI5vtel9HdU4P+I8DkwAmw0uA+9L5x5GJ2LkuY4U5ji0r6H8bk88fiyTOtvZ61+FPJQyCTj82NssMgrtd/wAK56k/DXGyMc5XTmQIpm25+M/0tJv2JK8p6V5M4PJY84cWguAX0fwZ4/kMVsmVM6FzgC14oCvdRc9ZOPayY/TH8dLJPizx7icrM414blMfFPA4EX9l6twX4hwZ8DcV8gbOGgOjedO+4Kn+IHA4XJcS2bBe2R0Lae6PbiPqV4u7GbhSkuY7t9pK9bf0Xn3x8PO/l0x+6wdT2j3aHmZcJ5lxnkscbdEf/lqwm5LF5jHDoiGy+4K8a4bqjJIEBY7LNUHxi3190bkM/MxnvyI/jQyA/I7VqH/05xXhQVZ/JbR6HksLCQfIKq567jaT6W5yXmcR35uMxzM8kirT84snwpKwvBfjQtvyWwDyZsSbFkIMEo2CPf2SXSeLhjBz+L5KMibHJfjz15aAT5/VN0L2dD2RpHtmxxjyQtDC4Eub8xH0VWHM5lx9GY70+zyzqKT8tyxlhdoHZ+q778O+ei/Mswpnfw5Ro38pVP8AiTwMWNDDm4TKgLQHAiqO7XP8JiTNY2aIua6OnD+q9a4jPgW/YyPhb39nsfI4P5eYlosHylg2v0VnwmXHznF407q7iP4g/TSTM8U3I5GMxnY6I1X1Xm4L18K9oZUr2gRGkNxoJpzCCdfulJRR8qt9AejRctB1KKiXUhZmwwkr3WfE+6WL1ESJNI7Y0XoXfaCX/dYHiypch2xgOpFjeky/0laZJR8qVwbsse9aLrQGyAhbD0rxC2Fb5T2MfCQjNp3HKXkXR0vss49hRlGluE6W3+Co/TG/RXyDyh0jzIA8qmWJ+wMrfKUeKKfkGilZWpsM3QArSkfK0U0Ai75SkpjSdPghJTjymwYxOQ+UlI4puX3SMp2rIQiiUJ9Y/VdLxR8LmIPnXScX4CTyV0bD7OixdlW8fyhU+Gdq3j+ULwc3svh9AsnQVXMQQ6lZ5nyFc7k5Hw5CCU3jzsXkvQRpolYgNlDhYKxU+Ir9iEYzpEvSBGdKfdpfYaJ0AyHG/KRebTeQCfCSOjRXNgsG4eUItRneVECz4SHR2hZ7UB7dFPPYgPb5SnR2iteNlQB0UedlE0gnwi+hbRAofsigiio6WozQzgM7n39F1HHs14VFxcYBXTYfaApuVT0UYkWUDaajEIEb6Cm6ReE12VEXe60zzpaLxSi19HSLXRg7EPoj6pJxyInxdJTnZuweWaYVSSuqVPZs/pO1RzTfxPKswY3oRTOn4t11tXsfhcxw8lgLpoTbVByZ1RRh9C+cPSuezWAXS6bMHoXNZ5ruVHBr5GZEJMcpE/dKfForZmX0UdkuicjvKRn3aO+UEJWR42qIAYs8EoYjJeB90XuBP0TeDD8R9ptVpGyh/jYKFlXMYoIOLjhrUw4UFBke2VxOkAlomkpkj0pxzfUlMogEhK1s6imyXepAGgUzKAXITgmxAli73ULSz92jZHugNVuOeiemzIz2uFK4xZTQ2qprd2mIXlp0lZYTDi9F2ySx5UHuv3SjJPT5WGRQPF2UzZuV9e6D8b7rJHAtKSfYJpNxddGN7Gy4PRmR+9pHFdblZMI7VT5CvELGLapsaoxkBqNG4ErvI1SYWW1Bc0hPtaCEKaMBKdG6EH6QbTcjRdJd7NWjigKQu427ak0BDNhymxWz6EMM3RRmOtAaptNHSykEicg0VuLQW9kLcY2pciB0GadKD/KlVBDf5UlhL0BmHlSw9FZILYVmKaNFS36O9HRce7QV/iO0FzeAfC6DDPpC8XkoqxMs2mwsf4UYzoKR8UoPspK/KGiqLOHlX+SNKkzW/MreO+xGQ5nMHqKRVnmtvuVZ7le7h7RI0SvSiNja17KTVSpRhHwolFI+ygU2dAM01TB+iiNhbA3pFoEI1bWNWFHKN2a2pArQ8LAPKPQSJggosTgle7ZUmO2g0EmOMNlENJaN20dptDo1Mmz7qTTsqIWNO9LNGhL2hPUwdFDfsaWHC7il3eaRpCUvIdo5MZBw+6jWlLyonSLRwNwWuy1MqTQi0YQY2nArpOHd8u1z4pXHEvqv1SeTHlDNh6Z2+C7QVqx1tVDx77ApXET/AEr43lY/Gj0sbCS/KVT53gq1e70lVWdsFLwrTNs5XkWglypCPUVfciKLlz8p7Xna+i4z6IL9mOGlEnS132Chdyp0CZJ7oYoFSvawNBKZALIh3qWyVF2nLLtVKegdkgisdYQa0pCwFmjUG7lr2Kg035U/IWaNImyQrGBvoASMQuQBWsLfGkak1BYQnY26QImaTcbaCTlSNDxCgjjwhxjSMBQUDQaAye6Xd7okrrcUE+EpLbNMHlGZ4SwO0eNUKTkMeyiLK23wsJoGkak0gdFbB+qj7WtDYtb4HbCh1LbnWEIFbLrCzx0ds04qAdvwsco+As0aakSzjRRpCl3HadJhjnUEPu2tOO1D6psmEnv+6C6TRr3WnlBe6hpHoCg7HdwOypwM+JMxp8PPaUrE7RTOO5zKew+ppsfqh1pgw9UeyNDcSPj4oGHv+GKa3Q8eVBuPj4skOblSMiyGk936Wufzeqcd3A4/wH3yXb2XXygBcvjtzeSyHVI93u5xJoLavxfR6n75cak6nq2ePI5t8uPL3RuB206VS1wohzhv+qSy+oOF4sBmTN+bkaN9ttVNN+IzwXDjcJrGfV9H/dDWOrJ/HfbZ1TYc12oWZPaR/I6gq7kuL5J0Z/6WeQXrwVyj+suXyXOPxmMYB4DQKSzOu+bxpmmGUOiaQXOLQbFoow3oBzP9npEjpeL6Uhg5FgbM9o7GDWvewvC+d5XIh5CcA2zuJAduh9F2HUnWknO5bHB4j7W19lzsroMt3a4tc8mrpHglxbdjfNa0jl38hPPMGPlq/Zukw2RzW9rCfuSnRxzYsyUOiJlA9FAlExun+Ty85sOPjvdI/wCUH0/7q6rhdeiVq2+hTHvy57df6tq2w8tkZBtz6/0ldbgfhXnfBLs3OjxpavsLO7+4Ks4egeJ4xrZub5Fj4/5g301/QqS82JvxT2xqw21tnB8pyIlw
jCWPH0J2vO+Vy3MkMb5N72F7hDzf4ecQ/Ki+FJyU59LGB7m/X3peb9VcdJyGeZOK4r/DsSQ6L5hIr+GlD+a0LrHo4Rry5pd8S9/VXHT2A7kJpGsdZYL8q4h/D2ecfFObG4fTQ/5WYnAHi81rsaU/HHgB12r8mbG01L7BUN+xXIxfhylsji0t+pTGK1lAdwFIvKcrjTQyMzY+zLjF2ocUxjOGk5PkHfDgcQyMEeb91M8dVJmtFrHlNbA+MSEBwolc7kZs2NI9rCC32WZnOYjIWtxgZXnbjsKpyuXimabgN/W1uHjUn2gHTRYRTZrnl9ube9JqLk+QicKlkA+xK5j/ABCXuBiDmj7lWEPJTRRB00YcD4KffH/0c8mjpYeps1hHfMTXs7aZHPcxNvGx/jA+O5trnsXnyx4+HA1z/uAV0WHy3UWSwfk8KHtrW2BS1xpl78UCr2Eh6m5vjZHNy8eQDVEDX+66jh/xSyoG/DdG4vAo/G9Tf2C4Hleo+dYXY+djs7r8Bgd/slIH8zms/gYQI9yWgf7rf8PG+2kjpq/pnscX4pOeR34uK8A2QIgsHX7uQy4Mbj5G4vxHba3Xg34C8KyW8i2ZzJIHNd7gFNdOsdh8vi5eTI+L4bjY7SVq4kSm9hRkpV2e4fiHzU8HLxSDlceAyNJqTuseFTcd17JxwcJMnG5FjtOjkaXA/wBVxfXeRBz3M/GZnFrWNpo7CqCPipQLZmN+3pSlxcbntnZLp1uT1N3WXCzuPwuAdG87P5drGEpXP5vpPMc3/E8Dlw1vkHIauAg4rJ1Wc1rv/QnndN8jNHbc1jv/AC9gSv8AHwzXl5a/+Qlltr0dXzXKdBt4Y/4JBkx5pNfxZGuH9guQhkBk78PKa1302jN/D7m82NrYmte95prRQJKouW6Q5rh8p0GXhzxSt804uB/oqcOLA/V7O+bXo9H6f6zZx00WPzoJxXaMoIFL0DC5LpvkP/8AHc3AHnw17if+F81HF5Et7C15aPYg2EIty4gbbK0tPzCx/dZk4GLJ6YE1U+z6pmwciJpkAbkQEbfHoV+68E/FDhjg8w6eNhEU3qDvazaquG6z5zhJGuxsl5YPLZD3/wC5V31D11N1bxbMbkIWCeM2HABt6pdxeFXGrafRuXIqRxGJIWuAJ2Dr7L3P8OOTbn8P+Xe4Oki0L+i8FdcUy7z8OeV/Jcq1jn0yTVJv5DD+3E0heGvFnuOHlOxnDsa0sPpe2vIVL1p0fJmMkzuEAljc3ukjHlqdLh3as/Qq44HkZsCcSRsdJG7T2dpIK+Nw5Lx5Oj1IpNeLPB/yk2HkB8Er4JWn2JH9V6f0T1rDUWD1Hg4jwaDZ3xA2fHkrqvxA6Eh5DGPKcbCIZHi3x35/7Lydsf5CV+FyEZ+E40CfLXfW1613OefGvYDxvH8keidG4mJn9T9RxwOYCGPfEwaFWPAQ8hpYXNI9TdFef8flcj0X1Dj8q3vnwTTbB8su9n9l3z8+LlG/ncdoEc3qq/B+ig5vH8NWu0K/YmKX5Ksen8H/ABHlcfGo9rnC9+yqn/MaOk1gZc2LMJcd3a8CrU2OfkmCn2d51xx3G8txuTxMDWMdBGWd4H8w0vAeBGRh5Obh5m5I3EN+4B0vUIOWnijyGA38ckucfNlc3z/CvbEeVw2l8zdyt+oXq4svuX6Y+mq9CvRHJzRTS4zXENJsD6bXX4+PJNlPyxqW7JA9l5LwHKR43PM77DSa/Qr3fjHMbiY04ALJaa79yvN/JquPaqV7C47V7QpI3ujDhoHyqubbiug5KBmNI5vhj9gqhkYe9ydx837Y2bknT0Lu17oLrTfwwW78oL407/QpirigudRRpmkWk5btKyPQIXv/AKLA9LXXusafupX2ZsaL9IZefZD7vus7kOjdjsMv1TIcO3SqWvop6GTuG0u4CTGo30n8V/hVV7tO4bthT5J6Cku4DpFf4QMc6CYd4Xn17GpiU/ulgdpuYaKT90+PQtk/LUCRvlGaVCQaP0RydsSk1aCSjTJZztlUQtgNmOP3Ssu0V7kvI4fVPldgtismvKSk8lOSG0lL5VWMSzcHzLoONdoLnYTTwug472S+Qujo9nS4OyrdnyqnwVaMdpeFlXZdD6BZjvSaXK8o6nFdLnOphXJ8m63G1Tw12T8hgsfILfJWJIOo6WL0XjTJU2Nwu0ik6ScJNJkH0r6FjERkPpKQk25OSHRpKnZKBvoxkCFttUtkKJ0p2wkScARaWePKP3eyE8eUp+zdCGQPKSd7hPTpRwTExdICQVoA9wRPYrcTbcESejNFrxgpdBiAEKmwGUFd4woBQcm/ooxodaBSi4rAaCFK4AaXmpbY1shI+rWNkSz32SFpr9p3h0Bss2OH1WpJKHlKskQsiam+UtRtmOugGbMKO1SzzDv8o2bPYKpppfX5XqYMXQls7TgZe4BdjiG41wHTcl0u8wj6F4/OnVMqwsJlD0H9Fy/KCu5dVkC2FctyrT3OSuI9UMtdHPE+o2pWKUJbbIUJ0lXtfT4k2iVom86NFKvdulMvBCgGkkqmehegYDi8X4V/w8JKqooSXBdXxEAawWhyVpDMc7Y7DHTPC08JzspqWkFFRt7LEtIA5oAJKps2Qd5VvmSBsK5vKfbztFKE2wZ2bUHEUVprx7lBnkAaSFRjSEtiuQ+nUENjvqhud3PJUmhULoTSHIR3BHEZrSTieW6CfhkttFBSMS0CLi1aD7KM9oNpYg9ynoZNBCbQpBQKM1trUjdKby0xqA447Xp9hSDPS5NxSD3ReZuhmyBa3FIe9RbsKcbfUj8ujkiygdbVCZ29qMbw1qDK+3FK32a0QkcC5RNOaUF7vUpRutpT4FULSD1FaaVkp9RUGuFr0cfaJ2HaisKDGdI7SKW0jUEYURvlQYBSI1u1HlOC1YUHN0igelYRpS0ujkKkKEQqRMPFIA061FYLLjBKvsJ3pXOYLlfYTtLy+QijEy6i+VTKDAdBFPheY12WL0KziwVS5o0VdzDyqbOHlVYH2KyHOZgslVUgolW2YKcVVSeTa+g4vaJGCJpbBQ3mljDaulbAYYFROypN8KPujUAs2BpSGloeFG9piWgQgNLYKHa2KRaOJ92lndoqJpRulmgkacVKMoZKxrgCs0EOMRmlKtePZHjcKQ0gkHB+qy6OlAOBWiaS2aELtKLna0oXawbXGbBSIDxaZlbpBePSVss4DS04La3Wk1HAq+qI35VEjypMIpakYZSd46SnAJNTxnEPW1O0YjteOl9IAKu4H21cpxk3jav4JPG18z+Qwaey3FQ853lI5R0UfvvylMl2ivLxrTG0+ig5Jtglcxk6kXVZ+2lczntp9r3OKyO12L6DVCrWxsKJsL0NCzLoFbjePdQPgrG+UUIwm4XtRZ4RWt0o9u1SvRmjAphtgrGghEaELNSBNHlSCm5natMaPdYkaGxW2+1bQtPmkjhx+6tYWo36ClB4WaRwxZE3QRSNKS7C0SjApSe4BpUG6CHkPHYpKekaAc6yUMnytFygXWUGFbZxNlkpiIIMZR2eVapOQdo0oPFAorB6VFw
BTZk0Xs1S1eqRO3aHWii8TDAtha9loGkDk5EkNxWy9DcbSmgtkXlAPuiP14Q0yUcDI2onQK24oT3FMkEDKaBSzn2aRpUA+dhPSAYeP+yZjefANJWOz7EfZHYA3ya/VKyegUORi/8AuunwnTR9D8lLAAx9tHf7+SuYgcNeof1V/wAPyU8GK/EEccsLyCWyCwaNqbb2NxdHjfISSwPdHHC+ST/VRSLMHm80hsWPPRNV2Fe+TDDe+/8ACcBp/wBXYbCyMyH0QMLaN1Erp5PitJdhuds8ab0/yfG41Zkbx3Cy0AnSCMfNmBZFjSV9S0r3uRnISlsb8OGV1bdK0+D48JTIGDhRudyE+I14/wDDx3bH9ViytvyYXikeT8J0Xl5pDp2yNv8A1Nof1pdhi9HYOJFeQ+IED2fZv9Exn9TwFpiiyJBG0+miFSZvPyTyCLDwIp5naB7bKly3kyVpM7z8V0WnFycdxc5lniMjmG+4t8hE6j6kmzXSZnHHFicG+j+IAR+y4rH4vlMjMdNyM+RDCDboWnVfoun4nhYBO2fH48ZLCaAlZbf1KN4U2m3sBZKbOPj53rDlMh7cWaUNuu6vT/VWmF0PyHKSGTqDIkkk89rT3D+y7PN57huGmdjyPjjlaO74UNdoP0oqpZ+IrpJDj8LxeRNlONNcWWP7Fdmef1hlT/v7Ksf6p/k22axvwzwoAZIe6IVduFD+64nrLEg4yYwx5/5p/syOnV/RemYfT/VnUsodz/JDisKrMUDy1zh9KKebxPRnSRM02K3Om8D4zQ6Qu+y3B+zHW81+TZtyn6Wj59g6Y6hzmSzQY88MXb3d0rSwEfuE70PxmXgdRf8AXOBDW35tezcjF1L+IMwi4uI8LxLB2gyAsLh9vIXDc10lyXS2bMS+TMpoBkce6v0K9WeXrqtCp4+zy/qkRnnMhzmBwLgaUeR5SbMwMbEgjEUMIqr87P1THUORGO74kZEl/MBtcq+WQk7NX7r1MDdzsmyw5od/Lv38Zza+lrf5VpZ6QP19kkGl/l7laYjWx4h7nFxJ8JtvSEVoA3ipXsDviNDf1Vgzh2mFgbkPf9QW0Atwl4Atlt9k0HS/DJpjRfup6y0+gXZLH4jHjFuaHH6q3x4caKPdH7WqB2U4+Hucfo1Q78mQ+nuap6x3ftmeT+jpIeTbxTjLjQRd59y60rndU8pnf5mT8GL2DACquDCeXh0zy4+aKcxcfj3ZHwcl4hsacNLlEz/sNTb9CsXNyY0jn/Fkmv2czyjv5/NliPZE1rXb37K74/guNZJ3SuyMjfpZDTrTOUziJmPhwuM5b8x4oxtoIXmjepkZ+h62zkXT5udM3uZF3u0P4lLr+nvw95rmYHOw87EjyG/+G+drf91TxdK8o97Xw4WQ1wNjvbtW03FdUtljlwceSGQN7XFjSLQZs6a1FJP/AGasSX02WLfwo/EAT/w34rh7FuQ3/sj/AP7c/iNiyU4wmvdswP8AwlsbmevMEtbPkZccQO9uFBem8F1PycvFCaXOmeGitv2D+6hrk8idKvF/9FOPFFdNNHF4PSvXmOPixZ8WPkROBt0jQb+wIXS8Fh/iBzGRLDmcv3GvV3Bn/Zc5k9STZXPnGmzJZJ77gbFLr2fiBh8ez4OUW4+RGKJbp0gWVeTXUodWKJ9M4vq7iOQ6emL+RfG9rj/mRkO396VFHzcuZgy4P8J7H+7qAXrM3UvR3OYcmBkiR75R3W8D0k+V5j1F0NNjNOZwBOViu32+XN/osxZZVeN9MVWLy7Xo43kuCfGx0gh9Hu+MdwVfx3DR5kxjiyGiYGwH00K0k5PMwycfIlnhA0Yn6BVdnP72lzIYvV/MzZC9fC71psmuJXpFfzeC/DyHMkc1zm+7HWFri8kxTRvYdscrbhMHGzA+DMlkaa9LjXlUc0P5DPkgfstNX9VVL3Otk7k+heleSZk8fi5ZYJ+xoDoyfK7vH6wlbF8LDwIcQe5Y+/8AdfM/Bc1yGFjXgSX8Mi43eCF3XTvX2Flysh5RpxZXekv8NXzfM/H5JbyYlspxZl/Fnr2VyHI8jgF+PlO/OR7c2h6gua5Pjxz2OXTtDclvk1tdNxvESzwx5nD5cWSyrAY6yj5OA6QnIjYIMxnzx1Xd914GX92JeT6Z6EdrTPMQYsL4nGcwx35OUdjJC2w0lVPTGfHxudkcZLITC5/dC4+DZ1/Zeo81gY3LcVPA+KPveCHD+Zp+oXjvK8RkYGQcaYVJGe6GX/UPp+wXpcLPHJxuK9kWXC09o7XIpslA2D4W2GqsKo6c5B3LYzIdNyWEMIK7iHpcwzBudktjB2NpeTEsNNULlN9FLBfeG172ugxqEfYa7SNg+627puSFxfjSiQDfn2Qg5zX04AOBql2FzW9MrxY3K2zy38S+lpOLzmcpgM/6Rx7nAfylepfhHyI53pp+PKwfFiqnfoEbkMVvJcZPhSEOErfDvF+y5P8ACLNPSnUubwnLXE6Yk47z4caAA/uqOXH+XxnLXcnQv13v6Ov6tygMZsF/xIyq3Fc6XEa8jfukercqZ3VAwpInMlkf2tBFA+dhV+Ry8nFZ0MDx3Qk0+/Yrz+HgrHKmjsmROi/fX7oEp1SI54c0PjPcxwsOQSO5pNq19MB+xWXaTkG07MPolJW6QWtowUeoB+1OS6+6VL6O1OpAbGe5a7glw+1Nq7xB2GBR4pKSoOlsOQudm7LSN9jym8V4DlUwSJ+A0Qp8kjJo6PFd6QnWmwqfDk15VnE+2ry8k6Y9MHN4KTPkpyb3ST/JRQDRjVp/g/RaCkfCMFCGR7pF7tlPZXuqyV1EqzEtoGiL30lXvs+Vkr0q56qiBTYRzkrL9UQutClOk+VoWzUO3hdBxv8AKueg+YLoON9krkfxNk6bB8KyYdKtwfkT7TpeDkXZZD6FuQd6CuS5J1uO10/IOpjlyXIOt6t4ckvIYsPssWmD9Vi9LomTDwphvhLQo4K9imUI24A6KEYxukT32sJACS2c0ALUJ4pHcfKC/aBnIC40FpxsLHFDJvSBoLYvM27Sjh5TkpoFKv8AdYmBQBwoI+Gy3eEFwKcwAfoip6QMvZcYcfhW0LSAksNpNKyaKC8rNW2Uz6IvNBKyvsFGlDiEq9p2ghHNgnna2z7rO1aDTSaBsmX0ksrIGwmZGmtqsymGymY5WwaZXZkt2q17rdafyWXaryw2vTxJaE7Op6YfsfqvQsA2xecdNel42vRONPpC8H8kvkyzC9j8jbaVznKtALl0zge0rn+XYfUoeLWqH36OM5E9shIVe59+6s+Uh8qocwjwvr+OtxsjoPDsgKxhhLvZIYUbi/av8RiOno2FsjjY3rGl0+FGGtbpVmJHb9q6gYdbUl1sfC0FfQakZjtOzGm6SMh82kqhjZVcpIBGQSubnf6jtW/MyVdFc3NLsqqO0TZKC/ES88miLQJJqS/xbu0/HL2IdBmn7orDSVY4XpHaQqGYmMt1v2TEbvokhfsjRlBS6MLFpDmrBHaFG7SagIOlPa6OTINZRUjGj9uyscNKCvY+WV746OluqCZMfchPaQsTGIJjm9
JtmvKRxrDk4X6TE+jUTkeAhOcCFCQ6tLiTdLZXZjJvItajfoocjtaQWSVe1XEiaZKV2yhMJWpHWVjFdHoRQxFJflMxutJN8pmI6Ws5MajcmGuSbHaRGv2psk7NZYMNhT7QQloX2No4PlS1JyIPFJaX5vCb9kGRu1LkgwYwj4tX2E4KhxgdK4w3UvJ5MNDsReY50j2KSmO/QRwV5NLssTIy+CqjNHlWsjtUq3M907D0xdnN5w2VSz+SrzPGyqWVpJK+h4j6JaFTvysGkQtCwBenCFMxrtbWE/RZSj4TUjGEB0onytB33Wi5FoBmyQsDtobiol212jBkUQouUGP15UrsIQ0QcdIZKk8+yC4rAhmN+k1G7Sr4ym4D9UNIJMbHhbabG1BrhRWNOylmk1JpAULUSVhgR5tBPgrZdooYNjaz7NBEepSWr2VgPlOk4yloCit2odyajAjgAPK1Gachl1+VjXAOW6BL3j3kEbXR4kttC5XAdYCv8J3heRzsaaY/Ey3J1pAn+UojHW1RkFtK+e1plH0Umb4KoM9t39l0OaKJVDm+SvU4zEWVLSQdrTzdrUhpxpZ5C9eV0JI+ywGliifCxdM4ZjPcy7UmC/CTjk7fJTcD09Po5MM1iLGxaGxpMQttiBsOQT4yWn6oYjJdSfa32W44PVYXJhaJ4raaArGFiVhYQVZQt9IW5K6CS0EYKWys2FF7tKBsxmOdpJZElikWV9NKrpZbJU1dgbCGQeLUe7aVMm9IkbrKfgg1MfgO04ykhAdpthVyQSGmnWlEjflRY7Sy9pqRxr+ZDNBTvyhEo9HGEhDcaW7UHbtBUg7IuOlG1hOlH3SnISRtxQnaGlNyGfBtckamBcfqUMkEGvKlIfpf7BWfGdPZ/IRiQtbBjk7e8gEfsU6YM02UMrwAmuP4bkM6nY+K97D/ADjwP6rpeU/wHpJjQ+QchnEWCdBv7eCuZ5TrTOzYjUwxoR4ZG2/9gmtaNUJey7Z05Bijv5XkYWN/0tBBCnDL01BJ6HSTkefWT/wvO35cuQ4kB0jj7ud2rbJpQafIIq+gDkjJLa6RvxR6vFN0tK25/iQA6vv/APZDzf8A6fwnxOwuVjkbJdA2SF5hLNCYyJpybHntpCxsnpwxPZk40s8400te/wAn9EGPj/sfYDzL0kepszcGSYRY8oy5P/69Bn3daBzfW3H9PwFkAZLm1tw20ftS5niOHn/Kl+bks47Dcw9hY4Pf/S7VLkdK4GA6TP5DMObG490IILSR9fKfjnHFaoZ5anZmX1lzfMyuOM+QsPvHbaW4uKzsipuRyuxh24u8hCfz8cbWxYUMcY8DxZVPy/JTudU0m3ewPj+iY15PpCvPZfTZvC8We1gflPr5g73/AHVfJ1kcfIb+RhjgkB9LnNH91zsePm8nKY4g1rQLLnkClrL4uDjYjJkyCSS6ADvKbGKF79nbetst4uoORbn/AJk5DcsX3PawUP7q0yes+a6hMeFx0TMNnylzWgV/RcNmcjk/lms7Wxwew1aDh9QPxvTFIAB50E/9D1uV2BE7fbO1yuA/wXIbl8jP/iMr9kNNH+6tMD8QW4REPH8VHHL4ssb3D91wZ5jLzGk/GJH+o+yRyuQyHMMWBC+SR2nSBmglLhPN/wDmGrL+rqD0Dl/xDzySBO4zH2vQXPN6h7MwZufliecbDDZA/ZcaY81txy9zpSflaLK6HhOguouSa2SHDIa73e4D+xTf8XBgnVVpGLJltnY8X+MmbgZRJZ8Rnb2hjdNb96V3H+LnGDBmk5LHM0sgqrC53F/CHLjiL+Y5CLFbVu8Ej+6QzuB6Y4u48Zzs+Vv8zrYB+17UtY+FmeltlE58sT2cz1L1Vjc1NKMfjewuPooAKixeKzJ/njDB5Nj2XTZWViYoIhxfifqO3t/f3U+G4nl+ppCIWiDFBpzzQ1/yvVx3OONStIirI8j7OffjYmKPWS932R4eP5DIYHYfHSPjd4Ol67wPRvGcUwF7fzE3u93v+1rogA1nbHG1jR4FDSnrnSn12Yo/s8Px+lOqJfXHxkwZXuW/903F0B1NmgiWF0LRu3D/AN17G3PysX/IkA+1In/1Nnivi9j2gVoAKfLz83/60h+OMSfZ4ZyXQvN8fGXuIcB/pb5/uuWy3Z2HIWzOc0j+y+zumeKg5nA/xTky1sTLDYvIJ17rzzrz8LuPzpvixn8rk5Lj8Jgt3dX/AOUfH/JWnrOh98eWtwfPnF8zJBMHSEObVbC9A6X6z4t0sWLzGBjHFJB+KIx3V+vlUPV34Yc707ITPiPdF/qb6vPjwuMkxcrHf2SwSsd9C0r0/DFnW5ZM5qfo+ueBk4qZrZ+Hl418NU1phsgX7rnPxDwYOEhg5SNpYZie8tcAPP09l878dy+XgS3DJI1w+hK7HG6jk5fip+Pz5XtjcARK7+X9F5OX8Y5tUn0OWTrs6rF/EPEw5ix7ZHNJ8966zhuu+AzpWfHyXQO+pca/sF87Z3wI5e1uSZWjwSykJnw6uOSj9EzJ+Fw5F9oyeTUv0fY8PIcPmRf9FNjZdj5SLP8Adchm5p4V873Y7GOkd6Wlo7AP0Xz9gcjnY1OxpnAj6OXacD1rnFzYuUb+Yg0C0gA1/ReT/wCiXx23NbQ18p19Hey8Jm8rnMyOJjxPiSDb2w6Ccyfwl5HlIHy8nND+YI05rapI4Gdh5je7jpiz/wAvuPsrjE5XPxD3Rzusezt/7pGXJnhaxvWgZyLeqRW5H4acRwfE/HzJZ2SNPql7zSzC6ElEQyOm+pmujPqDJHOcP0pddDmZfU3D5+DlubI7tHboAja8z5jAn6YmjlbO9oBsNBJCDDyM+ROXfy/63seolraRUdcYnJNjdByOLjzSg/50UYaV543Hmhk7onn7xuFr3Xjs/C6kxRFkta7JPh/d/wAIWL09NxuTI4TNr/SYwR/Ur0+P+RrFLnIu0S5cb38TyDGe2SmuHY79PKoeWiezJcZLLvr9V75mcBxHINe3KaMeZ3yzMHv+gXmnXHSuRx8XeGPIZokjz9Fdw/yGPLWvQm8bc7OY4WapWsLiGvHbYKt5em3SSipfSd2TtcxiPMchDrBG/wBCvQODyPzEALpB3loaPsreRTj5Ji8aW/QbkOd53pjCwDxk00bAXbDjTvp4XTdNfi0cyaIc3cc7NCQaB/VU2DiZ3HvJndHmYh2WOIGj91nJ9IcbzUZn4ZzYckCzATofoSV5uWuPmnwy/wDyOurntHteNmY/M44y+MlY6WrNeHfsuZ6oazlsR8EsfwsyIfwz42vFsHI6g6Rzw+B0zK8jttq9P4Prjj+qWRY/KRjD5Xw2UeH/AK+wXk5PxTwV+3A9oYuR5rTKqKHIDG8vxbaniNTxD+g/2XqXTfIQ9WcDFkRE/moKD2k7Hkrzr84emupHsyWd2PkakHlp1QN+PJVnmMm6V5dvMcIScOT/ADIx4cD/AOyzkf8ANP66+/T/APoZjaXaPVOIyOx3wZ209uvsVHqHhmzxHMxaDgPUAqDG57H57j/zmA4R5DNlv0P0XS8BzLMiEtmqzp7fuvnsXnxsj
2eh5Tc6OVjf2tFmq1tU3VXT83UPFyZOKwsysL+KyYeR27XV9XccMKUZEQvHkN6VfwMlun+I8iBm5G/6mjyP6L6Tj5t/KSe530edfH5bneHxuWyD/F454idIBRPaPP8AdP8AUWG6Z+PJINlgLvvYV1y8zuKws/iYsZrcXlgZMZ3d8jnnX9go4YHJ4eLjkXkMBYW/+kf+yXzL8XNSTPGk9FbgcrDgYQxM0gdh9P1VvFIyeJksRuN4sLgOpoJWZjzOD3DStuh+RNHCmdr+Qn9k6ZV4/IWm09M6WRgSkjU/KK19EnKFO/Qz6K6UU5JTN2rGUbSsrLSE9MWxRpIKIw/VRI2VtnlE+xYYHW1to0fr7j6LIGOmljjaLdI4MA+5NK46i4KXhHQNkkLnSsDzrxsilnj1sJJtbKiM07yrHGkB8qrvdo8ElOCVc7Rk0dBiy/RWmNLYXO40u/KtIJa9152XGUKi0kcCEq+rKkH23yhuKRK0a2aHlbN0otKkURkiWUPKpcp1OKvcoaVBniu5WcfsGhORwSznKUh8oDivRmRDCB1KLyChFyzuR6FthofnC6DjfZc7EfWF0PGnQU3I/iFB0+GaYE33elI4h9ATJPpXh2uyyX0J8k70Fcpmep66Tk3egrmMh38RejxF0R532ZGKWLbAsVLYKXRHGfY2mQ4foq2GSkdktja9ukMTHCR+qg54pD+IAAhuktJO2SLhZUHOUC5RBtyEJEXnaC40UZ7dobmrGjBaVyAT9kzINJd48odAMjolWWC0Kti25W+E3QSsr6OlFxiDQVg0ivCr8Y0Am+/S8vJ2x6fRuQjaTmeNosj9JCd+0UTsCqJd633hLd6l3p3iB5BZpNeyq8qTyjzyKrypfKfigF0LZEu6tJkmztbmfbvKg02CvRiRey/6ff8AxPK9G4tw7AvMeDNTfuvSOJPoH6Lw/wAlP2W4C88tVPyzLBVwz5FW8m30FePh6opv0cfnNB7rVM9rRavM9vz/AKqjmFEr7Lh1uCOxjDoFXOLVKiwz6grnHJrSbm9B4y3wwO4K3hADTtU2IaCsmyUwledk6Hro3M+rSORKACVuabRtVebPbDRS5YNUVfKSd7jSo5Bs2raa3E2q+WO7VWOtElvsrpGEpaRpboKzdHZQJcY1pW47EsSYS11eyajdaGYy12wiRigqdbM3oOCURrqS4JtTaTtc10d5Dkb/AGpNwuo2CkISbTsCRUnJlnGA9t2tub9kPEd7FNEKC4HSxcN8oEjPKeDPqhSs0aSHI5MWx2bKnIO2/pS3ECHIk49B/osa0MkSe/0pCSUgk2m5PlcqyYbKoxdg0HE5IIvaV+MS4qGzag0bO1fCEtDYfYRWOS4FBFZr2VK6EtDMaM00lo30p99+FwIwJBsLbX0UrakHJNm7LLHk0nGPsKqx3j2KdiepmjUOArThpRY6wp+Ql0jiUJohWcElUqppopyB6i5GNUg4ei8gk0E22TSqYJNJgTUF4GXFplMsakfopDIfYKnJNpJTSeVuONGU9ldnjyqhzPUrjKILCqt3zr1+K+xFCxZR2huFJxzbQXx+V7E9IWL/ALKDhe0eqQ3BNlgsAbH6LR8KTx5CETSYlsBmF1IfftY5yhazQIYO1pTEmku1y33IGg0wr3hBLghuedqHd7oTUMxu2mYZACq5sm9IrJDfla/QRZsk87U2uScb0ZrkrRqYfuorO8Jfu9Sm0rGbsKXaQ2u8rCUJpolD4nbNuO1HuWONlQcUxHbJdyy1C9KHd5TZMCEqBfSj3WEMnSakCXfFyA0LXSYfttcdxj6cP1XWca+6Xm86fiNxMuYNDaI4WDShFshHDfK+WydUWpdFNnR6K5vPaQ4rr82PR0ua5GLyr+JfZNaOcnoOKi037qeY2iUuxy+ghbknYR+vCFv3Km42NoR2V3iZsgXb0m8d1hJEdpRMeTtNErddaOTLeF2k7ivF0q2I6FJzF+YJNDoLHt1dJuGMFn3Q4I7aLVjDGO0Jfn2OlbFhHRTcXilIxLGtoraraNa0ae6tFAe/RtEm9yk5X0pWKpg8mWmlVckosoubLsqtlk80hiNiXQV02/Kax5AaVKZfV5TuNLpXxGkbLLuF4tNskCqYpNeUdk1jynJDS0EgWw9INl15U2ypkowa79FCLtIbnoffopmjNhS5R7tHSF3rfd9yhaB2TWgLUL3biAPuiQD47uyK3k+zfKByGtv0Z23oWT7Jnj+NyM/J+Djxku93V6R+pVrxHAZmTKC+J0EDdvfIK0s6m6sw+Ewpcfhu0BrafL7uP6oOkOmNmZn+EdKwfEynMy8/z2WCxv7rg+e6xzeazo42y9sId8g8AfZcdynMTcplPMkz3ku+thXHTvGPyGOmjhdIIx3OfVgfqmLG57oPXWkCzyJMx0mdM5rWim+5KQm5KKIViMY36uJon9lW85kvm5CUzvYKNNbZoKfGcZm8i4DFw5pwf5gLCsmEpTZPtt9m5eSmmce5xfXtWkWOc61X2BtdLx/Q2R2NfyE8ULSQO0WHf7LoOc4TguF4yOKFzpcp3qLiQdfS6S6qfSC8UecSyTZTxFC17gV0PFcW7GYZHmLGOqLXbP8AVLuz4oLbjCKIeO46d/UKvn5qMSdsZfPJ/pO9rVL1qUKbSfSOra3HEpdK975D5c0XajnmDJj+D3tc+qaARY/ZJdOdP83zzw53dh4zvLttK9O4LoTG4yBs3wJcrJHhxp1qLPmx4npd0Px46r2eQZXTHIPm7sZhjIFhztWou4h+DE+WSB+VNW3EWAf2XuE3D52W9xdCMWKMeZ2+PvpeX8rzGLlcm/ClzYMXDiJ+K9h7S/ft/SkeDNlydUgXEy+jmuH4Pm+pjkTYUBLIvmDL7W6vyrjpr8MuT5rMc3Ic2JoNEyOqv7LquG/EbB6ewJsbpnjZHQv+Z5aD8Q1smiuK6h/ELnc/Jk/JROxGPFFrAWkffyrl5V/HoPS1s6jnPw34XhYj/jnJ2xnlkRDlx+c/oXGhc3B42fIcBQklhoE/qCufOTyPIZAGXkZeTO7QY5xcumPRWdDiMyuZmgwcWrYx1tLv7Ipv9L1VAUvL+KOKgy24uU6RvHxPxifTC+wCuob1RyHJYv5TjOExeJx2tHxHwl3qH7pbD42eXkWOxsZ2RjRvB/iC+5X000cMr+8MfMQB8KEelo+jgivIn/47Ez8d7Gvw6wuJ47JyeX5f48kraEffH5O7P+yt+ofxYgwg6HjYvjSnTbGx/RcPzM2ZLDckwb7NjYSE10t0zDxoZlZ8X5rkJtsicO4N/UKfLiw2v2Z1vQWO7p6ghkZPPdRf9TyMjo4z4bIS0Uojj4YWOdXe0Dy80AfsuyfguoS8hI0NumxN8D7UlpeO/OZDS5nZjNPjxai/yp3rGtIfWFpbpnN8TwGRzGUyTJcWYLDfaf5l6NjxxY+OyDHaGRNFBo0lomNja2ONvY1o0AmYvltDfJq+voWoS9Ei6jd2tE6UXrRd6aKOPRm/oDL4KSkbdi9JyTwlZBTtKiUxL6Z2vRPK4rMfH4yeZ4EsvqDvkaK83+yuOteX42Pm
1e+18xgyNchHq33B5a4OjkfG42WntJ+qmDryhy//cPH7rQ+VfYS/Ls8ilolK7SCStPJULRvsWw7HkJqB/pSDCUxCSvN5E9mFlC/flWED1URE2rLG8BebkkKWWI2EnltoFNx/Kl8v5VPPVDt9FFkO7SbQ4nqWf4SkRN+V6UrciKY8DYUoo+60JnhOYwFJdPRiBtx/VpMshpqI0D6IwHpSXkY6UVWcygsRs/3WKjHXRzP/9k=)", "_____no_output_____" ], [ "# Taking Dataset from Drive", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] ], [ [ "# Importing Libraries", "_____no_output_____" ] ], [ [ "import keras\nfrom keras import Sequential\nfrom keras.applications import MobileNetV2\nfrom keras.layers import Dense\nfrom keras.preprocessing import image", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import classification_report, log_loss, accuracy_score\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "directory = '/content/drive/MyDrive/rice'", "_____no_output_____" ] ], [ [ "# Target Class", "_____no_output_____" ] ], [ [ "Class=[]\nfor file in os.listdir(directory):\n Class+=[file]\nprint(Class)\nprint(len(Class))", "['blast', 'blight', 'tungro']\n3\n" ] ], [ [ "# Mapping the Images", "_____no_output_____" ] ], [ [ "Map=[]\nfor i in range(len(Class)):\n Map = Map+[i]\n \nnormal_mapping=dict(zip(Class,Map)) \nreverse_mapping=dict(zip(Map,Class)) \n\ndef mapper(value):\n return reverse_mapping[value]", "_____no_output_____" ], [ "set1=[]\nset2=[]\ncount=0\nfor i in Class:\n path=os.path.join(directory,i)\n t=0\n for image in os.listdir(path):\n if image[-4:]=='.jpg':\n imagee=load_img(os.path.join(path,image), grayscale=False, color_mode='rgb', target_size=(100,100))\n imagee=img_to_array(imagee)\n imagee=imagee/255.0\n if t<60:\n set1.append([imagee,count])\n else: \n set2.append([imagee,count])\n t+=1\n count=count+1", "_____no_output_____" ] ], [ [ "# Dividing Data and Test ", "_____no_output_____" ] ], [ [ "data, dataa=zip(*set1)\ntest, test_test=zip(*set2)", "_____no_output_____" ], [ "label=to_categorical(dataa)\nX=np.array(data)\ny=np.array(label)", "_____no_output_____" ], [ "labell=to_categorical(test_test)\ntest=np.array(test)\nlabell=np.array(labell)", "_____no_output_____" ], [ "print(len(y))\nprint(len(labell))", "180\n60\n" ] ], [ [ "# Train Test Split", "_____no_output_____" ] ], [ [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)", "_____no_output_____" ], [ "print(X_train.shape,X_test.shape)\nprint(y_train.shape,y_test.shape)", "(144, 100, 100, 3) (36, 100, 100, 3)\n(144, 3) (36, 3)\n" ] ], [ [ "# Image Generator", "_____no_output_____" ] ], [ [ "generator = ImageDataGenerator(horizontal_flip=True,vertical_flip=True,rotation_range=20,zoom_range=0.2,\n width_shift_range=0.2,height_shift_range=0.2,shear_range=0.1,fill_mode=\"nearest\")", "_____no_output_____" ] ], [ [ "# Calling Resnet50V2 Model", "_____no_output_____" ] ], [ [ "from tensorflow.keras.applications import ResNet50V2\nresnet50v2 = tf.keras.applications.DenseNet201(input_shape=(100,100,3),include_top=False,weights='imagenet',pooling='avg')\nresnet50v2.trainable = False", "_____no_output_____" ] ], [ [ "# Making Deep CNN Model", 
"_____no_output_____" ] ], [ [ "model_input = resnet50v2.input\nclassifier = tf.keras.layers.Dense(128, activation='relu')(resnet50v2.output)\nclassifier = tf.keras.layers.Dense(64, activation='relu')(resnet50v2.output)\nclassifier = tf.keras.layers.Dense(512, activation='relu')(resnet50v2.output)\nclassifier = tf.keras.layers.Dense(128, activation='relu')(resnet50v2.output)\nclassifier = tf.keras.layers.Dense(256, activation='relu')(resnet50v2.output)\nmodel_output = tf.keras.layers.Dense(3, activation='sigmoid')(classifier)\nmodel = tf.keras.Model(inputs=model_input, outputs=model_output)", "_____no_output_____" ] ], [ [ "# Compiling with ADAM Optimizer and Binary Crossentropy Loss Function", "_____no_output_____" ] ], [ [ "model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "# Fitting the Dataset into Model", "_____no_output_____" ] ], [ [ "history=model.fit(generator.flow(X_train,y_train,batch_size=32),validation_data=(X_test,y_test),epochs=50)", "Epoch 1/50\n5/5 [==============================] - 19s 1s/step - loss: 0.8226 - accuracy: 0.4309 - val_loss: 0.5457 - val_accuracy: 0.5556\nEpoch 2/50\n5/5 [==============================] - 1s 99ms/step - loss: 0.4008 - accuracy: 0.8104 - val_loss: 0.3669 - val_accuracy: 0.6667\nEpoch 3/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.3141 - accuracy: 0.8288 - val_loss: 0.4243 - val_accuracy: 0.7500\nEpoch 4/50\n5/5 [==============================] - 1s 97ms/step - loss: 0.2641 - accuracy: 0.8968 - val_loss: 0.3114 - val_accuracy: 0.7500\nEpoch 5/50\n5/5 [==============================] - 1s 97ms/step - loss: 0.2143 - accuracy: 0.8530 - val_loss: 0.3521 - val_accuracy: 0.8333\nEpoch 6/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.1752 - accuracy: 0.9215 - val_loss: 0.3219 - val_accuracy: 0.7778\nEpoch 7/50\n5/5 [==============================] - 1s 107ms/step - loss: 0.2260 - accuracy: 0.8718 - val_loss: 0.2696 - val_accuracy: 0.8611\nEpoch 8/50\n5/5 [==============================] - 1s 105ms/step - loss: 0.1460 - accuracy: 0.9580 - val_loss: 0.2882 - val_accuracy: 0.8611\nEpoch 9/50\n5/5 [==============================] - 1s 96ms/step - loss: 0.1257 - accuracy: 0.9189 - val_loss: 0.1928 - val_accuracy: 0.8889\nEpoch 10/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.1109 - accuracy: 0.9438 - val_loss: 0.1757 - val_accuracy: 0.8889\nEpoch 11/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.0989 - accuracy: 0.9725 - val_loss: 0.1808 - val_accuracy: 0.9167\nEpoch 12/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.1049 - accuracy: 0.9589 - val_loss: 0.1725 - val_accuracy: 0.8889\nEpoch 13/50\n5/5 [==============================] - 1s 101ms/step - loss: 0.1308 - accuracy: 0.9201 - val_loss: 0.1796 - val_accuracy: 0.9167\nEpoch 14/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.1348 - accuracy: 0.9333 - val_loss: 0.2048 - val_accuracy: 0.8889\nEpoch 15/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0789 - accuracy: 0.9718 - val_loss: 0.1606 - val_accuracy: 0.9167\nEpoch 16/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0987 - accuracy: 0.9622 - val_loss: 0.1764 - val_accuracy: 0.9444\nEpoch 17/50\n5/5 [==============================] - 1s 111ms/step - loss: 0.1024 - accuracy: 0.9632 - val_loss: 0.1827 - val_accuracy: 0.8889\nEpoch 18/50\n5/5 [==============================] - 1s 99ms/step - loss: 0.0888 - accuracy: 0.9735 - val_loss: 
0.1735 - val_accuracy: 0.9444\nEpoch 19/50\n5/5 [==============================] - 1s 101ms/step - loss: 0.0764 - accuracy: 0.9863 - val_loss: 0.1698 - val_accuracy: 0.9167\nEpoch 20/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.0947 - accuracy: 0.9815 - val_loss: 0.1836 - val_accuracy: 0.8889\nEpoch 21/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0720 - accuracy: 0.9739 - val_loss: 0.2145 - val_accuracy: 0.8889\nEpoch 22/50\n5/5 [==============================] - 1s 99ms/step - loss: 0.0621 - accuracy: 0.9866 - val_loss: 0.1684 - val_accuracy: 0.8889\nEpoch 23/50\n5/5 [==============================] - 1s 113ms/step - loss: 0.0556 - accuracy: 0.9836 - val_loss: 0.1426 - val_accuracy: 0.9167\nEpoch 24/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0497 - accuracy: 0.9811 - val_loss: 0.1330 - val_accuracy: 0.9167\nEpoch 25/50\n5/5 [==============================] - 1s 97ms/step - loss: 0.0644 - accuracy: 0.9757 - val_loss: 0.1414 - val_accuracy: 0.9167\nEpoch 26/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.0448 - accuracy: 0.9895 - val_loss: 0.1488 - val_accuracy: 0.9167\nEpoch 27/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.0807 - accuracy: 0.9544 - val_loss: 0.1443 - val_accuracy: 0.9167\nEpoch 28/50\n5/5 [==============================] - 1s 101ms/step - loss: 0.0575 - accuracy: 0.9939 - val_loss: 0.1195 - val_accuracy: 0.9167\nEpoch 29/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.0536 - accuracy: 0.9977 - val_loss: 0.1169 - val_accuracy: 0.9167\nEpoch 30/50\n5/5 [==============================] - 1s 99ms/step - loss: 0.0758 - accuracy: 0.9800 - val_loss: 0.1313 - val_accuracy: 0.9444\nEpoch 31/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.0758 - accuracy: 0.9576 - val_loss: 0.1346 - val_accuracy: 0.9167\nEpoch 32/50\n5/5 [==============================] - 1s 103ms/step - loss: 0.0427 - accuracy: 0.9892 - val_loss: 0.1515 - val_accuracy: 0.9444\nEpoch 33/50\n5/5 [==============================] - 1s 106ms/step - loss: 0.0826 - accuracy: 0.9845 - val_loss: 0.1218 - val_accuracy: 0.9444\nEpoch 34/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.0403 - accuracy: 1.0000 - val_loss: 0.1956 - val_accuracy: 0.8611\nEpoch 35/50\n5/5 [==============================] - 1s 106ms/step - loss: 0.0540 - accuracy: 0.9789 - val_loss: 0.1408 - val_accuracy: 0.9167\nEpoch 36/50\n5/5 [==============================] - 1s 113ms/step - loss: 0.0853 - accuracy: 0.9460 - val_loss: 0.1613 - val_accuracy: 0.9444\nEpoch 37/50\n5/5 [==============================] - 1s 103ms/step - loss: 0.0607 - accuracy: 0.9860 - val_loss: 0.1365 - val_accuracy: 0.9167\nEpoch 38/50\n5/5 [==============================] - 1s 116ms/step - loss: 0.0301 - accuracy: 0.9962 - val_loss: 0.1342 - val_accuracy: 0.9167\nEpoch 39/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0821 - accuracy: 0.9641 - val_loss: 0.1585 - val_accuracy: 0.9444\nEpoch 40/50\n5/5 [==============================] - 1s 98ms/step - loss: 0.0586 - accuracy: 0.9830 - val_loss: 0.1175 - val_accuracy: 0.9444\nEpoch 41/50\n5/5 [==============================] - 1s 101ms/step - loss: 0.0577 - accuracy: 0.9806 - val_loss: 0.1145 - val_accuracy: 0.9444\nEpoch 42/50\n5/5 [==============================] - 1s 108ms/step - loss: 0.0361 - accuracy: 0.9941 - val_loss: 0.1282 - val_accuracy: 0.9444\nEpoch 43/50\n5/5 [==============================] - 1s 113ms/step - loss: 0.0343 - accuracy: 
0.9962 - val_loss: 0.1450 - val_accuracy: 0.9444\nEpoch 44/50\n5/5 [==============================] - 1s 113ms/step - loss: 0.0505 - accuracy: 0.9764 - val_loss: 0.1238 - val_accuracy: 0.9167\nEpoch 45/50\n5/5 [==============================] - 1s 99ms/step - loss: 0.0712 - accuracy: 0.9766 - val_loss: 0.1298 - val_accuracy: 0.9444\nEpoch 46/50\n5/5 [==============================] - 1s 108ms/step - loss: 0.0644 - accuracy: 0.9674 - val_loss: 0.1087 - val_accuracy: 0.9444\nEpoch 47/50\n5/5 [==============================] - 1s 102ms/step - loss: 0.0386 - accuracy: 0.9928 - val_loss: 0.1123 - val_accuracy: 0.9167\nEpoch 48/50\n5/5 [==============================] - 1s 110ms/step - loss: 0.0313 - accuracy: 0.9918 - val_loss: 0.1224 - val_accuracy: 0.9444\nEpoch 49/50\n5/5 [==============================] - 1s 100ms/step - loss: 0.0288 - accuracy: 0.9863 - val_loss: 0.0908 - val_accuracy: 0.9444\nEpoch 50/50\n5/5 [==============================] - 1s 95ms/step - loss: 0.0391 - accuracy: 0.9804 - val_loss: 0.1016 - val_accuracy: 0.9444\n" ] ], [ [ "# Prediction on Test Set", "_____no_output_____" ] ], [ [ "y_pred=model.predict(X_test)\ny_pred=np.argmax(y_pred,axis=1)\ny_test = np.argmax(y_test,axis=1)", "_____no_output_____" ] ], [ [ "# Confusion Matrix", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test,y_pred)\nprint(cm)", "[[10 0 0]\n [ 0 15 1]\n [ 0 1 9]]\n" ], [ "plt.subplots(figsize=(15,7))\nsns.heatmap(cm, annot= True, linewidth=1, cmap=\"autumn_r\")", "_____no_output_____" ] ], [ [ "# Accuracy", "_____no_output_____" ] ], [ [ "print(\"Accuracy : \",accuracy_score(y_test,y_pred))", "Accuracy : 0.9444444444444444\n" ] ], [ [ "# Classification Report", "_____no_output_____" ] ], [ [ "print(classification_report(y_test,y_pred))", " precision recall f1-score support\n\n 0 1.00 1.00 1.00 10\n 1 0.94 0.94 0.94 16\n 2 0.90 0.90 0.90 10\n\n accuracy 0.94 36\n macro avg 0.95 0.95 0.95 36\nweighted avg 0.94 0.94 0.94 36\n\n" ] ], [ [ "# Loss vs Validation Loss Plot\n", "_____no_output_____" ] ], [ [ "import plotly.graph_objects as go\nfig = go.Figure()\n\nfig.add_trace(go.Scatter(y=history.history['loss'], name='Loss',\n line=dict(color='royalblue', width=3)))\nfig.add_trace(go.Scatter(y=history.history['val_loss'], name='Validation Loss',\n line=dict(color='firebrick', width=2)))", "_____no_output_____" ] ], [ [ "# Accuracy vs Validation Accuracy Plot", "_____no_output_____" ] ], [ [ "fig = go.Figure()\nfig.add_trace(go.Scatter(y=history.history['accuracy'], name='Accuracy',\n line=dict(color='royalblue', width=3)))\nfig.add_trace(go.Scatter(y=history.history['val_accuracy'], name='Validation Accuracy',\n line=dict(color='firebrick', width=3)))", "_____no_output_____" ] ], [ [ "# Testing on some Random Images", "_____no_output_____" ] ], [ [ "image=load_img(\"/content/drive/MyDrive/rice/tungro/IMG_0852.jpg\",target_size=(100,100))\nimagee=load_img(\"/content/drive/MyDrive/rice/blight/IMG_0936.jpg\",target_size=(100,100))\nimageee=load_img(\"/content/drive/MyDrive/rice/blast/IMG_0560.jpg\",target_size=(100,100))\nimageeee=load_img(\"/content/drive/MyDrive/rice/blight/IMG_1063.jpg\",target_size=(100,100))\nimageeeee=load_img(\"/content/drive/MyDrive/rice/tungro/IMG_0898.jpg\",target_size=(100,100))\n\nimage=img_to_array(image) \nimage=image/255.0\nprediction_image=np.array(image)\nprediction_image= np.expand_dims(image, axis=0)\nimagee=img_to_array(imagee) \nimagee=imagee/255.0\nprediction_imagee=np.array(imagee)\nprediction_imagee= 
np.expand_dims(imagee, axis=0)\nimageee=img_to_array(imageee) \nimageee=imageee/255.0\nprediction_imageee=np.array(imageee)\nprediction_imageee= np.expand_dims(imageee, axis=0)\nimageeee=img_to_array(imageeee) \nimageeee=imageeee/255.0\nprediction_imageeee=np.array(imageeee)\nprediction_imageeee= np.expand_dims(imageeee, axis=0)\nimageeeee=img_to_array(imageeeee) \nimageeeee=imageeeee/255.0\nprediction_imageeeee=np.array(imageeeee)\nprediction_imageeeee= np.expand_dims(imageeeee, axis=0)\n\nprediction=model.predict(prediction_image)\nvalue=np.argmax(prediction)\nmove_name=mapper(value)\nprint(\"This Rice Belongs to\", move_name + \" class\")\nprediction=model.predict(prediction_imagee)\nvalue=np.argmax(prediction)\nmove_name=mapper(value)\nprint(\"This Rice Belongs to\", move_name + \" class\")\nprediction=model.predict(prediction_imageee)\nvalue=np.argmax(prediction)\nmove_name=mapper(value)\nprint(\"This Rice Belongs to\", move_name + \" class\")\nprediction=model.predict(prediction_imageeee)\nvalue=np.argmax(prediction)\nmove_name=mapper(value)\nprint(\"This Rice Belongs to\", move_name + \" class\")\nprediction=model.predict(prediction_imageeeee)\nvalue=np.argmax(prediction)\nmove_name=mapper(value)\nprint(\"This Rice Belongs to\", move_name + \" class\")", "This Rice Belongs to tungro class\nThis Rice Belongs to blight class\nThis Rice Belongs to blast class\nThis Rice Belongs to blight class\nThis Rice Belongs to tungro class\n" ] ], [ [ "# Prediction on Different Test Set", "_____no_output_____" ] ], [ [ "print(test.shape)\npredictionn=model.predict(test)\nprint(predictionn.shape)", "(60, 100, 100, 3)\n(60, 3)\n" ], [ "test_pred=[]\nfor item in predictionn:\n value=np.argmax(item) \n test_pred = test_pred + [value]", "_____no_output_____" ] ], [ [ "# Confusion Matrix", "_____no_output_____" ] ], [ [ "from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(test_test,test_pred)\nprint(cm)", "[[19 1 0]\n [ 0 20 0]\n [ 1 0 19]]\n" ], [ "plt.subplots(figsize=(15,7))\nsns.heatmap(cm, annot= True, linewidth=1, cmap=\"CMRmap\")", "_____no_output_____" ] ], [ [ "# Accuracy", "_____no_output_____" ] ], [ [ "accuracy=accuracy_score(test_test,test_pred)\nprint(\"Model Accuracy : \",accuracy)", "Model Accuracy : 0.9706666666666667\n" ] ], [ [ "# Classification Report", "_____no_output_____" ] ], [ [ "print(classification_report(test_test,test_pred))", " precision recall f1-score support\n\n 0 0.95 0.95 0.95 20\n 1 0.95 1.00 0.98 20\n 2 1.00 0.95 0.97 20\n\n accuracy 0.97 60\n macro avg 0.97 0.97 0.97 60\nweighted avg 0.97 0.97 0.97 60\n\n" ] ], [ [ "# This Model Can Successfully Detect the Disease of a Rice Leaf with an Accuracy of 97%", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d070f699ead433b784acb0b7d1791b437440c93c
357,177
ipynb
Jupyter Notebook
Stock Prediction/.ipynb_checkpoints/ADS_Stock_Prediction_Prophet_MSFT-checkpoint.ipynb
Chowry000/Stock-Prediction-Using-LSTM
23590cfd5bb46520aeac7b45783d5b00164794ed
[ "MIT" ]
1
2020-05-13T20:35:58.000Z
2020-05-13T20:35:58.000Z
Stock Prediction/ADS_Stock_Prediction_Prophet_MSFT.ipynb
Chowry000/Stock-Prediction-Using-LSTM
23590cfd5bb46520aeac7b45783d5b00164794ed
[ "MIT" ]
null
null
null
Stock Prediction/ADS_Stock_Prediction_Prophet_MSFT.ipynb
Chowry000/Stock-Prediction-Using-LSTM
23590cfd5bb46520aeac7b45783d5b00164794ed
[ "MIT" ]
null
null
null
237.642715
143,500
0.889402
[ [ [ "### Stock Prediction using fb Prophet\n\nProphet is a procedure for forecasting time series data based on an additive model where non-linear trends are fit with yearly, weekly, and daily seasonality, plus holiday effects. It works best with time series that have strong seasonal effects and several seasons of historical data. Prophet is robust to missing data and shifts in the trend, and typically handles outliers well.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport os \nimport matplotlib.pyplot as plt\nfrom alpha_vantage.timeseries import TimeSeries\nfrom fbprophet import Prophet\n\nos.chdir(r'N:\\STOCK ADVISOR BOT')", "Importing plotly failed. Interactive plots will not work.\n" ], [ "ALPHA_VANTAGE_API_KEY = 'XAGC5LBB1SI9RDLW'\nts = TimeSeries(key= ALPHA_VANTAGE_API_KEY, output_format='pandas')\ndf_Stock, Stock_info = ts.get_daily('MSFT', outputsize='full') \n\ndf_Stock = df_Stock.rename(columns={'1. open' : 'Open', '2. high': 'High', '3. low':'Low', '4. close': 'Close', '5. volume': 'Volume' })\ndf_Stock = df_Stock.rename_axis(['Date'])\n\nStock = df_Stock.sort_index(ascending=True, axis=0)\n#slicing the data for 15 years from '2004-01-02' to today\nStock = Stock.loc['2004-01-02':]\nStock", "_____no_output_____" ], [ "Stock = Stock.drop(columns=['Open', 'High', 'Low', 'Volume'])", "_____no_output_____" ], [ "Stock.index = pd.to_datetime(Stock.index)", "_____no_output_____" ], [ "Stock.info()\n#NFLX.resample('D').ffill()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 4096 entries, 2004-01-02 to 2020-04-09\nData columns (total 1 columns):\nClose 4096 non-null float64\ndtypes: float64(1)\nmemory usage: 64.0 KB\n" ], [ "Stock = Stock.reset_index()\nStock", "_____no_output_____" ], [ "Stock.columns = ['ds', 'y']\n\nprophet_model = Prophet(yearly_seasonality=True, daily_seasonality=True)\nprophet_model.add_country_holidays(country_name='US')\nprophet_model.add_seasonality(name='monthly', period=30.5, fourier_order=5)", "_____no_output_____" ], [ "prophet_model.fit(Stock)", "_____no_output_____" ], [ "future = prophet_model.make_future_dataframe(periods=30)\nfuture.tail()", "_____no_output_____" ], [ "forcast = prophet_model.predict(future)\nforcast.tail()", "_____no_output_____" ], [ "prophet_model.plot(forcast);", "_____no_output_____" ] ], [ [ "If you want to visualize the individual forecast components, we can use Prophet’s built-in plot_components method like below", "_____no_output_____" ] ], [ [ "prophet_model.plot_components(forcast);", "_____no_output_____" ], [ "forcast.shape", "_____no_output_____" ], [ "forcast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()", "_____no_output_____" ] ], [ [ "### Prediction Performance\n\nThe performance_metrics utility can be used to compute some useful statistics of the prediction performance (yhat, yhat_lower, and yhat_upper compared to y), as a function of the distance from the cutoff (how far into the future the prediction was). The statistics computed are mean squared error (MSE), root mean squared error (RMSE), mean absolute error (MAE), mean absolute percent error (MAPE), and coverage of the yhat_lower and yhat_upper estimates. 
", "_____no_output_____" ] ], [ [ "from fbprophet.diagnostics import cross_validation, performance_metrics\ndf_cv = cross_validation(prophet_model, horizon='180 days')\ndf_cv.head()", "INFO:fbprophet:Making 59 forecasts with cutoffs between 2005-06-27 00:00:00 and 2019-10-12 00:00:00\n" ], [ "df_cv", "_____no_output_____" ], [ "df_p = performance_metrics(df_cv)\ndf_p.head()", "_____no_output_____" ], [ "df_p", "_____no_output_____" ], [ "from fbprophet.plot import plot_cross_validation_metric\nfig = plot_cross_validation_metric(df_cv, metric='mape')", "_____no_output_____" ] ], [ [ "### License\nMIT License\n\nCopyright (c) 2020 Avinash Chourasiya\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d071066f41d20ef208b4c124b77a6bd3d282f607
8,506
ipynb
Jupyter Notebook
WEEK5/WEEK5_PROJECTS.ipynb
ayomideoj/ayomideojikutuCSC102
5dd97fcfcc165958fb746f1684de1fe95c332727
[ "MIT" ]
null
null
null
WEEK5/WEEK5_PROJECTS.ipynb
ayomideoj/ayomideojikutuCSC102
5dd97fcfcc165958fb746f1684de1fe95c332727
[ "MIT" ]
null
null
null
WEEK5/WEEK5_PROJECTS.ipynb
ayomideoj/ayomideojikutuCSC102
5dd97fcfcc165958fb746f1684de1fe95c332727
[ "MIT" ]
null
null
null
31.157509
1,506
0.523043
[ [ [ "# PROJECT 1\n\n## Pseudocode For Project 1\n\n### AVOIDS\n### INPUT input word and letters\n### IF letters or words inputed are not forbidden \n### RETURN \"TRUE\"\n### END\n", "_____no_output_____" ] ], [ [ "def avoids():\n forbidden = list(str(input(\"Input forbidden letters:\")))\n sentence = str(input(\"Input a sentence to be inspected:\")).split(\"\")\n li =[]\n no = 0\n for i in sentence:\n boo = True\n for j in forbidden:\n if j in i:\n boo = False\n if boo == False:\n li.append(i)\n else:\n no +=1\n if len(li) == 0:\n print(\"no forbidden words present\")\n else:\n print(no,\"words do not use forbidden letters\")\n print(li,\"use forbidden letters\")\n\navoids()\n ", "Input forbidden letters:rtre\nInput a sentence to be inspected: oj is a GOAT\n" ] ], [ [ "# PROJECT 2\n## Pseudocode for Project 2\n### USES ALL\n### INPUT word and strings of letters\n### IF word uses all required letters at least once\n### RETURN \"TRUE\"\n### ELSE \"FALSE\"\n### END", "_____no_output_____" ] ], [ [ "def uses_all (word, req):\n newReq = list(req)\n li = []\n for i in newReq:\n if i in word:\n continue\n else:\n li.append(i)\n if len(li) > 0:\n print(False)\n else:\n print(True)\n print(\"the required letters\",li,\"not found\",word)\n \nuses_all(\"elegaont\",\"eguon\")", "False\nthe required letters ['u'] not found elegaont\n" ] ], [ [ "# PROJECT 3\n## Pseudocode for Project 3\n### INPUT Jane = \"odd\"\n### INPUT Jack = \"even\"\n### IF Jack + Jane = odd number\n### PRINT \"Jane wins\"\n### ELIF Jack + Jane = even number\n### PRINT \"Jack wins\"\n### END", "_____no_output_____" ] ], [ [ "import math\na = str(input(\"Player1, enter guess\"))\n\nb = str(input(\"Player2, enter guess\"))\n\nnum1 = int(input(\"Player1, what number did you choose\"))\n\nnum2 = int(input(\"Player2, what number did you choose\"))\ntotal = num1 + num2\nif total % 2 == 0:\n nature = \"even\"\nelse:\n nature = \"odds\"\nif a == nature and b == nature:\n print(\"Both players win\")\nelif a == nature:\n (\"Player1 wins, the answer is\", a)\nelif b == nature:\n print(\"Player2 wins, the answer is\",b)\nelse:\n print(\"Nobody wins\")\n\n\n ", "Player1, enter guess3\nPlayer2, enter guess4\nPlayer1, what number did you choose3\nPlayer2, what number did you choose4\nNobody wins\n" ] ], [ [ "# PROJECT 4\n## Pseudocode for Project 4\n### Velocity, V = D/T\n### Distance, D = 50\n### Time, T = 2\n### CALCULATE V\n### END", "_____no_output_____" ] ], [ [ "d = int(input(\"Enter value of distance\"))\nt = int(input(\"Enter value of time\"))\n\nv= d/t\nprint(\"the care is moving at\",v,\"miles per hour\")", "Enter value of distance400\nEnter value of time2\nthe care is moving at 200.0 miles per hour\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d07106a26be1118a37972515d670d38e96b7a7e0
188,665
ipynb
Jupyter Notebook
notebooks/17-MulExplInter.ipynb
mathemage/TheMulQuaBio
63a0ad6803e2aa1b808bc4517009c18a8c190b4c
[ "MIT" ]
1
2019-10-12T13:33:14.000Z
2019-10-12T13:33:14.000Z
notebooks/17-MulExplInter.ipynb
mathemage/TheMulQuaBio
63a0ad6803e2aa1b808bc4517009c18a8c190b4c
[ "MIT" ]
null
null
null
notebooks/17-MulExplInter.ipynb
mathemage/TheMulQuaBio
63a0ad6803e2aa1b808bc4517009c18a8c190b4c
[ "MIT" ]
null
null
null
237.314465
108,476
0.886185
[ [ [ "library(repr) ; options(repr.plot.res = 100, repr.plot.width=5, repr.plot.height= 5) # Change plot sizes (in cm) - this bit of code is only relevant if you are using a jupyter notebook - ignore otherwise", "_____no_output_____" ] ], [ [ "<!--NAVIGATION-->\n< [Multiple Explanatory Variables](16-MulExpl.ipynb) | [Main Contents](Index.ipynb) | [Model Simplification](18-ModelSimp.ipynb)>", "_____no_output_____" ], [ "# Linear Models: Multiple variables with interactions <span class=\"tocSkip\">", "_____no_output_____" ], [ "<h1>Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Introduction\" data-toc-modified-id=\"Introduction-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Introduction</a></span><ul class=\"toc-item\"><li><span><a href=\"#Chapter-aims\" data-toc-modified-id=\"Chapter-aims-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Chapter aims</a></span></li><li><span><a href=\"#Formulae-with-interactions-in-R\" data-toc-modified-id=\"Formulae-with-interactions-in-R-1.2\"><span class=\"toc-item-num\">1.2&nbsp;&nbsp;</span>Formulae with interactions in R</a></span></li></ul></li><li><span><a href=\"#Model-1:-Mammalian-genome-size\" data-toc-modified-id=\"Model-1:-Mammalian-genome-size-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Model 1: Mammalian genome size</a></span></li><li><span><a href=\"#Model-2-(ANCOVA):-Body-Weight-in-Odonata\" data-toc-modified-id=\"Model-2-(ANCOVA):-Body-Weight-in-Odonata-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Model 2 (ANCOVA): Body Weight in Odonata</a></span></li></ul></div>", "_____no_output_____" ], [ "# Introduction \n\nHere you will build on your skills in fitting linear models with multiple explanatory variables to data. You will learn about another commonly used Linear Model fitting technique: ANCOVA.\n\nWe will build two models in this chapter:\n\n* **Model 1**: Is mammalian genome size predicted by interactions between trophic level and whether species are ground dwelling?\n\n* **ANCOVA**: Is body size in Odonata predicted by interactions between genome size and taxonomic suborder?\n\nSo far, we have only looked at the independent effects of variables. For example, in the trophic level and ground dwelling model from [the first multiple explanatory variables chapter](16-MulExpl.ipynb), we only looked for specific differences for being a omnivore *or* being ground dwelling, not for being\nspecifically a *ground dwelling omnivore*. These independent effects of a variable are known as *main effects* and the effects of combinations of variables acting together are known as *interactions* — they describe how the variables *interact*.\n\n## Chapter aims\n\nThe aims of this chapter are[$^{[1]}$](#fn1):\n\n* Creating more complex Linear Models with multiple explanatory variables\n\n* Including the effects of interactions between multiple variables in a linear model\n\n* Plotting predictions from more complex (multiple explanatory variables) linear models\n\n\n## Formulae with interactions in R\n\nWe've already seen a number of different model formulae in R. They all use this syntax:\n\n`response variable ~ explanatory variable(s)`\n\nBut we are now going to see two extra pieces of syntax:\n\n* `y ~ a + b + a:b`: The `a:b` means the interaction between `a` and `b` — do combinations of these variables lead to different outcomes?\n\n* `y ~ a * b`: This a shorthand for the model above. The means fit `a` and `b` as main effects and their interaction `a:b`. 
\n\n# Model 1: Mammalian genome size\n\n$\\star$ Make sure you have changed the working directory to `Code` in your stats coursework directory.\n\n$\\star$ Create a new blank script called 'Interactions.R' and add some introductory comments.\n\n$\\star$ Load the data:", "_____no_output_____" ] ], [ [ "load('../data/mammals.Rdata')", "_____no_output_____" ] ], [ [ "If `mammals.Rdata` is missing, just import the data again using `read.csv`. You will then have to add the log C Value column to the imported data frame again.\n\nLet's refit the model from [the first multiple explanatory variables chapter](16-MulExpl.ipynb), but including the interaction between trophic level and ground dwelling. We'll immediately check the model is appropriate:", "_____no_output_____" ] ], [ [ "model <- lm(logCvalue ~ TrophicLevel * GroundDwelling, data= mammals)\npar(mfrow=c(2,2), mar=c(3,3,1,1), mgp=c(2, 0.8,0))\nplot(model) ", "_____no_output_____" ] ], [ [ "Now, examine the `anova` and `summary` outputs for the model:", "_____no_output_____" ] ], [ [ "anova(model)", "_____no_output_____" ] ], [ [ "Compared to the model from [the first multiple explanatory variables chapter](16-MulExpl.ipynb), there is an extra line at the bottom. The top two are the same and show that trophic level and ground dwelling both have independent main effects. The extra line\nshows that there is also an interaction between the two. It doesn't explain a huge amount of variation, about half as much as trophic level, but it is significant.\n\nAgain, we can calculate the $r^2$ for the model: $\\frac{0.81 + 2.75 + 0.43}{0.81+2.75+0.43+12.77} = 0.238$ \n\nThe model from [the first multiple explanatory variables chapter](16-MulExpl.ipynb) without the interaction had an $r^2 = 0.212$ — our new\nmodel explains 2.6% more of the variation in the data.\n\nThe summary table is as follows:", "_____no_output_____" ] ], [ [ "summary(model)", "_____no_output_____" ] ], [ [ "The lines in this output are:\n\n1. The reference level (intercept) for non ground dwelling carnivores. (The reference level is decided just by the alphabetic order of the levels)\n2. Two differences for being in different trophic levels.\n3. One difference for being ground dwelling\n4. Two new differences that give specific differences for ground dwelling herbivores and omnivores.\n\nThe first four lines, as in the model from the [ANOVA chapter](15-anova.ipynb), which would allow us to find the predicted values for each group *if the size of the differences did not vary between levels because of the interactions*. That is, this part of the model only includes a single difference ground and non-ground species, which has to be the same for each trophic group because it ignores interactions between trophic level and ground / non-ground identity of each species. The last two lines then give the estimated coefficients associated with the interaction terms, and allow cause the size of differences to vary\nbetween levels because of the further effects of interactions.\n\nThe table below show how these combine to give the predictions for each group combination, with those two new lines show in red:\n\n$\\begin{array}{|r|r|r|}\n\\hline\n & \\textrm{Not ground} & \\textrm{Ground} \\\\\n\\hline\n\\textrm{Carnivore} & 0.96 = 0.96 & 0.96+0.25=1.21 \\\\\n\\textrm{Herbivore} & 0.96 + 0.05 = 1.01 & 0.96+0.05+0.25{\\color{red}+0.03}=1.29\\\\\n\\textrm{Omnivore} & 0.96 + 0.23 = 1.19 & 0.96+0.23+0.25{\\color{red}-0.15}=1.29\\\\\n\\hline\n\\end{array}$\n\nSo why are there two new coefficients? 
For interactions between two factors, there are always $(n-1)\\times(m-1)$ new coefficients, where $n$ and $m$ are the number of levels in the two factors (Ground dwelling or not: 2 levels and trophic level: 3 levels, in our current example). So in this model, $(3-1) \\times (2-1) =2$. It is easier to understand why\ngraphically: the prediction for the white boxes below can be found by adding the main effects together but for the grey boxes we need to find specific differences and so there are $(n-1)\\times(m-1)$ interaction coefficients to add.\n\n<a id=\"fig:interactionsdiag\"></a>\n<figure>\n <img src=\"./graphics/interactionsdiag.png\" alt=\"interactionsdiag\" style=\"width:50%\">\n <small> \n <center>\n <figcaption> \n Figure 2\n </figcaption>\n </center>\n </small>\n</figure>\n\nIf we put this together, what is the model telling us?\n\n* Herbivores have the same genome sizes as carnivores, but omnivores have larger genomes.\n\n* Ground dwelling mammals have larger genomes.\n\nThese two findings suggest that ground dwelling omnivores should have extra big genomes. However, the interaction shows they are smaller than expected and are, in fact, similar to ground dwelling herbivores.\n\nNote that although the interaction term in the `anova` output is significant, neither of the two coefficients in the `summary` has a $p<0.05$. There are two weak differences (one\nvery weak, one nearly significant) that together explain significant\nvariance in the data.\n\n$\\star$ Copy the code above into your script and run the model.\n\nMake sure you understand the output!\n\nJust to make sure the sums above are correct, we'll use the same code as\nin [the first multiple explanatory variables chapter](16-MulExpl.ipynb) to get R to calculate predictions for us, similar to the way we did [before](16-MulExpl.ipynb):", "_____no_output_____" ] ], [ [ "# a data frame of combinations of variables\ngd <- rep(levels(mammals$GroundDwelling), times = 3)\nprint(gd)", "[1] \"No\" \"Yes\" \"No\" \"Yes\" \"No\" \"Yes\"\n" ], [ "tl <- rep(levels(mammals$TrophicLevel), each = 2)\nprint(tl)", "[1] \"Carnivore\" \"Carnivore\" \"Herbivore\" \"Herbivore\" \"Omnivore\" \"Omnivore\" \n" ], [ "# New data frame\npredVals <- data.frame(GroundDwelling = gd, TrophicLevel = tl)\n\n# predict using the new data frame\npredVals$predict <- predict(model, newdata = predVals)\nprint(predVals)", " GroundDwelling TrophicLevel predict\n1 No Carnivore 0.9589465\n2 Yes Carnivore 1.2138170\n3 No Herbivore 1.0124594\n4 Yes Herbivore 1.2976624\n5 No Omnivore 1.1917603\n6 Yes Omnivore 1.2990165\n" ] ], [ [ "$\\star$ Include and run the code for gererating these predictions in your script.\n\nIf we plot these data points onto the barplot from [the first multiple explanatory variables chapter](16-MulExpl.ipynb), they now lie exactly on the mean values, because we've allowed for interactions. The triangle on this plot shows the predictions for ground dwelling omnivores from the main effects ($0.96 + 0.23 + 0.25 = 1.44$), the interaction of $-0.15$ pushes the prediction back down.\n\n<a id=\"fig:predPlot\"></a>\n<figure>\n <img src=\"./graphics/predPlot.svg\" alt=\"predPlot\" style=\"width:70%\">\n</figure>\n", "_____no_output_____" ], [ "\n# Model 2 (ANCOVA): Body Weight in Odonata\n\nWe'll go all the way back to the regression analyses from the [Regression chapter](14-regress.ipynb). Remember that we fitted two separate regression lines to the data for damselflies and dragonflies. We'll now use an interaction to fit these in a single model. 
This kind of linear model — with a mixture of continuous variables and factors — is often called an *analysis of covariance*, or ANCOVA. That is, ANCOVA is a type of linear model that blends ANOVA and regression. ANCOVA evaluates whether population means of a dependent variable are equal across levels of a categorical independent variable, while statistically controlling for the effects of other continuous variables that are not of primary interest, known as covariates.\n\n*Thus, ANCOVA is a linear model with one categorical and one or more continuous predictors*.\n\nWe will use the odonates data that we have worked with [before](12-ExpDesign.ipynb).\n\n$\\star$ First load the data:", "_____no_output_____" ] ], [ [ "odonata <- read.csv('../data/GenomeSize.csv')", "_____no_output_____" ] ], [ [ "$\\star$ Now create two new variables in the `odonata` data set called `logGS` and `logBW` containing log genome size and log body weight:", "_____no_output_____" ] ], [ [ "odonata$logGS <- log(odonata$GenomeSize)\nodonata$logBW <- log(odonata$BodyWeight)", "_____no_output_____" ] ], [ [ "The models we fitted [before](12-ExpDesign.ipynb) looked like this:\n\n<a id=\"fig:dragonData\"></a>\n<figure>\n <img src=\"./graphics/dragonData.svg\" alt=\"dragonData\" style=\"width:60%\">\n <small> \n <center>\n <figcaption> \n \n </figcaption>\n </center>\n </small>\n</figure>\n\nWe can now fit the model of body weight as a function of both genome size and suborder:", "_____no_output_____" ] ], [ [ "odonModel <- lm(logBW ~ logGS * Suborder, data = odonata)", "_____no_output_____" ] ], [ [ "Again, we'll look at the <span>anova</span> table first:", "_____no_output_____" ] ], [ [ "anova(odonModel)", "_____no_output_____" ] ], [ [ "Interpreting this:\n\n* There is no significant main effect of log genome size. The *main* effect is the important thing here — genome size is hugely important but does very different things for the two different suborders. If we ignored `Suborder`, there isn't an overall relationship: the average of those two lines is pretty much flat.\n\n* There is a very strong main effect of Suborder: the mean body weight in the two groups are very different.\n\n* There is a strong interaction between suborder and genome size. This is an interaction between a factor and a continuous variable and shows that the *slopes* are different for the different factor levels.\n\nNow for the summary table:", "_____no_output_____" ] ], [ [ "summary(odonModel)", "_____no_output_____" ] ], [ [ "* The first thing to note is that the $r^2$ value is really high. The model explains three quarters (0.752) of the variation in the data.\n\n* Next, there are four coefficients:\n\n * The intercept is for the first level of `Suborder`, which is Anisoptera (dragonflies).\n * The next line, for `log genome size`, is the slope for Anisoptera. \n * We then have a coefficient for the second level of `Suborder`, which is Zygoptera (damselflies). As with the first model, this difference in factor levels is a difference in mean values and shows the difference in the intercept for Zygoptera.\n * The last line is the interaction between `Suborder` and `logGS`. This shows how the slope for Zygoptera differs from the slope for Anisoptera.\n\nHow do these hang together to give the two lines shown in the model? 
We can calculate these by hand: \n\n$\\begin{aligned}\n \\textrm{Body Weight} &= -2.40 + 1.01 \\times \\textrm{logGS} & \\textrm{[Anisoptera]}\\\\\n \\textrm{Body Weight} &= (-2.40 -2.25) + (1.01 - 2.15) \\times \\textrm{logGS} & \\textrm{[Zygoptera]}\\\\\n &= -4.65 - 1.14 \\times \\textrm{logGS} \\\\\\end{aligned}$\n\n$\\star$ Add the above code into your script and check that you understand the outputs.\n\nWe'll use the `predict` function again to get the predicted values from the model and add lines to the plot above.\n\nFirst, we'll create a set of numbers spanning the range of genome size:", "_____no_output_____" ] ], [ [ "#get the range of the data:\nrng <- range(odonata$logGS)\n#get a sequence from the min to the max with 100 equally spaced values:\nLogGSForFitting <- seq(rng[1], rng[2], length = 100)", "_____no_output_____" ] ], [ [ "Have a look at these numbers:", "_____no_output_____" ] ], [ [ "print(LogGSForFitting)", " [1] -0.891598119 -0.873918728 -0.856239337 -0.838559945 -0.820880554\n [6] -0.803201163 -0.785521772 -0.767842380 -0.750162989 -0.732483598\n [11] -0.714804206 -0.697124815 -0.679445424 -0.661766032 -0.644086641\n [16] -0.626407250 -0.608727859 -0.591048467 -0.573369076 -0.555689685\n [21] -0.538010293 -0.520330902 -0.502651511 -0.484972119 -0.467292728\n [26] -0.449613337 -0.431933946 -0.414254554 -0.396575163 -0.378895772\n [31] -0.361216380 -0.343536989 -0.325857598 -0.308178207 -0.290498815\n [36] -0.272819424 -0.255140033 -0.237460641 -0.219781250 -0.202101859\n [41] -0.184422467 -0.166743076 -0.149063685 -0.131384294 -0.113704902\n [46] -0.096025511 -0.078346120 -0.060666728 -0.042987337 -0.025307946\n [51] -0.007628554 0.010050837 0.027730228 0.045409619 0.063089011\n [56] 0.080768402 0.098447793 0.116127185 0.133806576 0.151485967\n [61] 0.169165358 0.186844750 0.204524141 0.222203532 0.239882924\n [66] 0.257562315 0.275241706 0.292921098 0.310600489 0.328279880\n [71] 0.345959271 0.363638663 0.381318054 0.398997445 0.416676837\n [76] 0.434356228 0.452035619 0.469715011 0.487394402 0.505073793\n [81] 0.522753184 0.540432576 0.558111967 0.575791358 0.593470750\n [86] 0.611150141 0.628829532 0.646508923 0.664188315 0.681867706\n [91] 0.699547097 0.717226489 0.734905880 0.752585271 0.770264663\n [96] 0.787944054 0.805623445 0.823302836 0.840982228 0.858661619\n" ] ], [ [ "We can now use the model to predict the values of body weight at each of those points for each of the two suborders:", "_____no_output_____" ] ], [ [ "#get a data frame of new data for the order\nZygoVals <- data.frame(logGS = LogGSForFitting, Suborder = \"Zygoptera\")\n\n#get the predictions and standard error\nZygoPred <- predict(odonModel, newdata = ZygoVals, se.fit = TRUE)\n\n#repeat for anisoptera\nAnisoVals <- data.frame(logGS = LogGSForFitting, Suborder = \"Anisoptera\")\nAnisoPred <- predict(odonModel, newdata = AnisoVals, se.fit = TRUE)", "_____no_output_____" ] ], [ [ "We've added `se.fit=TRUE` to the function to get the standard error around the regression lines. 
Both `AnisoPred` and `ZygoPred` contain predicted values (called `fit`) and standard error values (called `se.fit`) for each of the values in our generated values in `LogGSForFitting` for each of the two suborders.\n\nWe can add the predictions onto a plot like this:", "_____no_output_____" ] ], [ [ "# plot the scatterplot of the data\nplot(logBW ~ logGS, data = odonata, col = Suborder)\n# add the predicted lines\nlines(AnisoPred$fit ~ LogGSForFitting, col = \"black\")\nlines(AnisoPred$fit + AnisoPred$se.fit ~ LogGSForFitting, col = \"black\", lty = 2)\nlines(AnisoPred$fit - AnisoPred$se.fit ~ LogGSForFitting, col = \"black\", lty = 2)", "_____no_output_____" ] ], [ [ "$\\star$ Copy the prediction code into your script and run the plot above.\n\nCopy and modify the last three lines to add the lines for the Zygoptera. Your final plot should look like this.\n\n<a id=\"fig:odonPlot\"></a>\n<figure>\n <img src=\"./graphics/odonPlot.svg\" alt=\"odonPlot\" style=\"width:70%\">\n <small> \n <center>\n <figcaption> \n Figure 4\n </figcaption>\n </center>\n </small>\n</figure>\n\n---\n\n<a id=\"fn1\"></a>\n[1]: Here you work with the script file `MulExplInter.R`", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0712a36e24d8667df87363947b6c6d05c200d16
383,390
ipynb
Jupyter Notebook
Week2/Optimization+methods.ipynb
softwarebrahma/Deep-Learning-Specialization-Improving-Deep-Neural-Network-Hyperparam-TuneRegularizationOptimization
0f6a8d5e5fdbd14169c58a2aa50d97149bb68382
[ "Apache-2.0" ]
null
null
null
Week2/Optimization+methods.ipynb
softwarebrahma/Deep-Learning-Specialization-Improving-Deep-Neural-Network-Hyperparam-TuneRegularizationOptimization
0f6a8d5e5fdbd14169c58a2aa50d97149bb68382
[ "Apache-2.0" ]
null
null
null
Week2/Optimization+methods.ipynb
softwarebrahma/Deep-Learning-Specialization-Improving-Deep-Neural-Network-Hyperparam-TuneRegularizationOptimization
0f6a8d5e5fdbd14169c58a2aa50d97149bb68382
[ "Apache-2.0" ]
null
null
null
232.639563
62,492
0.879992
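(Editor's note: the notebook in this record, whose cells follow below, builds up mini-batch gradient descent, momentum, and Adam inside graded functions. As a compact reference for the update rules implemented there, here is a minimal NumPy sketch of one momentum step and one Adam step for a single parameter array; hyperparameter names and defaults mirror the notebook's, and `t` is the 1-based Adam step counter. The function names are this note's own, not the assignment's.)

```python
import numpy as np

def momentum_step(w, dw, v, beta=0.9, lr=0.01):
    """One momentum update: v is an exponentially weighted average of past gradients."""
    v = beta * v + (1 - beta) * dw
    return w - lr * v, v

def adam_step(w, dw, v, s, t, lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
    """One Adam update with bias-corrected first and second moment estimates."""
    v = beta1 * v + (1 - beta1) * dw
    s = beta2 * s + (1 - beta2) * dw ** 2
    v_hat = v / (1 - beta1 ** t)   # bias correction; matters most for small t
    s_hat = s / (1 - beta2 ** t)
    return w - lr * v_hat / (np.sqrt(s_hat) + eps), v, s

w = np.ones((2, 3))
dw = np.full((2, 3), 0.1)
w, v = momentum_step(w, dw, np.zeros_like(w))
w, v2, s2 = adam_step(w, dw, np.zeros_like(w), np.zeros_like(w), t=1)
print(w)
```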
[ [ [ "# Optimization Methods\n\nUntil now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result. \n\nGradient descent goes \"downhill\" on a cost function $J$. Think of it as trying to do this: \n<img src=\"images/cost.jpg\" style=\"width:650px;height:300px;\">\n<caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>\n\n**Notations**: As usual, $\\frac{\\partial J}{\\partial a } = $ `da` for any variable `a`.\n\nTo get started, run the following code to import the libraries you will need.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport math\nimport sklearn\nimport sklearn.datasets\n\nfrom opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation\nfrom opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'", "_____no_output_____" ] ], [ [ "## 1 - Gradient Descent\n\nA simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent. \n\n**Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$: \n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{1}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{2}$$\n\nwhere L is the number of layers and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters_with_gd\n\ndef update_parameters_with_gd(parameters, grads, learning_rate):\n \"\"\"\n Update parameters using one step of gradient descent\n \n Arguments:\n parameters -- python dictionary containing your parameters to be updated:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients to update each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n learning_rate -- the learning rate, scalar.\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - (learning_rate * grads[\"dW\" + str(l+1)])\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - (learning_rate * grads[\"db\" + str(l+1)])\n ### END CODE HERE ###\n \n return parameters", "_____no_output_____" ], [ "parameters, grads, learning_rate = update_parameters_with_gd_test_case()\n\nparameters = update_parameters_with_gd(parameters, grads, learning_rate)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))", "W1 = [[ 1.63535156 -0.62320365 -0.53718766]\n [-1.07799357 0.85639907 -2.29470142]]\nb1 = [[ 1.74604067]\n [-0.75184921]]\nW2 = [[ 0.32171798 -0.25467393 1.46902454]\n [-2.05617317 -0.31554548 -0.3756023 ]\n [ 1.1404819 -1.09976462 -0.1612551 ]]\nb2 = [[-0.88020257]\n [ 0.02561572]\n [ 0.57539477]]\n" ] ], [ [ "**Expected Output**:\n\n<table> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.63535156 -0.62320365 -0.53718766]\n [-1.07799357 0.85639907 -2.29470142]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.74604067]\n [-0.75184921]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.32171798 -0.25467393 1.46902454]\n [-2.05617317 -0.31554548 -0.3756023 ]\n [ 1.1404819 -1.09976462 -0.1612551 ]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.88020257]\n [ 0.02561572]\n [ 0.57539477]] </td> \n </tr> \n</table>\n", "_____no_output_____" ], [ "A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent. \n\n- **(Batch) Gradient Descent**:\n\n``` python\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n # Forward propagation\n a, caches = forward_propagation(X, parameters)\n # Compute cost.\n cost = compute_cost(a, Y)\n # Backward propagation.\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\n \n```\n\n- **Stochastic Gradient Descent**:\n\n```python\nX = data_input\nY = labels\nparameters = initialize_parameters(layers_dims)\nfor i in range(0, num_iterations):\n for j in range(0, m):\n # Forward propagation\n a, caches = forward_propagation(X[:,j], parameters)\n # Compute cost\n cost = compute_cost(a, Y[:,j])\n # Backward propagation\n grads = backward_propagation(a, caches, parameters)\n # Update parameters.\n parameters = update_parameters(parameters, grads)\n```\n", "_____no_output_____" ], [ "In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will \"oscillate\" toward the minimum rather than converge smoothly. Here is an illustration of this: \n\n<img src=\"images/kiank_sgd.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> \"+\" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. 
But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>\n\n**Note** also that implementing SGD requires 3 for-loops in total:\n1. Over the number of iterations\n2. Over the $m$ training examples\n3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)\n\nIn practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.\n\n<img src=\"images/kiank_minibatch.png\" style=\"width:750px;height:250px;\">\n<caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> \"+\" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>\n\n<font color='blue'>\n**What you should remember**:\n- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.\n- You have to tune a learning rate hyperparameter $\\alpha$.\n- With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).", "_____no_output_____" ], [ "## 2 - Mini-Batch Gradient descent\n\nLet's learn how to build mini-batches from the training set (X, Y).\n\nThere are two steps:\n- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches. \n\n<img src=\"images/kiank_shuffle.png\" style=\"width:550px;height:300px;\">\n\n- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this: \n\n<img src=\"images/kiank_partition.png\" style=\"width:550px;height:300px;\">\n\n**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:\n```python\nfirst_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]\nsecond_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]\n...\n```\n\nNote that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\\lfloor s \\rfloor$ represents $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini_\\_batch_\\_size \\times \\lfloor \\frac{m}{mini\\_batch\\_size}\\rfloor$). 
", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: random_mini_batches\n\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n \"\"\"\n Creates a list of random minibatches from (X, Y)\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n mini_batch_size -- size of the mini-batches, integer\n \n Returns:\n mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)\n \"\"\"\n \n np.random.seed(seed) # To make your \"random\" minibatches the same as ours\n m = X.shape[1] # number of training examples\n mini_batches = []\n \n # Step 1: Shuffle (X, Y)\n permutation = list(np.random.permutation(m))\n shuffled_X = X[:, permutation]\n shuffled_Y = Y[:, permutation].reshape((1,m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.\n num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, k * mini_batch_size : (k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k + 1) * mini_batch_size].reshape((1, mini_batch_size))\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n ### START CODE HERE ### (approx. 2 lines)\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size : m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size : m].reshape((1, m - num_complete_minibatches * mini_batch_size))\n ### END CODE HERE ###\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n \n return mini_batches", "_____no_output_____" ], [ "X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()\nmini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)\n\nprint (\"shape of the 1st mini_batch_X: \" + str(mini_batches[0][0].shape))\nprint (\"shape of the 2nd mini_batch_X: \" + str(mini_batches[1][0].shape))\nprint (\"shape of the 3rd mini_batch_X: \" + str(mini_batches[2][0].shape))\nprint (\"shape of the 1st mini_batch_Y: \" + str(mini_batches[0][1].shape))\nprint (\"shape of the 2nd mini_batch_Y: \" + str(mini_batches[1][1].shape)) \nprint (\"shape of the 3rd mini_batch_Y: \" + str(mini_batches[2][1].shape))\nprint (\"mini batch sanity check: \" + str(mini_batches[0][0][0][0:3]))", "shape of the 1st mini_batch_X: (12288, 64)\nshape of the 2nd mini_batch_X: (12288, 64)\nshape of the 3rd mini_batch_X: (12288, 20)\nshape of the 1st mini_batch_Y: (1, 64)\nshape of the 2nd mini_batch_Y: (1, 64)\nshape of the 3rd mini_batch_Y: (1, 20)\nmini batch sanity check: [ 0.90085595 -0.7612069 0.2344157 ]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:50%\"> \n <tr>\n <td > **shape of the 1st mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n \n <tr>\n <td > **shape of the 2nd mini_batch_X** </td> \n <td > (12288, 64) </td> \n </tr> \n \n <tr>\n <td > **shape of the 3rd mini_batch_X** </td> \n <td > (12288, 20) </td> \n </tr>\n <tr>\n <td > **shape of the 1st mini_batch_Y** </td> \n <td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 2nd mini_batch_Y** </td> \n <td > (1, 64) </td> \n </tr> \n <tr>\n <td > **shape of the 3rd mini_batch_Y** </td> \n <td > (1, 20) </td> \n </tr> \n <tr>\n <td > **mini 
batch sanity check** </td> \n <td > [ 0.90085595 -0.7612069 0.2344157 ] </td> \n </tr>\n \n</table>", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- Shuffling and Partitioning are the two steps required to build mini-batches\n- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.", "_____no_output_____" ], [ "## 3 - Momentum\n\nBecause mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will \"oscillate\" toward convergence. Using momentum can reduce these oscillations. \n\nMomentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the \"velocity\" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill. \n\n<img src=\"images/opt_momentum.png\" style=\"width:400px;height:250px;\">\n<caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>\n\n\n**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:\nfor $l =1,...,L$:\n```python\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n```\n**Note** that the iterator l starts at 0 in the for loop while the first parameters are v[\"dW1\"] and v[\"db1\"] (that's a \"one\" on the superscript). This is why we are shifting l to l+1 in the `for` loop.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_velocity\n\ndef initialize_velocity(parameters):\n \"\"\"\n Initializes the velocity as a python dictionary with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n \n Returns:\n v -- python dictionary containing the current velocity.\n v['dW' + str(l)] = velocity of dWl\n v['db' + str(l)] = velocity of dbl\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n \n # Initialize velocity\n for l in range(L):\n ### START CODE HERE ### (approx. 
2 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v", "_____no_output_____" ], [ "parameters = initialize_velocity_test_case()\n\nv = initialize_velocity(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))", "v[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] = [[ 0.]\n [ 0.]]\nv[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:40%\"> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr> \n</table>\n", "_____no_output_____" ], [ "**Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$: \n\n$$ \\begin{cases}\nv_{dW^{[l]}} = \\beta v_{dW^{[l]}} + (1 - \\beta) dW^{[l]} \\\\\nW^{[l]} = W^{[l]} - \\alpha v_{dW^{[l]}}\n\\end{cases}\\tag{3}$$\n\n$$\\begin{cases}\nv_{db^{[l]}} = \\beta v_{db^{[l]}} + (1 - \\beta) db^{[l]} \\\\\nb^{[l]} = b^{[l]} - \\alpha v_{db^{[l]}} \n\\end{cases}\\tag{4}$$\n\nwhere L is the number of layers, $\\beta$ is the momentum and $\\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a \"one\" on the superscript). So you will need to shift `l` to `l+1` when coding.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters_with_momentum\n\ndef update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):\n \"\"\"\n Update parameters using Momentum\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + \n str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- python dictionary containing the current velocity:\n v['dW' + str(l)] = ...\n v['db' + str(l)] = ...\n beta -- the momentum hyperparameter, scalar\n learning_rate -- the learning rate, scalar\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- python dictionary containing your updated velocities\n \"\"\"\n\n L = len(parameters) // 2 # number of layers in the neural networks\n \n # Momentum update for each parameter\n for l in range(L):\n \n ### START CODE HERE ### (approx. 
4 lines)\n # compute velocities\n v[\"dW\" + str(l+1)] = beta * v[\"dW\" + str(l+1)] + (1 - beta) * grads[\"dW\" + str(l+1)]\n v[\"db\" + str(l+1)] = beta * v[\"db\" + str(l+1)] + (1 - beta) * grads[\"db\" + str(l+1)]\n # update parameters\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - (learning_rate * v[\"dW\" + str(l+1)])\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - (learning_rate * v[\"db\" + str(l+1)])\n ### END CODE HERE ###\n \n return parameters, v", "_____no_output_____" ], [ "parameters, grads, v = update_parameters_with_momentum_test_case()\n\nparameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))", "W1 = [[ 1.62544598 -0.61290114 -0.52907334]\n [-1.07347112 0.86450677 -2.30085497]]\nb1 = [[ 1.74493465]\n [-0.76027113]]\nW2 = [[ 0.31930698 -0.24990073 1.4627996 ]\n [-2.05974396 -0.32173003 -0.38320915]\n [ 1.13444069 -1.0998786 -0.1713109 ]]\nb2 = [[-0.87809283]\n [ 0.04055394]\n [ 0.58207317]]\nv[\"dW1\"] = [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = [[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:90%\"> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.62544598 -0.61290114 -0.52907334]\n [-1.07347112 0.86450677 -2.30085497]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.74493465]\n [-0.76027113]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.31930698 -0.24990073 1.4627996 ]\n [-2.05974396 -0.32173003 -0.38320915]\n [ 1.13444069 -1.0998786 -0.1713109 ]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.87809283]\n [ 0.04055394]\n [ 0.58207317]] </td> \n </tr> \n\n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[-0.01228902]\n [-0.09357694]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]</td> \n </tr> \n</table>\n\n", "_____no_output_____" ], [ "**Note** that:\n- The velocity is initialized with zeros. So the algorithm will take a few iterations to \"build up\" velocity and start to take bigger steps.\n- If $\\beta = 0$, then this just becomes standard gradient descent without momentum. \n\n**How do you choose $\\beta$?**\n\n- The larger the momentum $\\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\\beta$ is too big, it could also smooth out the updates too much. \n- Common values for $\\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\\beta = 0.9$ is often a reasonable default. 
\n- Tuning the optimal $\\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$. ", "_____no_output_____" ], [ "<font color='blue'>\n**What you should remember**:\n- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.\n- You have to tune a momentum hyperparameter $\\beta$ and a learning rate $\\alpha$.", "_____no_output_____" ], [ "## 4 - Adam\n\nAdam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum. \n\n**How does Adam work?**\n1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction). \n2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction). \n3. It updates parameters in a direction based on combining information from \"1\" and \"2\".\n\nThe update rule is, for $l = 1, ..., L$: \n\n$$\\begin{cases}\nv_{dW^{[l]}} = \\beta_1 v_{dW^{[l]}} + (1 - \\beta_1) \\frac{\\partial \\mathcal{J} }{ \\partial W^{[l]} } \\\\\nv^{corrected}_{dW^{[l]}} = \\frac{v_{dW^{[l]}}}{1 - (\\beta_1)^t} \\\\\ns_{dW^{[l]}} = \\beta_2 s_{dW^{[l]}} + (1 - \\beta_2) (\\frac{\\partial \\mathcal{J} }{\\partial W^{[l]} })^2 \\\\\ns^{corrected}_{dW^{[l]}} = \\frac{s_{dW^{[l]}}}{1 - (\\beta_1)^t} \\\\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{dW^{[l]}}}{\\sqrt{s^{corrected}_{dW^{[l]}}} + \\varepsilon}\n\\end{cases}$$\nwhere:\n- t counts the number of steps taken of Adam \n- L is the number of layers\n- $\\beta_1$ and $\\beta_2$ are hyperparameters that control the two exponentially weighted averages. \n- $\\alpha$ is the learning rate\n- $\\varepsilon$ is a very small number to avoid dividing by zero\n\nAs usual, we will store all parameters in the `parameters` dictionary ", "_____no_output_____" ], [ "**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.\n\n**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:\nfor $l = 1, ..., L$:\n```python\nv[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\nv[\"db\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\ns[\"dW\" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters[\"W\" + str(l+1)])\ns[\"db\" + str(l+1)] = ... 
#(numpy array of zeros with the same shape as parameters[\"b\" + str(l+1)])\n\n```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_adam\n\ndef initialize_adam(parameters) :\n \"\"\"\n Initializes v and s as two python dictionaries with:\n - keys: \"dW1\", \"db1\", ..., \"dWL\", \"dbL\" \n - values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.\n \n Arguments:\n parameters -- python dictionary containing your parameters.\n parameters[\"W\" + str(l)] = Wl\n parameters[\"b\" + str(l)] = bl\n \n Returns: \n v -- python dictionary that will contain the exponentially weighted average of the gradient.\n v[\"dW\" + str(l)] = ...\n v[\"db\" + str(l)] = ...\n s -- python dictionary that will contain the exponentially weighted average of the squared gradient.\n s[\"dW\" + str(l)] = ...\n s[\"db\" + str(l)] = ...\n\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v = {}\n s = {}\n \n # Initialize v, s. Input: \"parameters\". Outputs: \"v, s\".\n for l in range(L):\n ### START CODE HERE ### (approx. 4 lines)\n v[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n v[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n s[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n s[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n ### END CODE HERE ###\n \n return v, s", "_____no_output_____" ], [ "parameters = initialize_adam_test_case()\n\nv, s = initialize_adam(parameters)\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))\n", "v[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db1\"] = [[ 0.]\n [ 0.]]\nv[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\nv[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\ns[\"dW1\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db1\"] = [[ 0.]\n [ 0.]]\ns[\"dW2\"] = [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]]\ns[\"db2\"] = [[ 0.]\n [ 0.]\n [ 0.]]\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:40%\"> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr> \n <tr>\n <td > **s[\"dW1\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db1\"]** </td> \n <td > [[ 0.]\n [ 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"dW2\"]** </td> \n <td > [[ 0. 0. 0.]\n [ 0. 0. 0.]\n [ 0. 0. 0.]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db2\"]** </td> \n <td > [[ 0.]\n [ 0.]\n [ 0.]] </td> \n </tr>\n\n</table>\n", "_____no_output_____" ], [ "**Exercise**: Now, implement the parameters update with Adam. 
Recall the general update rule is, for $l = 1, ..., L$: \n\n$$\\begin{cases}\nv_{W^{[l]}} = \\beta_1 v_{W^{[l]}} + (1 - \\beta_1) \\frac{\\partial J }{ \\partial W^{[l]} } \\\\\nv^{corrected}_{W^{[l]}} = \\frac{v_{W^{[l]}}}{1 - (\\beta_1)^t} \\\\\ns_{W^{[l]}} = \\beta_2 s_{W^{[l]}} + (1 - \\beta_2) (\\frac{\\partial J }{\\partial W^{[l]} })^2 \\\\\ns^{corrected}_{W^{[l]}} = \\frac{s_{W^{[l]}}}{1 - (\\beta_2)^t} \\\\\nW^{[l]} = W^{[l]} - \\alpha \\frac{v^{corrected}_{W^{[l]}}}{\\sqrt{s^{corrected}_{W^{[l]}}}+\\varepsilon}\n\\end{cases}$$\n\n\n**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters_with_adam\n\ndef update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):\n \"\"\"\n Update parameters using Adam\n \n Arguments:\n parameters -- python dictionary containing your parameters:\n parameters['W' + str(l)] = Wl\n parameters['b' + str(l)] = bl\n grads -- python dictionary containing your gradients for each parameters:\n grads['dW' + str(l)] = dWl\n grads['db' + str(l)] = dbl\n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n learning_rate -- the learning rate, scalar.\n beta1 -- Exponential decay hyperparameter for the first moment estimates \n beta2 -- Exponential decay hyperparameter for the second moment estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n v -- Adam variable, moving average of the first gradient, python dictionary\n s -- Adam variable, moving average of the squared gradient, python dictionary\n \"\"\"\n \n L = len(parameters) // 2 # number of layers in the neural networks\n v_corrected = {} # Initializing first moment estimate, python dictionary\n s_corrected = {} # Initializing second moment estimate, python dictionary\n \n # Perform Adam update on all parameters\n for l in range(L):\n # Moving average of the gradients. Inputs: \"v, grads, beta1\". Output: \"v\".\n ### START CODE HERE ### (approx. 2 lines)\n v[\"dW\" + str(l+1)] = beta1 * v[\"dW\" + str(l+1)] + (1 - beta1) * grads[\"dW\" + str(l+1)]\n v[\"db\" + str(l+1)] = beta1 * v[\"db\" + str(l+1)] + (1 - beta1) * grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n\n # Compute bias-corrected first moment estimate. Inputs: \"v, beta1, t\". Output: \"v_corrected\".\n ### START CODE HERE ### (approx. 2 lines)\n v_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)] / (1 - beta1 ** t)\n v_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)] / (1 - beta1 ** t)\n ### END CODE HERE ###\n\n # Moving average of the squared gradients. Inputs: \"s, grads, beta2\". Output: \"s\".\n ### START CODE HERE ### (approx. 2 lines)\n s[\"dW\" + str(l+1)] = beta2 * s[\"dW\" + str(l+1)] + (1 - beta2) * np.square(grads[\"dW\" + str(l+1)])\n s[\"db\" + str(l+1)] = beta2 * s[\"db\" + str(l+1)] + (1 - beta2) * np.square(grads[\"db\" + str(l+1)])\n ### END CODE HERE ###\n\n # Compute bias-corrected second raw moment estimate. Inputs: \"s, beta2, t\". Output: \"s_corrected\".\n ### START CODE HERE ### (approx. 
2 lines)\n s_corrected[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)] / (1 - beta2 ** t)\n s_corrected[\"db\" + str(l+1)] = s[\"db\" + str(l+1)] / (1 - beta2 ** t)\n ### END CODE HERE ###\n\n # Update parameters. Inputs: \"parameters, learning_rate, v_corrected, s_corrected, epsilon\". Output: \"parameters\".\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - ( learning_rate * v_corrected[\"dW\" + str(l+1)] / ( np.sqrt(s_corrected[\"dW\" + str(l+1)]) + epsilon ) )\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - ( learning_rate * v_corrected[\"db\" + str(l+1)] / ( np.sqrt(s_corrected[\"db\" + str(l+1)]) + epsilon ) )\n ### END CODE HERE ###\n\n return parameters, v, s", "_____no_output_____" ], [ "parameters, grads, v, s = update_parameters_with_adam_test_case()\nparameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\nprint(\"v[\\\"dW1\\\"] = \" + str(v[\"dW1\"]))\nprint(\"v[\\\"db1\\\"] = \" + str(v[\"db1\"]))\nprint(\"v[\\\"dW2\\\"] = \" + str(v[\"dW2\"]))\nprint(\"v[\\\"db2\\\"] = \" + str(v[\"db2\"]))\nprint(\"s[\\\"dW1\\\"] = \" + str(s[\"dW1\"]))\nprint(\"s[\\\"db1\\\"] = \" + str(s[\"db1\"]))\nprint(\"s[\\\"dW2\\\"] = \" + str(s[\"dW2\"]))\nprint(\"s[\\\"db2\\\"] = \" + str(s[\"db2\"]))", "W1 = [[ 1.63178673 -0.61919778 -0.53561312]\n [-1.08040999 0.85796626 -2.29409733]]\nb1 = [[ 1.75225313]\n [-0.75376553]]\nW2 = [[ 0.32648046 -0.25681174 1.46954931]\n [-2.05269934 -0.31497584 -0.37661299]\n [ 1.14121081 -1.09244991 -0.16498684]]\nb2 = [[-0.88529979]\n [ 0.03477238]\n [ 0.57537385]]\nv[\"dW1\"] = [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]]\nv[\"db1\"] = [[-0.01228902]\n [-0.09357694]]\nv[\"dW2\"] = [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]]\nv[\"db2\"] = [[ 0.02344157]\n [ 0.16598022]\n [ 0.07420442]]\ns[\"dW1\"] = [[ 0.00121136 0.00131039 0.00081287]\n [ 0.0002525 0.00081154 0.00046748]]\ns[\"db1\"] = [[ 1.51020075e-05]\n [ 8.75664434e-04]]\ns[\"dW2\"] = [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]\n [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]\ns[\"db2\"] = [[ 5.49507194e-05]\n [ 2.75494327e-03]\n [ 5.50629536e-04]]\n" ] ], [ [ "**Expected Output**:\n\n<table> \n <tr>\n <td > **W1** </td> \n <td > [[ 1.63178673 -0.61919778 -0.53561312]\n [-1.08040999 0.85796626 -2.29409733]] </td> \n </tr> \n \n <tr>\n <td > **b1** </td> \n <td > [[ 1.75225313]\n [-0.75376553]] </td> \n </tr> \n \n <tr>\n <td > **W2** </td> \n <td > [[ 0.32648046 -0.25681174 1.46954931]\n [-2.05269934 -0.31497584 -0.37661299]\n [ 1.14121081 -1.09245036 -0.16498684]] </td> \n </tr> \n \n <tr>\n <td > **b2** </td> \n <td > [[-0.88529978]\n [ 0.03477238]\n [ 0.57537385]] </td> \n </tr> \n <tr>\n <td > **v[\"dW1\"]** </td> \n <td > [[-0.11006192 0.11447237 0.09015907]\n [ 0.05024943 0.09008559 -0.06837279]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db1\"]** </td> \n <td > [[-0.01228902]\n [-0.09357694]] </td> \n </tr> \n \n <tr>\n <td > **v[\"dW2\"]** </td> \n <td > [[-0.02678881 0.05303555 -0.06916608]\n [-0.03967535 -0.06871727 -0.08452056]\n [-0.06712461 -0.00126646 -0.11173103]] </td> \n </tr> \n \n <tr>\n <td > **v[\"db2\"]** </td> \n <td > [[ 0.02344157]\n [ 0.16598022]\n [ 
0.07420442]] </td> \n </tr> \n <tr>\n <td > **s[\"dW1\"]** </td> \n <td > [[ 0.00121136 0.00131039 0.00081287]\n [ 0.0002525 0.00081154 0.00046748]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db1\"]** </td> \n <td > [[ 1.51020075e-05]\n [ 8.75664434e-04]] </td> \n </tr> \n \n <tr>\n <td > **s[\"dW2\"]** </td> \n <td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]\n [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]\n [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td> \n </tr> \n \n <tr>\n <td > **s[\"db2\"]** </td> \n <td > [[ 5.49507194e-05]\n [ 2.75494327e-03]\n [ 5.50629536e-04]] </td> \n </tr>\n</table>\n", "_____no_output_____" ], [ "You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.", "_____no_output_____" ], [ "## 5 - Model with different optimization algorithms\n\nLets use the following \"moons\" dataset to test the different optimization methods. (The dataset is named \"moons\" because the data from each of the two classes looks a bit like a crescent-shaped moon.) ", "_____no_output_____" ] ], [ [ "train_X, train_Y = load_dataset()", "_____no_output_____" ] ], [ [ "We have already implemented a 3-layer neural network. You will train it with: \n- Mini-batch **Gradient Descent**: it will call your function:\n - `update_parameters_with_gd()`\n- Mini-batch **Momentum**: it will call your functions:\n - `initialize_velocity()` and `update_parameters_with_momentum()`\n- Mini-batch **Adam**: it will call your functions:\n - `initialize_adam()` and `update_parameters_with_adam()`", "_____no_output_____" ] ], [ [ "def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,\n beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):\n \"\"\"\n 3-layer neural network model which can be run in different optimizer modes.\n \n Arguments:\n X -- input data, of shape (2, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)\n layers_dims -- python list, containing the size of each layer\n learning_rate -- the learning rate, scalar.\n mini_batch_size -- the size of a mini batch\n beta -- Momentum hyperparameter\n beta1 -- Exponential decay hyperparameter for the past gradients estimates \n beta2 -- Exponential decay hyperparameter for the past squared gradients estimates \n epsilon -- hyperparameter preventing division by zero in Adam updates\n num_epochs -- number of epochs\n print_cost -- True to print the cost every 1000 epochs\n\n Returns:\n parameters -- python dictionary containing your updated parameters \n \"\"\"\n\n L = len(layers_dims) # number of layers in the neural networks\n costs = [] # to keep track of the cost\n t = 0 # initializing the counter required for Adam update\n seed = 10 # For grading purposes, so that your \"random\" minibatches are the same as ours\n \n # Initialize parameters\n parameters = initialize_parameters(layers_dims)\n\n # Initialize the optimizer\n if optimizer == \"gd\":\n pass # no initialization required for gradient descent\n elif optimizer == \"momentum\":\n v = initialize_velocity(parameters)\n elif optimizer == \"adam\":\n v, s = initialize_adam(parameters)\n \n # Optimization loop\n for i in range(num_epochs):\n \n # Define the random minibatches. 
We increment the seed to reshuffle the dataset differently after each epoch\n        seed = seed + 1\n        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)\n\n        for minibatch in minibatches:\n\n            # Select a minibatch\n            (minibatch_X, minibatch_Y) = minibatch\n\n            # Forward propagation\n            a3, caches = forward_propagation(minibatch_X, parameters)\n\n            # Compute cost\n            cost = compute_cost(a3, minibatch_Y)\n\n            # Backward propagation\n            grads = backward_propagation(minibatch_X, minibatch_Y, caches)\n\n            # Update parameters\n            if optimizer == \"gd\":\n                parameters = update_parameters_with_gd(parameters, grads, learning_rate)\n            elif optimizer == \"momentum\":\n                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)\n            elif optimizer == \"adam\":\n                t = t + 1 # Adam counter\n                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,\n                                                               t, learning_rate, beta1, beta2,  epsilon)\n        \n        # Print the cost every 1000 epochs\n        if print_cost and i % 1000 == 0:\n            print (\"Cost after epoch %i: %f\" %(i, cost))\n        if print_cost and i % 100 == 0:\n            costs.append(cost)\n                \n    # plot the cost\n    plt.plot(costs)\n    plt.ylabel('cost')\n    plt.xlabel('epochs (per 100)')\n    plt.title(\"Learning rate = \" + str(learning_rate))\n    plt.show()\n\n    return parameters", "_____no_output_____" ] ], [ [ "You will now run this 3 layer neural network with each of the 3 optimization methods.\n\n### 5.1 - Mini-batch Gradient descent\n\nRun the following code to see how the model does with mini-batch gradient descent.", "_____no_output_____" ] ], [ [ "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"gd\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Gradient Descent optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "Cost after epoch 0: 0.690736\nCost after epoch 1000: 0.685273\nCost after epoch 2000: 0.647072\nCost after epoch 3000: 0.619525\nCost after epoch 4000: 0.576584\nCost after epoch 5000: 0.607243\nCost after epoch 6000: 0.529403\nCost after epoch 7000: 0.460768\nCost after epoch 8000: 0.465586\nCost after epoch 9000: 0.464518\n" ] ], [ [ "### 5.2 - Mini-batch gradient descent with momentum\n\nRun the following code to see how the model does with momentum. 
Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.", "_____no_output_____" ] ], [ [ "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = \"momentum\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Momentum optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "Cost after epoch 0: 0.690741\nCost after epoch 1000: 0.685341\nCost after epoch 2000: 0.647145\nCost after epoch 3000: 0.619594\nCost after epoch 4000: 0.576665\nCost after epoch 5000: 0.607324\nCost after epoch 6000: 0.529476\nCost after epoch 7000: 0.460936\nCost after epoch 8000: 0.465780\nCost after epoch 9000: 0.464740\n" ] ], [ [ "### 5.3 - Mini-batch with Adam mode\n\nRun the following code to see how the model does with Adam.", "_____no_output_____" ] ], [ [ "# train 3-layer model\nlayers_dims = [train_X.shape[0], 5, 2, 1]\nparameters = model(train_X, train_Y, layers_dims, optimizer = \"adam\")\n\n# Predict\npredictions = predict(train_X, train_Y, parameters)\n\n# Plot decision boundary\nplt.title(\"Model with Adam optimization\")\naxes = plt.gca()\naxes.set_xlim([-1.5,2.5])\naxes.set_ylim([-1,1.5])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "Cost after epoch 0: 0.690552\nCost after epoch 1000: 0.185567\nCost after epoch 2000: 0.150852\nCost after epoch 3000: 0.074454\nCost after epoch 4000: 0.125936\nCost after epoch 5000: 0.104235\nCost after epoch 6000: 0.100552\nCost after epoch 7000: 0.031601\nCost after epoch 8000: 0.111709\nCost after epoch 9000: 0.197648\n" ] ], [ [ "### 5.4 - Summary\n\n<table> \n <tr>\n <td>\n **optimization method**\n </td>\n <td>\n **accuracy**\n </td>\n <td>\n **cost shape**\n </td>\n\n </tr>\n <tr>\n <td>\n Gradient descent\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n </tr>\n <tr>\n <td>\n Momentum\n </td>\n <td>\n 79.7%\n </td>\n <td>\n oscillations\n </td>\n </tr>\n <tr>\n <td>\n Adam\n </td>\n <td>\n 94%\n </td>\n <td>\n smoother\n </td>\n </tr>\n</table> \n\nMomentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.\n\nAdam, on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.\n\nSome advantages of Adam include:\n- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum) \n- Usually works well even with little tuning of hyperparameters (except $\\alpha$)", "_____no_output_____" ], [ "**References**:\n\n- Adam paper: https://arxiv.org/pdf/1412.6980.pdf", "_____no_output_____" ] ] ]
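To make the bias-correction step concrete, here is a minimal standalone sketch (separate from the graded functions above) that runs a few Adam updates on a single scalar parameter with the same default hyperparameters; the values of `w` and `grad` are purely illustrative.

```python
# Minimal, self-contained Adam sketch on one scalar parameter (illustrative values).
import numpy as np

beta1, beta2, lr, eps = 0.9, 0.999, 0.01, 1e-8
w, grad = 1.0, 0.5   # toy parameter and a constant toy gradient
v = s = 0.0          # first- and second-moment accumulators start at zero

for t in range(1, 4):
    v = beta1 * v + (1 - beta1) * grad        # moving average of the gradient
    s = beta2 * s + (1 - beta2) * grad ** 2   # moving average of the squared gradient
    v_hat = v / (1 - beta1 ** t)              # bias correction matters most at small t
    s_hat = s / (1 - beta2 ** t)
    w = w - lr * v_hat / (np.sqrt(s_hat) + eps)
    print(t, w, v_hat, s_hat)
```

Notice that at `t = 1` the corrected estimates give `v_hat = grad` and `s_hat = grad**2`, so the very first step has size roughly `lr` regardless of the raw gradient scale; without the correction, the zero-initialized moments would shrink the early steps dramatically.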
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0713d67a20d98f9e4ffb4993ceb785aad50424b
331,994
ipynb
Jupyter Notebook
Research Papers/YOLOv3_scripts.ipynb
Hira63S/DeepLearningResearch
b6e8298a88fbc81de06d8e202603a80af8bbdaa2
[ "MIT" ]
null
null
null
Research Papers/YOLOv3_scripts.ipynb
Hira63S/DeepLearningResearch
b6e8298a88fbc81de06d8e202603a80af8bbdaa2
[ "MIT" ]
null
null
null
Research Papers/YOLOv3_scripts.ipynb
Hira63S/DeepLearningResearch
b6e8298a88fbc81de06d8e202603a80af8bbdaa2
[ "MIT" ]
null
null
null
710.907923
158,036
0.945942
[ [ [ "## YOLOv3 - Functions\n\n changing the pipeline to functions to make the implementation easier\n", "_____no_output_____" ] ], [ [ "import os.path\n\nimport cv2\nimport numpy as np\nimport requests", "_____no_output_____" ], [ "yolo_config = 'yolov3.cfg'\nif not os.path.isfile(yolo_config):\n url = 'https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg'\n r = requests.get(url)\n with open(yolo_config, 'wb') as f:\n f.write(r.content)\n\n# Download YOLO net weights\n# We'll it from the YOLO author's website\nyolo_weights = 'yolov3.weights'\nif not os.path.isfile(yolo_weights):\n url = 'https://pjreddie.com/media/files/yolov3.weights'\n r = requests.get(url)\n with open(yolo_weights, 'wb') as f:\n f.write(r.content)", "_____no_output_____" ], [ "net = cv2.dnn.readNet(yolo_weights, yolo_config)\n", "_____no_output_____" ], [ "classes_file = 'coco.names'\nif not os.path.isfile(classes_file):\n url = 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names'\n r = requests.get(url)\n with open(classes_file, 'wb') as f:\n f.write(r.content)\n\n# load class names\nwith open(classes_file, 'r') as f:\n classes = [line.strip() for line in f.readlines()]", "_____no_output_____" ], [ "image_file = 'C:/Users/Billi/repos/Computer_Vision/OpenCV/bdd100k/seg/images/train/00d79c0a-23bea078.jpg'", "_____no_output_____" ], [ "image = cv2.imread(image_file)", "_____no_output_____" ], [ "cv2.imshow('img', image)\ncv2.waitKey(0)", "_____no_output_____" ], [ "def get_image(image):\n blob = cv2.dnn.blobFromImage(image, 1 / 255, (416, 416), (0, 0, 0), True, crop=False)\n return blob", "_____no_output_____" ], [ "def get_prediction(blob):\n \n # set as input to the net\n net.setInput(blob)\n \n # get network output layers\n layer_names = net.getLayerNames()\n output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n \n # inference\n # the network outputs multiple lists of anchor boxes\n # one for each detected class\n outs = net.forward(output_layers)\n \n return outs", "_____no_output_____" ] ], [ [ "After we get the network outputs, we have to pre-process the network outputs and apply non-max suppression over them to produce the final set of detected objects", "_____no_output_____" ] ], [ [ "def get_boxes(outs):\n \n class_ids = []\n confidences = []\n boxes = []\n \n for out in outs:\n # iterate over the anchor boxes for each class\n for detection in out:\n center_x = int(detection[0] * image.shape[1])\n center_y = int(detection[1] * image.shape[0])\n w, h = int(detection[2] * image.shape[1]), int(detection[3] * image.shape[0])\n x, y = center_x - w // 2, center_y - h // 2\n boxes.append([x, y, w, h])\n \n # confidence\n confidences.append(float(detection[4]))\n \n # class\n class_ids.append(np.argmax(detection[5:]))\n \n return boxes, confidences, class_ids", "_____no_output_____" ], [ "def get_ids(boxes, confidences):\n ids = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.75, nms_threshold=0.5)\n \n return ids", "_____no_output_____" ], [ "def colors(image):\n colors = np.random.uniform(0, 255, size=(len(classes), 3))\n\n # iterate over all boxes\n for i in ids:\n i = i[0]\n x, y, w, h = boxes[i]\n class_id = class_ids[i]\n\n color = colors[class_id]\n\n cv2.rectangle(img=image,\n pt1=(round(x), round(y)),\n pt2=(round(x + w), round(y + h)),\n color=color,\n thickness=3)\n\n cv2.putText(img=image,\n text=f\"{classes[class_id]}: {confidences[i]:.2f}\",\n org=(x - 10, y - 10),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=0.8,\n 
color=color,\n                thickness=2)\n    return image", "_____no_output_____" ], [ "image = cv2.imread(image_file)", "_____no_output_____" ], [ "blob = get_image(image)", "_____no_output_____" ], [ "outs = get_prediction(blob)\n", "_____no_output_____" ], [ "boxes, confidences, class_ids = get_boxes(outs)", "_____no_output_____" ], [ "ids = get_ids(boxes, confidences)", "_____no_output_____" ], [ "final = colors(image)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.imshow(image)", "_____no_output_____" ], [ "plt.imshow(final)", "_____no_output_____" ], [ "cv2.imshow('img', image)\ncv2.waitKey(0)", "_____no_output_____" ], [ "image = cv2.imread(image_file)\nblob = get_image(image)\nouts = get_prediction(blob)\nboxes, confidences, class_ids = get_boxes(outs)\nids = get_ids(boxes, confidences)", "_____no_output_____" ], [ "final = colors(image)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nplt.imshow(final)", "_____no_output_____" ] ] ]
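As a usage sketch, the corrected cells above can be run end to end as follows. Note that `colors()` as written reads `ids`, `boxes`, `class_ids` and `confidences` from the notebook's global scope, so the intermediate results must stay at top level; the BGR-to-RGB conversion is added because OpenCV loads images in BGR channel order while matplotlib expects RGB.

```python
import matplotlib.pyplot as plt

image = cv2.imread(image_file)
blob = get_image(image)                              # 416x416 normalized blob
outs = get_prediction(blob)                          # raw network outputs
boxes, confidences, class_ids = get_boxes(outs)      # decode anchor boxes
ids = get_ids(boxes, confidences)                    # non-max suppression
annotated = colors(image)                            # draw the surviving boxes

plt.imshow(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB))  # BGR -> RGB for display
plt.axis('off')
plt.show()
```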
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d07142d33f424b6beee94f0908ac9d0fdfb3d146
6,531
ipynb
Jupyter Notebook
tutorials/MD/02_Protein_Visualization.ipynb
bigginlab/OxCompBio
f9d1c9e6e2f51bb4e6bd7cbba83f33f5efef8726
[ "BSD-3-Clause" ]
19
2019-12-29T23:59:41.000Z
2022-03-21T09:45:18.000Z
tutorials/MD/remote/02_Protein_Visualization.ipynb
bigginlab/OxCompBio
f9d1c9e6e2f51bb4e6bd7cbba83f33f5efef8726
[ "BSD-3-Clause" ]
97
2019-12-11T12:19:50.000Z
2021-12-06T12:01:50.000Z
tutorials/MD/remote/02_Protein_Visualization.ipynb
bigginlab/OxCompBio
f9d1c9e6e2f51bb4e6bd7cbba83f33f5efef8726
[ "BSD-3-Clause" ]
11
2019-12-09T15:31:16.000Z
2022-03-27T11:21:32.000Z
40.81875
306
0.643393
[ [ [ "# <span style='color:darkred'> 2 Protein Visualization </span>\n***\n\nFor the purposes of this tutorial, we will use the HIV-1 protease structure (PDB ID: 1HSG). It is a homodimer with two chains of 99 residues each. Before starting to perform any simulations and data analysis, we need to observe and familiarize with the protein of interest.\n\nThere are various software packages for visualizing molecular systems, but here we will guide you through using two of those; NGLView and VMD:\n\n* [NGLView](http://nglviewer.org/#nglview): An IPython/Jupyter widget to interactively view molecular structures and trajectories.\n* [VMD](https://www.ks.uiuc.edu/Research/vmd/): VMD is a molecular visualization program for displaying, animating, and analyzing large biomolecular systems using 3-D graphics and built-in scripting.\n\nYou could either take your time to familiarize with both, or select which one you prefer to delve into.\nNGLView is great for looking at things directly within a jupyter notebook, but VMD can be a more powerful tool for visualizing, generating high quality images and videos, but also analysing simulation trajectories. \n", "_____no_output_____" ], [ "## <span style='color:darkred'> 2.0 Obtain the protein structure </span>\n\nThe first step is to obtain the crystal structure of the HIV-1 protease.\n\nStart your web-browser and go to the [protein data bank](https://www.rcsb.org/). Enter the pdb code 1HSG in the site search box at the top and hit the site search button. The protein should come up. Select download from the top right hand menu and save the .pdb file to the current working directory.", "_____no_output_____" ], [ "\n## <span style='color:darkred'> 2.1 VMD (optional) </span>\n\nYou can now open the pdb structure with VMD (the following file name might be uppercase depending on how you downloaded it):\n\n`% vmd 1hsg.pdb`\n\nYou should experiment with the menu system and try various representations of the protein such as `Trace`, `NewCartoon` and `Ribbons` for example.\n\nGo to `Graphics` and then `Graphical Representations` and from the `Drawing Method` drop-down list, select `Trace`. Similarly, you can explore other drawing methods.\n\n<span style='color:Blue'> **Questions** </span>\n\n* Can you find the indinavir drug? \n\n*Hint: At the `Graphical Representations` menu, click `Create Rep` and type \"all and not protein\" and hit Enter. Change the `Drawing Method` to `Licorice`.*\n\n* Give the protein the Trace representation and then make the polar residues in vdw format as an additional representation. Repeat with the hydrophobic residues. What do you notice?\n\n*Hint: Explore the `Selections` tab and the options provided as singlewords.*\n\n*Hint: To hide a representation, double-click on it. Double-click again if you want to make it reappear.*\n\nTake your time to explore the features of VMD and to observe the protein. Once you are happy, you can exit VMD, either by clicking on `File` and then `Quit` or by typing `quit` in the terminal box.\n\n***", "_____no_output_____" ], [ "## <span style='color:darkred'> 2.2 NGLView </span>", "_____no_output_____" ], [ "You have already been introduced to NGLView during the Python tutorial. 
You can now spend more time navigating through its features.", "_____no_output_____" ] ], [ [ "# Import NGLView\nimport nglview\n\n# Select as your protein the 1HSG pdb entry\nprotein_view = nglview.show_pdbid('1hsg')\nprotein_view.gui_style = 'ngl'\n\n#Uncomment the command below to add a hyperball representation of the crystal water oxygens in grey\n#protein_view.add_hyperball('HOH', color='grey', opacity=1.0)\n\n#Uncomment the command below to color the protein according to its secondary structure with opacity 0.6\n#protein_view.update_cartoon(color='sstruc', opacity=0.6)\n\n# Let's change the display a little bit\nprotein_view.parameters = dict(camera_type='orthographic', clip_dist=0)\n\n# Set the background colour to black\nprotein_view.background = 'black'\n\n# Call protein_view to visualise the trajectory\nprotein_view", "_____no_output_____" ] ], [ [ "<span style='color:Blue'> **Questions** </span>\n\n* When you load the structure, can you see the two subunits that form the dimer?\n\n\n* Can you locate the drug in the binding pocket?\n\n*Hint: Go to `View` and then `Full screen` to expand the viewing window.*\n\n* Can you hide all the other representations and view only the drug?\n\n*Hint: Use your mouse to rotate, translate and zoom in and out.*\n\n*Hint: You can hide/show a representation by clicking on the \"eye\" symbol on the right panel.*\n\n***\n\nExplore the [NGLView documentation](http://nglviewer.org/nglview/latest/api.html), and play around with different representations, selections, colors etc. Take as much time as you want in this step.\n\n***\n\n## <span style='color:darkred'> Next Step </span>\n\nYou can now open the `03_Running_an_MD_simulation.ipynb` notebook to set up and perform a Molecular Dynamics simulation of your protein.", "_____no_output_____" ] ] ]
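As one possible answer to the last question above, the sketch below clears every representation and shows the drug alone in licorice. `MK1` is, to the best of our knowledge, the ligand residue name for indinavir in the 1HSG entry; treat that selection string as an assumption and check it against the PDB entry if nothing appears.

```python
# Show only the bound drug; 'MK1' (indinavir's residue name) is assumed here
protein_view.clear_representations()
protein_view.add_representation('licorice', selection='MK1')
protein_view
```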
[ "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d07143341ab03613e76153d88dd42ab649e75356
13,314
ipynb
Jupyter Notebook
assignments2016/assignment1/features.ipynb
janlukasschroeder/Stanford-cs231n
0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1
[ "MIT" ]
null
null
null
assignments2016/assignment1/features.ipynb
janlukasschroeder/Stanford-cs231n
0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1
[ "MIT" ]
null
null
null
assignments2016/assignment1/features.ipynb
janlukasschroeder/Stanford-cs231n
0502fad608971f0ae4f44c5e5fd8cc062ddfc1f1
[ "MIT" ]
null
null
null
38.929825
347
0.588403
[ [ [ "# Image features exercise\n*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*\n\nWe have seen that we can achieve reasonable performance on an image classification task by training a linear classifier on the pixels of the input image. In this exercise we will show that we can improve our classification performance by training linear classifiers not on raw pixels but on features that are computed from the raw pixels.\n\nAll of your work for this exercise will be done in this notebook.", "_____no_output_____" ] ], [ [ "import random\nimport numpy as np\nfrom cs231n.data_utils import load_CIFAR10\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading extenrnal modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ] ], [ [ "## Load data\nSimilar to previous exercises, we will load CIFAR-10 data from disk.", "_____no_output_____" ] ], [ [ "from cs231n.features import color_histogram_hsv, hog_feature\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n \n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()", "_____no_output_____" ] ], [ [ "## Extract Features\nFor each image we will compute a Histogram of Oriented\nGradients (HOG) as well as a color histogram using the hue channel in HSV\ncolor space. We form our final feature vector for each image by concatenating\nthe HOG and color histogram feature vectors.\n\nRoughly speaking, HOG should capture the texture of the image while ignoring\ncolor information, and the color histogram represents the color of the input\nimage while ignoring texture. As a result, we expect that using both together\nought to work better than using either alone. Verifying this assumption would\nbe a good thing to try for the bonus section.\n\nThe `hog_feature` and `color_histogram_hsv` functions both operate on a single\nimage and return a feature vector for that image. 
The extract_features\nfunction takes a set of images and a list of feature functions and evaluates\neach feature function on each image, storing the results in a matrix where\neach column is the concatenation of all feature vectors for a single image.", "_____no_output_____" ] ], [ [ "from cs231n.features import *\n\nnum_color_bins = 10 # Number of bins in the color histogram\nfeature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]\nX_train_feats = extract_features(X_train, feature_fns, verbose=True)\nX_val_feats = extract_features(X_val, feature_fns)\nX_test_feats = extract_features(X_test, feature_fns)\n\n# Preprocessing: Subtract the mean feature\nmean_feat = np.mean(X_train_feats, axis=0, keepdims=True)\nX_train_feats -= mean_feat\nX_val_feats -= mean_feat\nX_test_feats -= mean_feat\n\n# Preprocessing: Divide by standard deviation. This ensures that each feature\n# has roughly the same scale.\nstd_feat = np.std(X_train_feats, axis=0, keepdims=True)\nX_train_feats /= std_feat\nX_val_feats /= std_feat\nX_test_feats /= std_feat\n\n# Preprocessing: Add a bias dimension\nX_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])\nX_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])\nX_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])", "_____no_output_____" ] ], [ [ "## Train SVM on features\nUsing the multiclass SVM code developed earlier in the assignment, train SVMs on top of the features extracted above; this should achieve better results than training SVMs directly on top of raw pixels.", "_____no_output_____" ] ], [ [ "# Use the validation set to tune the learning rate and regularization strength\n\nfrom cs231n.classifiers.linear_classifier import LinearSVM\n\nlearning_rates = [1e-9, 1e-8, 1e-7]\nregularization_strengths = [1e5, 1e6, 1e7]\n\nresults = {}\nbest_val = -1\nbest_svm = None\n\npass\n################################################################################\n# TODO: #\n# Use the validation set to set the learning rate and regularization strength. #\n# This should be identical to the validation that you did for the SVM; save #\n# the best trained classifer in best_svm. You might also want to play #\n# with different numbers of bins in the color histogram. If you are careful #\n# you should be able to get accuracy of near 0.44 on the validation set. #\n################################################################################\npass\n################################################################################\n# END OF YOUR CODE #\n################################################################################\n\n# Print out results.\nfor lr, reg in sorted(results):\n train_accuracy, val_accuracy = results[(lr, reg)]\n print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (\n lr, reg, train_accuracy, val_accuracy)\n \nprint 'best validation accuracy achieved during cross-validation: %f' % best_val", "_____no_output_____" ], [ "# Evaluate your trained SVM on the test set\ny_test_pred = best_svm.predict(X_test_feats)\ntest_accuracy = np.mean(y_test == y_test_pred)\nprint test_accuracy", "_____no_output_____" ], [ "# An important way to gain intuition about how an algorithm works is to\n# visualize the mistakes that it makes. In this visualization, we show examples\n# of images that are misclassified by our current system. 
The first column\n# shows images that our system labeled as \"plane\" but whose true label is\n# something other than \"plane\".\n\nexamples_per_class = 8\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nfor cls, cls_name in enumerate(classes):\n idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]\n idxs = np.random.choice(idxs, examples_per_class, replace=False)\n for i, idx in enumerate(idxs):\n plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)\n plt.imshow(X_test[idx].astype('uint8'))\n plt.axis('off')\n if i == 0:\n plt.title(cls_name)\nplt.show()", "_____no_output_____" ] ], [ [ "### Inline question 1:\nDescribe the misclassification results that you see. Do they make sense?", "_____no_output_____" ], [ "## Neural Network on image features\nEarlier in this assigment we saw that training a two-layer neural network on raw pixels achieved better classification performance than linear classifiers on raw pixels. In this notebook we have seen that linear classifiers on image features outperform linear classifiers on raw pixels. \n\nFor completeness, we should also try training a neural network on image features. This approach should outperform all previous approaches: you should easily be able to achieve over 55% classification accuracy on the test set; our best model achieves about 60% classification accuracy.", "_____no_output_____" ] ], [ [ "print X_train_feats.shape", "_____no_output_____" ], [ "from cs231n.classifiers.neural_net import TwoLayerNet\n\ninput_dim = X_train_feats.shape[1]\nhidden_dim = 500\nnum_classes = 10\n\nnet = TwoLayerNet(input_dim, hidden_dim, num_classes)\nbest_net = None\n\n################################################################################\n# TODO: Train a two-layer neural network on image features. You may want to #\n# cross-validate various parameters as in previous sections. Store your best #\n# model in the best_net variable. #\n################################################################################\npass\n################################################################################\n# END OF YOUR CODE #\n################################################################################", "_____no_output_____" ], [ "# Run your neural net classifier on the test set. You should be able to\n# get more than 55% accuracy.\n\ntest_acc = (net.predict(X_test_feats) == y_test).mean()\nprint test_acc", "_____no_output_____" ] ], [ [ "# Bonus: Design your own features!\n\nYou have seen that simple image features can improve classification performance. So far we have tried HOG and color histograms, but other types of features may be able to achieve even better classification performance.\n\nFor bonus points, design and implement a new type of feature and use it for image classification on CIFAR-10. Explain how your feature works and why you expect it to be useful for image classification. Implement it in this notebook, cross-validate any hyperparameters, and compare its performance to the HOG + Color histogram baseline.", "_____no_output_____" ], [ "# Bonus: Do something extra!\nUse the material and code we have presented in this assignment to do something interesting. Was there another question we should have asked? Did any cool ideas pop into your head as you were working on the assignment? This is your chance to show off!", "_____no_output_____" ] ] ]
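For reference, one possible way to fill in the SVM tuning TODO above is sketched below. It assumes the `LinearSVM` interface from earlier in the assignment (`train` and `predict`); the iteration count is an arbitrary illustrative choice, not a prescribed value.

```python
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        # num_iters=1500 is an illustrative choice; tune as needed
        svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg,
                  num_iters=1500, verbose=False)
        train_accuracy = np.mean(svm.predict(X_train_feats) == y_train)
        val_accuracy = np.mean(svm.predict(X_val_feats) == y_val)
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = svm
```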
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
d071451fa4dedcf69955139e9446e0ed7d86c1df
37,048
ipynb
Jupyter Notebook
introduction/02-nfc-overview.ipynb
abachma2/npre412
3f105a15edc07745f1dd65cd791777a01136ec23
[ "CC-BY-4.0" ]
null
null
null
introduction/02-nfc-overview.ipynb
abachma2/npre412
3f105a15edc07745f1dd65cd791777a01136ec23
[ "CC-BY-4.0" ]
null
null
null
introduction/02-nfc-overview.ipynb
abachma2/npre412
3f105a15edc07745f1dd65cd791777a01136ec23
[ "CC-BY-4.0" ]
null
null
null
47.865633
395
0.515439
[ [ [ "# Nuclear Fuel Cycle Overview\n\nThe nuclear fuel cycle is the technical and economic system traversed by nuclear fuel during the generation of nuclear power.\n\n## Learning Objectives\n\nBy the end of this lesson, you should be able to:\n\n- Categorize types of fission reactors by their fuels and coolants.\n- Summarize the history and key characteristics of reactor technology generations.\n- Weigh and compare advanced nuclear reactor types.\n- Name fuel cycle facilities and technologies that contribute to open and closed cycles.\n- Identify categories of nuclear fuel cycle strategies (open, closed, etc.)\n- Associate categories of fuel cycle with nations that implement them (USA, France, etc.)\n- Order the stages of such fuel cycle from mining to disposal, including reprocessing.\n- Identify the chemical and physical states of nuclear material passed between stages.\n", "_____no_output_____" ], [ "## Fission Reactor Types\n\nLet's see what you know already.\n\n[pollev.com/katyhuff](pollev.com/katyhuff)", "_____no_output_____" ] ], [ [ "from IPython.display import IFrame\nIFrame(\"https://embed.polleverywhere.com/free_text_polls/YWUBNMDynR0yeiu?controls=none&short_poll=true\", width=\"1000\", height=\"700\", frameBorder=\"0\")", "_____no_output_____" ], [ "from IPython.display import IFrame\nIFrame(\"https://embed.polleverywhere.com/free_text_polls/rhvKnG3a6nKaNdU?controls=none&short_poll=true\", width=\"1000\", height=\"700\", frameBorder=\"0\")", "_____no_output_____" ], [ "from IPython.display import IFrame\nIFrame(\"https://embed.polleverywhere.com/free_text_polls/zdDog6JmDGOQ1hJ?controls=none&short_poll=true\", width=\"1000\", height=\"700\", frameBorder=\"0\")", "_____no_output_____" ], [ "from IPython.display import IFrame\nIFrame(\"https://embed.polleverywhere.com/free_text_polls/YE5bPL6KecA5M3A?controls=none&short_poll=true\", width=\"1000\", height=\"700\", frameBorder=\"0\")", "_____no_output_____" ], [ "from IPython.display import IFrame\nIFrame(\"https://embed.polleverywhere.com/free_text_polls/BLojIJiKtPULpmw?controls=none&short_poll=true\", width=\"1000\", height=\"700\", frameBorder=\"0\")", "_____no_output_____" ] ], [ [ "A really good summary, with images, can be found [here](https://www.theiet.org/media/1275/nuclear-reactors.pdf).", "_____no_output_____" ] ], [ [ "\nfrom IPython.display import IFrame\n\nIFrame(\"https://www.theiet.org/media/1275/nuclear-reactors.pdf\", width=1000, height=700)", "_____no_output_____" ] ], [ [ "What about fusion?\n\nFusion devices can use Tritium, Deuterium, Protium, $^3He$, $^4He$.\n\n![https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/ITER_Tokamak_and_Plant_Systems_%282016%29_%2841783636452%29.jpg/1200px-ITER_Tokamak_and_Plant_Systems_%282016%29_%2841783636452%29.jpg](https://upload.wikimedia.org/wikipedia/commons/thumb/d/da/ITER_Tokamak_and_Plant_Systems_%282016%29_%2841783636452%29.jpg/1200px-ITER_Tokamak_and_Plant_Systems_%282016%29_%2841783636452%29.jpg)", "_____no_output_____" ], [ "# Fuel Cycle Strategies\n\n## Once through\n\nAlso known as an open fuel cycle, this is the fuel cycle currently underway in the United States. 
There is no reprocessing or recycling of any kind and all high level spent nuclear fuel is eventually destined for a geologic repository.", "_____no_output_____" ] ], [ [ "try:\n import graphviz \nexcept ImportError:\n !y | conda install graphviz \n !pip install graphviz", "/bin/sh: y: command not found\nCollecting package metadata (current_repodata.json): done\nSolving environment: failed with initial frozen solve. Retrying with flexible solve.\nSolving environment: failed with repodata from current_repodata.json, will retry with next repodata source.\nCollecting package metadata (repodata.json): done\nSolving environment: done\n\n## Package Plan ##\n\n environment location: /Users/huff/opt/anaconda3\n\n added / updated specs:\n - graphviz\n\n\nThe following packages will be downloaded:\n\n package | build\n ---------------------------|-----------------\n cairo-1.14.12 | hc4e6be7_4 860 KB\n conda-4.9.1 | py37hecd8cb5_0 2.9 MB\n fontconfig-2.13.0 | h5d5b041_1 202 KB\n fribidi-1.0.10 | haf1e3a3_0 63 KB\n graphite2-1.3.13 | h2098e52_0 80 KB\n graphviz-2.40.1 | hefbbd9a_2 6.3 MB\n harfbuzz-1.8.8 | hb8d4a28_0 414 KB\n pango-1.42.4 | h7e27002_1 456 KB\n pixman-0.40.0 | haf1e3a3_0 340 KB\n ------------------------------------------------------------\n Total: 11.5 MB\n\nThe following NEW packages will be INSTALLED:\n\n cairo pkgs/main/osx-64::cairo-1.14.12-hc4e6be7_4\n fontconfig pkgs/main/osx-64::fontconfig-2.13.0-h5d5b041_1\n fribidi pkgs/main/osx-64::fribidi-1.0.10-haf1e3a3_0\n graphite2 pkgs/main/osx-64::graphite2-1.3.13-h2098e52_0\n graphviz pkgs/main/osx-64::graphviz-2.40.1-hefbbd9a_2\n harfbuzz pkgs/main/osx-64::harfbuzz-1.8.8-hb8d4a28_0\n pango pkgs/main/osx-64::pango-1.42.4-h7e27002_1\n pixman pkgs/main/osx-64::pixman-0.40.0-haf1e3a3_0\n\nThe following packages will be UPDATED:\n\n conda 4.8.3-py37_0 --> 4.9.1-py37hecd8cb5_0\n\n\nProceed ([y]/n)? 
\n\nDownloading and Extracting Packages\nfribidi-1.0.10       | 63 KB     | ##################################### | 100% \ncairo-1.14.12        | 860 KB    | ##################################### | 100% \npixman-0.40.0        | 340 KB    | ##################################### | 100% \nfontconfig-2.13.0    | 202 KB    | ##################################### | 100% \ngraphviz-2.40.1      | 6.3 MB    | ##################################### | 100% \nharfbuzz-1.8.8       | 414 KB    | ##################################### | 100% \ngraphite2-1.3.13     | 80 KB     | ##################################### | 100% \npango-1.42.4         | 456 KB    | ##################################### | 100% \nconda-4.9.1          | 2.9 MB    | ##################################### | 100% \nPreparing transaction: done\nVerifying transaction: done\nExecuting transaction: done\nCollecting graphviz\n  Downloading graphviz-0.14.2-py2.py3-none-any.whl (18 kB)\nInstalling collected packages: graphviz\nSuccessfully installed graphviz-0.14.2\n\u001b[33mWARNING: You are using pip version 20.2.3; however, version 20.2.4 is available.\nYou should consider upgrading via the '/Users/huff/opt/anaconda3/bin/python -m pip install --upgrade pip' command.\u001b[0m\n" ], [ "from graphviz import Digraph\n\ndot = Digraph(comment='The Round Table')\ndot.node('A', 'Mine')\ndot.node('B', 'Mill')\ndot.node('C', 'Conversion')\ndot.node('D', 'Enrichment')\ndot.node('E', 'Fuel Fabrication')\ndot.node('F', 'Reactor')\ndot.node('G', 'Wet Storage')\ndot.node('H', 'Dry Storage')\ndot.node('I', 'Repository')\n\ndot.edge('A', 'B', label='Natural U Ore')\ndot.edge('B', 'C', label='U3O8')\ndot.edge('C', 'D', label='UF6')\ndot.edge('D', 'E', label='Enriched UF6')\ndot.edge('E', 'F', label='Fresh Fuel')\ndot.edge('F', 'G', label='Spent Fuel')\ndot.edge('G', 'H', label='Cooled SNF')\ndot.edge('H', 'I', label='Cooled SNF')\ndot", "_____no_output_____" ] ], [ [ "## Single Pass or Multi Pass Recycle\n\nTo add reprocessing or recycling, we insert a reprocessing facility into the cycle; all remaining high level nuclear waste is still eventually destined for a geologic repository.", "_____no_output_____" ] ], [ [ "dot.node('Z', 'Reprocessing')\ndot.edge('H', 'Z', label='Cooled SNF')\ndot.edge('G', 'Z', label='Cooled SNF')\ndot.edge('Z', 'E', label='Pu')\ndot.edge('Z', 'E', label='U')\ndot.edge('Z', 'I', label='FP')\n\n\n\ndot", "_____no_output_____" ] ], [ [ "## Wrap-up\n\n- Reactors of various types can be distinguished by neutron speed, coolant type, fuel type, size, and generation.\n- A once through fuel cycle, like that in the US, is identifiable by the immediate storage and ultimate disposal of all spent fuel.\n- Single and multi pass recycling schemes can be called \"closed\" fuel cycles. These involve separations and reprocessing.", "_____no_output_____" ], [ "## References\n\nThis section was developed to complement chapter 1 of [1]. In reference [2] you'll find a video concerning the front end of the fuel cycle that's pretty fun, 5 minutes, and actually quite accurate. \n\n[1] N. Tsoulfanidis, The Nuclear Fuel Cycle. La Grange Park, Illinois, USA: American Nuclear Society, 2013.\n\n[2] D. News. How Uranium Becomes Nuclear Fuel. https://www.youtube.com/watch?v=apODDbgFFPI. 2015.\n", "_____no_output_____" ] ] ]
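If you want to keep a diagram outside the notebook, the graphviz Python API can also write it to disk; this is a small optional sketch, and the output file name is arbitrary.

```python
# Render the closed-cycle diagram to a PNG next to the notebook;
# cleanup=True removes the intermediate DOT file after rendering
dot.render('fuel_cycle_closed', format='png', cleanup=True)

# The raw DOT source is also available for inspection
print(dot.source)
```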
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d071470aa0a2cfc70b88e22db737e4cadfcdbc4d
26,557
ipynb
Jupyter Notebook
assets/images/pandas/Python3_data_structure.ipynb
haochunchang/haochunchang.github.io
a04f7cf6968d5835e7cbea39a017e772eb8903da
[ "MIT" ]
null
null
null
assets/images/pandas/Python3_data_structure.ipynb
haochunchang/haochunchang.github.io
a04f7cf6968d5835e7cbea39a017e772eb8903da
[ "MIT" ]
null
null
null
assets/images/pandas/Python3_data_structure.ipynb
haochunchang/haochunchang.github.io
a04f7cf6968d5835e7cbea39a017e772eb8903da
[ "MIT" ]
3
2020-04-12T04:43:24.000Z
2021-11-30T02:01:02.000Z
23.925225
700
0.474489
[ [ [ "# Namespace\n* 為了將你寫的程式碼轉換成可以執行的程式,Python語言使用翻譯器(interpreter)來辨別你的程式碼,它會把你命名的變數分在不同的範圍內,這些範圍就叫作Namespace。\n* 每次創建一個變數時,翻譯器會在Namespace裡面記錄變數名稱和變數存的東西的記憶體位置。當有新的變數名稱時,翻譯器會先看新變數要存的值有沒有在紀錄裡,有的話就直接將新變數指定到該位置,例如:\n\n```python\na = 2\na = a + 1\nb = 2\n```\n\n\n<img src=\"https://cdn.programiz.com/sites/tutorial2program/files/aEquals2.jpg\" align=\"center\" height=400 width=500 >\n\n\n* 翻譯器認定的範圍大致上可以分成以下三類:\n * Built-in: 開啟翻譯器時就會有的,裡面就是有預設的函數和以下會介紹的資料結構。\n * Module: 要透過```import```來加入的函數和變數等等。\n * Function: 通常是使用者自己定義的變數和函數。\n\n<img src=\"https://cdn.programiz.com/sites/tutorial2program/files/nested-namespaces-python.jpg\" align=\"center\" height=300 width=300>\n", "_____no_output_____" ] ], [ [ "a = 2\nprint(id(a))\nb = 2\nprint(id(b))", "10919456\n10919456\n" ] ], [ [ "---\n# Data Structures\n* 資料結構就是各種用來存放資料的\"容器\",並且可以很有效率地操作資料。\n\n\n## [Sequence](#Sequence)\n\n> _immutable v.s. mutable_\n\n * [Lists](#Lists): mutable = 可以更改內容的\n * [Tuples](#Tuples): immuntable = 不可以更改內容的\n * [Range](#Range): immuntable\n \n#### [Dictionary](#Dictionary)\n#### [Set](#Set)\n---", "_____no_output_____" ], [ "## Sequence\n基本操作:\n\n* 檢查東西在不在sequence裡面\n\n```python\nx in seq\nx not in seq\n```\n\n* 把seq.頭尾相接(concatenation)\n\n```python\na + b # a, b要是同一種sequence\na * n #repeat n times\n```\n\n* 拿出sequence裡面的東西\n\n```python\nseq[i]\nseq[i:j] # 拿出第i到第j-1個\nseq[i:j:k] # 從第i~第j中,每k個拿出一個\n```\n\n* seq.長度、最大/最小、東西出現次數和東西的位置\n\n```python\nlen(seq), max(seq), min(seq)\nseq.index(x)\nseq.count(x)\n```\n\n* 更多在這裡:https://docs.python.org/3.6/library/stdtypes.html#typesseq-common", "_____no_output_____" ], [ "---\n### Lists\n``` list = [item1, item2, item3, ...] ```\n* 通常使用在存放一堆相同種類的資料,類似於array(在電腦眼中是一排連續的櫃子)。\n\n| 0號櫃子 | 1號櫃子 | 2號櫃子 | \n|:---|:----|:---|\n| ㄏ | ㄏ | ㄏ | \n\n* 實際長相: 電腦用array記錄每個項目的index,因此可以根據index找到各個項目的內容。[圖片來源](https://www.hackerrank.com/challenges/variable-sized-arrays/problem)\n\n<img src='images/variable_length_array.png' align=\"center\">\n\n| 0號櫃子 | 1號櫃子 | 2號櫃子 | \n|:---|:----|:---|\n| 紙條:\"東西在3樓\" | 紙條:\"沒東西\" | 紙條:\"東西在地下室\" | ", "_____no_output_____" ] ], [ [ "marvel_hero = [\"Steve Rogers\", \"Tony Stark\", \"Thor Odinson\"]\nprint(type(marvel_hero), marvel_hero)", "<class 'list'> ['Steve Rogers', 'Tony Stark', 'Thor Odinson']\n" ], [ "marvel_hero.append(\"Hulk\")\nmarvel_hero.insert(2, \"Bruce Banner\") # insert \"Bruce Banner\" into index 2\nprint(marvel_hero)", "['Steve Rogers', 'Tony Stark', 'Bruce Banner', 'Thor Odinson', 'Hulk']\n" ], [ "print(marvel_hero.pop()) # default: pop last item\nmarvel_hero[0] = \"Captain America\"\nprint(marvel_hero[1:-1])", "Hulk\n['Captain America', 'Tony Stark', 'Bruce Banner', 'Thor Odinson']\n" ] ], [ [ "##### List comprehension: 可以直接在list裡面處理東西,不用再另外寫for-loop(但是花費時間差不多)", "_____no_output_____" ] ], [ [ "%timeit list_hero = [i.lower() if i.startswith('T') else i.upper() for i in marvel_hero]\nprint(list_hero)", "1.45 µs ± 21.4 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n['BRUCE BANNER', 'CAPTAIN AMERICA', 'thor odinson', 'tony stark']\n" ], [ "%%timeit \nlist_hero = []\nfor i in marvel_hero:\n if i.startswith('T'):\n list_hero.append(i.lower())\n else:\n list_hero.append(i.upper())", "1.43 µs ± 22.8 ns per loop (mean ± std. dev. 
of 7 runs, 1000000 loops each)\n" ], [ "print(list_hero)", "['BRUCE BANNER', 'CAPTAIN AMERICA', 'thor odinson', 'tony stark']\n" ] ], [ [ "##### List可以排序,排序所花的時間和List長度成正比", "_____no_output_____" ] ], [ [ "marvel_hero.sort(reverse=False) # sort in-place\nmarvel_hero", "_____no_output_____" ], [ "list_hero_sorted = sorted(list_hero) # return a sorted list\nprint(list_hero_sorted)", "['BRUCE BANNER', 'CAPTAIN AMERICA', 'thor odinson', 'tony stark']\n" ] ], [ [ "##### **注意!如果要複製list,不能直接指定給新的變數,這樣只是幫同一個list重新命名而已**\n* 此行為被稱為 shallow copy", "_____no_output_____" ] ], [ [ "a = [1, 2, 3, 4, 5]\nb = a\nprint(id(a), id(b), id(a) == id(b))\n\nb[0] = 8888\nprint(a)", "140032783977672 140032783977672 True\n[8888, 2, 3, 4, 5]\n" ] ], [ [ "---\n### Tuples\n``` tuples = item1, item2, item3, ...```\n\n* 通常用來存放不同種類但是有關聯的資料。\n* ',' 決定是不是tuples,但是通常會用()來區分function call。\n\n例如:\n```python\ndef f(a, b=0): \n return a1*a2 + b\n\nf((87, 2)) # return 87*2 + 0\n```", "_____no_output_____" ] ], [ [ "love_iron = (\"Iron Man\", 3000)\ncap = \"Captain America\", \n\nprint(type(love_iron), love_iron)", "<class 'tuple'> ('Iron Man', 3000)\n" ], [ "print(love_iron + cap)\nprint(\"Does {} in the \\\"love_iron\\\" tuples?: {}\".format(\"Iron\", 'Iron' in love_iron))\nprint(\"Length of cap: {}\".format(len(cap)))", "('Iron Man', 3000, 'Captain America')\nDoes Iron in the \"love_iron\" tuples?: False\nLength of cap: 1\n" ], [ "max(love_iron)", "_____no_output_____" ] ], [ [ "* ```enumerate()``` 用在for-loop裡面可以依次輸出(i, 第i個項目),這樣就不用另外去記錄你跑到第幾個項目", "_____no_output_____" ] ], [ [ "for e in enumerate(love_iron + cap):\n print(e, type(e))", "(0, 'Iron Man') <class 'tuple'>\n(1, 3000) <class 'tuple'>\n(2, 'Captain America') <class 'tuple'>\n" ] ], [ [ "---\n### Range\n* 產生一串**整數**,通常用在for-loop裡面來記錄次數或是當作index。\n* 如果要產生一串浮點數,就要用numpy.arange()。\n\n```range(start, stop[, step])```", "_____no_output_____" ] ], [ [ "even_number = [x for x in range(0, 30, 2)]\nfor i in range(2, 10, 2):\n print(\"The {}th even number is {}\".format(i, even_number[i-1]))", "The 2th even number is 2\nThe 4th even number is 6\nThe 6th even number is 10\nThe 8th even number is 14\n" ] ], [ [ "---\n## Dictionary\n\n``` {key1:value1, key2:value2, key3:value3, ...}```\n\n* 用來存放具有對應關係的資料。\n* ```key``` 不能重複,而且必須是hashable\n * 兩個條件:\n 1. 建立之後不會更改數值(immutable)\n 2. 
可以和其他東西比較是不是一樣\n\n\n* 實際長相:hash table\n * 電腦透過一個叫hash的函數將key編碼成一串固定長度的數字,然後用這串數字當作value的index。\n * 理想上,每串數字都不重複,這樣可以讓查詢速度在平均上不受裡面的東西數量影響。\n\n\n* [圖片來源](https://en.wikipedia.org/wiki/Hash_table)\n\n<img src='images/hash-table.png' height=600 width=400 align=\"center\">", "_____no_output_____" ] ], [ [ "hero_id = {\"Steve Rogers\": 1, \n \"Tony Stark\": 666, \n \"Thor Odinson\": 999\n }\nhero_code = dict(zip(hero_id.keys(), [\"Captain America\", \"God of Thunder\", \"Iron Man\"]))\nprint(type(hero_code), hero_code)", "<class 'dict'> {'Thor Odinson': 'Captain America', 'Steve Rogers': 'God of Thunder', 'Tony Stark': 'Iron Man'}\n" ], [ "# dict[key]: 輸出相對應的value,如果key not in dict則輸出 KeyError\n# dict.get(key, default=None): 輸出相對應的value,如果key not in dict則輸出default\n\nhero_name = \"Steve Rogers\"\nprint(\"The codename of hero_id {} is {}\".format(hero_id.get(hero_name), hero_code[hero_name]))", "The codename of hero_id 1 is God of Thunder\n" ], [ "hero_id.update({\"Bruce Banner\": 87})\nprint(hero_id)", "{'Bruce Banner': 87, 'Thor Odinson': 999, 'Steve Rogers': 1, 'Tony Stark': 666}\n" ] ], [ [ "##### Dictionary View\n* 用來看dict裡面目前的值是什麼,可以放在for-loop一個一個處理:\n * dict.keys() 會輸出keys\n * dict.values() 會輸出values\n * dict.items() 會輸出(key, value)的 tuples\n> **注意!輸出的順序不一定代表加入dictionary的順序!**\n> 但是key和value的對應順序會一樣。\n* 如果想要固定輸出的順序,就要用list或是[collections.OrderedDict](https://docs.python.org/3.6/library/collections.html#collections.OrderedDict)。", "_____no_output_____" ] ], [ [ "print(hero_id.keys())\nprint(hero_id.values())\nprint(hero_id.items())", "dict_keys(['Bruce Banner', 'Thor Odinson', 'Steve Rogers', 'Tony Stark'])\ndict_values([87, 999, 1, 666])\ndict_items([('Bruce Banner', 87), ('Thor Odinson', 999), ('Steve Rogers', 1), ('Tony Stark', 666)])\n" ], [ "for name, code in hero_code.items():\n print(\"{} is {}\".format(name, code))", "Steve Rogers is Captain America\nThor Odinson is God of Thunder\nTony Stark is Iron Man\n" ] ], [ [ "---\n## Set\n``` set = {item1, item2, item3, ...}```\n\n* 用來存放不重複的資料,放進重複的資料也只會保存一個。\n* set可以更改內容,frozenset不能更改內容(immuntable)。\n* 可以操作的動作和數學上的set很像:([圖片來源](https://www.learnbyexample.org/python-set/))\n * union (```A | B```)\n * intersection (```A & B```)\n * difference (```A - B```)\n * symmetric difference (```A ^ B```)\n * subset (```A < B```)\n * super-set (```A > B```)\n \n<img src='images/set.png' height=600 width=400 align=\"center\">", "_____no_output_____" ] ], [ [ "set_example = {\"o\", 7, 7, 7, 7, 7, 7, 7}\nprint(type(set_example), set_example)", "<class 'set'> {'o', 7}\n" ], [ "A = set(\"Shin Shin ba fei jai\")\nB = set(\"Ni may yo may may\")\nprint(A)\nprint(B)", "{'f', 'j', 'b', 'a', 'S', ' ', 'e', 'i', 'n', 'h'}\n{'y', 'o', 'N', 'a', ' ', 'i', 'm'}\n" ], [ "A ^ B", "_____no_output_____" ], [ "a = set([[1],2,3,3,3,3])\na", "_____no_output_____" ] ], [ [ "-----\n# Numpy Array\n\n* 以分析資料來說,常見的形式就是矩陣(array),python有一個package叫做numpy。\n* 這個package可以讓我們更快更方便的處理矩陣。\n\n<img src=\"https://numpy.org/_static/numpy_logo.png\" align=\"center\">\n\n\n* Full documentation: https://docs.scipy.org/doc/\n* 快速教學:http://cs231n.github.io/python-numpy-tutorial/#numpy-arrays\n\n\n```bash\n# 安裝它只要一個步驟:\npip3 install numpy\n```", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "b = np.array([[1,2,3],[4,5,6]]) # 2D array\nprint(b)\nprint(b.shape, b.dtype) \nprint(b[0, 0], b[0, 1], b[1, 0]) # array[row_index, col_index]", "[[1 2 3]\n [4 5 6]]\n(2, 3) int64\n1 2 4\n" ] ], [ [ "##### 有許多方便建立矩陣的函數\n* 
像是全部數值為0、全部數值為1、Identity等等,更多函數都在[這裡](https://docs.scipy.org/doc/numpy/reference/routines.array-creation.html#routines-array-creation)。", "_____no_output_____" ] ], [ [ "z = np.zeros((8,7))\nz", "_____no_output_____" ] ], [ [ "##### 取出array中的row或是column\n* 可以跟list一樣用```1:5```的語法。\n* 也可以用boolean的方式選取部分的數值。", "_____no_output_____" ] ], [ [ "yeee = np.fromfile(join(\"data\", \"numpy_sample.txt\"), sep=' ')\nyeee = yeee.reshape((4,4)) \nprint(yeee)\nprint(yeee[:2,0]) # 取出第一個column的前兩個row\nprint(yeee[-1,:]) # 取出最後一個row", "[[ 1. 2. 3. 4.]\n [ 5. 6. 7. 8.]\n [ 9. 10. 11. 12.]\n [13. 14. 15. 16.]]\n[1. 5.]\n[13. 14. 15. 16.]\n" ], [ "yeee[yeee > 6]", "_____no_output_____" ] ], [ [ "##### 矩陣運算\n* ``` + - * / ``` 都是element-wise,也就是矩陣內每個數值各自獨立運算。\n* 矩陣相乘要用 ```dot```\n* 更多數學運算在[這裡](https://docs.scipy.org/doc/numpy/reference/routines.math.html)。", "_____no_output_____" ] ], [ [ "x = np.array([[1,2],[3,4]])\ny = np.array([[5,6],[7,8]])\n\nprint(x.dot(y) == np.dot(x, y))", "[[ True True]\n [ True True]]\n" ] ], [ [ "##### Broadcasting\n* 在numpy中,如果我們要對不同形狀的矩陣進行運算,我們可以直接在形狀相同的地方直接進行運算。", "_____no_output_____" ] ], [ [ "x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])\nv = np.array([1, 0, 1])\ny = x + v\n\nprint(y) # v 分別加在x的每一個row", "[[ 2 2 4]\n [ 5 5 7]\n [ 8 8 10]\n [11 11 13]]\n" ] ], [ [ "##### 如果想要使用兩個數值一樣的矩陣,必須注意shallow copy的問題。\n* 用```x[:]```拿出的東西是x的一個View,也就是說,看到的其實是x的data,更動View就是更動x。\n* 如果要真正複製一份出來,就要用```.copy()```。", "_____no_output_____" ] ], [ [ "x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])\n\nshallow = x[:]\ndeep = x.copy()\nshallow[0, 0] = 9487\ndeep[0, 0] = 5566\nprint(x)", "[[9487 2 3]\n [ 4 5 6]\n [ 7 8 9]\n [ 10 11 12]]\n" ] ], [ [ "----\n# Files\n* 知道如何在程式裡面存放資料後,還需要知道怎麼存檔和讀檔。\n* 每打開一個檔案時,需要跟電腦說哪一種模式:\n\n| Access mode | access_flag | detail |\n|:------------|:-----|:-------|\n| Read only | r | 從檔案開頭讀取 |\n| Write only | w | 寫進去的東西會從頭開始寫,如果本來有內容,會覆蓋過去 |\n| Append only | a | 寫進去的東西會接在檔案後面 |\n| Enhance | + | 讀+寫或是讀+append |\n\n\n* f是個負責處理該檔案的管理者,可以透過f對檔案做事情。\n * f記錄著現在讀取到檔案的哪裡。\n \n \n```python \nf = open(filepath, access_flag)\nf.close()\n```", "_____no_output_____" ] ], [ [ "from os.path import join\n\nwith open(join(\"data\", \"heyhey.txt\"), 'r', encoding='utf-8') as f:\n print(f.read() + '\\n')\n print(f.readlines())\n \n f.seek(0) # 回到檔案開頭\n readlines = f.readlines(1)\n print(readlines, type(readlines)) # 以 '\\n'為界線,一行一行讀\n single_line = f.readline()\n print(single_line, type(single_line)) # 一次只讀一行\n print()\n \n # 也可以放進for-loop 一行一行讀\n for i, line in enumerate(f):\n print(\"Line: {}: {}\".format(i, line))\n break", "天之道,損有餘而補不足,是故虛勝實,不足勝有餘。其意博,其理奧,其趣深。\n天地之像分,陰陽之侯烈,變化之由表,死生之兆章。\n不謀而遺跡自同,勿約而幽明斯契。\n稽其言有微,驗之事不忒。\n誠可謂至道之宗,奉生之始矣。\n假若天機迅發,妙識玄通。\n成謀雖屬乎生知,標格亦資於治訓。\n未嘗有行不由送,出不由產者亦。\n然刻意研精,探微索隱;或識契真要,則目牛無全。故動則有成,猶鬼神幽贊,而命世奇傑,時時間出焉。\n五藏六府之精氣,皆上注於目而為之精。\n精之案為眼,骨之精為瞳子;筋之精為黑眼,血之精為力絡。\n其案氣之精為白眼,肌肉之精為約束。\n裹擷筋骨血氣之精,而與脈並為系。\n\n[]\n['天之道,損有餘而補不足,是故虛勝實,不足勝有餘。其意博,其理奧,其趣深。\\n'] <class 'list'>\n天地之像分,陰陽之侯烈,變化之由表,死生之兆章。\n <class 'str'>\n\nLine: 0: 不謀而遺跡自同,勿約而幽明斯契。\n\n" ], [ "with open(join(\"data\", \"test.txt\"), 'w+', encoding='utf-8') as f:\n f.write(\"Shin Shin ba fei jai\")\n f.seek(0)\n print(f.read())", "Shin Shin ba fei jai\n" ] ], [ [ "----\n### Reference\n* 如果想看更多python相關的教學,網路上有更多詳細的資源喔。\n\n\n* 一些Python的教學資源:\n * https://www.programiz.com/python-programming\n * https://www.tutorialspoint.com/python3\n * 有很多範例:https://www.learnbyexample.org/python-introduction/\n\n\n* 官方文件:https://docs.python.org/3.6/library/\n * 詳細python設計FAQ:https://docs.python.org/3.6/faq/design.html\n\n\n* 
[ [ "----\n### Reference\n* If you want to see more Python tutorials, there are plenty of detailed resources online.\n\n\n* Some Python learning resources:\n * https://www.programiz.com/python-programming\n * https://www.tutorialspoint.com/python3\n * Lots of worked examples: https://www.learnbyexample.org/python-introduction/\n\n\n* Official documentation: https://docs.python.org/3.6/library/\n * A detailed FAQ on Python's design: https://docs.python.org/3.6/faq/design.html\n\n\n* Build the habit of writing good code: https://www.python.org/dev/peps/pep-0008/", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0715207be8e6ccd3c4018c84818b719f2910e62
37,073
ipynb
Jupyter Notebook
scenario_1/021-fcubt_review.ipynb
StevenGolovkine/fcubt
fdf7c7c11c9e94733c50ecb90a641868394fd347
[ "MIT" ]
1
2021-08-04T11:47:42.000Z
2021-08-04T11:47:42.000Z
scenario_1/021-fcubt_review.ipynb
StevenGolovkine/fcubt
fdf7c7c11c9e94733c50ecb90a641868394fd347
[ "MIT" ]
null
null
null
scenario_1/021-fcubt_review.ipynb
StevenGolovkine/fcubt
fdf7c7c11c9e94733c50ecb90a641868394fd347
[ "MIT" ]
null
null
null
165.504464
31,884
0.899846
[ [ [ "# Perform fCUBT on the data", "_____no_output_____" ] ], [ [ "# Load packages\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pickle\n\nfrom FDApy.clustering.fcubt import Node, FCUBT\nfrom FDApy.representation.functional_data import DenseFunctionalData\nfrom FDApy.preprocessing.dim_reduction.fpca import UFPCA\n\nfrom matplotlib import colors as mcolors\nCOLORS = ['#377eb8', '#ff7f00', '#4daf4a',\n '#f781bf', '#a65628', '#984ea3',\n '#999999', '#e41a1c', '#dede00']\n\n#matplotlib.use(\"pgf\")\nmatplotlib.rcParams.update({\n \"pgf.texsystem\": \"pdflatex\",\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n})", "_____no_output_____" ], [ "# Load data\nwith open('./data/scenario_1_review.pkl', 'rb') as f:\n data_fd = pickle.load(f)\nwith open('./data/labels_review.pkl', 'rb') as f:\n labels = pickle.load(f)", "_____no_output_____" ], [ "# Do UFPCA on the data\nfpca = UFPCA(n_components=0.99)\nfpca.fit(data_fd, method='GAM')\n \n# Compute scores\nsimu_proj = fpca.transform(data_fd, method='NumInt')", "_____no_output_____" ], [ "plt.scatter(simu_proj[:, 0], simu_proj[:, 1], c=labels)\nplt.show()", "_____no_output_____" ], [ "# Build the tree\nroot_node = Node(data_fd, is_root=True)\nfcubt = FCUBT(root_node=root_node)", "_____no_output_____" ], [ "# Growing\nfcubt.grow(n_components=0.95, min_size=10)", "_____no_output_____" ], [ "fcubt.mapping_grow", "_____no_output_____" ], [ "# Joining\nfcubt.join(n_components=0.95)", "_____no_output_____" ], [ "fcubt.mapping_join", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0715f4f4b90ccadfe00db476b26c2d39a8c7b25
8,684
ipynb
Jupyter Notebook
notebooks/retired/nbm_grid_match_threshold.ipynb
m-wessler/nbm-verify
e7bbbb6bf56c727e777e5119bff8bfc65a9a1d94
[ "MIT" ]
null
null
null
notebooks/retired/nbm_grid_match_threshold.ipynb
m-wessler/nbm-verify
e7bbbb6bf56c727e777e5119bff8bfc65a9a1d94
[ "MIT" ]
null
null
null
notebooks/retired/nbm_grid_match_threshold.ipynb
m-wessler/nbm-verify
e7bbbb6bf56c727e777e5119bff8bfc65a9a1d94
[ "MIT" ]
null
null
null
35.590164
147
0.471442
[ [ [ "import os, gc\nimport pygrib\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport multiprocessing as mp\n\nfrom glob import glob\nfrom functools import partial\nfrom datetime import datetime, timedelta\n\nos.environ['OMP_NUM_THREADS'] = '1'\n\nnbm_dir = '/scratch/general/lustre/u1070830/nbm/'\nurma_dir = '/scratch/general/lustre/u1070830/urma/'\ntmp_dir = '/scratch/general/lustre/u1070830/tmp/'\nos.makedirs(tmp_dir, exist_ok=True)\n\nnbm_shape = (1051, 1132)", "_____no_output_____" ], [ "def unpack_fhr(nbm_file, xthreshold, xinterval, returned=False):\n \n # WE NEED TO MATCH URMA IN HERE IF WE CAN! \n \n try:\n with pygrib.open(nbm_file) as grb:\n\n msgs = grb.read()\n if len(msgs) > 0:\n\n _init = nbm_file.split('/')[-2:]\n init = datetime.strptime(\n _init[0] + _init[1].split('.')[1][1:-1], \n '%Y%m%d%H')\n\n if init.hour % 6 != 0:\n init -= timedelta(hours=1)\n\n lats, lons = grb.message(1).latlons()\n\n valid = datetime.strptime(\n str(msgs[0].validityDate) + '%02d'%msgs[0].validityTime, \n '%Y%m%d%H%M')\n\n step = valid - init\n lead = int(step.days*24 + step.seconds/3600)\n\n tmpfile = tmp_dir + '%02dprobX%s_%s_f%03d'%(xinterval, str(xthreshold).replace('.', 'p'), init.strftime('%Y%m%d%H'), lead)\n\n if not os.path.isfile(tmpfile + '.npy'):\n print(nbm_file.split('/')[-2:])\n\n for msg in msgs:\n\n if 'Probability of event above upper limit' in str(msg):\n\n interval = msg['stepRange'].split('-')\n interval = int(interval[1]) - int(interval[0])\n\n threshold = msg.upperLimit\n\n if ((threshold == xthreshold)&(interval == xinterval)):\n\n returned = True\n agg_data = np.array((init, valid, lead, msg.values), dtype=object)\n np.save(tmpfile, agg_data, allow_pickle=True)\n return agg_data\n\n if not returned:\n\n agg_data = np.array((init, valid, lead, np.full(nbm_shape, fill_value=np.nan)), dtype=object)\n np.save(tmpfile, agg_data, allow_pickle=True)\n return agg_data\n\n else:\n print(nbm_file.split('/')[-2:], 'from file')\n return np.load(tmpfile + '.npy', allow_pickle=True)\n\n else:\n print('%s: No grib messages'%nbm_file.split('/')[-2:])\n except:\n pass\n \n gc.collect()", "_____no_output_____" ], [ "# Pass data label to the extractor to pull out the variable we care about\n# Do these one at a time and save out the xarray to netcdf to compare w/ URMA\nextract_threshold = 0.254\nextract_interval = 24\ndata_label = 'probx_%s_%02dh'%(str(extract_threshold).replace('.', 'p'), extract_interval)\n\n# Build a list of inits\ninits = pd.date_range(\n datetime(2020, 6, 1, 0), \n datetime(2020, 6, 10, 23), \n freq='6H')\n\noutfile = '../scripts/' + data_label + '.%s_%s.WR.nc'%(\n inits[0].strftime('%Y%m%d%H'), \n inits[-1].strftime('%Y%m%d%H'))\n\nos.remove(outfile)\n\nif not os.path.isfile(outfile):\n\n nbm_flist_agg = []\n for init in inits:\n\n try:\n nbm_flist = sorted(glob(nbm_dir + init.strftime('%Y%m%d') + '/*t%02dz*'%init.hour))\n nbm_flist[0]\n\n except:\n nbm_flist = sorted(glob(nbm_dir + init.strftime('%Y%m%d') + '/*t%02dz*'%(init+timedelta(hours=1)).hour))\n\n nbm_flist = [f for f in nbm_flist if 'idx' not in f]\n\n if len(nbm_flist) > 0:\n nbm_flist_agg.append(nbm_flist)\n\n nbm_flist_agg = np.hstack(nbm_flist_agg)\n\n with pygrib.open(nbm_flist_agg[0]) as sample:\n lat, lon = sample.message(1).latlons()\n \n unpack_fhr_mp = partial(unpack_fhr, xinterval=extract_interval, xthreshold=extract_threshold)\n\n # 128 workers ~ 1.2GB RAM/worker\n workers = 128\n with mp.get_context('fork').Pool(workers) as p:\n returns = p.map(unpack_fhr_mp, nbm_flist_agg, 
chunksize=1)\n p.close()\n p.join()\n\n returns = np.array([r for r in returns if r is not None], dtype=object)\n init = returns[:, 0].astype(np.datetime64)\n valid = returns[:, 1].astype(np.datetime64).reshape(len(np.unique(init)), -1)\n lead = returns[:, 2].astype(np.int16).reshape(len(np.unique(init)), -1)\n data = np.array([r for r in returns[:, 3]], dtype=np.int8).reshape(len(np.unique(init)), -1, nbm_shape[0], nbm_shape[1])\n\n valid = xr.DataArray(valid, name='valid', dims=('init', 'lead'), coords={'init':np.unique(init), 'lead':np.unique(lead)})\n data = xr.DataArray(data, name=data_label, dims=('init', 'lead', 'y', 'x'), coords={'init':np.unique(init), 'lead':np.unique(lead)})\n data = xr.merge([data, valid])\n\n data['lat'] = xr.DataArray(lat, dims=('y', 'x'))\n data['lon'] = xr.DataArray(lon, dims=('y', 'x'))\n data.set_coords(['lat', 'lon'])\n \n data.to_netcdf(outfile)\n\nelse:\n data = xr.open_dataset(outfile)\n \ndata", "_____no_output_____" ], [ "valid_unique = np.unique([pd.to_datetime(t).strftime('%Y%m%d%H') for t in data['valid'].values])\nurma_flist = np.hstack([[f for f in glob(urma_dir + '*%s*.WR.grib2'%v) if 'idx' not in f] for v in valid_unique])\nprint(urma_flist[:5])", "_____no_output_____" ], [ "def open_dataset(f, cfengine='pynio'):\n \n ds = xr.open_dataset(f, engine=cfengine)\n ds['valid'] = datetime.strptime(f.split('/')[-1].split('.')[1], '%Y%m%d%H')\n \n return ds\n\nwith mp.get_context('fork').Pool(int(len(urma_flist)/2)) as p:\n urma = p.map(open_dataset, urma_flist)\n p.close()\n p.join()\n\nurma = xr.concat(urma, dim='valid').rename({'APCP_P8_L1_GLC0_acc':'apcp6h', \n 'xgrid_0':'x', 'ygrid_0':'y',\n 'gridlat_0':'lat', 'gridlon_0':'lon'})\nurma", "_____no_output_____" ] ] ]
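[ [ "Once the NBM probabilities and the URMA analyses are matched onto a common grid (the step flagged in the comment inside unpack_fhr), a standard verification measure is the Brier score. A minimal sketch, assuming two already-aligned arrays (the argument names are placeholders) and assuming the ProbX field is stored in percent:", "_____no_output_____" ] ], [ [ "def brier_score(prob_forecast, obs_precip, threshold=0.254):\n # mean squared error of the exceedance probability against the binary observation\n p = np.asarray(prob_forecast, dtype=float) / 100.0 # assumed percent units\n o = (np.asarray(obs_precip, dtype=float) > threshold).astype(float)\n ok = np.isfinite(p) & np.isfinite(o)\n return np.mean((p[ok] - o[ok]) ** 2)", "_____no_output_____" ] ] ]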
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0716a95681cb2f7a292d8acccf1b3e1bb38bfa3
29,644
ipynb
Jupyter Notebook
datashader-work/datashader-examples/topics/nyc_taxi.ipynb
ventureBorbot/Data-Analysis
a44122aeb489fd97488c105b951c06d01d2db894
[ "MIT" ]
4,358
2017-12-29T17:56:07.000Z
2022-03-30T15:14:57.000Z
datashader-work/datashader-examples/topics/nyc_taxi.ipynb
ventureBorbot/Data-Analysis
a44122aeb489fd97488c105b951c06d01d2db894
[ "MIT" ]
61
2018-01-18T17:50:46.000Z
2022-03-09T20:16:01.000Z
datashader-work/datashader-examples/topics/nyc_taxi.ipynb
ventureBorbot/Data-Analysis
a44122aeb489fd97488c105b951c06d01d2db894
[ "MIT" ]
3,689
2017-12-29T17:57:36.000Z
2022-03-29T12:26:03.000Z
62.408421
1,427
0.694036
[ [ [ "## Plotting very large datasets meaningfully, using `datashader`\n\nThere are a variety of approaches for plotting large datasets, but most of them are very unsatisfactory. Here we first show some of the issues, then demonstrate how the `datashader` library helps make large datasets truly practical. \n\nWe'll use part of the well-studied [NYC Taxi trip database](http://www.nyc.gov/html/tlc/html/about/trip_record_data.shtml), with the locations of all NYC taxi pickups and dropoffs from the month of January 2015. Although we know what the data is, let's approach it as if we are doing data mining, and see what it takes to understand the dataset from scratch.", "_____no_output_____" ], [ "### Load NYC Taxi data \n\n(takes 10-20 seconds, since it's in the inefficient but widely supported CSV file format...)", "_____no_output_____" ] ], [ [ "import pandas as pd\n%time df = pd.read_csv('../data/nyc_taxi.csv',usecols= \\\n ['pickup_x', 'pickup_y', 'dropoff_x','dropoff_y', 'passenger_count','tpep_pickup_datetime'])\ndf.tail()", "_____no_output_____" ] ], [ [ "As you can see, this file contains about 12 million pickup and dropoff locations (in Web Mercator coordinates), with passenger counts.\n\n### Define a simple plot", "_____no_output_____" ] ], [ [ "from bokeh.models import BoxZoomTool\nfrom bokeh.plotting import figure, output_notebook, show\n\noutput_notebook()\n\nNYC = x_range, y_range = ((-8242000,-8210000), (4965000,4990000))\n\nplot_width = int(750)\nplot_height = int(plot_width//1.2)\n\ndef base_plot(tools='pan,wheel_zoom,reset',plot_width=plot_width, plot_height=plot_height, **plot_args):\n p = figure(tools=tools, plot_width=plot_width, plot_height=plot_height,\n x_range=x_range, y_range=y_range, outline_line_color=None,\n min_border=0, min_border_left=0, min_border_right=0,\n min_border_top=0, min_border_bottom=0, **plot_args)\n \n p.axis.visible = False\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n \n p.add_tools(BoxZoomTool(match_aspect=True))\n \n return p\n \noptions = dict(line_color=None, fill_color='blue', size=5)", "_____no_output_____" ] ], [ [ "### 1000-point scatterplot: undersampling\n\nAny plotting program should be able to handle a plot of 1000 datapoints. Here the points are initially overplotting each other, but if you hit the Reset button (top right of plot) to zoom in a bit, nearly all of them should be clearly visible in the following Bokeh plot of a random 1000-point sample. If you know what to look for, you can even see the outline of Manhattan Island and Central Park from the pattern of dots. We've included geographic map data here to help get you situated, though for a genuine data mining task in an abstract data space you might not have any such landmarks. In any case, because this plot is discarding 99.99% of the data, it reveals very little of what might be contained in the dataset, a problem called *undersampling*.", "_____no_output_____" ] ], [ [ "%%time\nfrom bokeh.tile_providers import STAMEN_TERRAIN\n\nsamples = df.sample(n=1000)\np = base_plot()\np.add_tile(STAMEN_TERRAIN)\np.circle(x=samples['dropoff_x'], y=samples['dropoff_y'], **options)\nshow(p)", "_____no_output_____" ] ], [ [ "### 10,000-point scatterplot: overplotting\n\nWe can of course plot more points to reduce the amount of undersampling. 
However, even if we only try to plot 0.1% of the data, ignoring the other 99.9%, we will find major problems with *overplotting*, such that the true density of dropoffs in central Manhattan is impossible to see due to occlusion:", "_____no_output_____" ] ], [ [ "%%time\nsamples = df.sample(n=10000)\np = base_plot()\n\np.circle(x=samples['dropoff_x'], y=samples['dropoff_y'], **options)\nshow(p)", "_____no_output_____" ] ], [ [ "Overplotting is reduced if you zoom in on a particular region (may need to click to enable the wheel-zoom tool in the upper right of the plot first, then use the scroll wheel). However, then the problem switches back to serious undersampling, as the too-sparsely sampled datapoints get revealed for zoomed-in regions, even though much more data is available.\n\n### 100,000-point scatterplot: saturation\n\nIf you make the dot size smaller, you can reduce the overplotting that occurs when you try to combat undersampling. Even so, with enough opaque data points, overplotting will be unavoidable in popular dropoff locations. So you can then adjust the alpha (opacity) parameter of most plotting programs, so that multiple points need to overlap before full color saturation is achieved. With enough data, such a plot can approximate the probability density function for dropoffs, showing where dropoffs were most common:", "_____no_output_____", "_____no_output_____" ], [ "```python\n%%time\noptions = dict(line_color=None, fill_color='blue', size=1, alpha=0.1)\nsamples = df.sample(n=100000)\np = base_plot(webgl=True)\np.circle(x=samples['dropoff_x'], y=samples['dropoff_y'], **options)\nshow(p)\n```", "_____no_output_____" ], [ "<img src=\"../assets/images/nyc_taxi_100k.png\">", "_____no_output_____" ], [ "[*Here we've shown static output as a PNG rather than a live Bokeh plot, to reduce the file size for distributing full notebooks and because some browsers will have trouble with plots this large. The above cell can be converted into code and executed to get the full interactive plot.*]\n\nHowever, it's very tricky to set the size and alpha parameters. How do we know if certain regions are saturating, unable to show peaks in dropoff density? Here we've manually set the alpha to show a clear structure of streets and blocks, as one would intuitively expect to see, but the density of dropoffs still seems approximately the same on nearly all Manhattan streets (just wider in some locations), which is unlikely to be true. We can of course reduce the alpha value to reduce saturation further, but there's no way to tell when it's been set correctly, and it's already low enough that nothing other than Manhattan and La Guardia is showing up at all. Plus, this alpha value will only work reasonably well at the one zoom level shown. Try zooming in (may need to enable the wheel zoom tool in the upper right) to see that at higher zooms, there is less overlap between dropoff locations, so that the points *all* start to become transparent due to lack of overlap. Yet without setting the size and alpha to a low value in the first place, the structure is invisible when zoomed out, due to overplotting. Thus even though Bokeh provides rich support for interactively revealing structure by zooming, it is of limited utility for large data; either the data is invisible when zoomed in, or there's no large-scale structure when zoomed out, which is necessary to indicate where zooming would be informative.\n\nMoreover, we're still ignoring 99% of the data. 
Many plotting programs will have trouble with plots even this large, but Bokeh can handle 100-200,000 points in most browsers. Here we've enabled Bokeh's WebGL support, which gives smoother zooming behavior, but the non-WebGL mode also works well. Still, for such large sizes the plots become slow due to the large HTML file sizes involved, because each of the data points are encoded as text in the web page, and for even larger samples the browser will fail to render the page at all. \n\n\n### 10-million-point datashaded plots: auto-ranging, but limited dynamic range\n\nTo let us work with truly large datasets without discarding most of the data, we can take an entirely different approach. Instead of using a Bokeh scatterplot, which encodes every point into JSON and stores it in the HTML file read by the browser, we can use the [datashader](https://github.com/bokeh/datashader) library to render the entire dataset into a pixel buffer in a separate Python process, and then provide a fixed-size image to the browser containing only the data currently visible. This approach decouples the data processing from the visualization. The data processing is then limited only by the computational power available, while the visualization has much more stringent constraints determined by your display device (a web browser and your particular monitor, in this case). This approach works particularly well when your data is in a far-off server, but it is also useful whenever your dataset is larger than your display device can render easily.\n\nBecause the number of points involved is no longer a limiting factor, you can now use the entire dataset (including the full 150 million trips that have been made public, if you download that data separately). Most importantly, because datashader allows computation on the intermediate stages of plotting, you can easily define operations like auto-ranging (which is on by default), so that we can be sure there is no overplotting or saturation and no need to set parameters like alpha.\n\nThe steps involved in datashading are (1) create a Canvas object with the shape of the eventual plot (i.e. having one storage bin for collecting points, per final pixel), (2) aggregating all points into that set of bins, incrementally counting them, and (3) mapping the resulting counts into a visible color from a specified range to make an image:", "_____no_output_____" ] ], [ [ "import datashader as ds\nfrom datashader import transfer_functions as tf\nfrom datashader.colors import Greys9\nGreys9_r = list(reversed(Greys9))[:-2]", "_____no_output_____" ], [ "%%time\ncvs = ds.Canvas(plot_width=plot_width, plot_height=plot_height, x_range=x_range, y_range=y_range)\nagg = cvs.points(df, 'dropoff_x', 'dropoff_y', ds.count('passenger_count'))\nimg = tf.shade(agg, cmap=[\"white\", 'darkblue'], how='linear')", "_____no_output_____" ] ], [ [ "The resulting image is similar to the 100,000-point Bokeh plot above, but (a) makes use of all 12 million datapoints, (b) is computed in only a tiny fraction of the time, (c) does not require any magic-number parameters like size and alpha, and (d) automatically ensures that there is no saturation or overplotting:", "_____no_output_____" ] ], [ [ "img", "_____no_output_____" ] ], [ [ "This plot renders the count at every pixel as a color from the specified range (here from white to dark blue), mapped linearly. 
If your display device were linear, and the data were distributed evenly across this color range, then the result of such linear, auto-ranged processing would be an effective, parameter-free way to visualize your dataset.\n\nHowever, real display devices are not typically linear, and more importantly, real data is rarely distributed evenly. Here, it is clear that there are \"hotspots\" in dropoffs, with a very high count for areas around Penn Station and Madison Square Garden, relatively low counts for the rest of Manhattan's streets, and apparently no dropoffs anywhere else but La Guardia airport. NYC taxis definitely cover a larger geographic range than this, so what is the problem? To see, let's look at the histogram of counts for the above image:", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef histogram(x,colors=None):\n hist,edges = np.histogram(x, bins=100)\n p = figure(y_axis_label=\"Pixels\",\n tools='', height=130, outline_line_color=None,\n min_border=0, min_border_left=0, min_border_right=0,\n min_border_top=0, min_border_bottom=0)\n p.quad(top=hist[1:], bottom=0, left=edges[1:-1], right=edges[2:])\n print(\"min: {}, max: {}\".format(np.min(x),np.max(x)))\n show(p)", "_____no_output_____" ], [ "histogram(agg.values)", "_____no_output_____" ] ], [ [ "Clearly, most of the pixels have very low counts (under 3000), while a very few pixels have much larger counts (up to 22000, in this case). When these values are mapped into colors for display, nearly all of the pixels will end up being colored with the lowest colors in the range, i.e. white or nearly white, while the other colors in the available range will be used for only a few dozen pixels at most. Thus most of the pixels in this plot convey very little information about the data, wasting nearly all of dynamic range available on your display device. It's thus very likely that we are missing a lot of the structure in this data that we could be seeing.\n\n\n### 10-million-point datashaded plots: high dynamic range\n\nFor the typical case of data that is distributed nonlinearly over the available range, we can use nonlinear scaling to map the data range into the visible color range. E.g. first transforming the values via a log function will help flatten out this histogram and reveal much more of the structure of this data:", "_____no_output_____" ] ], [ [ "histogram(np.log1p(agg.values))\n\ntf.shade(agg, cmap=Greys9_r, how='log')", "_____no_output_____" ] ], [ [ "We can now see that there is rich structure throughout this dataset -- geographic features like streets and buildings are clearly modulating the values in both the high-dropoff regions in Manhattan and the relatively low-dropoff regions in the surrounding areas. Still, this choice is arbitrary -- why the log function in particular? It clearly flattened the histogram somewhat, but it was just a guess. We can instead explicitly equalize the histogram of the data before building the image, making structure visible at every data level (and thus at all the geographic locations covered) in a general way:", "_____no_output_____" ] ], [ [ "histogram(tf.eq_hist(agg.values))\n \ntf.shade(agg, cmap=Greys9_r, how='eq_hist')", "_____no_output_____" ] ], [ [ "The histogram is now fully flat (apart from the spacing of bins caused by the discrete nature of integer counting). Effectively, the visualization now shows a rank-order or percentile distribution of the data. 
I.e., pixels are now colored according to where their corresponding counts fall in the distribution of all counts, with one end of the color range for the lowest counts, one end for the highest ones, and every colormap step in between having similar numbers of counts. Such a visualization preserves the ordering between count values, faithfully displaying local differences in these counts, but discards absolute magnitudes (as the top 1% of the color range will be used for the top 1% of the data values, whatever those may be).\n\nNow that the data is visible at every level, we can immediately see that there are some clear problems with the quality of the data -- there is a surprising number of trips that claim to drop off in the water or in the roadless areas of Central park, as well as in the middle of most of the tallest buildings in central Manhattan. These locations are likely to be GPS errors being made visible, perhaps partly because of poor GPS performance in between the tallest buildings.\n\nHistogram equalization does not require any magic parameters, and in theory it should convey the maximum information available about the relative values between pixels, by mapping each of the observed ranges of values into visibly discriminable colors. And it's clearly a good start in practice, because it shows both low values (avoiding undersaturation) and relatively high values clearly, without arbitrary settings. \n\nEven so, the results will depend on the nonlinearities of your visual system, your specific display device, and any automatic compensation or calibration being applied to your display device. Thus in practice, the resulting range of colors may not map directly into a linearly perceivable range for your particular setup, and so you may want to further adjust the values to more accurately reflect the underlying structure, by adding additional calibration or compensation steps.\n\nMoreover, at this point you can now bring in your human-centered goals for the visualization -- once the overall structure has been clearly revealed, you can select specific aspects of the data to highlight or bring out, based on your own questions about the data. These questions can be expressed at whatever level of the pipeline is most appropriate, as shown in the examples below. For instance, histogram equalization was done on the counts in the aggregate array, because if we waited until the image had been created, we would have been working with data truncated to the 256 color levels available per channel in most display devices, greatly reducing precision. Or you may want to focus specifically on the highest peaks (as shown below), which again should be done at the aggregate level so that you can use the full color range of your display device to represent the narrow range of data that you are interested in. Throughout, the goal is to map from the data of interest into the visible, clearly perceptible range available on your display device.\n\n\n### 10-million-point datashaded plots: interactive\n\nAlthough the above plots reveal the entire dataset at once, the full power of datashading requires an interactive plot, because a big dataset will usually have structure at very many different levels (such as different geographic regions). Datashading allows auto-ranging and other automatic operations to be recomputed dynamically for the specific selected viewport, automatically revealing local structure that may not be visible from a global view. 
Here we'll embed the generated images into a Bokeh plot to support fully interactive zooming. For the highest detail on large monitors, you should increase the plot width and height above.", "_____no_output_____" ] ], [ [ "import datashader as ds\nfrom datashader.bokeh_ext import InteractiveImage\nfrom functools import partial\nfrom datashader.utils import export_image\nfrom datashader.colors import colormap_select, Greys9, Hot, inferno\n\nbackground = \"black\"\nexport = partial(export_image, export_path=\"export\", background=background)\ncm = partial(colormap_select, reverse=(background==\"black\"))\n\ndef create_image(x_range, y_range, w=plot_width, h=plot_height):\n cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)\n agg = cvs.points(df, 'dropoff_x', 'dropoff_y', ds.count('passenger_count'))\n img = tf.shade(agg, cmap=Hot, how='eq_hist')\n return tf.dynspread(img, threshold=0.5, max_px=4)\n\np = base_plot(background_fill_color=background)\nexport(create_image(*NYC),\"NYCT_hot\")\nInteractiveImage(p, create_image)", "_____no_output_____" ] ], [ [ "You can now zoom in interactively to this plot, seeing all the points available in that viewport, without ever needing to change the plot parameters for that specific zoom level. Each time you zoom or pan, a new image is rendered (which takes a few seconds for large datasets), and displayed overlaid any other plot elements, providing full access to all of your data. Here we've used the optional `tf.dynspread` function to automatically enlarge the size of each datapoint once you've zoomed in so far that datapoints no longer have nearby neighbors.\n\n### Customizing datashader\n\nOne of the most important features of datashading is that each of the stages of the datashader pipeline can be modified or replaced, either for personal preferences or to highlight specific aspects of the data. Here we'll use a high-level `Pipeline` object that encapsulates the typical series of steps in the above `create_image` function, and then we'll customize it. The default values of this pipeline are the same as the plot above, but here we'll add a special colormap to make the values stand out against an underlying map, and only plot hotspots (defined here as pixels (aggregation bins) that are in the 90th percentile by count): ", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom functools import partial\n\ndef create_image90(x_range, y_range, w=plot_width, h=plot_height):\n cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)\n agg = cvs.points(df, 'dropoff_x', 'dropoff_y', ds.count('passenger_count'))\n img = tf.shade(agg.where(agg>np.percentile(agg,90)), cmap=inferno, how='eq_hist')\n return tf.dynspread(img, threshold=0.3, max_px=4)\n \np = base_plot()\np.add_tile(STAMEN_TERRAIN)\nexport(create_image(*NYC),\"NYCT_90th\")\nInteractiveImage(p, create_image90)", "_____no_output_____" ] ], [ [ "If you zoom in to the plot above, you can see that the 90th-percentile criterion at first highlights the most active areas in the entire dataset, and then highlights the most active areas in each subsequent viewport. Here yellow has been chosen to highlight the strongest peaks, and if you zoom in on one of those peaks you can see the most active areas in that particular geographic region, according to this dynamically evaluated definition of \"most active\". \n\nThe above plots each followed a roughly standard series of steps useful for many datasets, but you can instead fully customize the computations involved. 
This capability lets you do novel operations on the data once it has been aggregated into pixel-shaped bins. For instance, you might want to plot all the pixels where there were more dropoffs than pickups in blue, and all those where there were more pickups than dropoffs in red. To do this, just write your own function that will create an image, when given x and y ranges, a resolution (w x h), and any optional arguments needed. You can then either call the function yourself, or pass it to `InteractiveImage` to make an interactive Bokeh plot:", "_____no_output_____" ] ], [ [ "def merged_images(x_range, y_range, w=plot_width, h=plot_height, how='log'):\n cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)\n picks = cvs.points(df, 'pickup_x', 'pickup_y', ds.count('passenger_count'))\n drops = cvs.points(df, 'dropoff_x', 'dropoff_y', ds.count('passenger_count'))\n drops = drops.rename({'dropoff_x': 'x', 'dropoff_y': 'y'})\n picks = picks.rename({'pickup_x': 'x', 'pickup_y': 'y'})\n more_drops = tf.shade(drops.where(drops > picks), cmap=[\"darkblue\", 'cornflowerblue'], how=how)\n more_picks = tf.shade(picks.where(picks > drops), cmap=[\"darkred\", 'orangered'], how=how)\n img = tf.stack(more_picks, more_drops)\n return tf.dynspread(img, threshold=0.3, max_px=4)\n\np = base_plot(background_fill_color=background)\nexport(merged_images(*NYC),\"NYCT_pickups_vs_dropoffs\")\nInteractiveImage(p, merged_images)", "_____no_output_____" ] ], [ [ "Now you can see that pickups are more common on major roads, as you'd expect, and dropoffs are more common on side streets. In Manhattan, roads running along the island are more common for pickups. If you zoom in to any location, the data will be re-aggregated to the new resolution automatically, again calculating for each newly defined pixel whether pickups or dropoffs were more likely in that pixel. The interactive features of Bokeh are now fully usable with this large dataset, allowing you to uncover new structure at every level. \n\nWe can also use other columns in the dataset as additional dimensions in the plot. For instance, if we want to see if certain areas are more likely to have pickups at certain hours (e.g. areas with bars and restaurants might have pickups in the evening, while apartment buildings may have pickups in the morning). 
One way to do this is to use the hour of the day as a category, and then colorize each hour:", "_____no_output_____" ] ], [ [ "df['hour'] = pd.to_datetime(df['tpep_pickup_datetime']).dt.hour.astype('category')", "_____no_output_____" ], [ "colors = [\"#FF0000\",\"#FF3F00\",\"#FF7F00\",\"#FFBF00\",\"#FFFF00\",\"#BFFF00\",\"#7FFF00\",\"#3FFF00\",\n \"#00FF00\",\"#00FF3F\",\"#00FF7F\",\"#00FFBF\",\"#00FFFF\",\"#00BFFF\",\"#007FFF\",\"#003FFF\",\n \"#0000FF\",\"#3F00FF\",\"#7F00FF\",\"#BF00FF\",\"#FF00FF\",\"#FF00BF\",\"#FF007F\",\"#FF003F\",]\n\ndef colorized_images(x_range, y_range, w=plot_width, h=plot_height, dataset=\"pickup\"):\n cvs = ds.Canvas(plot_width=w, plot_height=h, x_range=x_range, y_range=y_range)\n agg = cvs.points(df, dataset+'_x', dataset+'_y', ds.count_cat('hour'))\n img = tf.shade(agg, color_key=colors)\n return tf.dynspread(img, threshold=0.3, max_px=4)\n\np = base_plot(background_fill_color=background)\n#p.add_tile(STAMEN_TERRAIN)\nexport(colorized_images(*NYC, dataset=\"pickup\"),\"NYCT_pickup_times\")\nInteractiveImage(p, colorized_images, dataset=\"pickup\")", "_____no_output_____" ], [ "export(colorized_images(*NYC, dataset=\"dropoff\"),\"NYCT_dropoff_times\")\np = base_plot(background_fill_color=background)\nInteractiveImage(p, colorized_images, dataset=\"dropoff\")", "_____no_output_____" ] ], [ [ "Here the order of colors is roughly red (midnight), yellow (4am), green (8am), cyan (noon), blue (4pm), purple (8pm), and back to red (since hours and colors are both cyclic). There are clearly hotspots by hour that can now be investigated, and perhaps compared with the underlying map data. And you can try first filtering the dataframe to only have weekdays or weekends, or only during certain public events, etc., or filtering the resulting pixels to have only those in a certain range of interest. The system is very flexible, and it should be straightforward to express a very large range of possible queries and visualizations with very little code.\n\nThe above examples each used pre-existing components provided for the datashader pipeline, but you can implement any components you like and substitute them, allowing you to easily explore and highlight specific aspects of your data. Have fun datashading!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d071749d36f976397807f7eff20b1ff5e9ef9948
1,596
ipynb
Jupyter Notebook
run-files/question5.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
null
null
null
run-files/question5.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
1
2016-10-25T18:23:53.000Z
2016-10-26T01:43:56.000Z
run-files/question5.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
null
null
null
23.130435
439
0.538221
[ [ [ "# Question 5", "_____no_output_____" ], [ "### V=[1, 2, 4, 6, 8, ..., 30] with A=[2000, 2001, 2002, ..., 2200]", "_____no_output_____" ], [ "<img src=\"img/Q5.png\" />", "_____no_output_____" ], [ "### V=[1, 2, 4, 6, 8, ..., 30] with A=[1, 2, 3, ..., 30]", "_____no_output_____" ], [ "<img src=\"img/Q5_slow.png\" />", "_____no_output_____" ], [ "Note that the \"changeslow\" algorithm had to be run for $A < 30$ due to extremely long running times. The plots above suggest that the coin system [1,2,4,6,8,...,30] may be canonical[1]. A coin system is canonical if the number of coins given in change by the greedy algorithm is optimal for all amounts. **Our observations show that greedy is equal to DP (and Slow) _for all values of A that were plotted_ for this coin system.**", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d071874975aec948d4a7f5d2b07a269cec1e538b
4,649
ipynb
Jupyter Notebook
notebooks/smpc/Private Mul Tensor Abstraction.ipynb
Noob-can-Compile/PySyft
156cf93489b16dd0205b0058d4d23d56b3a91ab8
[ "Apache-2.0" ]
8,428
2017-08-10T09:17:49.000Z
2022-03-31T08:20:14.000Z
notebooks/smpc/Private Mul Tensor Abstraction.ipynb
Noob-can-Compile/PySyft
156cf93489b16dd0205b0058d4d23d56b3a91ab8
[ "Apache-2.0" ]
4,779
2017-08-09T23:19:00.000Z
2022-03-29T11:49:36.000Z
notebooks/smpc/Private Mul Tensor Abstraction.ipynb
Noob-can-Compile/PySyft
156cf93489b16dd0205b0058d4d23d56b3a91ab8
[ "Apache-2.0" ]
2,307
2017-08-10T08:52:12.000Z
2022-03-30T05:36:07.000Z
24.468421
171
0.562271
[ [ [ "import syft as sy\nsy.logger.remove()\nimport numpy as np\ndata = sy.Tensor(np.array([1,2,3],dtype=np.int32))", "_____no_output_____" ], [ "gryffindor = sy.login(email=\"[email protected]\",password=\"changethis\",port=\"8081\")\nslytherin = sy.login(email=\"[email protected]\",password=\"changethis\",port=\"8082\")\nhufflepuff = sy.login(email=\"[email protected]\",password=\"changethis\",port=\"8083\")", "Connecting to http://localhost:8081... done! \t Logging into jolly_schmidhuber... done!\nConnecting to http://localhost:8082... done! \t Logging into amazing_bengio... done!\nConnecting to http://localhost:8083... done! \t Logging into quirky_brockman... done!\n" ], [ "tensor_1 = data.send(gryffindor)\ntensor_2 = data.send(slytherin)\ntensor_3 = data.send(hufflepuff)", "_____no_output_____" ] ], [ [ "During Private Multiplication , we require the parties to be able to communicate with each other.\nWe make sure that our Actions are **Idempotent** and **Atomic** such that when a given action is not able to execute, it requeues itself to the back of the queue.\n\nWe set a maximum amount of retries,until eventually failing, when one of the parties nodes are not able to send their intermediate results.\n\nWe also create proxy clients with minimal permissions such that the parties are able to communicate with each other.\n", "_____no_output_____" ] ], [ [ "out = tensor_1 + tensor_2", "_____no_output_____" ], [ "out2 = out > 3", "_____no_output_____" ], [ "out2.block.reconstruct()", "_____no_output_____" ], [ "mpc_1 = tensor_1 * tensor_2\nmpc_2 = tensor_2 * tensor_3\nmpc = mpc_1 * mpc_2 * 3", "_____no_output_____" ], [ "mpc.block.reconstruct()", "_____no_output_____" ], [ "mpc_1 = tensor_1 + tensor_2\nmpc_2 = tensor_2 + tensor_3\nmpc3 = mpc_1 + mpc_2 + 3", "_____no_output_____" ], [ "mpc3.block.reconstruct()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0718b12ba653a31e44d83f02cf7468d4542758d
32,043
ipynb
Jupyter Notebook
describe/describe.ipynb
michaelk-igz/functions
0afac753c28f1c4126b841ebea14219700bc9635
[ "Apache-2.0" ]
null
null
null
describe/describe.ipynb
michaelk-igz/functions
0afac753c28f1c4126b841ebea14219700bc9635
[ "Apache-2.0" ]
null
null
null
describe/describe.ipynb
michaelk-igz/functions
0afac753c28f1c4126b841ebea14219700bc9635
[ "Apache-2.0" ]
null
null
null
39.413284
978
0.519021
[ [ [ "# nuclio: ignore\nimport nuclio", "_____no_output_____" ], [ "%nuclio config kind = \"job\"\n%nuclio config spec.image = \"mlrun/ml-models\"", "%nuclio: setting kind to 'job'\n%nuclio: setting spec.image to 'mlrun/ml-models'\n" ], [ "import warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)", "_____no_output_____" ], [ "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom mlrun.execution import MLClientCtx\nfrom mlrun.datastore import DataItem\nfrom mlrun.artifacts import PlotArtifact, TableArtifact\nfrom mlrun.mlutils import gcf_clear\n\nfrom typing import List", "_____no_output_____" ], [ "pd.set_option(\"display.float_format\", lambda x: \"%.2f\" % x)\n\ndef summarize(\n context: MLClientCtx,\n table: DataItem,\n label_column: str = None,\n class_labels: List[str] = [],\n plot_hist: bool = True,\n plots_dest: str = \"plots\",\n update_dataset = False,\n) -> None:\n \"\"\"Summarize a table\n\n :param context: the function context\n :param table: MLRun input pointing to pandas dataframe (csv/parquet file path)\n :param label_column: ground truth column label\n :param class_labels: label for each class in tables and plots\n :param plot_hist: (True) set this to False for large tables\n :param plots_dest: destination folder of summary plots (relative to artifact_path)\n :param update_dataset: when the table is a registered dataset update the charts in-place \n \"\"\"\n df = table.as_df()\n header = df.columns.values\n extra_data = {}\n \n try:\n gcf_clear(plt)\n snsplt = sns.pairplot(df, hue=label_column)#, diag_kws={\"bw\": 1.5})\n extra_data[\"histograms\"] = context.log_artifact(PlotArtifact(\"histograms\", body=plt.gcf()),\n local_path=f\"{plots_dest}/hist.html\", db_key=False)\n except Exception as e:\n context.logger.error(f'Failed to create pairplot histograms due to: {e}')\n \n try:\n gcf_clear(plt)\n plot_cols = 3\n plot_rows = int((len(header) - 1) / plot_cols)+1\n fig, ax = plt.subplots(plot_rows, plot_cols, figsize=(15, 4))\n fig.tight_layout(pad=2.0)\n for i in range(plot_rows * plot_cols):\n if i < len(header):\n sns.violinplot(x=df[header[i]], ax=ax[int(i / plot_cols)][i % plot_cols], \n orient='h', width=0.7, inner=\"quartile\")\n else:\n fig.delaxes(ax[int(i / plot_cols)][i % plot_cols]) \n i+=1\n extra_data[\"violin\"] = context.log_artifact(PlotArtifact(\"violin\", body=plt.gcf(), title='Violin Plot'),\n local_path=f\"{plots_dest}/violin.html\", db_key=False)\n except Exception as e:\n context.logger.warn(f'Failed to create violin distribution plots due to: {e}')\n\n if label_column: \n labels = df.pop(label_column)\n imbtable = labels.value_counts(normalize=True).sort_index()\n try:\n gcf_clear(plt) \n balancebar = imbtable.plot(kind='bar', title='class imbalance - labels')\n balancebar.set_xlabel('class')\n balancebar.set_ylabel(\"proportion of total\")\n extra_data[\"imbalance\"] = context.log_artifact(PlotArtifact(\"imbalance\", body=plt.gcf()), \n local_path=f\"{plots_dest}/imbalance.html\")\n except Exception as e:\n context.logger.warn(f'Failed to create class imbalance plot due to: {e}')\n context.log_artifact(TableArtifact(\"imbalance-weights-vec\", \n df=pd.DataFrame({\"weights\": imbtable})),\n local_path=f\"{plots_dest}/imbalance-weights-vec.csv\", db_key=False)\n\n tblcorr = df.corr()\n mask = np.zeros_like(tblcorr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n \n dfcorr = pd.DataFrame(data=tblcorr, columns=header, index=header)\n dfcorr = 
dfcorr[np.arange(dfcorr.shape[0])[:, None] > np.arange(dfcorr.shape[1])]\n context.log_artifact(TableArtifact(\"correlation-matrix\", df=tblcorr, visible=True), \n local_path=f\"{plots_dest}/correlation-matrix.csv\", db_key=False)\n \n try:\n gcf_clear(plt)\n ax = plt.axes()\n sns.heatmap(tblcorr, ax=ax, mask=mask, annot=False, cmap=plt.cm.Reds)\n ax.set_title(\"features correlation\")\n extra_data[\"correlation\"] = context.log_artifact(PlotArtifact(\"correlation\", body=plt.gcf(), title='Correlation Matrix'),\n local_path=f\"{plots_dest}/corr.html\", db_key=False)\n except Exception as e:\n context.logger.warn(f'Failed to create features correlation plot due to: {e}')\n \n\n gcf_clear(plt)\n if update_dataset and table.meta and table.meta.kind == 'dataset':\n from mlrun.artifacts import update_dataset_meta\n update_dataset_meta(table.meta, extra_data=extra_data)\n ", "_____no_output_____" ], [ "# nuclio: end-code", "_____no_output_____" ] ], [ [ "### mlconfig", "_____no_output_____" ] ], [ [ "from mlrun import mlconf\nimport os\nmlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'\nmlconf.artifact_path = mlconf.artifact_path or os.path.abspath('./')", "_____no_output_____" ] ], [ [ "### save", "_____no_output_____" ] ], [ [ "from mlrun import code_to_function \n# create job function object from notebook code\nfn = code_to_function(\"describe\", handler=\"summarize\",\n description=\"describe and visualizes dataset stats\",\n categories=[\"analysis\"],\n labels = {\"author\": \"yjb\"},\n code_output='.')\n\nfn.export()", "> 2020-07-23 07:46:39,543 [info] function spec saved to path: function.yaml\n" ] ], [ [ "## tests", "_____no_output_____" ] ], [ [ "from mlrun.platforms import auto_mount\nfn.apply(auto_mount())", "_____no_output_____" ], [ "from mlrun import NewTask, run_local\n\n#DATA_URL = \"https://iguazio-sample-data.s3.amazonaws.com/datasets/classifier-data.csv\"\nDATA_URL = 'https://iguazio-sample-data.s3.amazonaws.com/datasets/iris_dataset.csv'", "_____no_output_____" ], [ "task = NewTask(\n name=\"tasks-describe\", \n handler=summarize, \n inputs={\"table\": DATA_URL}, params={'update_dataset': True, 'label_column': 'label'})", "_____no_output_____" ] ], [ [ "### run locally", "_____no_output_____" ] ], [ [ "run = run_local(task)", "> 2020-07-22 09:00:32,582 [debug] Validating field against patterns: {'field_name': 'run.metadata.name', 'field_value': 'tasks-describe', 'pattern': ['^.{0,63}$', '^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$']}\n> 2020-07-22 09:00:32,598 [info] starting run tasks-describe uid=f30656601819462c892a9365dd175f72 -> http://mlrun-api:8080\n> 2020-07-22 09:00:37,475 [debug] log artifact histograms at /User/functions/describe/plots/hist.html, size: 140127, db: N\n> 2020-07-22 09:00:38,377 [debug] log artifact violin at /User/functions/describe/plots/violin.html, size: 54096, db: N\n> 2020-07-22 09:00:38,680 [debug] log artifact imbalance at /User/functions/describe/plots/imbalance.html, size: 10045, db: Y\n> 2020-07-22 09:00:38,697 [debug] log artifact imbalance-weights-vec at /User/functions/describe/plots/imbalance-weights-vec.csv, size: 65, db: N\n> 2020-07-22 09:00:38,702 [debug] log artifact correlation-matrix at /User/functions/describe/plots/correlation-matrix.csv, size: 324, db: N\n> 2020-07-22 09:00:38,877 [debug] log artifact correlation at /User/functions/describe/plots/corr.html, size: 12052, db: N\n" ] ], [ [ "### run remotely", "_____no_output_____" ] ], [ [ "fn.run(task, inputs={\"table\": DATA_URL})", "> 2020-07-22 09:00:39,154 [debug] 
Validating field against patterns: {'field_name': 'run.metadata.name', 'field_value': 'tasks-describe', 'pattern': ['^.{0,63}$', '^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$']}\n> 2020-07-22 09:00:39,161 [info] starting run tasks-describe uid=d8edd5e4b8004437927f0810d2ad1658 -> http://mlrun-api:8080\n> 2020-07-22 09:00:39,287 [info] Job is running in the background, pod: tasks-describe-vmgv8\n> 2020-07-22 09:00:45,175 [debug] Validating field against patterns: {'field_name': 'run.metadata.name', 'field_value': 'tasks-describe', 'pattern': ['^.{0,63}$', '^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$']}\n> 2020-07-22 09:00:45,291 [debug] starting local run: main.py # summarize\n> 2020-07-22 09:00:50,598 [debug] log artifact histograms at /User/functions/describe/plots/hist.html, size: 238319, db: N\n> 2020-07-22 09:00:51,652 [debug] log artifact violin at /User/functions/describe/plots/violin.html, size: 86708, db: N\n> 2020-07-22 09:00:51,908 [debug] log artifact imbalance at /User/functions/describe/plots/imbalance.html, size: 21657, db: Y\n> 2020-07-22 09:00:51,914 [debug] log artifact imbalance-weights-vec at /User/functions/describe/plots/imbalance-weights-vec.csv, size: 65, db: N\n> 2020-07-22 09:00:51,919 [debug] log artifact correlation-matrix at /User/functions/describe/plots/correlation-matrix.csv, size: 324, db: N\n> 2020-07-22 09:00:52,104 [debug] log artifact correlation at /User/functions/describe/plots/corr.html, size: 26392, db: N\n> 2020-07-22 09:00:52,272 [info] run executed, status=completed\nfinal state: succeeded\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0719390e06edf21b5dee1f8fbcde0f58cbdf724
16,794
ipynb
Jupyter Notebook
src/models/montecarlo/poisson.ipynb
harrisonzhu508/citadelchallenge
93607482a1d8d72e6b57e4d3f5fb457730d397cd
[ "MIT" ]
null
null
null
src/models/montecarlo/poisson.ipynb
harrisonzhu508/citadelchallenge
93607482a1d8d72e6b57e4d3f5fb457730d397cd
[ "MIT" ]
null
null
null
src/models/montecarlo/poisson.ipynb
harrisonzhu508/citadelchallenge
93607482a1d8d72e6b57e4d3f5fb457730d397cd
[ "MIT" ]
null
null
null
58.110727
1,424
0.574967
[ [ [ "import gc \nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nplt.style.use('seaborn-darkgrid')\nimport gc\ngc.collect()\nimport pymc3 as pm", "_____no_output_____" ], [ "train = pd.read_csv(\"../../../data/processed/modelling_data/weekly_SWEurope_train.csv\")\ntrain = train.dropna()\ntest = pd.read_csv(\"../../../data/processed/modelling_data/weekly_SWEurope_test.csv\")\ntest = test.dropna()", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "fml = \"num_influenza ~ year:(temperature + CapitalLatitude\\\n + CapitalLongitude) + week\"", "_____no_output_____" ], [ "with pm.Model() as model:\n \n b0 = pm.Normal('b0_intercept', mu=0, sd=10)\n b1 = pm.Normal('b1', mu=0, sd=10)\n b2 = pm.Normal('b2', mu=0, sd=10)\n b3 = pm.Normal('b3', mu=0, sd=10)\n b4 = pm.Normal('b4', mu=0, sd=10)\n b5 = pm.Normal('b5', mu=0, sd=10)\n \n theta = (b0 + \n b1 * train[\"year\"]+\n b2 * train[\"week\"]+\n b3 * train[\"temperature\"]+\n b4 * train[\"CapitalLatitude\"]+\n b5 * train[\"CapitalLongitude\"])\n \n y = pm.Poisson('y', mu=np.exp(theta), observed=train['num_influenza'].values)\n", "_____no_output_____" ], [ "with model:\n poisson_result = pm.sample(1000, tune=1000, cores=4)", "Auto-assigning NUTS sampler...\nInitializing NUTS using jitter+adapt_diag...\nMultiprocess sampling (4 chains in 4 jobs)\nNUTS: [b5, b4, b3, b2, b1, b0_intercept]\nSampling 4 chains: 3%|▎ | 278/8000 [00:01<00:47, 161.04draws/s]/Users/harrisonzhu/.local/share/virtualenvs/Bayesian-Extremes-zeTIm6w-/lib/python3.6/site-packages/numpy/core/fromnumeric.py:3118: RuntimeWarning: Mean of empty slice.\n out=out, **kwargs)\nINFO (theano.gof.compilelock): Waiting for existing lock by unknown process (I am process '6413')\nINFO (theano.gof.compilelock): To manually release the lock, delete /Users/harrisonzhu/.theano/compiledir_Darwin-18.2.0-x86_64-i386-64bit-i386-3.6.6-64/lock_dir\n/Users/harrisonzhu/.local/share/virtualenvs/Bayesian-Extremes-zeTIm6w-/lib/python3.6/site-packages/numpy/core/fromnumeric.py:3118: RuntimeWarning: Mean of empty slice.\n out=out, **kwargs)\nSampling 4 chains: 4%|▎ | 297/8000 [00:07<03:12, 39.92draws/s] \nBad initial energy, check any log probabilities that are inf or -inf, nan or very small:\nSeries([], )\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d071ab54ca1ec466318dfb8ec0ed778c90739ece
7,722
ipynb
Jupyter Notebook
notebooks/ciencia_de_los_datos/taller_presencial-programacion_en_python.ipynb
driverava/datalabs
a9635eabf96cab9f9821dbee31d8f1b4bad826d1
[ "MIT" ]
2
2020-08-18T13:14:48.000Z
2020-08-20T00:32:57.000Z
notebooks/ciencia_de_los_datos/taller_presencial-programacion_en_python.ipynb
driverava/datalabs
a9635eabf96cab9f9821dbee31d8f1b4bad826d1
[ "MIT" ]
null
null
null
notebooks/ciencia_de_los_datos/taller_presencial-programacion_en_python.ipynb
driverava/datalabs
a9635eabf96cab9f9821dbee31d8f1b4bad826d1
[ "MIT" ]
10
2019-11-22T23:42:09.000Z
2022-01-10T21:48:06.000Z
32.859574
103
0.614608
[ [ [ "Taller Presencial --- Programación en Python\n===", "_____no_output_____" ], [ "El algoritmo MapReduce de Hadoop se presenta en la siguiente figura.", "_____no_output_____" ], [ "<img src=\"https://raw.githubusercontent.com/jdvelasq/datalabs/master/images/map-reduce.jpg\"/>", "_____no_output_____" ], [ "Se desea escribir un programa que realice el conteo de palabras usando el algoritmo MapReduce.", "_____no_output_____" ] ], [ [ "#\n# A continuación se crea las carpetas /tmp/input, /tmp/output y tres archivos de prueba\n#\n!rm -rf /tmp/input /tmp/output\n!mkdir /tmp/input\n!mkdir /tmp/output", "_____no_output_____" ], [ "%%writefile /tmp/input/text0.txt\nAnalytics is the discovery, interpretation, and communication of meaningful patterns \nin data. Especially valuable in areas rich with recorded information, analytics relies \non the simultaneous application of statistics, computer programming and operations research \nto quantify performance.\n\nOrganizations may apply analytics to business data to describe, predict, and improve business \nperformance. Specifically, areas within analytics include predictive analytics, prescriptive \nanalytics, enterprise decision management, descriptive analytics, cognitive analytics, Big \nData Analytics, retail analytics, store assortment and stock-keeping unit optimization, \nmarketing optimization and marketing mix modeling, web analytics, call analytics, speech \nanalytics, sales force sizing and optimization, price and promotion modeling, predictive \nscience, credit risk analysis, and fraud analytics. Since analytics can require extensive \ncomputation (see big data), the algorithms and software used for analytics harness the most \ncurrent methods in computer science, statistics, and mathematics", "_____no_output_____" ], [ "%%writefile /tmp/input/text1.txt\nThe field of data analysis. Analytics often involves studying past historical data to \nresearch potential trends, to analyze the effects of certain decisions or events, or to \nevaluate the performance of a given tool or scenario. The goal of analytics is to improve \nthe business by gaining knowledge which can be used to make improvements or changes.", "_____no_output_____" ], [ "\n%%writefile /tmp/input/text2.txt\nData analytics (DA) is the process of examining data sets in order to draw conclusions \nabout the information they contain, increasingly with the aid of specialized systems \nand software. Data analytics technologies and techniques are widely used in commercial \nindustries to enable organizations to make more-informed business decisions and by \nscientists and researchers to verify or disprove scientific models, theories and \nhypotheses.", "_____no_output_____" ], [ "#\n# Escriba la función load_input que recive como parámetro un folder y retorna \n# una lista de tuplas donde el primer elemento de cada tupla es el nombre del \n# archivo y el segundo es una línea del archivo. La función convierte a tuplas\n# todas las lineas de cada uno de los archivos. La función es genérica y debe\n# leer todos los archivos de folder entregado como parámetro.\n# \n# Por ejemplo:\n# [\n# ('text0'.txt', 'Analytics is the discovery, inter ...'),\n# ('text0'.txt', 'in data. Especially valuable in ar...').\n# ...\n# ('text2.txt'. 'hypotheses.')\n# ]\n#\ndef load_input(input_directory):\n pass", "_____no_output_____" ], [ "#\n# Escriba una función llamada maper que recibe una lista de tuplas de la \n# función anterior y retorna una lista de tuplas (clave, valor). 
En este caso,\n# la clave es cada palabra y el valor es 1, puesto que se está realizando un\n# conteo.\n#\n# [\n# ('Analytics', 1),\n# ('is', 1),\n# ...\n# ]\n#\ndef mapper(sequence):\n pass", "_____no_output_____" ], [ "#\n# Escriba la función shuffle_and_sort que recibe la lista de tuplas entregada\n# por el mapper, y retorna una lista con el mismo contenido ordenado por la\n# clave.\n#\n# [\n# ('Analytics', 1),\n# ('Analytics', 1),\n# ...\n# ]\n#\ndef shuffle_and_sort(sequence):\n pass", "_____no_output_____" ], [ "#\n# Escriba la función reducer, la cual recibe el resultado de shuffle_and_sort y\n# reduce los valores asociados a cada clave sumandolos. Como resultado, por \n# ejemplo, la reducción indica cuantas veces aparece la palabra analytics en el\n# texto.\n#\ndef reducer(sequence):\n pass", "_____no_output_____" ], [ "#\n# Escriba la función save_output, que toma la lista devuelta por el reducer y \n# escribe en la carpeta /tmp/output/ los archivos 'part-0.txt', 'part-1.txt', \n# etc. El primer archivo contiene las primeras 20 palabras contadas, el segundo\n# de la 21 a la 40 y asi sucesivamente. Cada línea de cada archivo contiene la\n# palabra y las veces que aparece separadas por un tabulador.\n#\ndef save_output(sequence, output_directory):\n pass", "_____no_output_____" ] ] ]
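[ [ "For reference, here is a tiny end-to-end illustration of the four stages on a toy list. This sketch is ours and does not implement the file-based functions requested above:", "_____no_output_____" ] ], [ [ "toy = [('t.txt', 'big data'), ('t.txt', 'big ideas')]\n\npairs = [(word, 1) for _, line in toy for word in line.split()] # map\npairs.sort(key=lambda kv: kv[0]) # shuffle and sort\ncounts = [] # reduce: sum values per key\nfor key, value in pairs:\n if counts and counts[-1][0] == key:\n counts[-1] = (key, counts[-1][1] + value)\n else:\n counts.append((key, value))\nprint(counts) # [('big', 2), ('data', 1), ('ideas', 1)]", "_____no_output_____" ] ] ]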
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d071b13fb03ac99e218b64af35d23de64e575c07
21,471
ipynb
Jupyter Notebook
DS_Unit_2_Sprint_Challenge_3_Classification_Validation.ipynb
mkirby1995/DS-Unit-2-Sprint-3-Classification-Validation
0380c425c68a155ad2d98284e581bc181899752c
[ "MIT" ]
null
null
null
DS_Unit_2_Sprint_Challenge_3_Classification_Validation.ipynb
mkirby1995/DS-Unit-2-Sprint-3-Classification-Validation
0380c425c68a155ad2d98284e581bc181899752c
[ "MIT" ]
null
null
null
DS_Unit_2_Sprint_Challenge_3_Classification_Validation.ipynb
mkirby1995/DS-Unit-2-Sprint-3-Classification-Validation
0380c425c68a155ad2d98284e581bc181899752c
[ "MIT" ]
null
null
null
30.49858
311
0.489777
[ [ [ "<a href=\"https://colab.research.google.com/github/mkirby1995/DS-Unit-2-Sprint-3-Classification-Validation/blob/master/DS_Unit_2_Sprint_Challenge_3_Classification_Validation.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ " _Lambda School Data Science Unit 2_\n \n # Classification & Validation Sprint Challenge", "_____no_output_____" ], [ "Follow the instructions for each numbered part to earn a score of 2. See the bottom of the notebook for a list of ways you can earn a score of 3.", "_____no_output_____" ], [ "#### For this Sprint Challenge, you'll predict whether a person's income exceeds $50k/yr, based on census data.\n\nYou can read more about the Adult Census Income dataset at the UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/adult", "_____no_output_____" ], [ "#### Run this cell to load the data:", "_____no_output_____" ] ], [ [ "!pip install category_encoders", "Collecting category_encoders\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/6e/a1/f7a22f144f33be78afeb06bfa78478e8284a64263a3c09b1ef54e673841e/category_encoders-2.0.0-py2.py3-none-any.whl (87kB)\n\u001b[K |████████████████████████████████| 92kB 3.5MB/s \n\u001b[?25hRequirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.5.1)\nRequirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.16.3)\nRequirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.24.2)\nRequirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.20.3)\nRequirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (1.2.1)\nRequirement already satisfied: statsmodels>=0.6.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders) (0.9.0)\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from patsy>=0.4.1->category_encoders) (1.12.0)\nRequirement already satisfied: python-dateutil>=2.5.0 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2.5.3)\nRequirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)\nInstalling collected packages: category-encoders\nSuccessfully installed category-encoders-2.0.0\n" ], [ "import category_encoders as ce\nfrom sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler, OrdinalEncoder\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ], [ "import pandas as pd\n\ncolumns = ['age', \n 'workclass', \n 'fnlwgt', \n 'education', \n 'education-num', \n 'marital-status', \n 'occupation', \n 'relationship', \n 'race', \n 'sex', \n 'capital-gain', \n 'capital-loss', \n 'hours-per-week', \n 'native-country', \n 'income']\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', \n header=None, names=columns)\n\ndf['income'] = df['income'].str.strip()", "_____no_output_____" ] ], [ [ "## Part 1 — Begin with baselines\n\nSplit the data into an **X matrix** (all the 
features) and **y vector** (the target).\n\n(You _don't_ need to split the data into train and test sets here. You'll be asked to do that at the _end_ of Part 1.)", "_____no_output_____" ] ], [ [ "df['income'].value_counts()", "_____no_output_____" ], [ "X = df.drop(columns='income')\nY = df['income'].replace({'<=50K':0, '>50K':1})", "_____no_output_____" ] ], [ [ "What **accuracy score** would you get here with a **\"majority class baseline\"?** \n \n(You can answer this question either with a scikit-learn function or with a pandas function.)", "_____no_output_____" ] ], [ [ "majority_class = Y.mode()[0]\nmajority_class_prediction = [majority_class] * len(Y)\naccuracy_score(Y, majority_class_prediction)", "_____no_output_____" ] ], [ [ "What **ROC AUC score** would you get here with a **majority class baseline?**\n\n(You can answer this question either with a scikit-learn function or with no code, just your understanding of ROC AUC.)", "_____no_output_____" ] ], [ [ "roc_auc_score(Y, majority_class_prediction)", "_____no_output_____" ] ], [ [ "In this Sprint Challenge, you will use **\"Cross-Validation with Independent Test Set\"** for your model validaton method.\n\nFirst, **split the data into `X_train, X_test, y_train, y_test`**. You can include 80% of the data in the train set, and hold out 20% for the test set.", "_____no_output_____" ] ], [ [ "X_train, X_test, Y_train, Y_test = train_test_split(X,Y, test_size=.2, random_state=42)", "_____no_output_____" ] ], [ [ "## Part 2 — Modeling with Logistic Regression!", "_____no_output_____" ], [ "- You may do exploratory data analysis and visualization, but it is not required.\n- You may **use all the features, or select any features** of your choice, as long as you select at least one numeric feature and one categorical feature.\n- **Scale your numeric features**, using any scikit-learn [Scaler](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing) of your choice.\n- **Encode your categorical features**. You may use any encoding (One-Hot, Ordinal, etc) and any library (category_encoders, scikit-learn, pandas, etc) of your choice.\n- You may choose to use a pipeline, but it is not required.\n- Use a **Logistic Regression** model.\n- Use scikit-learn's [**cross_val_score**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html) function. For [scoring](https://scikit-learn.org/stable/modules/model_evaluation.html#the-scoring-parameter-defining-model-evaluation-rules), use **accuracy**.\n- **Print your model's cross-validation accuracy score.**", "_____no_output_____" ] ], [ [ "pipeline = make_pipeline(ce.OneHotEncoder(use_cat_names=True),\n StandardScaler(),\n LogisticRegression(solver='lbfgs', max_iter=1000))", "_____no_output_____" ], [ "scores = cross_val_score(pipeline, X_train, Y_train, scoring='accuracy', cv=10, n_jobs=1, verbose=10)", "_____no_output_____" ], [ "print('Cross-Validation accuracy scores:', scores,'\\n\\n')\nprint('Average:', scores.mean())", "Cross-Validation accuracy scores: [0.8452975 0.85105566 0.84683301 0.85412668 0.84683301 0.85143954\n 0.84952015 0.85143954 0.84754224 0.86059908] \n\n\nAverage: 0.8504686426610766\n" ] ], [ [ "## Part 3 — Modeling with Tree Ensembles!\n\nPart 3 is the same as Part 2, except this time, use a **Random Forest** or **Gradient Boosting** classifier. You may use scikit-learn, xgboost, or any other library. 
Then, print your model's cross-validation accuracy score.", "_____no_output_____" ] ], [ [ "pipeline = make_pipeline(ce.OneHotEncoder(use_cat_names=True),\n StandardScaler(),\n RandomForestClassifier(max_depth=2, n_estimators=40))", "_____no_output_____" ], [ "scores = cross_val_score(pipeline, X_train, Y_train, scoring='accuracy', cv=10, n_jobs=1, verbose=10)", "_____no_output_____" ], [ "print('Cross-Validation accuracy scores:', scores,'\\n\\n')\nprint('Average:', scores.mean())", "Cross-Validation accuracy scores: [0.76084453 0.77044146 0.76506718 0.76852207 0.7596929 0.77044146\n 0.76583493 0.77274472 0.76228879 0.77688172] \n\n\nAverage: 0.7672759758351984\n" ] ], [ [ "## Part 4 — Calculate classification metrics from a confusion matrix\n\nSuppose this is the confusion matrix for your binary classification model:\n\n<table>\n <tr>\n <td colspan=\"2\" rowspan=\"2\"></td>\n <td colspan=\"2\">Predicted</td>\n </tr>\n <tr>\n <td>Negative</td>\n <td>Positive</td>\n </tr>\n <tr>\n <td rowspan=\"2\">Actual</td>\n <td>Negative</td>\n <td style=\"border: solid\">85</td>\n <td style=\"border: solid\">58</td>\n </tr>\n <tr>\n <td>Positive</td>\n <td style=\"border: solid\">8</td>\n <td style=\"border: solid\"> 36</td>\n </tr>\n</table>", "_____no_output_____" ] ], [ [ "true_neg = 85\ntrue_pos = 36\nfalse_neg = 8\nfalse_pos = 58\n\npred_neg = true_neg + false_neg\npred_pos = true_pos + false_pos\n\nactual_pos = false_neg + true_pos\nactual_neg = false_pos + true_neg", "_____no_output_____" ] ], [ [ "Calculate accuracy", "_____no_output_____" ] ], [ [ "accuracy = (true_neg + true_pos)/ (pred_neg + pred_pos)\naccuracy", "_____no_output_____" ] ], [ [ "Calculate precision", "_____no_output_____" ] ], [ [ "precision = (true_pos) / (pred_pos)\nprecision", "_____no_output_____" ] ], [ [ "Calculate recall", "_____no_output_____" ] ], [ [ "recall = true_pos / actual_pos\nrecall", "_____no_output_____" ] ], [ [ "## BONUS — How you can earn a score of 3\n\n### Part 1\nDo feature engineering, to try improving your cross-validation score.\n\n### Part 2\nExperiment with feature selection, preprocessing, categorical encoding, and hyperparameter optimization, to try improving your cross-validation score.\n\n### Part 3\nWhich model had the best cross-validation score? Refit this model on the train set and do a final evaluation on the held out test set — what is the test score? \n\n### Part 4\nCalculate F1 score and False Positive Rate. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]