Dataset schema:

| Column | Dtype | Min | Max |
|--------|-------|-----|-----|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | list | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | list | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | list | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | list | | |
| cell_types | list | | |
| cell_type_groups | list | | |
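The schema above describes one row per harvested .ipynb file, combining repository star/issue/fork metadata with the serialized notebook cells and a few derived text statistics. Below is a minimal sketch, under stated assumptions, of how such records could be loaded and filtered with pandas; the file name `notebooks.parquet`, the DataFrame handle `df`, and the helper `alphanum_fraction` are illustrative assumptions, not part of the original dump.

```python
import pandas as pd

# Assumed file name; the actual location of the dump is not given in this listing.
df = pd.read_parquet("notebooks.parquet")

# Sanity checks against the schema: every row should be an .ipynb file
# tagged with the single language class "Jupyter Notebook".
assert (df["ext"] == "ipynb").all()
assert (df["lang"] == "Jupyter Notebook").all()

# Example filter on the numeric columns: starred, moderately sized notebooks.
popular = df[(df["max_stars_count"].fillna(0) >= 5) & (df["size"] < 100_000)]

# One plausible way a derived column like alphanum_fraction could be computed
# from a notebook's raw text (assumption, not the dataset's documented recipe).
def alphanum_fraction(text: str) -> float:
    return sum(ch.isalnum() for ch in text) / max(len(text), 1)

print(popular[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())
```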
hexsha: ecd181a2757f4e4867ac96a67ba677157393579f
size: 4,561
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebook/sys_maxsize.ipynb
max_stars_repo_name: puyopop/python-snippets
max_stars_repo_head_hexsha: 9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 174
max_stars_repo_stars_event_min_datetime: 2018-05-30T21:14:50.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-25T07:59:37.000Z
max_issues_repo_path: notebook/sys_maxsize.ipynb
max_issues_repo_name: puyopop/python-snippets
max_issues_repo_head_hexsha: 9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 5
max_issues_repo_issues_event_min_datetime: 2019-08-10T03:22:02.000Z
max_issues_repo_issues_event_max_datetime: 2021-07-12T20:31:17.000Z
max_forks_repo_path: notebook/sys_maxsize.ipynb
max_forks_repo_name: puyopop/python-snippets
max_forks_repo_head_hexsha: 9d70aa3b2a867dd22f5a5e6178a5c0c5081add73
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 53
max_forks_repo_forks_event_min_datetime: 2018-04-27T05:26:35.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-25T07:59:37.000Z
avg_line_length: 16.059859
max_line_length: 111
alphanum_fraction: 0.459768
cells:
[ [ [ "import sys", "_____no_output_____" ], [ "print(sys.maxsize)", "9223372036854775807\n" ], [ "print(type(sys.maxsize))", "<class 'int'>\n" ], [ "print(sys.maxsize == 2**63 - 1)", "True\n" ], [ "print(bin(sys.maxsize))", "0b111111111111111111111111111111111111111111111111111111111111111\n" ], [ "print(hex(sys.maxsize))", "0x7fffffffffffffff\n" ], [ "i = 10**100", "_____no_output_____" ], [ "print(i)", "10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\n" ], [ "print(i > sys.maxsize)", "True\n" ], [ "print(sys.float_info.max)", "1.7976931348623157e+308\n" ], [ "i_e309 = 10**309", "_____no_output_____" ], [ "print(type(i_e309))", "<class 'int'>\n" ], [ "print(i_e309 > sys.float_info.max)", "True\n" ], [ "print(float('inf'))", "inf\n" ], [ "print(float('inf') > sys.float_info.max)", "True\n" ], [ "print(float('inf') > i_e309)", "True\n" ], [ "# int(float('inf'))\n# OverflowError: cannot convert float infinity to integer", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
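The notebook body of each record is stored in three parallel list columns: `cells` (grouped lists of [source, output] pairs, as in the record above), `cell_types`, and `cell_type_groups` (per-group cell type labels). A hedged sketch of rebuilding a runnable .ipynb from one such record with nbformat follows; the `record` dict standing for a parsed row, the output path, and the assumed nesting of `cells` and `cell_type_groups` are taken from the records shown here, not from documented dataset semantics.

```python
import nbformat as nbf

def record_to_notebook(record: dict, out_path: str) -> None:
    """Rebuild an .ipynb from the flattened cells / cell_type_groups columns.

    Assumes record["cells"] is a list of groups, each group a list of
    [source, output] pairs, and record["cell_type_groups"] mirrors that
    nesting with "code" / "markdown" labels (as observed in these rows).
    """
    nb = nbf.v4.new_notebook()
    for group, types in zip(record["cells"], record["cell_type_groups"]):
        for (source, _output), cell_type in zip(group, types):
            if cell_type == "markdown":
                nb.cells.append(nbf.v4.new_markdown_cell(source))
            else:
                nb.cells.append(nbf.v4.new_code_cell(source))
    nbf.write(nb, out_path)

# Hypothetical usage: record_to_notebook(df.iloc[0].to_dict(), "sys_maxsize.ipynb")
```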
hexsha: ecd18b3d8191aec015b026fd76429e9e15c9ed28
size: 23,885
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: cornet_features.ipynb
max_stars_repo_name: thefonseca/algonauts
max_stars_repo_head_hexsha: aff8cd6c585e052770a529766439b8393519da7d
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2019-07-22T08:11:52.000Z
max_stars_repo_stars_event_max_datetime: 2021-08-05T13:47:21.000Z
max_issues_repo_path: cornet_features.ipynb
max_issues_repo_name: thefonseca/algonauts
max_issues_repo_head_hexsha: aff8cd6c585e052770a529766439b8393519da7d
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: cornet_features.ipynb
max_forks_repo_name: thefonseca/algonauts
max_forks_repo_head_hexsha: aff8cd6c585e052770a529766439b8393519da7d
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 33.266017
max_line_length: 610
alphanum_fraction: 0.482437
cells:
[ [ [ "# CORnet PredNet feature extraction\n\nIn this notebook we extract representations from pre-trained CORnet models. We also generate RDM submissions for fMRI and MEG data.", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/gdrive',force_remount=True)", "Mounted at /content/gdrive\n" ], [ "!wget -O algonauts.zip -q https://github.com/thefonseca/algonauts/archive/master.zip\n!unzip -q algonauts.zip -d /content/\n!mv algonauts-master/ algonauts-github\n!mv algonauts-github/* /content/\n\n!unzip -q gdrive/My\\ Drive/algonauts/algonauts-master.zip -d /content/\n!unzip -q gdrive/My\\ Drive/algonauts/algonauts-dataset.zip -d /content/\n!cp algonauts-master/Feature_Extract/create_RDMs.py /content/\n#!cp prednet/*.py /content/\n#!cp -r gdrive/My\\ Drive/algonauts/prednet-models /content/", "_____no_output_____" ] ], [ [ "## Get CORnet code", "_____no_output_____" ] ], [ [ "!wget -O cornet.zip -q https://github.com/dicarlolab/CORnet/archive/master.zip\n!unzip -q cornet.zip -d /content/", "_____no_output_____" ], [ "!mkdir -p cornet-feats/92images/cornet-s\n!mkdir -p cornet-feats/118images/cornet-s", "_____no_output_____" ], [ "!pip install fire", "\u001b[?25l\r\u001b[K |████▎ | 10kB 19.1MB/s eta 0:00:01\r\u001b[K |████████▋ | 20kB 1.6MB/s eta 0:00:01\r\u001b[K |████████████▉ | 30kB 2.4MB/s eta 0:00:01\r\u001b[K |█████████████████▏ | 40kB 1.6MB/s eta 0:00:01\r\u001b[K |█████████████████████▍ | 51kB 2.0MB/s eta 0:00:01\r\u001b[K |█████████████████████████▊ | 61kB 2.4MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 71kB 2.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 81kB 3.0MB/s \n\u001b[?25h Building wheel for fire (setup.py) ... \u001b[?25l\u001b[?25hdone\n" ] ], [ [ "### Small fix\nFor the feature extraction to work we need to edit the `run.py` file and change the `_store_feats` hook from:\n\n```\ndef _store_feats(layer, inp, output):\n \"\"\"An ugly but effective way of accessing intermediate model features\n \"\"\"\n _model_feats.append(np.reshape(output, (len(output), -1)).numpy())\n```\n\nto the following:\n\n\n```\ndef _store_feats(layer, inp, output):\n \"\"\"An ugly but effective way of accessing intermediate model features\n \"\"\"\n _model_feats.append(output.view(len(output), -1).cpu().numpy())\n```", "_____no_output_____" ], [ "## Features for the 92 images dataset", "_____no_output_____" ] ], [ [ "!cd /content/CORnet-master/ && python run.py test --model S --layer decoder \\\n--sublayer avgpool --data_path /content/Training_Data/92_Image_Set/92images/ \\\n-o /content/cornet-feats/92images/ --ngpus 1", "Downloading: \"https://s3.amazonaws.com/cornet-models/cornet_z-5c427c9c.pth\" to /root/.cache/torch/checkpoints/cornet_z-5c427c9c.pth\n100% 15.8M/15.8M [00:02<00:00, 7.04MB/s]\n100% 92/92 [00:00<00:00, 153.70it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V1 \\\n--sublayer output --data_path /content/Training_Data/92_Image_Set/92images/ \\\n-o /content/cornet-feats/92images/ --ngpus 1", "100% 92/92 [00:00<00:00, 157.25it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V2 \\\n--sublayer output --data_path /content/Training_Data/92_Image_Set/92images/ \\\n-o /content/cornet-feats/92images/ --ngpus 1", "100% 92/92 [00:00<00:00, 160.83it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V4 \\\n--sublayer output --data_path /content/Training_Data/92_Image_Set/92images/ \\\n-o /content/cornet-feats/92images/ --ngpus 1", "100% 
92/92 [00:00<00:00, 131.72it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer IT \\\n--sublayer output --data_path /content/Training_Data/92_Image_Set/92images/ \\\n-o /content/cornet-feats/92images/ --ngpus 1", "100% 92/92 [00:00<00:00, 135.45it/s]\n" ] ], [ [ "## Features for the 118 images dataset", "_____no_output_____" ] ], [ [ "!cd /content/CORnet-master/ && python run.py test --model S --layer decoder \\\n--sublayer avgpool --data_path /content/Training_Data/118_Image_Set/118images/ \\\n-o /content/cornet-feats/118images/ --ngpus 1", "100% 118/118 [00:00<00:00, 129.04it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V1 \\\n--sublayer output --data_path /content/Training_Data/118_Image_Set/118images/ \\\n-o /content/cornet-feats/118images/ --ngpus 1", "100% 118/118 [00:00<00:00, 137.79it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V2 \\\n--sublayer output --data_path /content/Training_Data/118_Image_Set/118images/ \\\n-o /content/cornet-feats/118images/ --ngpus 1", "100% 118/118 [00:00<00:00, 141.87it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer V4 \\\n--sublayer output --data_path /content/Training_Data/118_Image_Set/118images/ \\\n-o /content/cornet-feats/118images/ --ngpus 1", "100% 118/118 [00:00<00:00, 136.99it/s]\n" ], [ "!cd /content/CORnet-master/ && python run.py test --model S --layer IT \\\n--sublayer output --data_path /content/Training_Data/118_Image_Set/118images/ \\\n-o /content/cornet-feats/118images/ --ngpus 1", "100% 118/118 [00:00<00:00, 135.07it/s]\n" ], [ "import numpy as np\nnp.load('/content/cornet-feats/92images/CORnet-S_V1_output_feats.npy').shape", "_____no_output_____" ], [ "!cp -r /content/cornet-feats/ /content/gdrive/My\\ Drive/cornet-feats-$(date +%Y-%m-%dT%H:%M:%S)", "_____no_output_____" ] ], [ [ "## Generate RDMs", "_____no_output_____" ] ], [ [ "import create_RDMs\nimport os\nimport glob\nimport scipy.io as sio\nimport zipfile\n\n\ndef create_rdm(save_dir, feat_dir, dist):\n \"\"\"\n Main function to create RDM from activations\n Input:\n feat_dir: Directory containing activations generated using generate_features.py\n save_dir : directory to save the computed RDM\n dist : dist used for computing RDM (e.g. 
1-Pearson's R)\n\n Output (in submission format):\n The model RDMs for each layer are saved in\n save_dir/layer_name/submit_fMRI.mat to compare with fMRI RDMs\n save_dir/layer_name/submit_MEG.mat to compare with MEG RDMs\n \"\"\"\n\n # get list of layers and number of conditions(images) for RDM\n # layer_list, num_condns = get_layers_ncondns(feat_dir)\n feat_files = glob.glob(feat_dir + \"/*feats.npy\")\n \n layer_list = []\n for feat_file in feat_files:\n feat_file = os.path.basename(feat_file)\n layer_list.append(feat_file.replace('_feats.npy', ''))\n \n print(layer_list)\n cwd = os.getcwd() \n print(save_dir, feat_dir, cwd)\n \n # loops over layers and create RDM for each layer\n for feat_file in feat_files:\n layer_id = os.path.basename(feat_file).replace('_feats.npy', '')\n feats = np.load(feat_file)\n num_condns = len(feats)\n \n os.chdir(cwd)\n # RDM is num_condnsxnum_condns matrix, initialized with zeros\n RDM = np.zeros((num_condns, num_condns))\n\n #save path for RDMs in challenge submission format\n print(f'Processing layer {layer_id}...')\n \n \n RDM_dir = os.path.join(save_dir, layer_id)\n # print(RDM_dir, save_dir, os.path.join(save_dir, layer_id))\n if not os.path.exists(RDM_dir):\n os.makedirs(RDM_dir)\n \n RDM_filename_meg = os.path.join(RDM_dir,'submit_meg.mat')\n RDM_filename_fmri = os.path.join(RDM_dir,'submit_fmri.mat')\n RDM_filename_meg_zip = os.path.join(RDM_dir,'submit_meg.zip')\n RDM_filename_fmri_zip = os.path.join(RDM_dir,'submit_fmri.zip')\n #RDM loop\n for i in range(num_condns):\n for j in range(num_condns):\n #get feature for image index i and j\n feature_i = feats[i]\n feature_j = feats[j]\n \n # compute distance 1-Pearson's R\n if dist == 'pearson':\n RDM[i,j] = 1-np.corrcoef(feature_i,feature_j)[0][1]\n else:\n print(\"The\", dist, \"distance measure not implemented, please request through issues\")\n\n #saving RDMs in challenge submission format\n rdm_fmri={}\n rdm_meg={}\n rdm_fmri['EVC_RDMs'] = RDM\n rdm_fmri['IT_RDMs'] = RDM\n rdm_meg['MEG_RDMs_late'] = RDM\n rdm_meg['MEG_RDMs_early'] = RDM\n sio.savemat(RDM_filename_fmri, rdm_fmri)\n sio.savemat(RDM_filename_meg, rdm_meg)\n\n #creating zipped file for submission\n zipfmri = zipfile.ZipFile(RDM_filename_fmri_zip, 'w')\n zipmeg = zipfile.ZipFile(RDM_filename_meg_zip, 'w')\n os.chdir(RDM_dir)\n zipfmri.write('submit_fmri.mat')\n zipmeg.write('submit_meg.mat')\n zipfmri.close()\n zipmeg.close()\n \n os.chdir(cwd)\n", "_____no_output_____" ], [ "feat_dir = '/content/cornet-feats/92images/'\nrdm_dir = '/content/results/rdms/92images/pearson'\ncreate_rdm(rdm_dir, feat_dir, 'pearson')", "['CORnet-R_V2_output', 'CORnet-Z_V2_output', 'CORnet-Z_V4_output', 'CORnet-S_IT_output', 'CORnet-R_V4_output', 'CORnet-Z_V1_output', 'CORnet-R_V1_output', 'CORnet-S_V1_output', 'CORnet-S_V4_output', 'CORnet-R_IT_output', 'CORnet-S_V2_output', 'CORnet-R_decoder_avgpool', 'CORnet-Z_decoder_avgpool', 'CORnet-Z_IT_output', 'CORnet-S_decoder_avgpool']\n/content/results/rdms/92images/pearson /content/cornet-feats/92images/ /content\nProcessing layer CORnet-R_V2_output...\nProcessing layer CORnet-Z_V2_output...\nProcessing layer CORnet-Z_V4_output...\nProcessing layer CORnet-S_IT_output...\nProcessing layer CORnet-R_V4_output...\nProcessing layer CORnet-Z_V1_output...\nProcessing layer CORnet-R_V1_output...\nProcessing layer CORnet-S_V1_output...\nProcessing layer CORnet-S_V4_output...\nProcessing layer CORnet-R_IT_output...\nProcessing layer CORnet-S_V2_output...\nProcessing layer CORnet-R_decoder_avgpool...\nProcessing layer 
CORnet-Z_decoder_avgpool...\nProcessing layer CORnet-Z_IT_output...\nProcessing layer CORnet-S_decoder_avgpool...\n" ], [ "feat_dir = '/content/cornet-feats/118images/'\nrdm_dir = '/content/results/rdms/118images/pearson'\ncreate_rdm(rdm_dir, feat_dir, 'pearson')", "['CORnet-R_V2_output', 'CORnet-Z_V2_output', 'CORnet-Z_V4_output', 'CORnet-S_IT_output', 'CORnet-R_V4_output', 'CORnet-Z_V1_output', 'CORnet-R_V1_output', 'CORnet-S_V1_output', 'CORnet-S_V4_output', 'CORnet-R_IT_output', 'CORnet-S_V2_output', 'CORnet-R_decoder_avgpool', 'CORnet-Z_decoder_avgpool', 'CORnet-Z_IT_output', 'CORnet-S_decoder_avgpool']\n/content/results/rdms/118images/pearson /content/cornet-feats/118images/ /content\nProcessing layer CORnet-R_V2_output...\nProcessing layer CORnet-Z_V2_output...\nProcessing layer CORnet-Z_V4_output...\nProcessing layer CORnet-S_IT_output...\nProcessing layer CORnet-R_V4_output...\nProcessing layer CORnet-Z_V1_output...\nProcessing layer CORnet-R_V1_output...\nProcessing layer CORnet-S_V1_output...\nProcessing layer CORnet-S_V4_output...\nProcessing layer CORnet-R_IT_output...\nProcessing layer CORnet-S_V2_output...\nProcessing layer CORnet-R_decoder_avgpool...\nProcessing layer CORnet-Z_decoder_avgpool...\nProcessing layer CORnet-Z_IT_output...\nProcessing layer CORnet-S_decoder_avgpool...\n" ], [ "!cp -r /content/results/rdms /content/gdrive/My\\ Drive/cornet-rdms-$(date +%Y-%m-%dT%H:%M:%S)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
hexsha: ecd194ea6e111215ba34df16047ea0c3c92834c7
size: 403,118
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: notebooks/linear_models_regularization.ipynb
max_stars_repo_name: marcobonifacio/scikit-learn-mooc
max_stars_repo_head_hexsha: 5be03b64893310ea331bf2cdf8766cf29b79c062
max_stars_repo_licenses: [ "CC-BY-4.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: notebooks/linear_models_regularization.ipynb
max_issues_repo_name: marcobonifacio/scikit-learn-mooc
max_issues_repo_head_hexsha: 5be03b64893310ea331bf2cdf8766cf29b79c062
max_issues_repo_licenses: [ "CC-BY-4.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: notebooks/linear_models_regularization.ipynb
max_forks_repo_name: marcobonifacio/scikit-learn-mooc
max_forks_repo_head_hexsha: 5be03b64893310ea331bf2cdf8766cf29b79c062
max_forks_repo_licenses: [ "CC-BY-4.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 350.537391
max_line_length: 113,632
alphanum_fraction: 0.924719
cells:
[ [ [ "# Regularization of linear regression model\n\nIn this notebook, we will see the limitations of linear regression models and\nthe advantage of using regularized models instead.\n\nBesides, we will also present the preprocessing required when dealing\nwith regularized models, furthermore when the regularization parameter\nneeds to be tuned.\n\nWe will start by highlighting the over-fitting issue that can arise with\na simple linear regression model.\n\n## Effect of regularization\n\nWe will first load the California housing dataset.", "_____no_output_____" ], [ "<div class=\"admonition note alert alert-info\">\n<p class=\"first admonition-title\" style=\"font-weight: bold;\">Note</p>\n<p class=\"last\">If you want a deeper overview regarding this dataset, you can refer to the\nAppendix - Datasets description section at the end of this MOOC.</p>\n</div>", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_california_housing\n\ndata, target = fetch_california_housing(as_frame=True, return_X_y=True)\ntarget *= 100 # rescale the target in k$\ndata.head()", "_____no_output_____" ] ], [ [ "In one of the previous notebook, we showed that linear models could be used\neven in settings where `data` and `target` are not linearly linked.\n\nWe showed that one can use the `PolynomialFeatures` transformer to create\nadditional features encoding non-linear interactions between features.\n\nHere, we will use this transformer to augment the feature space.\nSubsequently, we will train a linear regression model. We will use the\nout-of-sample test set to evaluate the generalization capabilities of our\nmodel.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_validate\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\n\nlinear_regression = make_pipeline(PolynomialFeatures(degree=2),\n LinearRegression())\ncv_results = cross_validate(linear_regression, data, target,\n cv=10, scoring=\"neg_mean_squared_error\",\n return_train_score=True,\n return_estimator=True)", "_____no_output_____" ] ], [ [ "We can compare the mean squared error on the training and testing set to\nassess the generalization performance of our model.", "_____no_output_____" ] ], [ [ "train_error = -cv_results[\"train_score\"]\nprint(f\"Mean squared error of linear regression model on the train set:\\n\"\n f\"{train_error.mean():.3f} +/- {train_error.std():.3f}\")", "Mean squared error of linear regression model on the train set:\n4190.212 +/- 151.123\n" ], [ "test_error = -cv_results[\"test_score\"]\nprint(f\"Mean squared error of linear regression model on the test set:\\n\"\n f\"{test_error.mean():.3f} +/- {test_error.std():.3f}\")", "Mean squared error of linear regression model on the test set:\n13334.945 +/- 20292.686\n" ] ], [ [ "The score on the training set is much better. This statistical performance\ngap between the training and testing score is an indication that our model\noverfitted our training set.\n\nIndeed, this is one of the danger when augmenting the number of features\nwith a `PolynomialFeatures` transformer. Our model will focus on some\nspecific features. We can check the weights of the model to have a\nconfirmation. 
Let's create a dataframe: the columns will contain the name\nof the feature while the line the coefficients values stored by each model\nduring the cross-validation.\n\nSince we used a `PolynomialFeatures` to augment the data, we will create\nfeature names representative of the feature combination. Scikit-learn\nprovides a `get_feature_names` method for this purpose. First, let's get\nthe first fitted model from the cross-validation.", "_____no_output_____" ] ], [ [ "model_first_fold = cv_results[\"estimator\"][0]", "_____no_output_____" ] ], [ [ "Now, we can access to the fitted `PolynomialFeatures` to generate the feature\nnames", "_____no_output_____" ] ], [ [ "feature_names = model_first_fold[0].get_feature_names(\n input_features=data.columns)\nfeature_names", "_____no_output_____" ] ], [ [ "Finally, we can create the dataframe containing all the information.", "_____no_output_____" ] ], [ [ "import pandas as pd\n\ncoefs = [est[-1].coef_ for est in cv_results[\"estimator\"]]\nweights_linear_regression = pd.DataFrame(coefs, columns=feature_names)", "_____no_output_____" ] ], [ [ "Now, let's use a box plot to see the coefficients variations.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\ncolor = {\"whiskers\": \"black\", \"medians\": \"black\", \"caps\": \"black\"}\nweights_linear_regression.plot.box(color=color, vert=False, figsize=(6, 16))\n_ = plt.title(\"Linear regression coefficients\")", "_____no_output_____" ] ], [ [ "We can force the linear regression model to consider all features in a more\nhomogeneous manner. In fact, we could force large positive or negative weight\nto shrink toward zero. This is known as regularization. We will use a ridge\nmodel which enforces such behavior.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Ridge\n\nridge = make_pipeline(PolynomialFeatures(degree=2),\n Ridge(alpha=100))\ncv_results = cross_validate(ridge, data, target,\n cv=10, scoring=\"neg_mean_squared_error\",\n return_train_score=True,\n return_estimator=True)", "C:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.672e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.67257e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.75536e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.67367e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=3.5546e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.75974e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: 
LinAlgWarning: Ill-conditioned matrix (rcond=2.82401e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=4.96672e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.68318e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\nC:\\Users\\Bonifacio\\miniforge3\\envs\\blog\\lib\\site-packages\\sklearn\\linear_model\\_ridge.py:147: LinAlgWarning: Ill-conditioned matrix (rcond=2.68514e-17): result may not be accurate.\n return linalg.solve(A, Xy, sym_pos=True,\n" ], [ "train_error = -cv_results[\"train_score\"]\nprint(f\"Mean squared error of linear regression model on the train set:\\n\"\n f\"{train_error.mean():.3f} +/- {train_error.std():.3f}\")", "Mean squared error of linear regression model on the train set:\n4373.180 +/- 153.942\n" ], [ "test_error = -cv_results[\"test_score\"]\nprint(f\"Mean squared error of linear regression model on the test set:\\n\"\n f\"{test_error.mean():.3f} +/- {test_error.std():.3f}\")", "Mean squared error of linear regression model on the test set:\n7303.589 +/- 4950.732\n" ] ], [ [ "We see that the training and testing scores are much closer, indicating that\nour model is less overfitting. We can compare the values of the weights of\nridge with the un-regularized linear regression.", "_____no_output_____" ] ], [ [ "coefs = [est[-1].coef_ for est in cv_results[\"estimator\"]]\nweights_ridge = pd.DataFrame(coefs, columns=feature_names)", "_____no_output_____" ], [ "weights_ridge.plot.box(color=color, vert=False, figsize=(6, 16))\n_ = plt.title(\"Ridge weights\")", "_____no_output_____" ] ], [ [ "By comparing the magnitude of the weights on this plot compared to the\nprevious plot, we see that the magnitude of the weights are shrunk towards\nzero in comparison with the linear regression model.\n\nHowever, in this example, we omitted two important aspects: (i) the need to\nscale the data and (ii) the need to search for the best regularization\nparameter.\n\n## Scale your data!\n\nRegularization will add constraints on weights of the model. We saw in the\nprevious example that a ridge model will enforce that all weights have a\nsimilar magnitude. Indeed, the larger alpha is, the larger this enforcement\nwill be.\n\nThis procedure should make us think about feature rescaling. Let's consider\nthe case where features have an identical data dispersion: if two features\nare found equally important by the model, they will be affected similarly by\nregularization strength.\n\nNow, let's consider the scenario where features have completely different\ndata dispersion (for instance age in years and annual revenue in dollars).\nIf two features are as important, our model will boost the weights of\nfeatures with small dispersion and reduce the weights of features with\nhigh dispersion.\n\nWe recall that regularization forces weights to be closer. Therefore, we get\nan intuition that if we want to use regularization, dealing with rescaled\ndata would make it easier to find an optimal regularization parameter and\nthus an adequate model.\n\nAs a side note, some solvers based on gradient computation are expecting such\nrescaled data. Unscaled data will be detrimental when computing the optimal\nweights. 
Therefore, when working with a linear model and numerical data, it\nis generally good practice to scale the data.\n\nThus, we will add a `StandardScaler` in the machine learning pipeline. This\nscaler will be placed just before the regressor.", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\n\nridge = make_pipeline(PolynomialFeatures(degree=2), StandardScaler(),\n Ridge(alpha=0.5))\ncv_results = cross_validate(ridge, data, target,\n cv=10, scoring=\"neg_mean_squared_error\",\n return_train_score=True,\n return_estimator=True)", "_____no_output_____" ], [ "train_error = -cv_results[\"train_score\"]\nprint(f\"Mean squared error of linear regression model on the train set:\\n\"\n f\"{train_error.mean():.3f} +/- {train_error.std():.3f}\")", "Mean squared error of linear regression model on the train set:\n4347.036 +/- 156.666\n" ], [ "test_error = -cv_results[\"test_score\"]\nprint(f\"Mean squared error of linear regression model on the test set:\\n\"\n f\"{test_error.mean():.3f} +/- {test_error.std():.3f}\")", "Mean squared error of linear regression model on the test set:\n5508.472 +/- 1816.642\n" ] ], [ [ "We observe that scaling data has a positive impact on the test score and that\nthe test score is closer to the train score. It means that our model is less\noverfitted and that we are getting closer to the best generalization sweet\nspot.\n\nLet's have an additional look to the different weights.", "_____no_output_____" ] ], [ [ "coefs = [est[-1].coef_ for est in cv_results[\"estimator\"]]\nweights_ridge = pd.DataFrame(coefs, columns=feature_names)", "_____no_output_____" ], [ "weights_ridge.plot.box(color=color, vert=False, figsize=(6, 16))\n_ = plt.title(\"Ridge weights with data scaling\")", "_____no_output_____" ] ], [ [ "Compare to the previous plots, we see that now all weight manitudes are\ncloser and that all weights are more equally contributing.\n\nIn the previous analysis, we did not study if the parameter `alpha` will have\nan effect on the performance. We chose the parameter beforehand and fix it\nfor the analysis.\n\nIn the next section, we will check the impact of this hyperparameter and how\nit should be tuned.\n\n## Fine tuning the regularization parameter\n\nAs mentioned, the regularization parameter needs to be tuned on each dataset.\nThe default parameter will not lead to the optimal model. Therefore, we need\nto tune the `alpha` parameter.\n\nModel hyperparameter tuning should be done with care. Indeed, we want to\nfind an optimal parameter that maximizes some metrics. Thus, it requires both\na training set and testing set.\n\nHowever, this testing set should be different from the out-of-sample testing\nset that we used to evaluate our model: if we use the same one, we are using\nan `alpha` which was optimized for this testing set and it breaks the\nout-of-sample rule.\n\nTherefore, we should include search of the hyperparameter `alpha` within the\ncross-validation. As we saw in previous notebooks, we could use a\ngrid-search. However, some predictor in scikit-learn are available with\nan integrated hyperparameter search, more efficient than using a grid-search.\nThe name of these predictors finishes by `CV`. 
In the case of `Ridge`,\nscikit-learn provides a `RidgeCV` regressor.\n\nTherefore, we can use this predictor as the last step of the pipeline.\nIncluding the pipeline a cross-validation allows to make a nested\ncross-validation: the inner cross-validation will search for the best\nalpha, while the outer cross-validation will give an estimate of the\ntesting score.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.linear_model import RidgeCV\n\nalphas = np.logspace(-2, 0, num=20)\nridge = make_pipeline(PolynomialFeatures(degree=2), StandardScaler(),\n RidgeCV(alphas=alphas, store_cv_values=True))", "_____no_output_____" ], [ "from sklearn.model_selection import ShuffleSplit\n\ncv = ShuffleSplit(n_splits=5, random_state=1)\ncv_results = cross_validate(ridge, data, target,\n cv=cv, scoring=\"neg_mean_squared_error\",\n return_train_score=True,\n return_estimator=True, n_jobs=2)", "_____no_output_____" ], [ "train_error = -cv_results[\"train_score\"]\nprint(f\"Mean squared error of linear regression model on the train set:\\n\"\n f\"{train_error.mean():.3f} +/- {train_error.std():.3f}\")", "Mean squared error of linear regression model on the train set:\n4306.562 +/- 25.918\n" ], [ "test_error = -cv_results[\"test_score\"]\nprint(f\"Mean squared error of linear regression model on the test set:\\n\"\n f\"{test_error.mean():.3f} +/- {test_error.std():.3f}\")", "Mean squared error of linear regression model on the test set:\n4348.657 +/- 252.921\n" ] ], [ [ "By optimizing `alpha`, we see that the training an testing scores are closed.\nIt indicates that our model is not overfitting.\n\nWhen fitting the ridge regressor, we also requested to store the error found\nduring cross-validation (by setting the parameter `store_cv_values=True`).\nWe will plot the mean squared error for the different `alphas` regularization\nstrength that we tried.", "_____no_output_____" ] ], [ [ "mse_alphas = [est[-1].cv_values_.mean(axis=0)\n for est in cv_results[\"estimator\"]]\ncv_alphas = pd.DataFrame(mse_alphas, columns=alphas)\ncv_alphas", "_____no_output_____" ], [ "cv_alphas.mean(axis=0).plot(marker=\"+\")\nplt.ylabel(\"Mean squared error\\n (lower is better)\")\nplt.xlabel(\"alpha\")\n_ = plt.title(\"Error obtained by cross-validation\")", "_____no_output_____" ] ], [ [ "As we can see, regularization is just like salt in cooking: one must balance\nits amount to get the best statistical performance. We can check if the best\n`alpha` found is stable across the cross-validation fold.", "_____no_output_____" ] ], [ [ "best_alphas = [est[-1].alpha_ for est in cv_results[\"estimator\"]]\nbest_alphas", "_____no_output_____" ] ], [ [ "In this notebook, you learned about the concept of regularization and\nthe importance of preprocessing and parameter tuning.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
hexsha: ecd1988fdea539773e07dc726c464208290f57a3
size: 7,607
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: docs/jupyter_notebook_examples/maps/plot_map.ipynb
max_stars_repo_name: Duseong/CAM-chem
max_stars_repo_head_hexsha: 4e1ae05dde5730f693abcc106b8c8b966c7ed275
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 5
max_stars_repo_stars_event_min_datetime: 2018-04-13T16:49:54.000Z
max_stars_repo_stars_event_max_datetime: 2022-02-24T23:22:04.000Z
max_issues_repo_path: docs/jupyter_notebook_examples/maps/plot_map.ipynb
max_issues_repo_name: Duseong/CAM-chem
max_issues_repo_head_hexsha: 4e1ae05dde5730f693abcc106b8c8b966c7ed275
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 2
max_issues_repo_issues_event_min_datetime: 2018-07-10T20:39:13.000Z
max_issues_repo_issues_event_max_datetime: 2018-07-10T21:07:16.000Z
max_forks_repo_path: docs/jupyter_notebook_examples/maps/plot_map.ipynb
max_forks_repo_name: Duseong/CAM-chem
max_forks_repo_head_hexsha: 4e1ae05dde5730f693abcc106b8c8b966c7ed275
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 4
max_forks_repo_forks_event_min_datetime: 2020-07-22T21:03:46.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-05T09:36:09.000Z
avg_line_length: 29.145594
max_line_length: 191
alphanum_fraction: 0.564349
cells:
[ [ [ "# Example Map Plotting", "_____no_output_____" ], [ "### At the start of a Jupyter notebook you need to import all modules that you will use", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import griddata\nimport cartopy.crs as ccrs # For plotting maps\nimport cartopy.feature as cfeature # For plotting maps\nfrom cartopy.util import add_cyclic_point # For plotting maps\nimport datetime", "_____no_output_____" ] ], [ [ "### Define the directories and file of interest for your results. This can be shortened to less lines as well.", "_____no_output_____" ] ], [ [ "result_dir = \"https://github.com/NCAR/CAM-chem/tree/master/docs/data\"\nfile = \"CAM_chem_merra2_FCSD_1deg_QFED_monthoutput_201801.nc\"\n#the netcdf file is now held in an xarray dataset named 'nc' and can be referenced later in the notebook\nnc_load = xr.open_dataset(result_dir+file)\n#to see what the netCDF file contains, just call the variable you read it into\nnc_load", "_____no_output_____" ] ], [ [ "### Extract the variable of choice at the time and level of choice", "_____no_output_____" ] ], [ [ "#extract grid variables\nlat = nc_load['lat']\nlon = nc_load['lon']\n\n#extract variable\nvar_sel = nc_load['CO']\nprint(var_sel)\n\n#select the surface level at a specific time and convert to ppbv from vmr\n#var_srf = var_sel.isel(time=0, lev=55)\n#select the surface level for an average over three times and convert to ppbv from vmr\nvar_srf = var_sel.isel(time=[0,1,2], lev=55)\nvar_srf = var_srf.mean('time')\nvar_srf = var_srf*1e9\nprint(var_srf.shape)", "_____no_output_____" ], [ "# Add cyclic point to avoid white line over Africa\nvar_srf_cyc, lon_cyc = add_cyclic_point(var_srf, coord=lon) ", "_____no_output_____" ] ], [ [ "### Plot the value over a specific region", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(20,8))\n\n#Define projection\nax = plt.axes(projection=ccrs.PlateCarree())\n\n#define contour levels\nclev = np.arange(0, 100, 1)\n\n#plot the data\nplt.contourf(lon_cyc,lat,var_srf_cyc,clev,cmap='Spectral_r')\n\n# add coastlines\nax.add_feature(cfeature.COASTLINE)\n\n#add lat lon grids\nax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')\n\n#longitude limits in degrees\nax.set_xlim(20,120)\n#latitude limits in degrees\nax.set_ylim(5,60)\n\n# Title\nplt.title(\"CAM-chem January 2018 CO\")\n\n#axes\n# y-axis\nax.text(-0.09, 0.55, 'Latitude', va='bottom', ha='center',\n rotation='vertical', rotation_mode='anchor',\n transform=ax.transAxes)\n# x-axis\nax.text(0.5, -0.10, 'Longitude', va='bottom', ha='center',\n rotation='horizontal', rotation_mode='anchor',\n transform=ax.transAxes)\n# legend\nax.text(1.18, 0.5, 'CO (ppb)', va='bottom', ha='center',\n rotation='vertical', rotation_mode='anchor',\n transform=ax.transAxes)\n\nplt.colorbar()\nplt.show() ", "_____no_output_____" ] ], [ [ "### Add location markers", "_____no_output_____" ] ], [ [ "##Now lets look at the sufrace plot again, but this time add markers for observations at several points.\n#first we need to define our observational data into an array\n#this can also be imported from text files using various routines\n# Kyzylorda, Urzhar, Almaty, Balkhash\nobs_lat = np.array([44.8488,47.0870,43.2220,46.2161])\nobs_lon = np.array([65.4823,81.6315,76.8512,74.3775])\nobs_names = [\"Kyzylorda\", \"Urzhar\", \"Almaty\", \"Balkhash\"]", "_____no_output_____" ], [ "plt.figure(figsize=(20,8))\n\n#Define projection\nax = 
plt.axes(projection=ccrs.PlateCarree())\n\n#define contour levels\nclev = np.arange(0, 100, 1)\n\n#plot the data\nplt.contourf(lon_cyc,lat,var_srf_cyc,clev,cmap='Spectral_r')\n\n# add coastlines\nax.add_feature(cfeature.COASTLINE)\n\n#add lat lon grids\nax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')\n\n#longitude limits in degrees\nax.set_xlim(20,120)\n#latitude limits in degrees\nax.set_ylim(5,60)\n\n# Title\nplt.title(\"CAM-chem January 2018 CO\")\n\n#axes\n# y-axis\nax.text(-0.09, 0.55, 'Latitude', va='bottom', ha='center',\n rotation='vertical', rotation_mode='anchor',\n transform=ax.transAxes)\n# x-axis\nax.text(0.5, -0.10, 'Longitude', va='bottom', ha='center',\n rotation='horizontal', rotation_mode='anchor',\n transform=ax.transAxes)\n# legend\nax.text(1.18, 0.5, 'CO (ppb)', va='bottom', ha='center',\n rotation='vertical', rotation_mode='anchor',\n transform=ax.transAxes)\n\n#convert your observation lat/lon to Lambert-Conformal grid points\n#xpt,ypt = m(obs_lon,obs_lat)\n\n#to specify the color of each point it is easiest plot individual points in a loop\nfor i in range(4):\n plt.plot(obs_lon[i], obs_lat[i], linestyle='none', marker=\"o\", markersize=8, alpha=0.8, c=\"black\", markeredgecolor=\"black\", markeredgewidth=1, transform=ccrs.PlateCarree())\n plt.text(obs_lon[i] - 0.8, obs_lat[i] - 0.5, obs_names[i], horizontalalignment='right', transform=ccrs.PlateCarree())\n\n \nplt.colorbar()\nplt.show() ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
hexsha: ecd19b9422d2f528ffbdc8ac0ae46c8f34163f8e
size: 8,591
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Array/quality_mosaic.ipynb
max_stars_repo_name: jdgomezmo/gee
max_stars_repo_head_hexsha: 7016c47ee902dbf60b1aeb6319424c61c1107345
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-11-16T22:07:42.000Z
max_stars_repo_stars_event_max_datetime: 2020-11-16T22:07:42.000Z
max_issues_repo_path: Array/quality_mosaic.ipynb
max_issues_repo_name: tingli3/earthengine-py-notebooks
max_issues_repo_head_hexsha: 7016c47ee902dbf60b1aeb6319424c61c1107345
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Array/quality_mosaic.ipynb
max_forks_repo_name: tingli3/earthengine-py-notebooks
max_forks_repo_head_hexsha: 7016c47ee902dbf60b1aeb6319424c61c1107345
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 46.945355
max_line_length: 1,031
alphanum_fraction: 0.593179
cells:
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Array/quality_mosaic.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Array/quality_mosaic.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Array/quality_mosaic.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as geemap\nexcept:\n import geemap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function. 
", "_____no_output_____" ] ], [ [ "Map = geemap.Map(center=[40,-100], zoom=4)\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Add Earth Engine dataset\n# Array-based quality mosaic.\n\n# Returns a mosaic built by sorting each stack of pixels by the first band\n# in descending order, and taking the highest quality pixel.\n# function qualityMosaic(bands) {\ndef qualityMosaic(bands):\n # Convert to an array, and declare names for the axes and indices along the\n # band axis.\n array = bands.toArray()\n imageAxis = 0\n bandAxis = 1\n qualityIndex = 0\n valuesIndex = 1\n\n # Slice the quality and values off the main array, and sort the values by the\n # quality in descending order.\n quality = array.arraySlice(bandAxis, qualityIndex, qualityIndex + 1)\n values = array.arraySlice(bandAxis, valuesIndex)\n valuesByQuality = values.arraySort(quality.multiply(-1))\n\n # Get an image where each pixel is the array of band values where the quality\n # band is greatest. Note that while the array is 2-D, the first axis is\n # length one.\n best = valuesByQuality.arraySlice(imageAxis, 0, 1)\n\n # Project the best 2D array down to a single dimension, and convert it back\n # to a regular scalar image by naming each position along the axis. Note we\n # provide the original band names, but slice off the first band since the\n # quality band is not part of the result. Also note to get at the band names,\n # we have to do some kind of reduction, but it won't really calculate pixels\n # if we only access the band names.\n bandNames = bands.min().bandNames().slice(1)\n return best.arrayProject([bandAxis]).arrayFlatten([bandNames])\n# }\n\n# Load the l7_l1t collection for the year 2000, and make sure the first band\n# is our quality measure, in this case the normalized difference values.\nl7 = ee.ImageCollection('LANDSAT/LE07/C01/T1') \\\n .filterDate('2000-01-01', '2001-01-01')\nwithNd = l7.map(lambda image: image.normalizedDifference(['B4', 'B3']).addBands(image))\n\n# Build a mosaic using the NDVI of bands 4 and 3, essentially showing the\n# greenest pixels from the year 2000.\ngreenest = qualityMosaic(withNd)\n\n# Select out the color bands to visualize. An interesting artifact of this\n# approach is that clouds are greener than water. So all the water is white.\nrgb = greenest.select(['B3', 'B2', 'B1'])\n\nMap.addLayer(rgb, {'gain': [1.4, 1.4, 1.1]}, 'Greenest')\nMap.setCenter(-90.08789, 16.38339, 11)\n\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
hexsha: ecd1a1a849a916b878f5de884ca29c0eb550a14b
size: 49,249
ext: ipynb
lang: Jupyter Notebook
max_stars_repo_path: Lectures/Regression/3. Multiple Linear Regression.ipynb
max_stars_repo_name: cr718c3ramire/ml_boot_camp2021
max_stars_repo_head_hexsha: 425ad04b29404fb4e1bca90addef2ee69a5bc692
max_stars_repo_licenses: [ "ECL-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Lectures/Regression/3. Multiple Linear Regression.ipynb
max_issues_repo_name: cr718c3ramire/ml_boot_camp2021
max_issues_repo_head_hexsha: 425ad04b29404fb4e1bca90addef2ee69a5bc692
max_issues_repo_licenses: [ "ECL-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: Lectures/Regression/3. Multiple Linear Regression.ipynb
max_forks_repo_name: cr718c3ramire/ml_boot_camp2021
max_forks_repo_head_hexsha: 425ad04b29404fb4e1bca90addef2ee69a5bc692
max_forks_repo_licenses: [ "ECL-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
avg_line_length: 28.550145
max_line_length: 431
alphanum_fraction: 0.559565
cells:
[ [ [ "# Multiple Linear Regression\n\nSo far we've covered simple linear regression. As we saw, this algorithm/model is quite limitted in what it can accomplish. We'll now expand this model to make a more versatile model.\n\n## What We'll Accomplish in This Notebook\n\nIn this notebook we'll do the following:\n\n- Set up the multiple linear regression statistical model,\n- Derive the least squares estimate for the model,\n- Discuss modeling of categorical variables with one-hot encoding,\n- Introduce interaction terms,\n- Show how to model polynomial and other nonlinear terms in a linear regression setting\n\nRemember my note about the math from the last notebook, if you're not a math person don't become panicked if there is math content you don't entirely understand. You can always ask questions during a breakout session or on Slack :).", "_____no_output_____" ] ], [ [ "# import the packages we'll use\n## For data handling\nimport pandas as pd\nimport numpy as np\n\n# We'll use this later\nfrom numpy import meshgrid\n\n## For plotting\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# This is new, it will allow us to interact with\n# 3d plots in the notebook\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n## This sets the plot style\n## to have a grid on a white background\nsns.set_style(\"whitegrid\")", "_____no_output_____" ] ], [ [ "## What Changes Going From Feature to Features?\n\n### The Statistical Model\n\nAssume that our data set has $n$ observations.\n\nRecall that in simple linear regression the statistical model is:\n$$\ny = \\beta_0 + \\beta_1 X + \\epsilon,\n$$\nwhere $X$ is an $n\\times 1$ feature vector, $y$ is an $n\\times 1$ target vector, and $\\epsilon$ is an $n\\times 1$ vector of independent $\\epsilon_i \\sim N(0,\\sigma^2)$ for all $i$. \n\n\nFor multiple linear regression instead of a single feature we have $m$ features. In this setting the model becomes:\n$$\ny = \\beta_0 + \\beta_1 X_1 + \\beta_2 X_2 + \\dots + \\beta_m X_m + \\epsilon ,\n$$\nwhere $X_1,X_2,\\dots,X_m$ are the $m$ features, and $\\epsilon$ is the same as in SLR. If we collect all of the features as a single column along with a column of $1$s in an $n \\times m$ feature matrix,\n$$\nX = \\left(\\begin{array}{c | c | c | c | c}\n 1 & X_1 & X_2 & \\dots & X_m\n\\end{array}\\right),\n$$\nand let $\\beta=\\left(\\beta_0, \\beta_1, \\beta_2,\\dots,\\beta_m\\right)^T$ then the statistical model for MLR becomes:\n$$\ny = X\\beta + \\epsilon.\n$$\n\nIn the explanatory modeling setting we're still making all of the same assumptions as in SLR, but now $y$ is a linear function of $m$ predictors instead of just $1$. \n\n### Estimating the Model\n\nWhen it comes to estimating $\\beta$ we can get away with just using `sklearn` or `statsmodels`. 
However it's useful to know how to calculate the estimate 'by hand'.\n\n#### Minimizing MSE\n\nWe again set out to minimize the MSE\n$$\nMSE = \\frac{1}{n} \\sum_{i=1}^n \\left( y_i - \\hat{y_i} \\right)^2 = \\frac{1}{n} \\sum_{i=1}^n \\left( y_i - X_{i,\\bullet} \\hat{\\beta} \\right)^2 = \\frac{1}{n} (y-X\\beta)^T ( y-X\\beta) = y^T y - \\beta^TX^Ty - y^T X \\beta + \\beta^T X^T X \\beta.\n$$\n\nTaking the derivative with respect to $\\beta$ and setting equal to $0$ gives:\n$$\nX^T X \\beta - X^T y = 0, \\text{ and so } \\hat{\\beta} = \\left( X^TX \\right)^{-1} X^T y.\n$$\nThis is the estimate of $\\beta$ that minimizes the MSE, you may hear people refer to this as the normal equation or the Ordinary Least Squares (OLS) Solution.\n\n### Predicting Sales Based on Ad Buys\n\nNow that we understand the MLR setup. Let's use it to model sales given how much money was spent on various advertising mediums. The data is stored in `Advertising.csv`. ", "_____no_output_____" ] ], [ [ "## Read in the data\nads = pd.read_csv(\"Advertising.csv\")\n\nprint(\"There are\", len(ads), \"observations in the data set.\")\nprint(\"The columns are\", ads.columns)", "_____no_output_____" ], [ "## Make the train test split\nads_copy = ads.copy()\n\n## Set aside 20% of the data\n## make 614 the random_state\nads_train = ads_copy.sample(frac = .80, random_state = 614)\nads_test = ads_copy.drop(ads_train.index)", "_____no_output_____" ], [ "# examine training head\nads_train.head()", "_____no_output_____" ] ], [ [ "This data set came from the book, <a href=\"https://www.statlearning.com/\">Introduction to Statistical Learning</a>, by Gareth James, Daniela Witten, Trevor Hastie and Robert Tibshirani. \n\nThe data can be found here, <a href=\"https://www.statlearning.com/resources-first-edition\">https://www.statlearning.com/resources-first-edition</a>.\n\nHow can we decide which features to include? We'll dive more into model selection in the next notebook, for now let's look at what features have the strongest correlation with `sales`. ", "_____no_output_____" ] ], [ [ "ads_train.corr()['sales'].sort_values()", "_____no_output_____" ] ], [ [ "It looks like `radio` and `TV` are the most promising when it comes to having a linear relationship. Both have relatively strong positive linear relationships according to the Pearson correlation. In addition to examining $r$, this data set has so few features we can produce what is called a scatter matrix. <i>This should look familiar to those that did the python prep notebooks<i>.", "_____no_output_____" ] ], [ [ "## scatter matrix makes a matrix of scatter plots\nfrom pandas.plotting import scatter_matrix", "_____no_output_____" ], [ "## first input the dataframe you want to see a \n## scatter matrix for\n## then enter figsize and other plotting arguments\nscatter_matrix(ads_train, figsize = (12,12), alpha = 1)\n\nplt.show()", "_____no_output_____" ] ], [ [ "There definitely appears to be some relationship between `sales` and `TV` as well as `sales` and `radio`. 
For now we'll treat these as linear relationships, but stay tuned.\n\nIt seems that a reasonable starting model would be\n$$\n\\text{sales} = \\beta_0 + \\beta_1 \\text{TV} + \\beta_2 \\text{radio} + \\epsilon\n$$\nLet's fit the coefficients using the formula we just learned before just using `sklearn` or `statsmodels`.", "_____no_output_____" ] ], [ [ "## make X\nX_train = np.ones([len(ads_train),3])\nX_train[:,1] = np.array(ads_train.TV)\nX_train[:,2] = np.array(ads_train.radio)\n\n## Make y\ny_train = np.array(ads_train.sales)\n\n## Calculate beta_hat\nbeta_hat = np.linalg.inv(X_train.transpose().dot(X_train)).dot(X_train.transpose()).dot(y_train)", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(beta_hat[0],5))\nprint(\"beta_1_hat is\",np.round(beta_hat[1],5))\nprint(\"beta_2_hat is\",np.round(beta_hat[2],5))", "_____no_output_____" ] ], [ [ "#### You Code\n\nUse `statsmodels` and `sklearn` to fit the model we just fit by hand. ", "_____no_output_____" ] ], [ [ "## Import the packages we'll need here\n\n\n", "_____no_output_____" ], [ "## Fit the model using statsmodels here\n## Be sure to print out the table\n\n\n", "_____no_output_____" ], [ "## Fit the model using sklearn here\n## store your model object in reg\n\n\n", "_____no_output_____" ], [ "## Print out the coefficients and then intercept \n## from the sklearn model here\n\n\n", "_____no_output_____" ], [ "## We can use sklearn to make a prediction on the\n## training data as well, do so here\n\n\n", "_____no_output_____" ] ], [ [ "#### A Note Before Continuing: A Slight Change in Interpretation\n\nWe can still interpret the fit, but the specific details are slightly different. Let's interpret $\\hat{\\beta_1}$, this has been estimated as $0.04645$. We can interpret this as:\n\n<i>for a $1$ unit increase in TV holding all other variables constant, we estimate a $0.04645$ increase in sales.</i>\n\n", "_____no_output_____" ], [ "### Plotting the Regression\n\nThis is more difficult with multiple features. Because we only have two features we can make a $3$-D plot like so.\n\nThe code below will produce a mesh grid of input values for `TV` and `radio`. 
We'll then use that grid to generate the model predictions and plot them in a 3-D graph.", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression\nreg = LinearRegression(copy_X = True)\n\nreg.fit(ads_train[['TV','radio']],ads_train['sales'])", "_____no_output_____" ], [ "## get the prediction this is where meshgrid comes in handy\nx1s = np.linspace(ads_train.TV.min(), ads_train.TV.max(), 10)\nx2s = np.linspace(ads_train.radio.min(), ads_train.radio.max(), 10)\n\n## this makes a meshgrid\nx1v, x2v = np.meshgrid(x1s, x2s)\n\n## combine x1v and x2v into a single array for prediction\nX_grid = np.concatenate([x1v.reshape(-1,1), x2v.reshape(-1,1)], axis=1)\n\n## predict at each grid point\npred_grid = reg.predict(X_grid)\n\n## add the predictions to the grid array\nX_grid = np.concatenate([X_grid, pred_grid.reshape(-1,1)], axis=1)", "_____no_output_____" ], [ "%matplotlib notebook\n## Now we plot the regression plane\n## along with the training observations\n\n## Make a figure object\nfig = plt.figure(figsize=(8,6))\n\n## We'll add a 3d subplot object\nax = fig.add_subplot(111, projection='3d')\n\n## plot_trisurf makes a surface out of triangles\n## it will take in the TV grid on the x-axis\n## the radio grid on the y-axis\n## and the prediction grid on the z-axis\n## alpha <1 allows us to see through the surface\nax.plot_trisurf(X_grid[:, 0], X_grid[:, 1], X_grid[:,2], alpha=0.4)\n\n## scatter will plot the observations from the training set\nax.scatter(ads_train['TV'], ads_train['radio'], ads_train['sales'], c=\"r\", alpha=1, label=\"Training Data\")\n\n## Add labels\nax.set_xlabel(\"TV\", fontsize=14)\nax.set_ylabel(\"radio\", fontsize=14)\nax.set_zlabel(\"sales\", fontsize=14)\n\nplt.legend(fontsize=14)\n\nplt.show()", "_____no_output_____" ] ], [ [ "You can rotate the plot and examine how well we fit the data with our model. As you rotate it does seem like the residuals may not be random, so perhaps our model was not the best choice. We'll examine this more closely in later in the notebook when we introduce transformations of features and interaction terms.", "_____no_output_____" ] ], [ [ "# We have to do this because of\n# the %matplotlib notebook argument in the above code block\nplt.close()", "_____no_output_____" ] ], [ [ "### You Code\n\nReturn once again to the `carseats` data set. Examine the continuous features using correlations and scatter plots and produce a MLR model to predict `Sales`. Just use your best judgement, it's okay if you don't produce the best model right now. 
We will be touching on model selection later.\n\nNote you can read about the variables in `carseats` here, <a href=\"https://rdrr.io/cran/ISLR/man/Carseats.html\">https://rdrr.io/cran/ISLR/man/Carseats.html</a>.", "_____no_output_____" ] ], [ [ "## Read in the carseats data\ncarseats = pd.read_csv(\"carseats.csv\")\n\n## Make the same train test split as the last notebook\ncarseats_train = carseats.copy().sample(frac=.75,random_state = 440)\ncarseats_test = carseats.copy().drop(carseats_train.index)", "_____no_output_____" ], [ "## Try plotting a scatter matrix here\n\n\n", "_____no_output_____" ], [ "## You'll need to run this after\nplt.close()", "_____no_output_____" ], [ "## Fit a Model here\n\n\n", "_____no_output_____" ], [ "## Code here (if needed)\n\n", "_____no_output_____" ] ], [ [ "## Dealing with Qualitative Predictors with Beer Data\n\nNow that we have a grasp of how MLR works with continuous predictors let's examine how we can also include qualitative predictors like binary or categorical features.\n\nTo do this we'll look at `beer.csv`, our goal is to model `IBU`.", "_____no_output_____" ] ], [ [ "## Read in the csv\nbeers = pd.read_csv(\"beer.csv\")\n\nprint(\"There are\", len(beers), \"observations in the data set.\")\nprint(\"The columns are\", beers.columns)\n\n## Check the percentage of each Beer_Type to\n## see if we need to stratify our train test split\nbeers.Beer_Type.value_counts(normalize=True)", "_____no_output_____" ], [ "## train test split\nbeers_copy = beers.copy()\n\nbeers_train = beers_copy.sample(frac=.75,random_state = 614)\nbeers_test = beers_copy.drop(beers_train.index)", "_____no_output_____" ], [ "## Sanity check to see our percentages are close \n## to the same as the original\nbeers_train.Beer_Type.value_counts(normalize=True)", "_____no_output_____" ] ], [ [ "### Why Add Qualitative Data to Regression\n\nNow why might we want to add in this qualitative data? Because it can add more context. Let's use `seaborn`'s `lmplot` to demonstrate with the `beer` data.", "_____no_output_____" ] ], [ [ "## First plot a normal regression of ibu on abv\nsns.lmplot(data = beers_train, x=\"ABV\", y=\"IBU\", \n height=6, ci = False)\n\nplt.xlabel(\"ABV\", fontsize=16)\nplt.ylabel(\"IBU\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ], [ "# now plot a regression with \"hue\" listed as Beer_Type\nsns.lmplot(data = beers_train, x=\"ABV\", y=\"IBU\", \n hue = \"Beer_Type\", height=2, \n ci = False)\n\nplt.xlabel(\"ABV\", fontsize=16)\nplt.ylabel(\"IBU\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "So the idea is that while there may exist a linear relationship between `ABV` and `IBU`, it is possible that the exact relationship is different based on the type of beer you're looking at.\n\nMaking a `lmplot` is one way to see if there is an effect from a qualitative variable, but it isn't always feasible, especially with a lot of possible qualitative values. 
Another way to probe for an effect is with a box and whisker plot.", "_____no_output_____" ] ], [ [ "# We'll use seaborn to make this\nfig,ax = plt.subplots(figsize=(6,8))\n\n# boxplot takes in the data, the x-axis variable, the y-axis variable\n# and produces a box and whisker plot\nsns.boxplot(data = beers_train,x = \"Beer_Type\",y = \"IBU\",ax=ax)\n\nplt.xlabel(\"Beer Type\", fontsize = 16)\nplt.ylabel(\"IBU\", fontsize = 16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "A box plot is a way of showing the distribution of a continuous variable. The bottom of each box shows the $25^\\text{th}$ percentile of the distribution, the middle line shows the median of the distribution, and the $75^\\text{th}$ percentile of the distribution. \n\nFor the `beers` data our plot shows that the distribution of `IBU` is slightly different by `Beer_Type`. Giving more evidence that we may want to include `Beer_Type` as a qualitative feature in the model.\n\n\n### How to Include Qualitative Features - One-Hot Encoding\n\nWe cannot just include the `Beer_Type` variable in the model like `reg.fit(beers_train[['ABV','Beer_Type']],beers_train['IBV'])` because our features must be numeric not strings. So we must change our strings into numbers. The way this is done in general is called <i>one-hot encoding</i>.\n\n#### How to One-Hot Encode\n\nIn general suppose your qualitative variable, $Q$, has $k$ possible values. In our example $k = 2$. Then you create $k-1$ new indicator variables,\n$$\n1_{l,i} = \\left\\lbrace \\begin{array}{c c}\n 0 & \\text{if } Q_i \\neq l \\\\\n 1 & \\text{if } Q_i = l\n\\end{array} \\right.,\n$$\nfor $l$ being any of the first $k-1$ $k$ possible values, and where $i$ denotes the $i^\\text{th}$ observation in the data set. By the process of elimination if each of the $k-1$ $1_l$ are $0$ then the value of $Q$ must be the $k^\\text{th}$ possible value.\n\nSo for our `beers` data we would create a `Stout` indicator that is $1$ if the beer is a `Stout` and $0$ if not.\n\n<i>Note: We only need $k-1$ indicators because of the process of elimination. In our example we only need $1$ indicator because if an observation is not a stout we know it is an ipa, and vice versa. 
Pay careful attention to the number of indicators you use in your model, this is a common mistake new modelers make.</i>\n\nLet's see how to create this in `python`.", "_____no_output_____" ] ], [ [ "## We can do it by hand\n## First set all beers as 0 for the variable Stout\nbeers_train['Stout'] = 0\n\n## Then locate all the stouts and set Stout to 1\nbeers_train.loc[beers_train.Beer_Type == \"Stout\",'Stout'] = 1", "_____no_output_____" ], [ "beers_train.head(10)", "_____no_output_____" ], [ "## By hand is easy for a binary option, but\n## tedious for variables with many categories\n## in that case pandas.get_dummies is useful\n## https://pandas.pydata.org/docs/reference/api/pandas.get_dummies.html, docs\n\npd.get_dummies(beers_train['Beer_Type'])", "_____no_output_____" ], [ "## For us we'd only keep the `Stout` column\nbeers_train['Stout'] = pd.get_dummies(beers_train['Beer_Type'])['Stout']", "_____no_output_____" ], [ "beers_train.head(10)", "_____no_output_____" ] ], [ [ "Now we are ready to fit the following model:\n$$\n\\text{IBU} = \\beta_0 + \\beta_1 \\text{ABV} + \\beta_2 \\text{Stout} + \\epsilon\n$$", "_____no_output_____" ] ], [ [ "## We'll use sklearn\nreg = LinearRegression(copy_X = True)\n\n## fit the model\nreg.fit(beers_train[['ABV','Stout']],beers_train['IBU'])", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(reg.intercept_,5))\nprint(\"beta_1_hat is\",np.round(reg.coef_[0],5))\nprint(\"beta_2_hat is\",np.round(reg.coef_[1],5))", "_____no_output_____" ], [ "### Here we'll plot our regression line\n\n## We can plot the two lines by limiting our prediction input\nstout_values = np.ones((100,2))\nstout_values[:,0] = np.linspace(beers_train['ABV'].min(),beers_train['ABV'].max(),100)\n\n## make a stout prediction\nstout_pred = reg.predict(stout_values)\n\n## make ipa values\nipa_values = np.zeros((100,2))\nipa_values[:,0] = np.linspace(beers_train['ABV'].min(),beers_train['ABV'].max(),100)\nipa_pred = reg.predict(ipa_values)\n\n## Let's plot\nplt.figure(figsize=(6,5))\n\n## plot stout values\nplt.scatter(beers_train.loc[beers_train.Stout == 1,'ABV'], \n beers_train.loc[beers_train.Stout == 1,'IBU'],\n c = 'blue', alpha = .8, label=\"Stout Training Data\")\n\n## plot the stout line\nplt.plot(stout_values[:,0], stout_pred, \"k--\", label=\"Stout Regression Line\")\n\n## plot ipa values\nplt.scatter(beers_train.loc[beers_train.Stout == 0,'ABV'], \n beers_train.loc[beers_train.Stout == 0,'IBU'],\n c = 'orange', alpha = .8, label=\"IPA Training Data\")\n\n## plot the ipa line\nplt.plot(ipa_values[:,0], ipa_pred, \"k\", label=\"IPA Regression Line\")\n\nplt.legend(fontsize = 14)\n\nplt.xlabel(\"ABV\",fontsize=16)\nplt.ylabel(\"IBU\",fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "### Why isn't My Line the Same?\n\nNow notice, this is NOT the line we produced above with the the `sns.lmplot` command. That's because we have not included the interaction between `ABV` and `Stout`. \n\n#### A Better Explanation - Look at the Model\n\nRecall our model\n$$\n\\text{IBU} = \\beta_0 + \\beta_1 \\text{ABV} + \\beta_2 \\text{Stout} + \\epsilon.\n$$\n\nWhen Stout $=0$ the intercept for our line is $\\beta_0$. When Stout $=1$ the intercept becomes $\\beta_0 + \\beta_2$. But, notice in either case the slope of the line stays the same, $\\beta_1$. If you look back at the `seaborn` plot we produced, the lines do not have the same slope. So clearly our statistical model is missing something that the `seaborn` model has. 
We'll return to this after you practice some coding.\n\n\n### You Code\n\nTake your model for the `carseats` data from earlier. Examine the feature labeled `ShelveLoc`. Does it appear that this qualitatve variable has an impact on `Sales`? Add this feature to the model you made earlier in the notebook.", "_____no_output_____" ] ], [ [ "## Examine ShelveLoc here\n\n", "_____no_output_____" ], [ "## Remember to run this if you make a plot\nplt.close()", "_____no_output_____" ], [ "## Make indicator variables here for ShelveLoc here\n\n", "_____no_output_____" ], [ "## Fit your model with indicators here\n\n## Find the training MSE\n\n\n", "_____no_output_____" ] ], [ [ "## Interaction Terms\n\nLet's return to our issue with the beer regression line.\n\nRemember the model we fit:\n$$\n\\text{IBU} = \\beta_0 + \\beta_1 \\text{ABV} + \\beta_2 \\text{Stout} + \\epsilon.\n$$\n\nWe noted that this model only allows for different intercepts for Stouts and IPAs, but not slopes. \n\n\nHowever if we add in the term $\\beta_3 \\text{Stout} \\times \\text{ABV}$ like so,\n$$\n\\text{IBU} = \\beta_0 + \\beta_1 \\text{ABV} + \\beta_2 \\text{Stout} + \\beta_3 \\text{Stout} \\times \\text{ABV} + \\epsilon\n$$\n\nthen when Stout $=0$ the slope of the line is $\\beta_1$ and when Stout $=1$ the slope of the line $\\beta_1 + \\beta_3$. The $\\text{Stout} \\times \\text{ABV}$ is the interaction term between Stout and ABV.\n\n<i>Note in the Regression Homework Sets you are asked to interpret the coefficient estimates for this model.</i>\n\nLet's make an interaction term and then fit this new model with `sklearn`.", "_____no_output_____" ] ], [ [ "## make the interaction term\nbeers_train['Stout_ABV'] = beers_train['Stout']*beers_train['ABV']", "_____no_output_____" ], [ "## You make and fit the model here\n## use sklearn and store the model in reg\nreg = LinearRegression(copy_X = True)\n\nreg.fit(beers_train[['ABV','Stout','Stout_ABV']],beers_train['IBU'])", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(reg.intercept_,5))\nprint(\"beta_1_hat is\",np.round(reg.coef_[0],5))\nprint(\"beta_2_hat is\",np.round(reg.coef_[1],5))\nprint(\"beta_3_hat is\",np.round(reg.coef_[2],5))", "_____no_output_____" ], [ "### Again this code will plot the lines, don't worry about it\n### for now\n\n## We can plot the two lines by limiting our prediction input\nstout_values = np.ones((100,3))\n\n## make the ABV values\nstout_values[:,0] = np.linspace(beers_train['ABV'].min(),beers_train['ABV'].max(),100)\n\n## now the interaction term\nstout_values[:,2] = stout_values[:,0]*stout_values[:,1]\n\n## make a stout prediction\nstout_pred = reg.predict(stout_values)\n\n## make ipa values\nipa_values = np.zeros((100,3))\nipa_values[:,0] = np.linspace(beers_train['ABV'].min(),beers_train['ABV'].max(),100)\nipa_values[:,2] = ipa_values[:,0]*ipa_values[:,1]\nipa_pred = reg.predict(ipa_values)\n\n\n## Let's plot\nplt.figure(figsize=(6,5))\n\n## plot stout values\nplt.scatter(beers_train.loc[beers_train.Stout == 1,'ABV'], \n beers_train.loc[beers_train.Stout == 1,'IBU'],\n c = 'blue', alpha = .8, label=\"Stout Training Data\")\n\n## plot the stout line\nplt.plot(stout_values[:,0], stout_pred, \"k--\", label=\"Stout Regression Line\")\n\n## plot ipa values\nplt.scatter(beers_train.loc[beers_train.Stout == 0,'ABV'], \n beers_train.loc[beers_train.Stout == 0,'IBU'],\n c = 'orange', alpha = .8, label=\"IPA Training Data\")\n\n## plot the ipa line\nplt.plot(ipa_values[:,0], ipa_pred, \"k\", label=\"IPA Regression 
Line\")\n\nplt.legend(fontsize = 14)\n\nplt.xlabel(\"ABV\",fontsize=16)\nplt.ylabel(\"IBU\",fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "Now this looks more like the `seaborn.lmplot`!\n\nAdding interaction terms can also help with non-random residual plots as we'll see in a next example of this notebook.\n\n### You Code\n\nLoad the following data set called `inter`. Build a regression model regressing $y$ on $x_1$ and $x_2$.", "_____no_output_____" ] ], [ [ "inter = pd.read_csv(\"inter.csv\")\n\ninter.head()", "_____no_output_____" ], [ "## Explore the categorical variable x2 with\n## sns.lmplot here\n\n", "_____no_output_____" ], [ "## Run this to close the plot\nplt.close()", "_____no_output_____" ], [ "## Get the appropriate indicator and interaction terms here\n\n\n", "_____no_output_____" ], [ "## Fit the model here\n\n\n", "_____no_output_____" ] ], [ [ "## Polynomial Regression\n\nNow not every relationship in the world is a line or a plane. \n\nLet's look at a synthetic dataset and see an example.", "_____no_output_____" ] ], [ [ "df = pd.read_csv(\"poly.csv\")\n\nprint(\"There are\",len(df),\"observations.\")\nprint(\"The columns are\",df.columns)", "_____no_output_____" ], [ "# Make the train test split\ndf_copy = df.copy()\n\ndf_train = df_copy.sample(frac=.75,random_state = 614)\ndf_test = df_copy.drop(df_train.index)", "_____no_output_____" ], [ "df_train.head()", "_____no_output_____" ], [ "scatter_matrix(df_train, figsize=(8,8))\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "Now there certainly appears to be a relationship between $x_1$ and $y$ and $x_2$ and $y$. While the relationship between $x_2$ and $y$ might be linear, the relationship between $x_1$ and $y$ is definitely not linear.\n\nOne way to address this is to make a polynomial transformation of $x_1$. For instance $x_1^2$.", "_____no_output_____" ] ], [ [ "df_train['x1_sq'] = df_train['x1']**2", "_____no_output_____" ], [ "scatter_matrix(df_train, figsize=(8,8))\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "The relationship between $y$ and $x_1^2$ seems somewhat linear, let's now include it in a model:\n$$\ny = \\beta_0 + \\beta_1 x_1 + \\beta_2 x_1^2 + \\beta_3 x_2 + \\epsilon,\n$$\nand then we'll fit this model.", "_____no_output_____" ] ], [ [ "reg = LinearRegression(copy_X = True)\n\nreg.fit(df_train[['x1','x1_sq','x2']], df_train['y'])", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(reg.intercept_,5))\nprint(\"beta_1_hat is\",np.round(reg.coef_[0],5))\nprint(\"beta_2_hat is\",np.round(reg.coef_[1],5))\nprint(\"beta_3_hat is\",np.round(reg.coef_[2],5))", "_____no_output_____" ], [ "# Let's examine the residual plot\npred = reg.predict(df_train[['x1','x1_sq','x2']])\n\nres = df_train['y'] - pred", "_____no_output_____" ], [ "plt.figure(figsize = (5,4))\n\nplt.scatter(pred,res)\n\nplt.xlabel(\"Predicted Values\", fontsize=16)\nplt.ylabel(\"Residuals\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "Definitely not random!\n\nAn obviously nonrandom residual plot indicates that there is some signal in the data not being captured by our model. One way to address this is to add an interaction term. 
Let's try adding in $x_1 x_2$, so our model becomes:\n$$\ny = \\beta_0 + \\beta_1 x_1 + \\beta_2 x_1^2 + \\beta_3 x_2 + \\beta_4 x_1 x_2 + \\epsilon\n$$", "_____no_output_____" ] ], [ [ "# First add the interaction term to the df\ndf_train['x1_x2'] = df_train['x1']*df_train['x2']", "_____no_output_____" ], [ "reg = LinearRegression(copy_X = True)\n\nreg.fit(df_train[['x1','x1_sq','x2','x1_x2']], df_train['y'])", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(reg.intercept_,5))\nprint(\"beta_1_hat is\",np.round(reg.coef_[0],5))\nprint(\"beta_2_hat is\",np.round(reg.coef_[1],5))\nprint(\"beta_3_hat is\",np.round(reg.coef_[2],5))\nprint(\"beta_4_hat is\",np.round(reg.coef_[3],5))", "_____no_output_____" ], [ "# Let's re-examine the residual plot\npred = reg.predict(df_train[['x1','x1_sq','x2','x1_x2']])\n\nres = df_train['y'] - pred", "_____no_output_____" ], [ "plt.figure(figsize = (8,6))\n\nplt.scatter(pred, res)\n\nplt.xlabel(\"Predicted Values\", fontsize=16)\nplt.ylabel(\"Residuals\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "So much better!\n\n#### Which is the Best Model?\n\nWhile this certainly seems like the best model, we'll discuss how we can compare the two models in a more robust way in a later notebook.\n\n#### Do I Need to Include it?\n\nIf you notice the coefficient on $x_1$ is close to $0$ in the interaction term model. It may be tempting to remove this feature from the model especially if the true relationship was:\n$$\ny = 2 + x_1^2 - 10 x_2 + x_1 x_2.\n$$\nHowever, there is no way for you to know ahead of time what the true relationship is between the target and the features, if there was there'd be no need for regression. \n\nTo further illustrate this point, imagine the true relationship was such that:\n$$\ny \\propto x_1^2,\n$$\nif we do not include $x_1$ in our model we are limiting ourselves to parabolas of the form\n$$\n\\beta_0 + \\beta_1 x_1^2,\n$$\nwhich leaves out a number of possible parabolas.\n\nIt is important to remember that anytime you make a model that includes a polynomial transformation you need to include all of the lesser powers as well. So with $x_1^2$ as the highest power you'd need to include $x_1$, with $x_1^3$ as the highest power you'd need to include $x_1^2$ and $x_1$, and so on for $x_1^n$.\n\nThis also holds for interaction terms. If you include $x_1 x_2$ you need to include both $x_1$ and $x_2$ as predictors as well.\n\n\n### You Code\n\nThe data labeled `df` below came from a job interview problem set. In the homework you'll have to build the best predictive model you can on the data. For now examine the relationship between `y` and `x1`. Build a model to predict `y` using `x1`.", "_____no_output_____" ] ], [ [ "## Load the data\ndf = pd.read_csv(\"PredictiveModelingAssessmentData.csv\")\n\n## Make a train test split\ndf_train = df.copy().sample(frac = .75, random_state = 440)\ndf_test = df.copy().drop(df_train.index)\ndf_train.head()", "_____no_output_____" ], [ "## Examine the scatter matrix here\n\n\n", "_____no_output_____" ], [ "## Run this to close the plot\nplt.close()", "_____no_output_____" ], [ "## Get the appropriately transformed column\n\n\n", "_____no_output_____" ], [ "## Fit the Regression here\n\n\n", "_____no_output_____" ] ], [ [ "## Other Popular Transformations\n\nWe can add in more than just polynomials. 
There are other popular transformations including $\\log$s, roots, $\\sin$, $\\cos$, $\\tan$, exponentials, and more.\n\nWe'll work through an example returning to our `Advertising` data then you'll make a model for the interview data.", "_____no_output_____" ] ], [ [ "scatter_matrix(ads_train, figsize = (8,8), alpha = 1)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ], [ "plt.figure(figsize = (5,5))\n\nplt.scatter(np.sqrt(ads_train.TV),ads_train.sales)\n\nplt.xlabel(\"$\\sqrt{TV}$\", fontsize = 16)\nplt.ylabel(\"sales\", fontsize = 16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "This looks much more linear. Let's replace `TV` in our model from Notebook 3 with root `TV`.\n$$\n\\text{sales} = \\beta_0 + \\beta_1 \\sqrt{\\text{TV}} + \\beta_2 \\text{radio} + \\epsilon\n$$", "_____no_output_____" ] ], [ [ "# Add in root tv to the df\nads_train['sqrt_TV'] = np.sqrt(ads_train.TV)", "_____no_output_____" ], [ "# Make the model object\nreg = LinearRegression(copy_X = True)", "_____no_output_____" ], [ "# Fit the data\nreg.fit(ads_train[['sqrt_TV','radio']],ads_train['sales'])", "_____no_output_____" ], [ "print(\"beta_0_hat is\",np.round(reg.intercept_,5))\nprint(\"beta_1_hat is\",np.round(reg.coef_[0],5))\nprint(\"beta_2_hat is\",np.round(reg.coef_[1],5))", "_____no_output_____" ], [ "# Look at the residual plot\npred = reg.predict(ads_train[['sqrt_TV','radio']])\n\nres = ads_train['sales'] - pred", "_____no_output_____" ], [ "plt.figure(figsize = (8,6))\n\nplt.scatter(pred,res)\n\nplt.xlabel(\"Predicted Values\", fontsize=16)\nplt.ylabel(\"Residuals\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "Again not random. Let's add in the interaction term.", "_____no_output_____" ] ], [ [ "ads_train['sqrtTV_radio'] = ads_train['sqrt_TV'] * ads_train['radio']", "_____no_output_____" ], [ "## Make the model object\nreg = LinearRegression(copy_X = True)", "_____no_output_____" ], [ "## Fit the data\nreg.fit(ads_train[['sqrt_TV','radio','sqrtTV_radio']],ads_train['sales'])\n\n## We'll want to look at these later\nprint(reg.coef_)", "_____no_output_____" ], [ "## Let's re-examine the residual plot\npred = reg.predict(ads_train[['sqrt_TV','radio','sqrtTV_radio']])\n\nres = ads_train['sales'] - pred", "_____no_output_____" ], [ "plt.figure(figsize = (5,4))\n\nplt.scatter(pred,res)\n\nplt.xlabel(\"Predicted Values\", fontsize=16)\nplt.ylabel(\"Residuals\", fontsize=16)\n\nplt.show()", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "### You Code\n\nReturn to the `PredictiveModelingAssessmentData.csv` data set. Does it appear that $y$ and $x_2$ have a linear relationship?\n\nTry playing around with different non-linear transformations of $x_2$ (note don't use polynomials).", "_____no_output_____" ] ], [ [ "## You code here\n\n", "_____no_output_____" ], [ "plt.close()", "_____no_output_____" ] ], [ [ "That's it for this notebook in the next notebook we'll discuss model selection for predictive models.", "_____no_output_____" ], [ "This notebook was written for the Erd&#337;s Institute C&#337;de Data Science Boot Camp by Matthew Osborne, Ph. D., 2021.\n\nRedistribution of the material contained in this repository is conditional on acknowledgement of Matthew Tyler Osborne, Ph.D.'s original authorship and sponsorship of the Erdős Institute as subject to the license (see License.md)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ecd1b7fce23ea9e08a91add7a1563e356c501296
10,170
ipynb
Jupyter Notebook
Untitled.ipynb
dantegates/dantegates.github.io
ef81968a060cdd6c107c1ce865866a87947ad547
[ "MIT" ]
null
null
null
Untitled.ipynb
dantegates/dantegates.github.io
ef81968a060cdd6c107c1ce865866a87947ad547
[ "MIT" ]
null
null
null
Untitled.ipynb
dantegates/dantegates.github.io
ef81968a060cdd6c107c1ce865866a87947ad547
[ "MIT" ]
1
2018-07-25T20:16:56.000Z
2018-07-25T20:16:56.000Z
59.823529
580
0.69233
[ [ [ "# Seven must know tools for a Python developer\n\nThis year I spent six months at a startup leading the development of a large scale analytics platform written in Python. This post lists some of the tools I found particularly useful for writing high quality software. I won't discuss any of these in great detail here (although some topics are likely to be covered at greater length in a future post). Rather, this post simply documents some of the great tools out there that a Python developer should be aware of.\n\nAs an aside, after writing this post I noticed that all of these tools have one thing in common: minimal barrier to entry. However, that's no surprise since I favor elegant, simplistic and easy-to-use solutions.", "_____no_output_____" ], [ "## Redis\n\n**Why you should know about it**: Redis is an open source Key-Value store that plays *really* nicely with Python. It's lightweight and has a very small learning curve. Queues, caches and pub-sub are only the beginning.\n\n**Commentary**: Redis was certainly one of my favorite tools while working on this project and became my go-to solution for just about every need that surfaced due to distributed or parallel processing.\n\nWhat really got me hooked on Redis was that I was able to hook it into Python's built-in logging library to allow logging across many Python instances at once in under a day with no prior experience using the library. Its simplicity is its strongest feature in my book.\n\nFurthermore, though incredibly simple, Redis is an incredibly flexible data structure, essentially a hash map that you can access from any application over a network. In addition to using its message broker capabilities as a logging solution, some other things I have used it for include building a [functools.lru_cache](https://docs.python.org/3/library/functools.html#functools.lru_cache)-like decorator that works across separate Python instances, a cache for API requests and the backend of a service used for coordinating the assignment of unique IDs across processes.", "_____no_output_____" ], [ "## Docker\n\n**Why you should know about it**: You need your apps to be portable. You need to be sure that your team is developing in the same environment. You're in charge of deployment even though you aren't devops (work in a startup?). You would like to just be able to *pull* an environment with all of your code's dependencies (java, C, odbc, etc.) pre-installed for you.\n\n**Commentary**: I feel like there is so much talk about Docker right now that I don't need to say much about it here. I'll only add that from my experience as a lead software engineer some of its biggest benefits were:\n\n- Starting a usable Redis instance with a *single* command.\n- A replacement for virtualenv.\n- Portability. When it looked like one of our VMs in the cloud needed to be completely wiped and restarted I didn't sweat a drop. If your app is Dockerized there's no need to worry about the pains of the installation process of your software's dependencies.", "_____no_output_____" ], [ "## Flask\n\n**Why you should know about it**: Flask is the *simplest* framework out there (in Python) for building webapps. It's perfect for small applications, like microservices, and prototypes.\n\n**Commentary**: Python developers shouldn't think of the well known [Flask](http://flask.pocoo.org/) library as for web developers only. In my opinion Flask is the best solution for writing REST APIs in Python.
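To give a feel for what I mean, here is a minimal sketch of such a service (the route name and payload are made up purely for illustration, not taken from a real project):\n\n```python\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\n@app.route('/status')\ndef status():\n    # a tiny JSON endpoint; swap in your own resources here\n    return jsonify(ok=True)\n\nif __name__ == '__main__':\n    app.run()\n```\n\n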
Its simplicity makes the task of turning your code into apps incredibly simple.\n\nPair with [Gunicorn](http://gunicorn.org/) and package with Docker for a tried and true pattern for deploying your apps.", "_____no_output_____" ], [ "## coverage.py\n\n**Why you should know about it**: coverage.py is a straightforward resource for generating reports on the test coverage of your code base.\n\n**Commentary**: [coverage](https://coverage.readthedocs.io/en/coverage-4.4.1/) is a great library for analyzing your application's test coverage. It plays very nicely with Python's built-in [unittest](https://docs.python.org/3/library/unittest.html) library so if you already have tests for your application it's very easy to get started with coverage.\n\nOf course, this library can't tell you anything about the quality of your tests but it builds really nice reports giving high-level information on your code that has test coverage as well as very low-level information (down to the line level). This library was great for identifying which modules needed tests and more than once it showed me that I was missing a test for a certain case (as in an `if-else` clause).", "_____no_output_____" ], [ "## Makefiles\n\n**Why you should know about it**: But we're writing in Python right? Yes, but it turns out Makefiles are a great tool for building quick CLI tools in a Python repository. The link below says it all in 40 words.\n\n**Commentary**: I came across this gem of an idea in this section of the [Hitchhiker's guide to Python](http://docs.python-guide.org/en/latest/writing/structure/#makefile). Want to have the ability to initialize your repository's filesystem, execute the entrypoint of your code or run your test suite with simple commands at the command line with a *minimum* amount of effort? If so, then check out the link above.", "_____no_output_____" ], [ "## git's pre-commit hook\n\n**Why you should know about it**: git's pre-commit hook is a nice little tool you can use to customize your commits.\n\n**Commentary**: If you are using [git](https://git-scm.com/) as the VCS for your project then the [pre-commit hook](https://git-scm.com/docs/githooks#_pre_commit) is a feature you should definitely know of. The pre-commit hook simply executes a script every time you call `git commit` and if the script returns a non-zero exit code the commit gets aborted.\n\nOne of the most common use cases for this hook is to run a test suite each time you commit. If the test suite fails, so does the commit. Even if you have your test suite running via some CI tool when you push your code to the master repository it's helpful to have these tests run automatically when developing on your local machine.
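For reference, the hook itself is just an executable script at `.git/hooks/pre-commit`; a minimal sketch (assuming your tests live in a `tests/` directory and run under `unittest`) looks like this:\n\n```sh\n#!/bin/sh\n# abort the commit whenever the test suite fails\npython -m unittest discover -s tests || exit 1\n```\n\nAny non-zero exit here stops the commit, which is exactly the behavior described above.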
", "_____no_output_____" ], [ "## mock.patch\n\n**Why you should know about it**: The [unittest.mock](https://docs.python.org/3/library/unittest.mock.html) package is a great built-in (in Python 3.3+, for Python 2 you need to `pip` install `mock`) for overriding dependencies you don't want to run in your tests.\n\n**Commentary**: Before I used `unittest.mock`, when I needed to override some dependency (say a class that accesses a database) my test code looked something like this:\n\n```python\nimport unittest\nfrom mymod import ClassWithDependency\n\nclass SomeFakeClass:\n    ...\n\nclass PseudoClassWithDependency(ClassWithDependency):\n    attribute_to_override = SomeFakeClass\n\nclass TestClassWithDependency(unittest.TestCase):\n    def test(self):\n        # do something that uses PseudoClassWithDependency\n        ...\n```\n\nNow often this sort of design pattern for dependency injection is actually the right choice. However, the test code is rather verbose as it requires creating new classes. Furthermore, suppose you need to test several behaviors you might experience when using `ClassWithDependency.attribute_to_override`. If this is so, you now may end up with classes such as `PseudoClassWithDependencySuccess` and `PseudoClassWithDependencyFail` and your test code is even more verbose than before.\n\nIt's essential for developers to be confident in their tests and this means that test code should be as simple as possible. However the pattern above violates this principle.\n\nFortunately, we can use `unittest.mock` to give us the same functionality with a standard interface supported by the PSF. Here's an example of what the code above would look like using `unittest.mock.patch`.\n\n```python\nimport unittest\nfrom unittest import mock\nfrom mymod import ClassWithDependency\n\nclass TestClassWithDependency(unittest.TestCase):\n    @mock.patch('mymod.ClassWithDependency.attribute_to_override')\n    def test_success(self, mock_attr_to_override):\n        mock_attr_to_override.return_value = True\n        # do something with the *real* class ClassWithDependency\n        ...\n\n    @mock.patch('mymod.ClassWithDependency.attribute_to_override')\n    def test_fail(self, mock_attr_to_override):\n        mock_attr_to_override.return_value = False\n        # do something with the *real* class ClassWithDependency\n        ...\n```", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
ecd1bb636c35bd9b8fda610c044413fc5675fe80
4,578
ipynb
Jupyter Notebook
examples/Jupyter/ClarityViz Tutorial.ipynb
jonl1096/seelvizorg
ae4e3567ce89eb62edcd742060619fdf1883b991
[ "Apache-2.0" ]
null
null
null
examples/Jupyter/ClarityViz Tutorial.ipynb
jonl1096/seelvizorg
ae4e3567ce89eb62edcd742060619fdf1883b991
[ "Apache-2.0" ]
null
null
null
examples/Jupyter/ClarityViz Tutorial.ipynb
jonl1096/seelvizorg
ae4e3567ce89eb62edcd742060619fdf1883b991
[ "Apache-2.0" ]
null
null
null
30.52
209
0.628222
[ [ [ "# ClarityViz Tutorial", "_____no_output_____" ], [ "## Import the clarityviz modules\n- the **claritybase** module loads all the initial img files and generates a csv file for the points\n- the **densitygraph** module takes the graphml file generated from clarityviz, performs all the necessary calculations and generates a graph with a color scheme representative of node density\n- the **atlasregiongraph** module takes a csv and generates a graph color coded by region according to the atlas", "_____no_output_____" ] ], [ [ "from clarityviz import claritybase\nfrom clarityviz import densitygraph\nfrom clarityviz import atlasregiongraph", "_____no_output_____" ] ], [ [ "## After modules are imported, begin with claritybase\nThe following are the functions used to produce the essential files. After you perform these operations, you can choose to do the calculations and/or display whichever graphs you want.", "_____no_output_____" ] ], [ [ "token = 'Fear199'\nsource_directory = '/cis/home/alee/claritycontrol/code/data/raw'\n\n# Initialize the claritybase object, the initial basis for all operations.\n# After you initialize with a token and source directory, a folder will be created in your current directory\n# with the token name, and all the output files will be stored there.\ncb = claritybase(token, source_directory)\n\ncb.applyLocalEq()\n\ncb.loadGeneratedNii()\n\ncb.calculatePoints(threshold = 0.9, sample = 0.1)\n\n# After you calculate all the points you can generate your first graph, a basic plotly that shows all\n# the nodes. Fancier graphs come later, after edges are calculated and some other operations are performed.\ncb.generate_plotly_html()\n\n# savePoints generates the csv file of all the points in the graph.\ncb.savePoints()\n\n# plot3d calculates all the edges between the nodes.\ncb.plot3d()\n\n# graphmlconvert() creates a graphml file based on the nodes and edges file generated in plot3d.\ncb.graphmlconvert()", "_____no_output_____" ] ], [ [ "## Once the graphml file is generated with graphmlconvert(), you can use the densitygraph module.\nThe density graph module is used to visualize the density of nodes in the graph, i.e. the density of neurons, in a colored fashion.", "_____no_output_____" ] ], [ [ "# Uses the same token as before, must be in the same directory as before.\ndg = densitygraph(token)\n\n# generates a 3d plotly with color representations of density\ndg.generate_density_graph()\n\n# generates a heat map, essentially a legend, telling how many edges a certain color represents,\n# with number of edges representing how dense a certain node clustering may be.\ndg.generate_heat_map()", "_____no_output_____" ] ], [ [ "## Once the csv file is generated with savePoints(), we can use the atlasregiongraph module.\nThis module creates a graph color coded to the different regions of the brain according to the atlas.", "_____no_output_____" ] ], [ [ "regiongraph = atlasregiongraph(token)\n\nregiongraph.generate_atlas_region_graph()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd1c9fb2e8dffb437432605616e07a2de7bedc0
51,166
ipynb
Jupyter Notebook
source/examples/demo/titanic.ipynb
ASmirnov-HORIS/lets-plot-docs
fb15e81ca0f03d54539c098ce4ee725f03a03d2f
[ "MIT" ]
null
null
null
source/examples/demo/titanic.ipynb
ASmirnov-HORIS/lets-plot-docs
fb15e81ca0f03d54539c098ce4ee725f03a03d2f
[ "MIT" ]
null
null
null
source/examples/demo/titanic.ipynb
ASmirnov-HORIS/lets-plot-docs
fb15e81ca0f03d54539c098ce4ee725f03a03d2f
[ "MIT" ]
1
2021-06-30T10:05:13.000Z
2021-06-30T10:05:13.000Z
34.247657
308
0.450807
[ [ [ "## Visualization of the Titanic's voyage.\n\nThe tasks completed in this notebook:\n- Load an interactive basemap layer.\n- Geocode Titanic's ports of of embarkation and show them as markers on the map.\n- Show the \"Titanic's site\" on the map.\n- Geocode the Titanic destination port and show on the map.\n- Connect all markers on the map with dashed lines.\n- Compute a simple statistic related to the ports of of embarkation and show the plot and the map on the same figure.\n\nWe will use the [Lets-Plot for Python](https://github.com/JetBrains/lets-plot#lets-plot-for-python) library for all charting and geocoding tasks in this notebook.\n\nThe Titanic dataset for this demo was downloaded from [\"Titanic: cleaned data\" dataset](https://www.kaggle.com/jamesleslie/titanic-cleaned-data?select=train_clean.csv) (train_clean.csv) available at [kaggle](https://www.kaggle.com).", "_____no_output_____" ] ], [ [ "from lets_plot import *\n\nLetsPlot.setup_html()", "_____no_output_____" ] ], [ [ "### The ports of embarkation.\n\nTitanic's ports of of embarkation were:\n- Southampton (UK)\n- Cherbourg (France)\n- Cobh (Ireland)\n\nLets find geographical coordinates of these cities using the `Lets-Plot` geocoding package.", "_____no_output_____" ] ], [ [ "from lets_plot.geo_data import *\n\nports_of_embarkation = ['Southampton', 'Cherbourg', 'Cobh']", "The geodata is provided by © OpenStreetMap contributors and is made available here under the Open Database License (ODbL).\n" ] ], [ [ "#### 1. Using the `geocode..` function.\n\nTo geocode our port cities we can try to call the `geocode()` function:\n\n geocode(level='city', names=ports_of_embarkation)\nor its equivalent:\n\n geocode_cities(names=ports_of_embarkation)\n\nUnfortunately, this call results in a `ValueError`:\n\n>Multiple objects (6) were found for Southampton:\n>- Southampton (United Kingdom, England, South East)\n>- Southampton (United States of America, New York, Suffolk County)\n>- Southampton (United States of America, Massachusetts)\n>- Southampton Township (United States of America, New Jersey, Burlington County)\n>- Lower Southampton Township (United States of America, Pennsylvania, Bucks County)\n>- Upper Southampton Township (United States of America, Pennsylvania, Bucks County)\n>Multiple objects (2) were found for Cherbourg:\n>- Saint-Jean-de-Cherbourg (Canada, Québec, Bas-Saint-Laurent, La Matanie)\n>- Cherbourg-en-Cotentin (France, France métropolitaine, Normandie, Manche)\n", "_____no_output_____" ] ], [ [ "#\n# This call will fail with an error shown above.\n#\n#geocode_cities(ports_of_embarkation)", "_____no_output_____" ] ], [ [ "#### 2. Resolving geocoding ambiguity using the `scope()` method.\n\nWe can try to resolve ambiguity of the name \"Southampton\" (found in the United Kingdom and in the US)\nand the name \"Cherbourg\" (found in Canada and France) by narrowing the scope of search using \nthe `scope()` function:\n\n geocode_cities(ports_of_embarkation).scope(geocode_countries(['France', 'UK']))\n\nBut this call results in another `ValueError`:\n\n>No objects were found for Cobh.", "_____no_output_____" ] ], [ [ "#\n# This call will fail with \"No objects were found for Cobh.\" error.\n#\n#geocode_cities(ports_of_embarkation).scope(geocode_countries(['France', 'UK']))", "_____no_output_____" ] ], [ [ "An alternative of resolving these geo-coding issues is to specify\nthe names of all \"parent\" countries. 
\n\nThe \"parent\" names must be in the same order \nas the names of the geocoded cities:", "_____no_output_____" ] ], [ [ "cities_gcoder=geocode_cities(ports_of_embarkation).countries(['UK', 'France', 'Ireland'])\ncities_gcoder.get_geocodes()", "_____no_output_____" ] ], [ [ "#### 3. Using `where()` qualifiers for advanced geocoding.\n\nThere are situations when `scope()` or \"parents\" methods \nwill not resolve all geocoding ambiguities.\n\nLet's resolve ambiguity of names \"Southampton\" and \"Cherbourg\" with the help of the `where()` qualifier.", "_____no_output_____" ] ], [ [ "ports_of_embarkation_gcoder = geocode_cities(ports_of_embarkation) \\\n .where('Cherbourg', scope='France') \\\n .where('Southampton', scope='England')\nports_of_embarkation_gcoder.get_geocodes()", "_____no_output_____" ] ], [ [ "### Markers on interactive base-map.\n\nThe `Lets-Plot` API makes it easy to create an interactive basemap layer using either its own vector tiles service or \nby configuring a 3-rd party ZXY raster tile providers.\n\nIn this notebook we will use beautifull *CARTO Antique* raster tiles by [CARTO](https://carto.com/attribution/) as our basemap.\n\nSimple markers (points) can be added to the base-map either via the `geom_point` layer\nor directly on the `livemap` base-layer.\n\nIn this demo we will add the ports of embarkation markers right to the `livemap` base-layer (using the `map` parameter)\nand, later, add the other markers and shapes via additional `geom` layers.", "_____no_output_____" ] ], [ [ "LetsPlot.set(\n maptiles_zxy(\n url='https://cartocdn_c.global.ssl.fastly.net/base-antique/{z}/{x}/{y}@2x.png',\n attribution='<a href=\"https://www.openstreetmap.org/copyright\">© OpenStreetMap contributors</a> <a href=\"https://carto.com/attributions#basemaps\">© CARTO</a>, <a href=\"https://carto.com/attributions\">© CARTO</a>'\n )\n)", "_____no_output_____" ], [ "basemap = (ggplot() + ggsize(800, 300) +\n geom_livemap(map=ports_of_embarkation_gcoder,\n size=7, \n shape=21, color='black', fill='yellow'))\n\nbasemap", "_____no_output_____" ] ], [ [ "### The 'Titanic's site' marker", "_____no_output_____" ] ], [ [ "from shapely.geometry import Point, LineString\ntitanic_site = Point(-38.056641, 46.920255)\n\n# Add marker using `geom_point` geometry layer.\ntitanic_site_marker = geom_point(x=titanic_site.x, y = titanic_site.y, size=10, shape=9, color='red')\nbasemap + titanic_site_marker", "_____no_output_____" ] ], [ [ "### Connecting markers on map.\n\nThe `ports_of_embarkation_gcoder` variable in this demo is an object of the type `Geocoder`. 
\n\nObject `Geocoder`, if necessary, can be tranfrormed to a `GeoDataFrame`\nby calling its `get_centroids()`, `get_boundaries()` or `get_limits()` method.\n\nTo create the Titanic's path we will use the `get_centroids()` method to obtain the points of embarkation and then append the \"Titanic's site\" point to complete the polyline.", "_____no_output_____" ] ], [ [ "from geopandas import GeoSeries\nfrom geopandas import GeoDataFrame\n\n# The points of embarkation\nembarkation_points = ports_of_embarkation_gcoder.get_centroids().geometry\ntitanic_journey_points = embarkation_points.append(GeoSeries(titanic_site), ignore_index=True)\n\n# Create a new GeoDataFrame containing a `LineString` geometry.\ntitanic_journey_gdf = GeoDataFrame(dict(geometry=[LineString(titanic_journey_points)]))\n\n# Add polyline to the plot using the `geom_path` layer.\ntitanic_path = geom_path(map=titanic_journey_gdf, color='dark-blue', linetype='dotted', size=1.2)\nbasemap + titanic_path + titanic_site_marker", "_____no_output_____" ] ], [ [ "### The last segment that Titanic didn't made.", "_____no_output_____" ] ], [ [ "# Geocoding of The New York City is a trivial task.\nNYC = geocode_cities(['New York']).get_centroids().geometry[0]\n\nmap_layers = (titanic_path \n + geom_segment(x=titanic_site.x, y=titanic_site.y, \n xend=NYC.x, yend=NYC.y, \n color='gray', linetype='dotted', size=1.2)\n + geom_point(x=NYC.x, y=NYC.y, size=7, shape=21, color='black', fill='white')\n + titanic_site_marker)\n\nbasemap + map_layers", "_____no_output_____" ] ], [ [ "### Titanic's survival rates by the port of embarkation.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv(\"https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/titanic.csv\")\ndf.head(3)", "_____no_output_____" ] ], [ [ "In this Titanic dataset the column `Embarked`contains a single-letter codes of the Titanic's ports of embarkation:\n- S: Southampton (UK)\n- C: Cherbourg (France)\n- Q: Cobh (Ireland)\n\nLets visualize the \"Survived\" counts by the port of embarkation:", "_____no_output_____" ] ], [ [ "from lets_plot.mapping import as_discrete\n\nbars = ggplot(df) \\\n + geom_bar(aes('Embarked', fill=as_discrete('Survived')), position='dodge') \\\n + scale_fill_discrete(labels=['No', 'Yes']) \\\n + scale_x_discrete(labels=['Southampton', 'Cobh', 'Cherbourg'], limits=['S', 'C', 'Q'])\n\nbars + ggsize(800, 250)", "_____no_output_____" ] ], [ [ "### The final figure.", "_____no_output_____" ] ], [ [ "bars_settings = theme(axis_title='blank', \n axis_line='blank', \n axis_ticks_y='blank',\n axis_text_y='blank',\n panel_grid='blank',\n legend_position=[1.12, 1.07],\n legend_justification=[1, 1]) + scale_x_discrete(expand=[0, 0.05])\n\n\nmap = ggplot() + ggsize(800, 300) \\\n + geom_livemap(map=ports_of_embarkation_gcoder, \n size=8, \n shape=21, color='black', fill='yellow',\n zoom=4, location=[-12, 48])\n\nfig = GGBunch()\nfig.add_plot(map + map_layers, 0, 0)\nfig.add_plot(bars + bars_settings, 535, 135, 250, 150)\nfig.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd1e2004fe52cdf65444bceb423934677e38d9d
6,389
ipynb
Jupyter Notebook
Image/symbology.ipynb
c11/earthengine-py-notebooks
144b57e4d952da095ba73c3cc8ce2f36291162ff
[ "MIT" ]
1
2020-05-31T14:19:59.000Z
2020-05-31T14:19:59.000Z
Image/symbology.ipynb
c11/earthengine-py-notebooks
144b57e4d952da095ba73c3cc8ce2f36291162ff
[ "MIT" ]
null
null
null
Image/symbology.ipynb
c11/earthengine-py-notebooks
144b57e4d952da095ba73c3cc8ce2f36291162ff
[ "MIT" ]
null
null
null
42.593333
1,031
0.578338
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/symbology.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/symbology.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/symbology.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as emap\nexcept:\n import geemap as emap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. 
", "_____no_output_____" ] ], [ [ "Map = emap.Map(center=[40,-100], zoom=4)\nMap.add_basemap('ROADMAP') # Add Google Map\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Add Earth Engine dataset\ncover = ee.Image('MODIS/051/MCD12Q1/2012_01_01').select('Land_Cover_Type_1')\n\nigbpPalette = [\n 'aec3d4',\n '152106', '225129', '369b47', '30eb5b', '387242',\n '6a2325', 'c3aa69', 'b76031', 'd9903d', '91af40',\n '111149',\n 'cdb33b',\n 'cc0013',\n '33280d',\n 'd7cdcc',\n 'f7e084',\n '6f6f6f'\n]\n\nMap.setCenter(-99.229, 40.413, 5)\nMap.addLayer(cover, {'min': 0, 'max': 17, 'palette': igbpPalette}, 'MODIS Land Cover')\n\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd1e2fd9135717f26596a49b8ace9dd72500c69
45,377
ipynb
Jupyter Notebook
quests/bq-teradata/01_teradata_bq_essentials/solution/bigquery_essentials_for_teradata_users.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
2
2022-01-06T11:52:57.000Z
2022-01-09T01:53:56.000Z
quests/bq-teradata/01_teradata_bq_essentials/solution/bigquery_essentials_for_teradata_users.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
null
null
null
quests/bq-teradata/01_teradata_bq_essentials/solution/bigquery_essentials_for_teradata_users.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
null
null
null
41.592117
856
0.641052
[ [ [ "# BigQuery Essentials for Teradata Users\n", "_____no_output_____" ], [ "\n\nIn this lab you will take an existing 2TB+ [TPC-DS benchmark dataset](http://www.tpc.org/tpc_documents_current_versions/pdf/tpc-ds_v2.10.0.pdf) and learn common day-to-day activities you'll perform in BigQuery. \n\n### What you'll do\n\nIn this lab, you will learn how to:\n\n- Use BigQuery to access and query the TPC-DS benchmark dataset\n- Understand common differences between Teradata and BigQuery\n- Run pre-defined queries to establish baseline performance benchmarks\n\n\n### BigQuery\n\n[BigQuery](https://cloud.google.com/bigquery/) is Google's fully managed, NoOps, low cost analytics database. With BigQuery you can query terabytes and terabytes of data without managing infrastructure. BigQuery allows you to focus on analyzing data to find meaningful insights.\n\n## TPC-DS Background\nIn order to benchmark the performance of a data warehouse we first must get tables and data to run queries against. There is a public organization, TPC, that provides large benchmarking datasets to companies explicitly for this purpose. The purpose of TPC benchmarks is to provide relevant, objective performance data to industry users.\n\nThe TPC-DS Dataset we will be using comprises of __25 tables__ and __99 queries__ that simulate common data analysis tasks. View the full documentation [here](http://www.tpc.org/tpc_documents_current_versions/pdf/tpc-ds_v2.11.0.pdf).\n\n## Exploring TPC-DS in BigQuery\n\nThe TPC-DS tables have been loaded into BigQuery for you to explore. We have limited the size to 2TB for the timing of this lab but the dataset itself can be expanded as needed.\n\nNote: The TPC Benchmark and TPC-DS are trademarks of the Transaction Processing Performance Council (http://www.tpc.org). The Cloud DW benchmark is derived from the TPC-DS Benchmark and as such is not comparable to published TPC-DS results.", "_____no_output_____" ], [ "## Google Cloud and BigQuery organization", "_____no_output_____" ], [ "First, a note on resource hierarchy. At the lowest level, resources are the fundamental components that make up all Google Cloud services. Examples of resources include Compute Engine Virtual Machines (VMs), Pub/Sub topics, Cloud Storage buckets, App Engine instances, and BigQuery datasets. All these lower level resources can only be parented by projects, which represent the first grouping mechanism of the Google Cloud resource hierarchy.\n\nYou may have noticed you had a project name in the upper left of the console when you opened this notebook:\n\n\n<img src=\"img/project.png\">\n", "_____no_output_____" ], [ "You can also run a local `gcloud` command to detect what your project and id currently are set:", "_____no_output_____" ] ], [ [ "%%bash\n\ngcloud config list", "_____no_output_____" ] ], [ [ "Google Cloud resources are organized hierarchically. Starting from the bottom of the hierarchy, projects are the first level, and they contain other resources. All resources except for organizations have exactly one parent. The Organization is the top of the hierarchy and does not have a parent.\n\nFolders are an additional grouping mechanism on top of projects.\n\n<img src=\"img/cloud-folders-hierarchy.png\">", "_____no_output_____" ], [ "For the purposes of a BigQuery user, this is helpful to know as access management policies (IAM) and Organizational policies are largely imposed at the project, folder, or organizational level. 
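For example, if you want to see which members have been granted roles at the project level, you can inspect the project's IAM policy from a notebook cell; the project id below is a placeholder you would swap for your own:\n\n```\n!gcloud projects get-iam-policy your-project-id\n```\n\n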
Also, BigQuery \"Reservations\", or chunks of allocated Bigcompute (but not storage) are currently assigned at the project or folder level.", "_____no_output_____" ], [ "### BigQuery Datasets", "_____no_output_____" ], [ "<p>A dataset is contained within a specific <a href=\"https://cloud.google.com/bigquery/docs/projects\">project</a>. Datasets\n are top-level containers that are used to organize and control access to your\n <a href=\"https://cloud.google.com/bigquery/docs/tables\">tables</a> and <a href=\"https://cloud.google.com/bigquery/docs/views\">views</a>. A table\n or view must belong to a dataset, so you need to create at least one dataset before\n <a href=\"https://cloud.google.com/bigquery/docs/loading-data\">loading data into BigQuery</a>.</p>\n \n BigQuery datasets are subject to the following limitations:\n\n* You can set the geographic location at creation time only. After a dataset has\n been created, the location becomes immutable and can't be changed by using the\n Console, using the `bq` tool, or calling the `patch` or\n `update` API methods.\n* All tables that are referenced in a query must be stored in datasets in the\n same location\n* When [you copy a table](https://cloud.google.com/bigquery/docs/managing-tables#copy-table), the\n datasets that contain the source table and destination table must reside in\n the same location.\n* Dataset names must be unique for each project.", "_____no_output_____" ], [ "How many datasets are in your current project? Run the following to find out:", "_____no_output_____" ] ], [ [ "!bq ls", "_____no_output_____" ] ], [ [ "For this lab, you will be accessing data stored in _another_ project, in this case a publically accessible sample project `qwiklabs-resources`. See how many datasets exist in this project:", "_____no_output_____" ] ], [ [ "!bq ls --project_id qwiklabs-resources", "_____no_output_____" ] ], [ [ "And let's look at the tables and views in one of these datasets:", "_____no_output_____" ] ], [ [ "!bq ls --project_id qwiklabs-resources tpcds_2t_baseline", "_____no_output_____" ] ], [ [ "But how are we able to access other data? And won't querying that data create work in that user's cluster? Not at all! Because BigQuery has completely separated the compute and storage layers so they can scale independently, we can easily query data (so long as we have permissions) that are in public datasets or datasets from other teams, without incurring compute costs for them, _and without slowing their queries down, even if we're accessing the same data_.\n\nTo explain why, we dive a little deeper into the architecture of BigQuery.", "_____no_output_____" ], [ "## BigQuery Architecture\n\nBigQuery’s serverless architecture decouples storage and compute and allows them to scale independently on demand. This structure offers both immense flexibility and cost controls for customers because they don’t need to keep their expensive compute resources up and running all the time. This is very different from traditional node-based cloud data warehouse solutions or on-premise massively parallel processing (MPP) systems. 
This approach also allows customers of any size to bring their data into the data warehouse and start analyzing their data using Standard SQL without worrying about database operations and system engineering.\n\n<img src=\"img/bq_explained_2.jpg\">", "_____no_output_____" ], [ "Under the hood, BigQuery employs a vast set of multi-tenant services driven by low-level Google infrastructure technologies like [Dremel, Colossus, Jupiter and Borg](https://cloud.google.com/blog/products/gcp/bigquery-under-the-hood).\n\n<img src=\"img/bq_explained_3.jpg\">", "_____no_output_____" ], [ "__Compute is Dremel, a large multi-tenant cluster that executes SQL queries.__\n\nDremel turns SQL queries into distributed, scaled-out execution plans. The nodes of these execution plans are called slots and do the heavy lifting of reading data from storage and any necessary computation. \n\nDremel dynamically apportions slots to queries on an as-needed basis, maintaining fairness for concurrent queries from multiple users. A single user can get thousands of slots to run their queries. These slots are assigned just-in-time to your query, and the moment that unit of work is done it gets assigned new work, potentially for someone else's query. This is how BigQuery is able to execute so quickly at low cost. You don't have to over-provision resources like you would with statically sized clusters.", "_____no_output_____" ], [ "__Storage is Colossus, Google’s global storage system.__\n\nBigQuery leverages the [columnar storage format](https://cloud.google.com/blog/products/gcp/inside-capacitor-bigquerys-next-generation-columnar-storage-format) and compression algorithm to store data in Colossus, optimized for reading large amounts of structured data. This is the same technology powering Google Cloud's blog storage services - [GCS](https://cloud.google.com/storage).\n\nColossus also handles replication, recovery (when disks crash) and distributed management (so there is no single point of failure). Colossus allows BigQuery users to scale to dozens of petabytes of data stored seamlessly, without paying the penalty of attaching much more expensive compute resources as in traditional data warehouses.", "_____no_output_____" ], [ "__Compute and storage talk to each other through the petabit Jupiter network.__\n\nIn between storage and compute is ‘shuffle’, which takes advantage of Google’s Jupiter network to move data extremely rapidly from one place to another.", "_____no_output_____" ], [ "__BigQuery is orchestrated via Borg, Google’s precursor to Kubernetes.__\n\nThe mixers and slots are all run by Borg, which allocates hardware resources. Essentially, a single BigQuery 'cluster' is able to run thousands of physical machines at once _and_ be securely shared between users, giving massive compute power just-in-time to those who need it.", "_____no_output_____" ], [ "### What does this mean for you?\n\nWorking with BigQuery is different. Some concepts that are __important__:\n* Compute and storage are separate and storage is CHEAP - making copies of data will not waste compute space on nodes like in previous systems. It is also easy to set a TTL on temporary datasets and tables to the garbage collect automatically.\n* The 'workers' in bigquery are called Slots. These are scheduled fairly amongst all the users and queries within a project. Sometimes your query is bound by the amount of parallelism that BigQuery can achieve. 
Sometimes it is bound by the number of slots available to your organization - hence getting more slots will speed it up\n* While your organization may have a reservation for Slots, meaning a guaranteed number of compute power available to teams, your organization doesn't have it's own BigQuery cluster, per se. It is running in a much larger installation of BigQuery, shared securely amongst other customers. This means you can easily increase and decrease the amount of slots your organization has reserved at a moment's notice with [Flex Slots](https://cloud.google.com/blog/products/data-analytics/introducing-bigquery-flex-slots).", "_____no_output_____" ], [ "## Exploring the TPC-DS Schema with SQL\n\nQuestion: \n- How many tables are in the dataset?\n- What is the name of the largest table (in GB)? How many rows does it have?\n- Note the `FROM` clause - which identifier is the project, which is the datasets, and which is the table or view?", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT \n dataset_id,\n table_id,\n -- Convert bytes to GB.\n ROUND(size_bytes/pow(10,9),2) as size_gb,\n -- Convert UNIX EPOCH to a timestamp.\n TIMESTAMP_MILLIS(creation_time) AS creation_time,\n TIMESTAMP_MILLIS(last_modified_time) as last_modified_time,\n row_count,\n CASE \n WHEN type = 1 THEN 'table'\n WHEN type = 2 THEN 'view'\n ELSE NULL\n END AS type\nFROM\n `qwiklabs-resources.tpcds_2t_baseline.__TABLES__`\nORDER BY size_gb DESC", "_____no_output_____" ] ], [ [ "The core tables in the data warehouse are derived from 5 separate core operational systems (each with many tables):\n\n![tpc-ds-components.png](img/tpc-ds-components.png)\n\nThese systems are driven by the core functions of our retail business. As you can see, our store accepts sales from online (web), mail-order (catalog), and in-store. The business must keep track of inventory and can offer promotional discounts on items sold. ", "_____no_output_____" ], [ "### Exploring all available columns of data\n\nQuestion:\n- How many columns of data are in the entire dataset (all tables)?", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT * FROM \n `qwiklabs-resources.tpcds_2t_baseline.INFORMATION_SCHEMA.COLUMNS`", "_____no_output_____" ] ], [ [ "Question:\n- Are any of the columns of data in this baseline dataset partitioned or clustered? 
(This will be covered in another lab)", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT * FROM \n `qwiklabs-resources.tpcds_2t_baseline.INFORMATION_SCHEMA.COLUMNS`\nWHERE \n is_partitioning_column = 'YES' OR clustering_ordinal_position IS NOT NULL", "_____no_output_____" ] ], [ [ "Question\n- How many columns of data does each table have (sorted by most to least?)\n- Which table has the most columns of data?", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT \n COUNT(column_name) AS column_count, \n table_name \nFROM \n `qwiklabs-resources.tpcds_2t_baseline.INFORMATION_SCHEMA.COLUMNS`\nGROUP BY table_name\nORDER BY column_count DESC, table_name", "_____no_output_____" ] ], [ [ "### Previewing sample rows of data values\n\nClick on the `catalog_sales` table name for the `tpcds_2t_baseline` dataset under `qwiklabs-resources`\n\nQuestion\n- How many rows are in the table?\n- How large is the table in TB?\n", "_____no_output_____" ] ], [ [ "!bq show qwiklabs-resources:tpcds_2t_baseline.catalog_sales", "_____no_output_____" ] ], [ [ "\nQuestion:\n- `Preview` the data and find the Catalog Sales Extended Sales Price `cs_ext_sales_price` field (which is calculated based on product quantity * sales price)\n- Are there any missing data values for Catalog Sales Quantity (`cs_quantity`)? \n- Are there any missing values for cs_ext_ship_cost? For what type of product could this be expected? (Digital products)\n\nWe are using the `bq head` command line tool to avoid a full table scan with a `SELECT * LIMIT 15`", "_____no_output_____" ] ], [ [ "!bq head -n 15 --selected_fields \"cs_order_number,cs_quantity,cs_ext_sales_price,cs_ext_ship_cost\" qwiklabs-resources:tpcds_2t_baseline.catalog_sales ", "_____no_output_____" ] ], [ [ "### Create an example sales report\n\n__TODO(you):__ Write a query that shows key sales stats for each item sold from the Catalog and execute it here:\n- total orders\n- total unit quantity\n- total revenue\n- total profit\n- sorted by total orders highest to lowest, limit 10", "_____no_output_____" ] ], [ [ "%%bigquery --verbose\n--Query should fail\n\nSELECT\n \nFROM\n `qwiklabs-resources.tpcds_2t_baseline.catalog_sales`\n\nLIMIT\n 10", "_____no_output_____" ], [ "%%bigquery --verbose\n--Query should succeed\n\nSELECT\n cs_item_sk,\n COUNT(cs_order_number) AS total_orders,\n SUM(cs_quantity) AS total_quantity,\n SUM(cs_ext_sales_price) AS total_revenue,\n SUM(cs_net_profit) AS total_profit\nFROM\n `qwiklabs-resources.tpcds_2t_baseline.catalog_sales`\nGROUP BY\n cs_item_sk\nORDER BY\n total_orders DESC\nLIMIT\n 10", "_____no_output_____" ] ], [ [ "A note on our data: The TPC-DS benchmark allows data warehouse practitioners to generate any volume of data programmatically. Since the rows of data are system generated, they may not make the most sense in a business context (like why are we selling our top product at such a huge profit loss!).\n\nThe good news is that to benchmark our performance we care most about the volume of rows and columns to run our benchmark against. ", "_____no_output_____" ], [ "## Analyzing query performance\n\nYou can use the [INFORMATION_SCHEMA](https://cloud.google.com/bigquery/docs/information-schema-intro) to inspect your query performance. A lot of this data is also presented in the UI under __Execution Details__.\n\nRefer to the query below (which should be similar to your results) and answer the following questions.\n\nQuestion\n- How long did it take the query to run? 14s\n- How much data in GB was processed? 
150GB\n- How much slot time was consumed? 1hr 7min", "_____no_output_____" ] ], [ [ "%%bigquery\n\nSELECT\n project_id,\n job_id,\n query,\n cache_hit,\n reservation_id,\n EXTRACT(DATE FROM creation_time) AS creation_date,\n creation_time,\n end_time,\n TIMESTAMP_DIFF(end_time, start_time, SECOND) AS job_duration_seconds,\n job_type,\n user_email,\n state,\n error_result,\n total_bytes_processed,\n total_slot_ms / 1000 / 60 AS slot_minutes,\n -- Average slot utilization per job is calculated by dividing\n -- total_slot_ms by the millisecond duration of the job\n total_slot_ms / (TIMESTAMP_DIFF(end_time, start_time, MILLISECOND)) AS avg_slots\nFROM\n `region-us`.INFORMATION_SCHEMA.JOBS_BY_PROJECT\nORDER BY\n creation_time DESC\nLIMIT 15;", "_____no_output_____" ], [ "!bq ls -j -a -n 15", "_____no_output_____" ] ], [ [ "## Side note: Slot Time\n\nWe know the query took 15 seconds to run so what does the 1hr 7 min slot time metric mean?\n\nInside of the BigQuery service are lots of virtual machines that massively process your data and query logic in parallel. These workers, or \"slots\", work together to process a single query job really quickly. For accounts with on-demand pricing, you can have up to 2,000 slots.\n\nSo say we had 30 minutes of slot time or 1800 seconds. If the query took 20 seconds in total to run, \nbut it was 1800 seconds worth of work, how many workers at minimum worked on it? \n1800/20 = 90\n\nAnd that's assuming each worker instantly had all the data it needed (no shuffling of data between workers) and was at full capacity for all 20 seconds!\n\nIn reality, workers have a variety of tasks (waiting for data, reading it, performing computations, and writing data)\nand also need to compare notes with each other on what work was already done on the job. The good news for you is\nthat you don't need to worry about optimizing these workers or the underlying data to run perfectly in parallel. That's why BigQuery is a managed service -- there's an entire team dedicated to hardware and data storage optimization.\n\nThe \"avg_slots\" metric indicates the average number of slots being utilized by your query at any given time. Often, portions of the query plan will have different amounts of parallelism and thus can benefit (or not) from more slots. For example, if you're performing a basic READ+FILTER+AGGREGATE query, reading data from a large table may require 1,000 slots for the `INPUT` phase since each slot reads a file, but if a lot of the data is immediately filtered, there may be fewer slots or even one slot needed for the next stage to aggregate. Certain portions of your queries may become bottlenecks for parallelism, for example, `JOIN`s, `SORT`s, etc. BigQuery can execute many of these in a parallel manner and optimizing this queries is a more advanced topic. At this point, it's important to know slot_time, and conceptually what a slot is.\n\nIn case you were wondering, the worker limit for your project is 2,000 slots at once. In a production setting, this will vary depending on whether your organization is using \"flat-rate\" pricing on \"on-demand\". If you're \"flat-rate\", the amount of slots will depend on the organization's reservation, how that reservations is apportioned to different folders, projects, and teams, and how busy each slice of the reservation is at any given moment.", "_____no_output_____" ], [ "## Running a performance benchmark\n\nTo performance benchmark our data warehouse in BigQuery we need to create more than just a single SQL report. 
The good news is the TPC-DS dataset ships with __99 standard benchmark queries__ that we can run and log the performance outcomes. \n\nIn this lab, we are doing no adjustments to the existing data warehouse tables (no partitioning, no clustering, no nesting) so we can establish a performance benchmark to beat in future labs.", "_____no_output_____" ], [ "### Viewing the 99 pre-made SQL queries\n\nWe have a long SQL file with 99 standard queries against this dataset stored in our /sql/ directory.\n\nLet's view the first 50 lines of those baseline queries to get familiar with how we will be performance benchmarking our dataset.", "_____no_output_____" ] ], [ [ "!head --lines=50 'sql/example_baseline_queries.sql'", "_____no_output_____" ] ], [ [ "### Running the first benchmark test\nNow let's run the first query against our dataset and note the execution time. Tip: You can use the [--verbose flag](https://googleapis.dev/python/bigquery/latest/magics.html) in %%bigquery magics to return the job and completion time. ", "_____no_output_____" ] ], [ [ "%%bigquery --verbose\n# start query 1 in stream 0 using template query96.tpl\nselect count(*) \nfrom `qwiklabs-resources.tpcds_2t_baseline.store_sales` as store_sales\n ,`qwiklabs-resources.tpcds_2t_baseline.household_demographics` as household_demographics \n ,`qwiklabs-resources.tpcds_2t_baseline.time_dim` as time_dim, \n `qwiklabs-resources.tpcds_2t_baseline.store` as store\nwhere ss_sold_time_sk = time_dim.t_time_sk \n and ss_hdemo_sk = household_demographics.hd_demo_sk \n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and household_demographics.hd_dep_count = 5\n and store.s_store_name = 'ese'\norder by count(*)\nlimit 100;", "_____no_output_____" ] ], [ [ "It should execute in just a few seconds. __Then try running it again__ and see if you get the same performance. BigQuery will automatically [cache the results](https://cloud.google.com/bigquery/docs/cached-results) from the first time you ran the query and then serve those same results to you when you can the query again. We can confirm this by analyzing the query job statistics. ", "_____no_output_____" ], [ "### Viewing BigQuery job statistics\n\nLet's list our five most recent query jobs run on BigQuery using the `bq` [command line interface](https://cloud.google.com/bigquery/docs/managing-jobs#viewing_information_about_jobs). Then we will get even more detail on our most recent job with the `bq show` command.", "_____no_output_____" ] ], [ [ "!bq ls -j -a -n 5", "_____no_output_____" ] ], [ [ "__Be sure to replace the job id with your own most recent job.__", "_____no_output_____" ] ], [ [ "!bq show --format=prettyjson -j fae46669-5e96-4744-9d2c-2b1b95fa21e7", "_____no_output_____" ] ], [ [ "Looking at the job statistics we can see our most recent query hit cache \n- `cacheHit: true` and therefore \n- `totalBytesProcessed: 0`. \n\nWhile this is great in normal uses for BigQuery (you aren't charged for queries that hit cache) it kind of ruins our performance test. While cache is super useful we want to disable it for testing purposes.", "_____no_output_____" ], [ "### Disabling Cache and Dry Running Queries\nAs of the time this lab was created, you can't pass a flag to `%%bigquery` iPython notebook magics to disable cache or to quickly see the amount of data processed. 
So we will use the traditional `bq` [command line interface in bash](https://cloud.google.com/bigquery/docs/reference/bq-cli-reference#bq_query).\n\nFirst we will do a `dry run` of the query without processing any data just to see how many bytes of data would be processed. Then we will remove that flag and ensure `nouse_cache` is set to avoid hitting cache as well.", "_____no_output_____" ] ], [ [ "%%bash \nbq query \\\n--dry_run \\\n--nouse_cache \\\n--use_legacy_sql=false \\\n\"\"\"\\\nselect count(*) \nfrom \\`qwiklabs-resources.tpcds_2t_baseline.store_sales\\` as store_sales\n ,\\`qwiklabs-resources.tpcds_2t_baseline.household_demographics\\` as household_demographics \n ,\\`qwiklabs-resources.tpcds_2t_baseline.time_dim\\` as time_dim, \\`qwiklabs-resources.tpcds_2t_baseline.store\\` as store\nwhere ss_sold_time_sk = time_dim.t_time_sk \n and ss_hdemo_sk = household_demographics.hd_demo_sk \n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and household_demographics.hd_dep_count = 5\n and store.s_store_name = 'ese'\norder by count(*)\nlimit 100;\n\"\"\"", "_____no_output_____" ], [ "# Convert bytes to GB\n132086388641 / 1e+9", "_____no_output_____" ] ], [ [ "132 GB will be processed. At the time of writing, [BigQuery pricing](https://cloud.google.com/bigquery/pricing) is \\\\$5 per 1 TB (or 1000 GB) of data after the first free 1 TB each month. Assuming we've exhausted our 1 TB free this month, this would be \\\\$0.66 to run.\n\nNow let's run it an ensure we're not pulling from cache so we get an accurate time-to-completion benchmark.", "_____no_output_____" ] ], [ [ "%%bash \nbq query \\\n--nouse_cache \\\n--use_legacy_sql=false \\\n\"\"\"\\\nselect count(*) \nfrom \\`qwiklabs-resources.tpcds_2t_baseline.store_sales\\` as store_sales\n ,\\`qwiklabs-resources.tpcds_2t_baseline.household_demographics\\` as household_demographics \n ,\\`qwiklabs-resources.tpcds_2t_baseline.time_dim\\` as time_dim, \\`qwiklabs-resources.tpcds_2t_baseline.store\\` as store\nwhere ss_sold_time_sk = time_dim.t_time_sk \n and ss_hdemo_sk = household_demographics.hd_demo_sk \n and ss_store_sk = s_store_sk\n and time_dim.t_hour = 8\n and time_dim.t_minute >= 30\n and household_demographics.hd_dep_count = 5\n and store.s_store_name = 'ese'\norder by count(*)\nlimit 100;\n\"\"\"", "_____no_output_____" ] ], [ [ "If you're an experienced BigQuery user, you likely have seen these same metrics in the Web UI as well as highlighted in the red box below:\n\n![img/bq-ui-results.png](img/bq-ui-results.png)\n\nIt's a matter of preference whether you do your work in the Web UI or the command line -- each has it's advantages.\n\nOne major advantage of using the `bq` command line interface is the ability to create a script that will run the remaining 98 benchmark queries for us and log the results. ", "_____no_output_____" ], [ "### Copy the qwiklabs-resources dataset into your own GCP project\n\nWe will use the new [BigQuery Transfer Service](https://cloud.google.com/bigquery/docs/copying-datasets) to quickly copy our large dataset from the `qwiklabs-resources` GCP project into your own so you can perform the benchmarking. 
\n\n### Create a new baseline dataset in your project", "_____no_output_____" ] ], [ [ "%%bash\n\nexport PROJECT_ID=$(gcloud config list --format 'value(core.project)')\nexport BENCHMARK_DATASET_NAME=tpcds_2t_baseline # Name of the dataset you want to create\n\n## Create a BigQuery dataset for tpcds_2t_flat_part_clust if it doesn't exist\ndatasetexists=$(bq ls -d | grep -w $BENCHMARK_DATASET_NAME)\n\nif [ -n \"$datasetexists\" ]; then\n echo -e \"BigQuery dataset $BENCHMARK_DATASET_NAME already exists, let's not recreate it.\"\n\nelse\n echo \"Creating BigQuery dataset titled: $BENCHMARK_DATASET_NAME\"\n \n bq --location=US mk --dataset \\\n --description 'Benchmark Dataset' \\\n $PROJECT:$BENCHMARK_DATASET_NAME\n\nfi", "_____no_output_____" ], [ "# Inspect your project and datasets\n!bq ls \n!bq ls tpcds_2t_baseline", "_____no_output_____" ] ], [ [ "Here we will use the `bq cp` command to copy tables over. If you need to periodically refresh data, the BQ Transfer service or scheduled queries are good tools as well.", "_____no_output_____" ] ], [ [ "%%bash\n\n# Should take about 30 seconds, starts a bunch of asynchronous copy jobs\n\n\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.call_center tpcds_2t_baseline.call_center\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.catalog_page tpcds_2t_baseline.catalog_page\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.catalog_returns tpcds_2t_baseline.catalog_returns\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.catalog_sales tpcds_2t_baseline.catalog_sales\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.customer tpcds_2t_baseline.customer\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.customer_address tpcds_2t_baseline.customer_address\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.customer_demographics tpcds_2t_baseline.customer_demographics\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.date_dim tpcds_2t_baseline.date_dim\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.dbgen_version tpcds_2t_baseline.dbgen_version\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.household_demographics tpcds_2t_baseline.household_demographics\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.income_band tpcds_2t_baseline.income_band\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.inventory tpcds_2t_baseline.inventory\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.item tpcds_2t_baseline.item\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.perf tpcds_2t_baseline.perf\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.promotion tpcds_2t_baseline.promotion\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.reason tpcds_2t_baseline.reason\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.ship_mode tpcds_2t_baseline.ship_mode\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.store tpcds_2t_baseline.store\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.store_returns tpcds_2t_baseline.store_returns\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.store_sales tpcds_2t_baseline.store_sales\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.time_dim tpcds_2t_baseline.time_dim\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.warehouse tpcds_2t_baseline.warehouse\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.web_page tpcds_2t_baseline.web_page\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.web_returns tpcds_2t_baseline.web_returns\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.web_sales tpcds_2t_baseline.web_sales\nbq cp -nosync qwiklabs-resources:tpcds_2t_baseline.web_site 
tpcds_2t_baseline.web_site\n ", "_____no_output_____" ] ], [ [ "Inspect the tables now in your project.", "_____no_output_____" ] ], [ [ "!bq ls tpcds_2t_baseline", "_____no_output_____" ] ], [ [ "### Verify you now have the baseline data in your project\n\nRun the below query and confirm you see data. Note that if you omit the `project-id` ahead of the dataset name in the `FROM` clause, BigQuery will assume your default project.", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT COUNT(*) AS store_transaction_count\nFROM tpcds_2t_baseline.store_sales", "_____no_output_____" ] ], [ [ "### Setup an automated test\n\nRunning each of the 99 queries manually via the Console UI would be a tedious effort. We'll show you how you can run all 99 programmatically and automatically log the output (time and GB processed) to a log file for analysis. \n\nBelow is a shell script that:\n1. Accepts a BigQuery dataset to benchmark\n2. Accepts a list of semi-colon separated queries to run\n3. Loops through each query and calls the `bq` query command\n4. Records the execution time into a separate BigQuery performance table `perf`\n\nExecute the below statement and follow along with the results as you benchmark a few example queries (don't worry, we've already ran the full 99 recently so you won't have to).\n\n__After executing, wait 1-2 minutes for the benchmark test to complete__\n", "_____no_output_____" ] ], [ [ "%%bash\n# runs the SQL queries from the TPCDS benchmark \n\n# Pull the current Google Cloud Platform project name\n\nBQ_DATASET=\"tpcds_2t_baseline\" # let's start by benchmarking our baseline dataset \nQUERY_FILE_PATH=\"./sql/example_baseline_queries.sql\" # the full test is on 99_baseline_queries but that will take 80+ mins to run\nIFS=\";\"\n\n# create perf table to keep track of run times for all 99 queries\nprintf \"\\033[32;1m Housekeeping tasks... \\033[0m\\n\\n\";\nprintf \"Creating a reporting table perf to track how fast each query runs...\";\nperf_table_ddl=\"CREATE TABLE IF NOT EXISTS $BQ_DATASET.perf(performance_test_num int64, query_num int64, elapsed_time_sec int64, ran_on int64)\"\nbq rm -f $BQ_DATASET.perf\nbq query --nouse_legacy_sql $perf_table_ddl \n\nstart=$(date +%s)\nindex=0\nfor select_stmt in $(<$QUERY_FILE_PATH) \ndo \n # run the test until you hit a line with the string 'END OF BENCHMARK' in the file\n if [[ \"$select_stmt\" == *'END OF BENCHMARK'* ]]; then\n break\n fi\n\n printf \"\\n\\033[32;1m Let's benchmark this query... \\033[0m\\n\";\n printf \"$select_stmt\";\n \n SECONDS=0;\n bq query --use_cache=false --nouse_legacy_sql $select_stmt # critical to turn cache off for this test\n duration=$SECONDS\n\n # get current timestamp in milliseconds \n ran_on=$(date +%s)\n\n index=$((index+1))\n\n printf \"\\n\\033[32;1m Here's how long it took... \\033[0m\\n\\n\";\n echo \"Query $index ran in $(($duration / 60)) minutes and $(($duration % 60)) seconds.\"\n\n printf \"\\n\\033[32;1m Writing to our benchmark table... \\033[0m\\n\\n\";\n insert_stmt=\"insert into $BQ_DATASET.perf(performance_test_num, query_num, elapsed_time_sec, ran_on) values($start, $index, $duration, $ran_on)\"\n printf \"$insert_stmt\"\n bq query --nouse_legacy_sql $insert_stmt\ndone\n\nend=$(date +%s)\n\nprintf \"Benchmark test complete\"\n", "_____no_output_____" ] ], [ [ "## Viewing the benchmark results\n\nAs part of the benchmark test, we stored the processing time of each query into a new `perf` BigQuery table. We can query that table and get some performance stats for our test. 
\n\nFirst, here are the individual query runs from our most recent test:", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT * FROM tpcds_2t_baseline.perf\nWHERE\n  # Let's only pull the results from our most recent test\n  performance_test_num = (SELECT MAX(performance_test_num) FROM tpcds_2t_baseline.perf)\nORDER BY ran_on", "_____no_output_____" ] ], [ [ "And finally, the overall statistics for the entire test:", "_____no_output_____" ] ], [ [ "%%bigquery\nSELECT\n  TIMESTAMP_SECONDS(MAX(performance_test_num)) AS test_date,\n  MAX(performance_test_num) AS latest_performance_test_num,\n  COUNT(DISTINCT query_num) AS count_queries_benchmarked,\n  SUM(elapsed_time_sec) AS total_time_sec,\n  MIN(elapsed_time_sec) AS fastest_query_time_sec,\n  MAX(elapsed_time_sec) AS slowest_query_time_sec\nFROM\n  tpcds_2t_baseline.perf\nWHERE\n  performance_test_num = (SELECT MAX(performance_test_num) FROM tpcds_2t_baseline.perf)", "_____no_output_____" ] ], [ [ "## Congratulations!\n\nAnd there you have it! You successfully ran a performance benchmark test against your data warehouse.\n", "_____no_output_____" ], [ "Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.", "_____no_output_____" ] ] ]
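The benchmarking workflow in the notebook above drives everything through the `bq` CLI from a bash loop. For reference, the same measurement can be scripted in Python with the BigQuery client library. This is only a sketch, not part of the original lab: it assumes the `google-cloud-bigquery` package is installed, that the `tpcds_2t_baseline.perf` table created above exists, and that `queries` is a hypothetical list holding the benchmark SQL strings.

```python
import time
from google.cloud import bigquery

client = bigquery.Client()  # uses your default project and credentials

# Hypothetical list of benchmark SQL strings -- replace with the 99 TPC-DS queries.
queries = ["SELECT COUNT(*) FROM tpcds_2t_baseline.store_sales"]

test_num = int(time.time())
for i, sql in enumerate(queries, start=1):
    job_config = bigquery.QueryJobConfig(use_query_cache=False)  # disable cached results
    start = time.time()
    job = client.query(sql, job_config=job_config)
    job.result()  # wait for the query to finish
    elapsed = int(time.time() - start)
    print(f"Query {i}: {elapsed}s, {job.total_bytes_processed / 1e9:.1f} GB processed")

    # Log the run to the perf table, mirroring the bash script above.
    client.query(
        "INSERT INTO tpcds_2t_baseline.perf "
        "(performance_test_num, query_num, elapsed_time_sec, ran_on) "
        f"VALUES ({test_num}, {i}, {elapsed}, {int(time.time())})"
    ).result()
```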
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ecd1e7d7fc4cc2cbe11720cc7b3fb3d5950ccd07
48,994
ipynb
Jupyter Notebook
facial-emotion-detection-and-classification.ipynb
GaurabStha/facial-emotion-detection-and-classification
1d15f0680c8f31dd3580e9e2b6c1a67323f93cf1
[ "MIT" ]
null
null
null
facial-emotion-detection-and-classification.ipynb
GaurabStha/facial-emotion-detection-and-classification
1d15f0680c8f31dd3580e9e2b6c1a67323f93cf1
[ "MIT" ]
null
null
null
facial-emotion-detection-and-classification.ipynb
GaurabStha/facial-emotion-detection-and-classification
1d15f0680c8f31dd3580e9e2b6c1a67323f93cf1
[ "MIT" ]
null
null
null
60.115337
19,024
0.77883
[ [ [ "# facial-emotion-detection-and-classification", "_____no_output_____" ], [ "### Importing the libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "import cv2\nimport pywt\nimport os\nimport pickle\nimport random", "_____no_output_____" ] ], [ [ "### Let's visualize some image", "_____no_output_____" ] ], [ [ "test_img = cv2.imread('CK+48/happy/S011_006_00000011.png')\nplt.imshow(test_img)\nplt.show()", "_____no_output_____" ], [ "test_img.shape", "_____no_output_____" ] ], [ [ "### Taking all the images from the image dataset", "_____no_output_____" ] ], [ [ "path_to_data = 'CK+48'\nimg_dirs = []\nfor entry in os.scandir(path_to_data):\n if entry.is_dir():\n img_dirs.append(entry.path)", "_____no_output_____" ], [ "img_dirs", "_____no_output_____" ], [ "emotions = []\nfor img_dir in img_dirs:\n emotion_name = img_dir.split('/')[-1]\n emotions.append(emotion_name)", "_____no_output_____" ], [ "emotions", "_____no_output_____" ], [ "data = []\nfor emotion in emotions:\n path = os.path.join(path_to_data, emotion)\n label = emotions.index(emotion)\n print('{} --> {}'.format(emotion, label))\n for img in os.listdir(path):\n imgpath = os.path.join(path, img)\n emotion_img = cv2.imread(imgpath, 0)\n try:\n emotion_img = cv2.resize(emotion_img, (48, 48))\n image = np.array(emotion_img).flatten()\n \n data.append([image, label])\n except Exception as e:\n pass", "angry --> 0\nfear --> 1\nneutral --> 2\nsurprise --> 3\nhappy --> 4\nsad --> 5\ndisgust --> 6\n" ], [ "print(len(data))", "1400\n" ] ], [ [ "### Splitting the data into features and labels where X --> features & y --> labels", "_____no_output_____" ] ], [ [ "# random.shuffle(data)\nX = []\ny = []\n\nfor feature, label in data:\n X.append(feature)\n y.append(label)", "_____no_output_____" ] ], [ [ "### Now let's do some feature scaling", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\nss = StandardScaler()\n\nX = ss.fit_transform(X)", "_____no_output_____" ] ], [ [ "### Splitting the data into training and test set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)", "_____no_output_____" ] ], [ [ "### Fitting the model into train set", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import cross_val_score\nfrom sklearn.svm import SVC\nmodel = SVC(kernel='rbf', C=100, gamma='auto')\nmodel.fit(X_train, y_train)\nmodel.score(X_test, y_test)", "_____no_output_____" ], [ "SVC(kernel='linear', C=100, gamma='auto').fit(X_train, y_train).score(X_test, y_test)", "_____no_output_____" ], [ "SVC(kernel='poly', C=100, gamma='auto').fit(X_train, y_train).score(X_test, y_test)", "_____no_output_____" ] ], [ [ "### K-fold Cross validation", "_____no_output_____" ] ], [ [ "kernels = ['linear', 'rbf', 'poly']\nC = [1, 10, 20, 50, 100]\navg_scores = {}\nfor kernel in kernels:\n for cval in C:\n cv_scores = cross_val_score(SVC(kernel=kernel, C=cval, gamma='auto'), X, y, cv=5)\n avg_scores[kernel+\"_\"+str(cval)] = np.average(cv_scores)\n\navg_scores", "_____no_output_____" ] ], [ [ "### Grid Search CV", "_____no_output_____" ] ], [ [ "from sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import 
KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import GridSearchCV", "_____no_output_____" ], [ "model_params = {\n 'svm': {\n 'model': svm.SVC(gamma='auto',probability=True),\n 'params' : {\n 'svc__C': [1,10,100,1000],\n 'svc__kernel': ['rbf','linear']\n } \n },\n 'random_forest': {\n 'model': RandomForestClassifier(),\n 'params' : {\n 'randomforestclassifier__n_estimators': [1,5,10]\n }\n },\n 'logistic_regression' : {\n 'model': LogisticRegression(solver='liblinear',multi_class='auto'),\n 'params': {\n 'logisticregression__C': [1,5,10]\n }\n }\n}", "_____no_output_____" ], [ "scores = []\nbest_estimators = {}\nfor algo, mp in model_params.items():\n pipe = make_pipeline(StandardScaler(), mp['model'])\n clf = GridSearchCV(pipe, mp['params'], cv=5, return_train_score=False)\n clf.fit(X_train, y_train)\n scores.append({\n 'model': algo,\n 'best_score': clf.best_score_,\n 'best_params': clf.best_params_\n })\n best_estimators[algo] = clf.best_estimator_\n \ndf = pd.DataFrame(scores,columns=['model','best_score','best_params'])\ndf", "_____no_output_____" ], [ "best_estimators", "_____no_output_____" ], [ "best_estimators['svm'].score(X_test,y_test)", "_____no_output_____" ], [ "best_estimators['logistic_regression'].score(X_test,y_test)", "_____no_output_____" ], [ "best_estimators['random_forest'].score(X_test,y_test)", "_____no_output_____" ], [ "best_clf = best_estimators['svm']", "_____no_output_____" ], [ "best_clf", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, best_clf.predict(X_test))\ncm", "_____no_output_____" ], [ "plt.figure(figsize = (10,7))\nsns.heatmap(cm, annot=True)\nplt.xlabel('Predicted')\nplt.ylabel('Truth')", "_____no_output_____" ] ], [ [ "- angry --> 0\n- fear --> 1\n- neutral --> 2\n- surprise --> 3\n- happy --> 4\n- sad --> 5\n- disgust --> 6", "_____no_output_____" ] ] ]
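A natural follow-up to the confusion matrix above is scoring a brand-new face image with the objects fitted in the notebook (`ss`, `best_clf`, and the `emotions` label list). The sketch below simply repeats the training-time preprocessing on one image; the example path reuses a file from the CK+48 folder and is illustrative only.

```python
import cv2
import numpy as np

def predict_emotion(image_path):
    # Same preprocessing as training: grayscale, resize to 48x48, flatten.
    img = cv2.imread(image_path, 0)
    img = cv2.resize(img, (48, 48))
    features = np.array(img).flatten().reshape(1, -1)
    # Apply the StandardScaler (ss) fitted on the training pixels,
    # then the best GridSearchCV pipeline (best_clf).
    features = ss.transform(features)
    label = best_clf.predict(features)[0]
    return emotions[label]

print(predict_emotion('CK+48/happy/S011_006_00000011.png'))

# The notebook's unused `pickle` import suggests persisting the model; one option:
# with open('emotion_clf.pkl', 'wb') as f:
#     pickle.dump({'scaler': ss, 'model': best_clf, 'labels': emotions}, f)
```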
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ecd1ef19ce0c484f910de4e525f7eff2c5c5be82
185,379
ipynb
Jupyter Notebook
LS_DS_431_RNN_and_LSTM_Lecture.ipynb
Neha-kumari31/DS-Unit-4-Sprint-3-Deep-Learning
46354e4f0784932c1eb6c14af0a713da7cf1a238
[ "MIT" ]
null
null
null
LS_DS_431_RNN_and_LSTM_Lecture.ipynb
Neha-kumari31/DS-Unit-4-Sprint-3-Deep-Learning
46354e4f0784932c1eb6c14af0a713da7cf1a238
[ "MIT" ]
null
null
null
LS_DS_431_RNN_and_LSTM_Lecture.ipynb
Neha-kumari31/DS-Unit-4-Sprint-3-Deep-Learning
46354e4f0784932c1eb6c14af0a713da7cf1a238
[ "MIT" ]
null
null
null
101.521906
31,626
0.781011
[ [ [ "<a href=\"https://colab.research.google.com/github/Neha-kumari31/DS-Unit-4-Sprint-3-Deep-Learning/blob/master/LS_DS_431_RNN_and_LSTM_Lecture.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 4, Sprint 3, Module 1*\n\n---\n", "_____no_output_____" ], [ "# Recurrent Neural Networks (RNNs) and Long Short Term Memory (LSTM) (Prepare)\n\n<img src=\"https://media.giphy.com/media/l2JJu8U8SoHhQEnoQ/giphy.gif\" width=480 height=356>\n<br></br>\n<br></br>", "_____no_output_____" ], [ "## Learning Objectives\n- <a href=\"#p1\">Part 1: </a>Describe Neural Networks used for modeling sequences\n- <a href=\"#p2\">Part 2: </a>Apply a LSTM to a text generation problem using Keras", "_____no_output_____" ], [ "## Overview\n\n> \"Yesterday's just a memory - tomorrow is never what it's supposed to be.\" -- Bob Dylan\n\nWish you could save [Time In A Bottle](https://www.youtube.com/watch?v=AnWWj6xOleY)? With statistics you can do the next best thing - understand how data varies over time (or any sequential order), and use the order/time dimension predictively.\n\nA sequence is just any enumerated collection - order counts, and repetition is allowed. Python lists are a good elemental example - `[1, 2, 2, -1]` is a valid list, and is different from `[1, 2, -1, 2]`. The data structures we tend to use (e.g. NumPy arrays) are often built on this fundamental structure.\n\nA time series is data where you have not just the order but some actual continuous marker for where they lie \"in time\" - this could be a date, a timestamp, [Unix time](https://en.wikipedia.org/wiki/Unix_time), or something else. All time series are also sequences, and for some techniques you may just consider their order and not \"how far apart\" the entries are (if you have particularly consistent data collected at regular intervals it may not matter).", "_____no_output_____" ], [ "# Neural Networks for Sequences (Learn)", "_____no_output_____" ], [ "## Overview\n\nThere's plenty more to \"traditional\" time series, but the latest and greatest technique for sequence data is recurrent neural networks. A recurrence relation in math is an equation that uses recursion to define a sequence - a famous example is the Fibonacci numbers:\n\n$F_n = F_{n-1} + F_{n-2}$\n\nFor formal math you also need a base case $F_0=1, F_1=1$, and then the rest builds from there. But for neural networks what we're really talking about are loops:\n\n![Recurrent neural network](https://upload.wikimedia.org/wikipedia/commons/b/b5/Recurrent_neural_network_unfold.svg)\n\nThe hidden layers have edges (output) going back to their own input - this loop means that for any time `t` the training is at least partly based on the output from time `t-1`. The entire network is being represented on the left, and you can unfold the network explicitly to see how it behaves at any given `t`.\n\nDifferent units can have this \"loop\", but a particularly successful one is the long short-term memory unit (LSTM):\n\n![Long short-term memory unit](https://upload.wikimedia.org/wikipedia/commons/thumb/6/63/Long_Short-Term_Memory.svg/1024px-Long_Short-Term_Memory.svg.png)\n\nThere's a lot going on here - in a nutshell, the calculus still works out and backpropagation can still be implemented. 
The advantage (ane namesake) of LSTM is that it can generally put more weight on recent (short-term) events while not completely losing older (long-term) information.\n\nAfter enough iterations, a typical neural network will start calculating prior gradients that are so small they effectively become zero - this is the [vanishing gradient problem](https://en.wikipedia.org/wiki/Vanishing_gradient_problem), and is what RNN with LSTM addresses. Pay special attention to the $c_t$ parameters and how they pass through the unit to get an intuition for how this problem is solved.\n\nSo why are these cool? One particularly compelling application is actually not time series but language modeling - language is inherently ordered data (letters/words go one after another, and the order *matters*). [The Unreasonable Effectiveness of Recurrent Neural Networks](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) is a famous and worth reading blog post on this topic.\n\nFor our purposes, let's use TensorFlow and Keras to train RNNs with natural language. Resources:\n\n- https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py\n- https://keras.io/layers/recurrent/#lstm\n- http://adventuresinmachinelearning.com/keras-lstm-tutorial/\n\nNote that `tensorflow.contrib` [also has an implementation of RNN/LSTM](https://www.tensorflow.org/tutorials/sequences/recurrent).", "_____no_output_____" ], [ "## Follow Along\n\nSequences come in many shapes and forms from stock prices to text. We'll focus on text, because modeling text as a sequence is a strength of Neural Networks. Let's start with a simple classification task using a TensorFlow tutorial. ", "_____no_output_____" ], [ "### RNN/LSTM Sentiment Classification with Keras", "_____no_output_____" ] ], [ [ "# Check that we have a GPU instance of Colab\ngpu_info = !nvidia-smi\ngpu_info = '\\n'.join(gpu_info)\nif gpu_info.find('failed') >= 0:\n print('Select the Runtime → \"Change runtime type\" menu to enable a GPU accelerator, ')\n print('and then re-execute this cell.')\nelse:\n print(gpu_info)", "Thu Apr 23 02:59:19 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 440.64.00 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n|===============================+======================+======================|\n| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |\n| N/A 36C P0 27W / 250W | 0MiB / 16280MiB | 0% Default |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: GPU Memory |\n| GPU PID Type Process name Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "'''\n#Trains an LSTM model on the IMDB sentiment classification task.\nThe dataset is actually too small for LSTM to be of any advantage\ncompared to simpler, much faster methods such as TF-IDF + LogReg.\n**Notes**\n- RNNs are tricky. 
Choice of batch size is important,\nchoice of loss and optimizer is critical, etc.\nSome configurations won't converge.\n- LSTM loss decrease patterns during training can be quite different\nfrom what you see with CNNs/MLPs/etc.\n'''\nfrom __future__ import print_function\n\nimport numpy as np\nfrom tensorflow.keras.preprocessing import sequence\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Embedding, Dropout, SimpleRNN, LSTM, Bidirectional\nfrom tensorflow.keras.datasets import imdb\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\n# Set top N words and batch_size\nmax_features = 20000\nbatch_size = 32\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n\n# Map for readable classnames\nclass_names = [\"Negative\", \"Positive\"]", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "x_train[0]", "_____no_output_____" ] ], [ [ "Reviews in the IMDB dataset have been encoded as a sequence of integers. Luckily the dataset also contains an index for converting the reviews back into human readable form.", "_____no_output_____" ] ], [ [ "# Get the word index from the dataset\nword_index = imdb.get_word_index()\n\n# Ensure that \"special\" words are mapped into human readable terms \nword_index = {k:(v+3) for k,v in word_index.items()}\nword_index[\"<PAD>\"] = 0\nword_index[\"<START>\"] = 1\nword_index[\"<UNKNOWN>\"] = 2\nword_index[\"<UNUSED>\"] = 3\n\n# Perform reverse word lookup and make it callable\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\ndef decode_review(text):\n return ' '.join([reverse_word_index.get(i, '?') for i in text])", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json\n1646592/1641221 [==============================] - 0s 0us/step\n" ], [ "# Concatonate test and training datasets\nallreviews = np.concatenate((x_train, x_test), axis=0)\n\n# Review lengths across test and training whole datasets\nprint(\"Maximum review length: {}\".format(len(max((allreviews), key=len))))\nprint(\"Minimum review length: {}\".format(len(min((allreviews), key=len))))\nresult = [len(x) for x in allreviews]\nprint(\"Mean review length: {}\".format(np.mean(result)))\n\n# Print a review and it's class as stored in the dataset. Replace the number\n# to select a different review.\nprint(\"\")\nprint(\"Machine readable Review\")\nprint(\" Review Text: \" + str(x_train[60]))\nprint(\" Review Sentiment: \" + str(y_train[60]))\n\n# Print a review and it's class in human readable format. 
Replace the number\n# to select a different review.\nprint(\"\")\nprint(\"Human Readable Review\")\nprint(\" Review Text: \" + decode_review(x_train[60]))\nprint(\" Review Sentiment: \" + class_names[y_train[60]])", "Maximum review length: 2494\nMinimum review length: 7\nMean review length: 234.75892\n\nMachine readable Review\n Review Text: [1, 13, 219, 14, 33, 4, 12180, 22, 1413, 12, 16, 373, 175, 2711, 1115, 1026, 430, 939, 16, 23, 2444, 25, 43, 697, 89, 12, 16, 170, 8, 130, 262, 19, 32, 4, 665, 7, 4, 2, 322, 5, 4, 1520, 7, 4, 86, 250, 10, 10, 4, 249, 173, 16, 4, 3891, 6, 19, 4, 167, 564, 5, 564, 1325, 36, 805, 8, 216, 638, 17, 11076, 21, 25, 100, 376, 507, 4, 2110, 15, 79, 125, 23, 567, 13, 2134, 233, 36, 4852, 2, 5, 81, 1672, 10, 10, 92, 437, 129, 58, 13, 69, 8, 401, 61, 1432, 39, 1286, 46, 7, 12]\n Review Sentiment: 0\n\nHuman Readable Review\n Review Text: <START> i saw this at the edinburgh film festival it was awful every clichéd violent rich boy fantasy was on display you just knew how it was going to end especially with all the shots of the <UNKNOWN> wife and the rape of the first girl br br the worst part was the q a with the director writer and writer producer they tried to come across as intellectuals but you could tell they're the types that get off on violence i bet anything they frequent <UNKNOWN> and do drugs br br don't waste your time i had to keep my boyfriend from walking out of it\n Review Sentiment: Negative\n" ], [ "# Get the lengths for positive and negative reviews\nall_labels = np.concatenate([y_train, y_test])\npositive = np.array(result)[all_labels==1]\nnegative = np.array(result)[all_labels==0]\n\nplt.figure(figsize=(8,6))\nsns.distplot(positive, label='Positive Reviews', hist=False)\nsns.distplot(negative, label='Negative Reviews', hist=False)\nplt.title('Distribution of Positive and Negative Review Lengths', fontsize=14)\nplt.legend();", "_____no_output_____" ], [ "# If you want to pad the end of the sequences you can set padding='post'.\nmaxlen = 300\n\nprint('Pad Sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=maxlen)\nprint('x_train shape: ', x_train.shape)\nprint('x_test shape: ', x_test.shape)", "Pad Sequences (samples x time)\nx_train shape: (25000, 300)\nx_test shape: (25000, 300)\n" ], [ "x_train[60]", "_____no_output_____" ] ], [ [ "Let's get a baseline performance for Multi-Layer Perceptron", "_____no_output_____" ] ], [ [ "mlp = Sequential()\nmlp.add(Embedding(max_features, 128))\nmlp.add(Dense(128))\nmlp.add(Dropout(0.25))\nmlp.add(Dense(1, activation='sigmoid'))\n\nmlp.compile(loss='binary_crossentropy',\n optimizer='adam', \n metrics=['accuracy'])\n\nmlp.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding (Embedding) (None, None, 128) 2560000 \n_________________________________________________________________\ndense (Dense) (None, None, 128) 16512 \n_________________________________________________________________\ndropout (Dropout) (None, None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, None, 1) 129 \n=================================================================\nTotal params: 2,576,641\nTrainable params: 2,576,641\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "mlp_history = 
mlp.fit(x_train, y_train,\n batch_size=batch_size, \n epochs=5, \n validation_data=(x_test, y_test))", "Epoch 1/5\n782/782 [==============================] - 23s 29ms/step - loss: 0.6861 - accuracy: 0.5295 - val_loss: 0.6859 - val_accuracy: 0.5270\nEpoch 2/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6831 - accuracy: 0.5363 - val_loss: 0.6855 - val_accuracy: 0.5351\nEpoch 3/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6822 - accuracy: 0.5394 - val_loss: 0.6862 - val_accuracy: 0.5286\nEpoch 4/5\n782/782 [==============================] - 23s 29ms/step - loss: 0.6821 - accuracy: 0.5367 - val_loss: 0.6865 - val_accuracy: 0.5331\nEpoch 5/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6819 - accuracy: 0.5365 - val_loss: 0.6859 - val_accuracy: 0.5348\n" ], [ "# Plot training & validation loss values\nplt.plot(mlp_history.history['loss'])\nplt.plot(mlp_history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show();", "_____no_output_____" ] ], [ [ "Let's try using the SimpleRNN layers instead of Dense", "_____no_output_____" ] ], [ [ "rnn = Sequential()\nrnn.add(Embedding(max_features, 128))\nrnn.add(SimpleRNN(128))\nrnn.add(Dropout(0.25))\nrnn.add(Dense(1, activation='sigmoid'))\n\nrnn.compile(loss='binary_crossentropy',\n optimizer='adam', \n metrics=['accuracy'])\n\nrnn.summary()", "Model: \"sequential_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, None, 128) 2560000 \n_________________________________________________________________\nsimple_rnn (SimpleRNN) (None, 128) 32896 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 1) 129 \n=================================================================\nTotal params: 2,593,025\nTrainable params: 2,593,025\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "rnn_history = mlp.fit(x_train, y_train,\n batch_size=batch_size, \n epochs=5, \n validation_data=(x_test, y_test))", "Epoch 1/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6818 - accuracy: 0.5377 - val_loss: 0.6857 - val_accuracy: 0.5353\nEpoch 2/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6816 - accuracy: 0.5382 - val_loss: 0.6861 - val_accuracy: 0.5355\nEpoch 3/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6815 - accuracy: 0.5385 - val_loss: 0.6860 - val_accuracy: 0.5346\nEpoch 4/5\n782/782 [==============================] - 22s 29ms/step - loss: 0.6815 - accuracy: 0.5373 - val_loss: 0.6863 - val_accuracy: 0.5297\nEpoch 5/5\n782/782 [==============================] - 22s 28ms/step - loss: 0.6814 - accuracy: 0.5396 - val_loss: 0.6862 - val_accuracy: 0.5296\n" ], [ "# Plot training & validation loss values\nplt.plot(rnn_history.history['loss'])\nplt.plot(rnn_history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show();", "_____no_output_____" ] ], [ [ "Lastly, let's try replacing the SimpleRNN layer with LSTM.", "_____no_output_____" ] ], [ [ "lstm = Sequential()\nlstm.add(Embedding(max_features, 
128))\nlstm.add(LSTM(128))\nlstm.add(Dropout(0.25))\nlstm.add(Dense(1, activation='sigmoid'))\n\nlstm.compile(loss='binary_crossentropy',\n optimizer='adam', \n metrics=['accuracy'])\n\nlstm.summary()", "Model: \"sequential_2\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_2 (Embedding) (None, None, 128) 2560000 \n_________________________________________________________________\nlstm (LSTM) (None, 128) 131584 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 129 \n=================================================================\nTotal params: 2,691,713\nTrainable params: 2,691,713\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "from tensorflow.keras.utils import plot_model\n\nplot_model(lstm, to_file='lstm.png', show_shapes=True, show_layer_names=False)", "_____no_output_____" ], [ "lstm_history = lstm.fit(x_train, y_train,\n batch_size=batch_size, \n epochs=5, \n validation_data=(x_test, y_test))", "Epoch 1/5\n782/782 [==============================] - 37s 47ms/step - loss: 0.4354 - accuracy: 0.8023 - val_loss: 0.3635 - val_accuracy: 0.8502\nEpoch 2/5\n782/782 [==============================] - 35s 45ms/step - loss: 0.2556 - accuracy: 0.8996 - val_loss: 0.3793 - val_accuracy: 0.8626\nEpoch 3/5\n782/782 [==============================] - 37s 47ms/step - loss: 0.1682 - accuracy: 0.9387 - val_loss: 0.4207 - val_accuracy: 0.8675\nEpoch 4/5\n782/782 [==============================] - 35s 45ms/step - loss: 0.1039 - accuracy: 0.9647 - val_loss: 0.5313 - val_accuracy: 0.8614\nEpoch 5/5\n782/782 [==============================] - 35s 45ms/step - loss: 0.0772 - accuracy: 0.9740 - val_loss: 0.4706 - val_accuracy: 0.8501\n" ], [ "# Plot training & validation loss values\nplt.plot(lstm_history.history['loss'])\nplt.plot(lstm_history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show();", "_____no_output_____" ], [ "25000 / (2 * (300 + 1))", "_____no_output_____" ], [ "25000 / (10 * (300 + 1))", "_____no_output_____" ] ], [ [ "## Evaluate model with test data and view results", "_____no_output_____" ] ], [ [ "# Get Model Predictions for test data\nfrom sklearn.metrics import classification_report\n\npredicted_classes = lstm.predict_classes(x_test)\nprint(classification_report(y_test, predicted_classes, target_names=class_names))", "WARNING:tensorflow:From <ipython-input-24-4511b9dbf53d>:3: Sequential.predict_classes (from tensorflow.python.keras.engine.sequential) is deprecated and will be removed after 2021-01-01.\nInstructions for updating:\nPlease use instead:* `np.argmax(model.predict(x), axis=-1)`, if your model does multi-class classification (e.g. if it uses a `softmax` last-layer activation).* `(model.predict(x) > 0.5).astype(\"int32\")`, if your model does binary classification (e.g. if it uses a `sigmoid` last-layer activation).\n precision recall f1-score support\n\n Negative 0.86 0.83 0.85 12500\n Positive 0.84 0.87 0.85 12500\n\n accuracy 0.85 25000\n macro avg 0.85 0.85 0.85 25000\nweighted avg 0.85 0.85 0.85 25000\n\n" ] ], [ [ "## View some incorrect predictions\n\nLets have a look at some of the incorrectly classified reviews. 
For readability we remove the padding.", "_____no_output_____" ] ], [ [ "predicted_classes_reshaped = np.reshape(y_pred, 25000)\n\nincorrect = np.nonzero(predicted_classes_reshaped != y_test)[0]\n\n# We select the first 10 incorrectly classified reviews\nfor j, incorrect in enumerate(incorrect[0:3]):\n \n predicted = class_names[predicted_classes_reshaped[incorrect]]\n actual = class_names[y_test[incorrect]]\n human_readable_review = decode_review(x_test[incorrect])\n \n print(\"Incorrectly classified Test Review [\"+ str(j+1) +\"]\") \n print(\"Test Review #\" + str(incorrect) + \": Predicted [\"+ predicted + \"] Actual [\"+ actual + \"]\")\n print(\"Test Review Text: \" + human_readable_review.replace(\"<PAD> \", \"\"))\n print(\"\")", "_____no_output_____" ] ], [ [ "## Run your own text against the trained model", "_____no_output_____" ] ], [ [ "# Write your own review\nreview = \"this is the best film i have ever seen it is great and fantastic and i loved it\"\n\n# Encode review (replace word with integers)\ntmp = []\nfor word in review.split(\" \"):\n tmp.append(word_index[word])\n\n# Ensure review is 300 words long (by padding or truncating)\ntmp_padded = sequence.pad_sequences([tmp], maxlen=maxlen) \n\n# Run your processed review against the trained model\nrawprediction = lstm.predict(np.array([tmp_padded][0]))[0][0]\nprediction = int(round(rawprediction))\n\n# Test the model and print the result\nprint(\"Review: \" + review)\nprint(\"Raw Prediction: \" + str(rawprediction))\nprint(\"Predicted Class: \" + class_names[prediction])", "_____no_output_____" ] ], [ [ "## Challenge\n\nYou will be expected to use an Keras LSTM for a classicification task on the *Sprint Challenge*. \n\nReference for some of the code in the section above can be found: https://github.com/markwest1972/LSTM-Example-Google-Colaboratory/blob/master/LSTM_IMDB_Sentiment_Example.ipynb", "_____no_output_____" ], [ "# LSTM Text generation with Keras (Learn)", "_____no_output_____" ], [ "## Overview\n\nWhat else can we do with LSTMs? Since we're analyzing the *sequence*, we can do more than classify - we can *generate* text. I'ved pulled some news stories using [newspaper](https://github.com/codelucas/newspaper/).\n\nThis example is drawn from the Keras [documentation](https://keras.io/examples/lstm_text_generation/).", "_____no_output_____" ] ], [ [ "from tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom tensorflow.keras.optimizers import RMSprop\n\nimport numpy as np\nimport random\nimport sys\nimport os", "_____no_output_____" ], [ "data_files = os.listdir('./articles')", "_____no_output_____" ], [ "# Read in Data\ndata = []\n\nfor file in data_files:\n if file[-3:] == 'txt':\n with open(f'./articles/{file}', 'r', encoding='utf-8') as f:\n data.append(f.read())", "_____no_output_____" ], [ "len(data)", "_____no_output_____" ], [ "data[-1]", "_____no_output_____" ], [ "# Encode Data as Chars\n\n# Gather all text \n# Why? 1. See all possible characters 2. 
For training / splitting later\ntext = \" \".join(data)\n\n# Unique Characters\nchars = list(set(text))\n\n# Lookup Tables\nchar_int = {c:i for i, c in enumerate(chars)} \nint_char = {i:c for i, c in enumerate(chars)} ", "_____no_output_____" ], [ "len(chars)", "_____no_output_____" ], [ "chars", "_____no_output_____" ], [ "# Create the sequence data\nmaxlen = 40\nstep = 5\n\nencoded = [char_int[c] for c in text]\n\nsequences = [] # Each element is 40 chars long\nnext_char = [] # One element for each sequence\n\nfor i in range(0, len(encoded) - maxlen, step):\n \n sequences.append(encoded[i : i + maxlen])\n next_char.append(encoded[i + maxlen])\n \nprint('sequences: ', len(sequences))", "_____no_output_____" ], [ "sequences[0]", "_____no_output_____" ], [ "# Create x & y\nx = np.zeros((len(sequences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sequences),len(chars)), dtype=np.bool)\n\nfor i, sequence in enumerate(sequences):\n for t, char in enumerate(sequence):\n x[i,t,char] = 1\n \n y[i, next_char[i]] = 1", "_____no_output_____" ], [ "x.shape", "_____no_output_____" ], [ "y.shape", "_____no_output_____" ] ], [ [ "##Create Model", "_____no_output_____" ] ], [ [ "# build the model: a single LSTM\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(Dense(len(chars), activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', \n optimizer='adam',\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "#Add Callbacks", "_____no_output_____" ] ], [ [ "def sample(preds):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / 1\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)", "_____no_output_____" ], [ "def on_epoch_end(epoch, _):\n # Function invoked at end of each epoch. Prints generated text.\n print()\n print('----- Generating text after Epoch: %d' % epoch)\n \n start_index = random.randint(0, len(text) - maxlen - 1)\n \n generated = ''\n \n # sentence = text[start_index: start_index + maxlen]\n sentence = \"The police officer was given a medal of \"\n generated += sentence\n \n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n \n for i in range(50):\n x_pred = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_int[char]] = 1\n \n preds = model.predict(x_pred, verbose=0)[0]\n next_index = sample(preds)\n next_char = int_char[next_index]\n \n sentence = sentence[1:] + next_char\n \n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n\nprint_callback = LambdaCallback(on_epoch_end=on_epoch_end)", "_____no_output_____" ], [ "# fit the model\nmodel.fit(x, y,\n batch_size=32,\n epochs=50,\n callbacks=[print_callback])", "_____no_output_____" ] ], [ [ "## Challenge\n\nYou will be expected to use a Keras LSTM to generate text on today's assignment. ", "_____no_output_____" ], [ "# Review\n\n- <a href=\"#p1\">Part 1: </a>Describe Neural Networks used for modeling sequences\n * Sequence Problems:\n - Time Series (like Stock Prices, Weather, etc.)\n - Text Classification\n - Text Generation\n - And many more! 
:D\n * LSTMs are generally preferred over RNNs for most problems\n * LSTMs are typically a single hidden layer of LSTM type; although, other architectures are possible.\n * Keras has LSTMs/RNN layer types implemented nicely\n- <a href=\"#p2\">Part 2: </a>Apply a LSTM to a text generation problem using Keras\n * Shape of input data is very important\n * Can take a while to train\n * You can use it to write movie scripts. :P ", "_____no_output_____" ] ] ]
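Editor's note on the comparison cells above: the SimpleRNN experiment stores `rnn_history = mlp.fit(...)`, so it re-trains the Dense baseline rather than the `rnn` model, and the misclassification cell reads `y_pred`, which is never defined (the predictions live in `predicted_classes`). A minimal corrected sketch, reusing the notebook's own variable names (everything referenced here is assumed to exist exactly as defined in the cells above, and the binary-threshold form follows the deprecation notice printed by Keras in that notebook):

```python
import numpy as np

# Train the SimpleRNN model itself; the original cell called mlp.fit by mistake.
rnn_history = rnn.fit(x_train, y_train,
                      batch_size=batch_size,
                      epochs=5,
                      validation_data=(x_test, y_test))

# Use the LSTM predictions that were actually computed for the error analysis.
predicted_classes = (lstm.predict(x_test) > 0.5).astype("int32").reshape(-1)
incorrect = np.nonzero(predicted_classes != y_test)[0]
```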
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
ecd1f3a66d35973c7a2d2f849f98f2cdd2cde9a0
57,243
ipynb
Jupyter Notebook
VGG2YoutubeData.ipynb
aronfothi/mask_cluster_rcnn
5e1dcf0269166f0d2bce36e5b11e0be4cb585355
[ "Apache-2.0" ]
null
null
null
VGG2YoutubeData.ipynb
aronfothi/mask_cluster_rcnn
5e1dcf0269166f0d2bce36e5b11e0be4cb585355
[ "Apache-2.0" ]
null
null
null
VGG2YoutubeData.ipynb
aronfothi/mask_cluster_rcnn
5e1dcf0269166f0d2bce36e5b11e0be4cb585355
[ "Apache-2.0" ]
null
null
null
120.006289
40,296
0.861293
[ [ [ "%matplotlib inline\nimport os\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nimport json\n\nfrom pycocotools import mask\nfrom skimage import measure\nfrom skimage.morphology import dilation, square\nfrom scipy import ndimage\nimport cv2", "_____no_output_____" ], [ "\n", "_____no_output_____" ], [ "data_root = '/home/fothar/data/vis/'\n\nwith open(os.path.join(data_root, 'annotations/instances_train_sub.json')) as json_file:\n vis_data = json.load(json_file)", "_____no_output_____" ], [ "print('vis_data', vis_data.keys())\nprint('videos', vis_data['videos'][0].keys())\nprint('annotations', vis_data['annotations'][0].keys())\n#print('segmentations', vis_data['annotations'][0]['segmentations'])", "vis_data dict_keys(['info', 'licenses', 'videos', 'categories', 'annotations'])\nvideos dict_keys(['width', 'length', 'date_captured', 'license', 'flickr_url', 'file_names', 'id', 'coco_url', 'height'])\nannotations dict_keys(['height', 'width', 'length', 'category_id', 'segmentations', 'bboxes', 'video_id', 'iscrowd', 'id', 'areas'])\n" ], [ "print('videos', vis_data['videos'][0].keys())\nprint('videos', vis_data['videos'][0]['file_names'])\nprint('annotations', vis_data['annotations'][0]['segmentations'][10])", "videos dict_keys(['width', 'length', 'date_captured', 'license', 'flickr_url', 'file_names', 'id', 'coco_url', 'height'])\nvideos ['0043f083b5/00000.jpg', '0043f083b5/00005.jpg', '0043f083b5/00010.jpg', '0043f083b5/00015.jpg', '0043f083b5/00020.jpg', '0043f083b5/00025.jpg', '0043f083b5/00030.jpg', '0043f083b5/00035.jpg', '0043f083b5/00040.jpg', '0043f083b5/00045.jpg', '0043f083b5/00050.jpg', '0043f083b5/00055.jpg', '0043f083b5/00060.jpg', '0043f083b5/00065.jpg', '0043f083b5/00070.jpg', '0043f083b5/00075.jpg', '0043f083b5/00080.jpg', '0043f083b5/00085.jpg', '0043f083b5/00090.jpg', '0043f083b5/00095.jpg']\nannotations {'counts': [802432, 64, 656, 65, 655, 65, 655, 65, 655, 65, 656, 64, 656, 64, 657, 63, 657, 63, 657, 63, 657, 62, 659, 61, 660, 59, 662, 57, 664, 55, 665, 52, 669, 50, 671, 48, 673, 46, 675, 45, 676, 44, 676, 44, 677, 43, 679, 41, 679, 42, 677, 6, 2, 37, 675, 5, 3, 37, 675, 5, 3, 37, 675, 5, 3, 37, 675, 5, 3, 37, 675, 5, 3, 38, 675, 4, 5, 36, 685, 35, 685, 35, 686, 34, 688, 32, 689, 30, 692, 27, 92471], 'size': [720, 1280]}\n" ], [ "image_dir = \"/home/fothar/rats/images\"\nmask_dir = \"/home/fothar/rats/masks\"\nbg_dir = \"/home/fothar/rats/back_ground\"\n\nvideo_dir = \"/home/fothar/rats/videos\"\nvideo_mask_dir = \"/home/fothar/rats/video_masks\"\n\naug_image_dir = \"/home/fothar/rats/aug_images\"\naug_mask_dir = \"/home/fothar/rats/aug_masks\"", "_____no_output_____" ], [ "def imFunc(e):\n return int(e[3:-4])\n\ndef maskFunc(e):\n return int(e[5:-4])\n\ndef bgFunc(e):\n return int(e[3:-4])", "_____no_output_____" ], [ "image_filenames = os.listdir(image_dir)\nimage_filenames.sort(key=imFunc)\n\nvalid_ids = [127, 255]\nmask_filenames = os.listdir(mask_dir)\nmask_filenames.sort(key=maskFunc)\n\nbg_filenames = os.listdir(bg_dir)\nbg_filenames.sort(key=bgFunc)\n\nbg_filenames = bg_filenames[21:]\n\nfig, axs = plt.subplots(nrows=10, ncols=4, figsize=(30, 60),\n subplot_kw={'xticks': [], 'yticks': []})\n\n#filenames = zip(axs, image_filenames[1::5][:10], mask_filenames[1::5][:10], bg_filenames[1::5][:10])\n#filenames = zip(axs, image_filenames, mask_filenames, bg_filenames)\nfilenames = zip(image_filenames, mask_filenames, bg_filenames)\n\nvid_idx_list = []\n\nis_vid = False\nstart_idx = 0\nfor i, (image_filename, mask_filename, 
bg_filename) in enumerate(filenames):\n frame_image = cv2.imread(os.path.join(image_dir, image_filename))\n frame_mask = cv2.imread(os.path.join(mask_dir, mask_filename), 0)\n \n frame_mask[frame_mask==valid_ids[0]] = 1\n frame_mask[frame_mask==valid_ids[1]] = 2\n \n blobs_labels = measure.label(frame_mask, background=0)\n #print(np.unique(blobs_labels))\n #print(np.sum(frame_mask==1), np.sum(frame_mask==2))\n \n if np.sum(frame_mask==1)> 0 and np.sum(frame_mask==2)>0:\n if not is_vid:\n start_idx = i\n is_vid = True\n else:\n if is_vid:\n vid_idx_list.append((start_idx, i))\n is_vid = False\n \nif is_vid:\n vid_idx_list.append((start_idx, i+1))\n \n#print(vid_idx_list)\nvid_idx_list = [vid_idx for vid_idx in vid_idx_list if (vid_idx[1] - vid_idx[0] > 5)]\n#print(vid_idx_list)\n \n \n #ax[3].imshow(dilated_one_rat_image) \n\n#plt.tight_layout()\n#plt.show()", "_____no_output_____" ], [ "\nif not os.path.exists(video_dir):\n os.mkdir(video_dir)\n \nif not os.path.exists(video_mask_dir):\n os.mkdir(video_mask_dir)\n \n", "_____no_output_____" ], [ "def augment_frame(frame_image, frame_mask, frame_bg):\n frame_mask[frame_mask==valid_ids[0]] = 1\n frame_mask[frame_mask==valid_ids[1]] = 2\n \n dilated_frame_mask = dilation(frame_mask, square(15)) \n \n center_rat_1 = ndimage.measurements.center_of_mass(frame_mask==1)\n center_rat_2 = ndimage.measurements.center_of_mass(frame_mask==2) \n move = (int(center_rat_2[0] - center_rat_1[0]), int(center_rat_2[1] - center_rat_1[1])) \n \n moved_mask = (frame_mask==1) \n whr0, whr1 = np.where(moved_mask) \n \n d = min(0, moved_mask.shape[0] - (move[0] + max(whr0) + 1))\n \n move = (move[0] + d, move[1]) \n whr0 = whr0 + move[0]\n whr1 = whr1 + move[1]\n \n moved_mask[...] = 0 \n moved_mask[whr0, whr1] = 1\n \n #ax[0].imshow(frame_image)\n #ax[1].imshow(frame_mask)\n \n new_mask = np.zeros_like(frame_mask)\n \n new_mask[frame_mask==2] = 2\n new_mask[moved_mask==1] = 1 \n #ax[2].imshow(new_mask)\n \n dilated_one_rat_image = frame_image.copy() \n dilated_one_rat_image[dilated_frame_mask==1] = frame_bg[dilated_frame_mask==1] \n dilated_one_rat_image[moved_mask==1] = frame_image[frame_mask==1]\n \n #### blur\n blurred_img = cv2.GaussianBlur(dilated_one_rat_image, (9, 9), 0)\n blur_moved_mask = moved_mask.copy().astype(np.uint8)\n #blur_moved_mask = np.stack((blur_moved_mask,blur_moved_mask, blur_moved_mask), axis=2)\n contours, hierarchy = cv2.findContours(blur_moved_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n cv2.drawContours(blur_moved_mask, contours, -1, (2),2)\n blur_moved_mask = np.stack((blur_moved_mask,blur_moved_mask, blur_moved_mask), axis=2)\n dilated_one_rat_image = np.where(blur_moved_mask==np.array([2, 2, 2]), blurred_img, dilated_one_rat_image)\n \n return new_mask, dilated_one_rat_image", "_____no_output_____" ], [ "def coco_segm(gt_mask):\n ground_truth_binary_mask = gt_mask.astype(np.uint8)\n fortran_ground_truth_binary_mask = np.asfortranarray(ground_truth_binary_mask)\n encoded_mask = mask.encode(fortran_ground_truth_binary_mask)\n\n area = mask.area(encoded_mask)\n bounding_box = mask.toBbox(encoded_mask)\n encoded_mask['counts'] = encoded_mask['counts'].decode('ascii')\n \n return encoded_mask, area.tolist(), bounding_box.tolist()\n ", "_____no_output_____" ], [ "rat_info = dict(description= 'Rats', url= 'https://rats.org/home', version= '0.1', year= 2020, contributor= 'ELTE', date_created= '2020-01-11 00:55:41.903634')\nrat_licenses = [dict(url= 'https://creativecommons.org/licenses/by/4.0/', id= 1, name= 'Creative Commons 
Attribution 4.0 License')]\nrat_categories = [dict(supercategory= 'object', id= 1, name ='rat')]\nrat_data = dict(info=rat_info, \n licenses=rat_licenses,\n categories=rat_categories,\n videos=[],\n annotations=[])\n\n\nann_id = 0\nfor v, vid_idx in enumerate(vid_idx_list):\n \n video = dict(width= 640,\n length= vid_idx[1]-vid_idx[0],\n date_captured= '',\n license= '',\n flickr_url= '',\n file_names= [],\n id= v,\n coco_url= '',\n height=420)\n \n aug_video = video.copy()\n aug_vid_id = v + len(vid_idx_list)\n aug_video['id'] = aug_vid_id\n \n ann_rat_1 = dict(height= 420,\n width= 640,\n length= 1,\n category_id= 1,\n segmentations= [],\n bboxes= [],\n video_id= v,\n iscrowd= False,\n id= ann_id,\n areas= [])\n \n ann_rat_2 = ann_rat_1.copy()\n ann_rat_2['id'] = ann_id + 1\n \n aug_ann_rat_1 = ann_rat_1.copy()\n aug_ann_rat_1['id'] = ann_id + 2\n aug_ann_rat_1['video_id'] = aug_vid_id\n \n aug_ann_rat_2 = aug_ann_rat_1.copy()\n aug_ann_rat_2['id'] = ann_id + 3\n \n ann_id += 4\n \n vid_path = os.path.join(video_dir, str(v))\n if not os.path.exists(vid_path):\n os.mkdir(vid_path)\n vid_mask_path=os.path.join(video_mask_dir, str(v))\n if not os.path.exists(vid_mask_path):\n os.mkdir(vid_mask_path)\n \n aug_vid_path = os.path.join(video_dir, 'aug_{}'.format(v))\n if not os.path.exists(aug_vid_path):\n os.mkdir(aug_vid_path)\n aug_vid_mask_path=os.path.join(video_mask_dir, 'aug_{}'.format(v))\n if not os.path.exists(aug_vid_mask_path):\n os.mkdir(aug_vid_mask_path)\n \n video_files = zip(image_filenames[vid_idx[0]:vid_idx[1]], mask_filenames[vid_idx[0]:vid_idx[1]], bg_filenames[vid_idx[0]:vid_idx[1]])\n for f, (image_filename, mask_filename, bg_filename) in enumerate(video_files): \n #print(image_filename, mask_filename, bg_filename)\n frame_image = cv2.imread(os.path.join(image_dir, image_filename))\n frame_mask = cv2.imread(os.path.join(mask_dir, mask_filename), 0)\n frame_bg = cv2.imread(os.path.join(bg_dir, bg_filename))\n \n \n video['file_names'].append(os.path.join(str(v), '{}.png'.format(f)))\n \n encoded_mask, area, bbox = coco_segm(frame_mask==valid_ids[0])\n ann_rat_1['segmentations'].append(encoded_mask)\n ann_rat_1['bboxes'].append(bbox)\n ann_rat_1['areas'].append(area)\n \n encoded_mask, area, bbox = coco_segm(frame_mask==valid_ids[1])\n ann_rat_2['segmentations'].append(encoded_mask)\n ann_rat_2['bboxes'].append(bbox)\n ann_rat_2['areas'].append(area)\n \n cv2.imwrite(os.path.join(vid_path, '{}.png'.format(f)), frame_image)\n cv2.imwrite(os.path.join(vid_mask_path, '{}.png'.format(f)), frame_mask)\n \n new_mask, dilated_one_rat_image = augment_frame(frame_image, frame_mask, frame_bg)\n \n encoded_mask, area, bbox = coco_segm(new_mask==1)\n aug_ann_rat_1['segmentations'].append(encoded_mask)\n aug_ann_rat_1['bboxes'].append(bbox)\n aug_ann_rat_1['areas'].append(area)\n \n encoded_mask, area, bbox = coco_segm(new_mask==2)\n aug_ann_rat_2['segmentations'].append(encoded_mask)\n aug_ann_rat_2['bboxes'].append(bbox)\n aug_ann_rat_2['areas'].append(area)\n \n new_mask[new_mask==1] = valid_ids[0]\n new_mask[new_mask==2] = valid_ids[1]\n \n aug_video['file_names'].append(os.path.join('aug_{}'.format(v), '{}.png'.format(f)))\n cv2.imwrite(os.path.join(aug_vid_path, '{}.png'.format(f)), dilated_one_rat_image)\n cv2.imwrite(os.path.join(aug_vid_mask_path, '{}.png'.format(f)), new_mask)\n \n #print('xxxx')\n rat_data['videos'].append(video)\n rat_data['videos'].append(aug_video)\n \n rat_data['annotations'].append(ann_rat_1)\n rat_data['annotations'].append(ann_rat_2)\n 
rat_data['annotations'].append(aug_ann_rat_1)\n rat_data['annotations'].append(aug_ann_rat_2)\n ", "_____no_output_____" ], [ "with open('/home/fothar/rats/annotations/instances_train.json', 'w') as outfile:\n json.dump(rat_data, outfile)", "_____no_output_____" ], [ "from mmdet.datasets import build_dataset\n\nfrom mmdet.datasets.youtube import RatDataset", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd201ed747377cecd6efc96bfe8067c1fd465a0
20,117
ipynb
Jupyter Notebook
sentiment-analysis/Sentiment-Analysis-RNN/rnn_sentiment_analysis.ipynb
gotamist/nlp
49ae117d2f1ef60c3c523696a4af61d816bf0469
[ "MIT" ]
null
null
null
sentiment-analysis/Sentiment-Analysis-RNN/rnn_sentiment_analysis.ipynb
gotamist/nlp
49ae117d2f1ef60c3c523696a4af61d816bf0469
[ "MIT" ]
null
null
null
sentiment-analysis/Sentiment-Analysis-RNN/rnn_sentiment_analysis.ipynb
gotamist/nlp
49ae117d2f1ef60c3c523696a4af61d816bf0469
[ "MIT" ]
1
2020-04-27T12:44:42.000Z
2020-04-27T12:44:42.000Z
47.223005
4,564
0.544017
[ [ [ "# Sentiment Analysis using RNNs\n", "_____no_output_____" ], [ "## Data \nHow to use RNNs to perform sentiment analysis in Keras. Keras has a built-in [IMDb movie reviews dataset](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) that we can use.", "_____no_output_____" ] ], [ [ "from keras.datasets import imdb # import the built-in imdb dataset in Keras\n\n# Set the vocabulary size\nvocabulary_size = 5000\n\n# Load in training and test data (note the difference in convention compared to scikit-learn)\n(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=vocabulary_size)\nprint(\"Loaded dataset with {} training samples, {} test samples\".format(len(X_train), len(X_test)))", "/home/thojo/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "# Inspect a sample review and its label\nprint(\"--- Review ---\")\nprint(X_train[7])\nprint(\"--- Label ---\")\nprint(y_train[7])", "--- Review ---\n[1, 4, 2, 716, 4, 65, 7, 4, 689, 4367, 2, 2343, 4804, 2, 2, 2, 2, 2315, 2, 2, 2, 2, 4, 2, 628, 2, 37, 9, 150, 4, 2, 4069, 11, 2909, 4, 2, 847, 313, 6, 176, 2, 9, 2, 138, 9, 4434, 19, 4, 96, 183, 26, 4, 192, 15, 27, 2, 799, 2, 2, 588, 84, 11, 4, 3231, 152, 339, 2, 42, 4869, 2, 2, 345, 4804, 2, 142, 43, 218, 208, 54, 29, 853, 659, 46, 4, 882, 183, 80, 115, 30, 4, 172, 174, 10, 10, 1001, 398, 1001, 1055, 526, 34, 3717, 2, 2, 2, 17, 4, 2, 1094, 871, 64, 85, 22, 2030, 1109, 38, 230, 9, 4, 4324, 2, 251, 2, 1034, 195, 301, 14, 16, 31, 7, 4, 2, 8, 783, 2, 33, 4, 2945, 103, 465, 2, 42, 845, 45, 446, 11, 1895, 19, 184, 76, 32, 4, 2, 207, 110, 13, 197, 4, 2, 16, 601, 964, 2152, 595, 13, 258, 4, 1730, 66, 338, 55, 2, 4, 550, 728, 65, 1196, 8, 1839, 61, 1546, 42, 2, 61, 602, 120, 45, 2, 6, 320, 786, 99, 196, 2, 786, 2, 4, 225, 4, 373, 1009, 33, 4, 130, 63, 69, 72, 1104, 46, 1292, 225, 14, 66, 194, 2, 1703, 56, 8, 803, 1004, 6, 2, 155, 11, 4, 2, 3231, 45, 853, 2029, 8, 30, 6, 117, 430, 19, 6, 2, 9, 15, 66, 424, 8, 2337, 178, 9, 15, 66, 424, 8, 1465, 178, 9, 15, 66, 142, 15, 9, 424, 8, 28, 178, 662, 44, 12, 17, 4, 130, 898, 1686, 9, 6, 2, 267, 185, 430, 4, 118, 2, 277, 15, 4, 1188, 100, 216, 56, 19, 4, 357, 114, 2, 367, 45, 115, 93, 788, 121, 4, 2, 79, 32, 68, 278, 39, 8, 818, 162, 4165, 237, 600, 7, 98, 306, 8, 157, 549, 628, 11, 6, 2, 13, 824, 15, 4104, 76, 42, 138, 36, 774, 77, 1059, 159, 150, 4, 229, 497, 8, 1493, 11, 175, 251, 453, 19, 2, 189, 12, 43, 127, 6, 394, 292, 7, 2, 4, 107, 8, 4, 2826, 15, 1082, 1251, 9, 906, 42, 1134, 6, 66, 78, 22, 15, 13, 244, 2519, 8, 135, 233, 52, 44, 10, 10, 466, 112, 398, 526, 34, 4, 1572, 4413, 2, 1094, 225, 57, 599, 133, 225, 6, 227, 7, 541, 4323, 6, 171, 139, 7, 539, 2, 56, 11, 6, 3231, 21, 164, 25, 426, 81, 33, 344, 624, 19, 6, 4617, 7, 2, 2, 6, 2, 4, 22, 9, 1082, 629, 237, 45, 188, 6, 55, 655, 707, 2, 956, 225, 1456, 841, 42, 1310, 225, 6, 2493, 1467, 2, 2828, 21, 4, 2, 9, 364, 23, 4, 2228, 2407, 225, 24, 76, 133, 18, 4, 189, 2293, 10, 10, 814, 11, 2, 11, 2642, 14, 47, 15, 682, 364, 352, 168, 44, 12, 45, 24, 913, 93, 21, 247, 2441, 4, 116, 34, 35, 1859, 8, 72, 177, 9, 164, 8, 901, 344, 44, 13, 191, 135, 13, 126, 421, 233, 18, 259, 10, 10, 4, 2, 2, 4, 2, 3074, 7, 112, 199, 753, 357, 39, 63, 12, 115, 2, 763, 8, 15, 35, 3282, 1523, 65, 57, 599, 6, 1916, 277, 1730, 
37, 25, 92, 202, 6, 2, 44, 25, 28, 6, 22, 15, 122, 24, 4171, 72, 33, 32]\n--- Label ---\n0\n" ] ], [ [ "The label is an integer (0 for negative, 1 for positive), and the review itself is stored as a sequence of integers. These are word IDs that have been preassigned to individual words. To map them back to the original words, we can use the dictionary returned by `imdb.get_word_index()`.", "_____no_output_____" ] ], [ [ "# Map word IDs back to words\nword2id = imdb.get_word_index()\nid2word = {i: word for word, i in word2id.items()}\nprint(\"--- Review (with words) ---\")\nprint([id2word.get(i, \" \") for i in X_train[7]])\nprint(\"--- Label ---\")\nprint(y_train[7])", "--- Review (with words) ---\n['the', 'of', 'and', 'local', 'of', 'their', 'br', 'of', 'attention', 'widow', 'and', 'captures', 'parties', 'and', 'and', 'and', 'and', 'excitement', 'and', 'and', 'and', 'and', 'of', 'and', 'english', 'and', 'like', 'it', 'years', 'of', 'and', 'unintentional', 'this', 'hitchcock', 'of', 'and', 'learn', 'everyone', 'is', 'quite', 'and', 'it', 'and', 'such', 'it', 'bonus', 'film', 'of', 'too', 'seems', 'he', 'of', 'enough', 'for', 'be', 'and', 'editing', 'and', 'and', 'please', 'great', 'this', 'of', 'shoots', 'thing', '3', 'and', \"it's\", 'mentioning', 'and', 'and', 'given', 'parties', 'and', 'back', 'out', 'interesting', 'times', 'no', 'all', 'average', 'talking', 'some', 'of', 'nor', 'seems', 'into', 'best', 'at', 'of', 'every', 'cast', 'i', 'i', 'inside', 'keep', 'inside', 'large', 'viewer', 'who', 'obscure', 'and', 'and', 'and', 'movie', 'of', 'and', 'entirely', \"you've\", 'see', 'because', 'you', 'deals', 'successful', 'her', 'anything', 'it', 'of', 'dedicated', 'and', 'hard', 'and', 'further', \"that's\", 'takes', 'as', 'with', 'by', 'br', 'of', 'and', 'in', 'minute', 'and', 'they', 'of', 'westerns', 'watch', 'seemed', 'and', \"it's\", 'lee', 'if', 'oh', 'this', 'japan', 'film', 'around', 'get', 'an', 'of', 'and', 'always', 'life', 'was', 'between', 'of', 'and', 'with', 'group', 'rate', 'code', \"film's\", 'was', 'although', 'of', 'arts', 'had', 'death', 'time', 'and', 'of', 'anyway', 'romantic', 'their', 'won', 'in', 'kevin', 'only', 'flying', \"it's\", 'and', 'only', 'cut', 'show', 'if', 'and', 'is', 'star', 'stay', 'movies', 'both', 'and', 'stay', 'and', 'of', 'music', 'of', 'tell', 'missing', 'they', 'of', 'here', 'really', 'me', 'we', 'value', 'some', 'silent', 'music', 'as', 'had', 'thought', 'and', 'realized', 'she', 'in', 'sorry', 'reasons', 'is', 'and', '10', 'this', 'of', 'and', 'shoots', 'if', 'average', 'remembered', 'in', 'at', 'is', 'over', 'worse', 'film', 'is', 'and', 'it', 'for', 'had', 'absolutely', 'in', 'naive', 'want', 'it', 'for', 'had', 'absolutely', 'in', 'j', 'want', 'it', 'for', 'had', 'back', 'for', 'it', 'absolutely', 'in', 'one', 'want', 'shots', 'has', 'that', 'movie', 'of', 'here', 'write', 'whatsoever', 'it', 'is', 'and', 'set', 'got', 'worse', 'of', 'where', 'and', 'once', 'for', 'of', 'accent', 'after', 'saw', 'she', 'film', 'of', 'rest', 'little', 'and', 'camera', 'if', 'best', 'way', 'elements', 'know', 'of', 'and', 'also', 'an', 'were', 'sense', 'or', 'in', 'realistic', 'actually', 'satan', \"he's\", 'score', 'br', 'any', 'himself', 'in', 'another', 'type', 'english', 'this', 'is', 'and', 'was', 'tom', 'for', 'dating', 'get', \"it's\", 'such', 'from', 'fantastic', 'will', 'pace', 'new', 'years', 'of', 'guy', 'game', 'in', 'murders', 'this', 'us', 'hard', 'lives', 'film', 'and', 'fact', 'that', 'out', 'end', 'is', 'getting', 'together', 'br', 'and', 
'of', 'seen', 'in', 'of', 'jail', 'for', 'sees', 'utterly', 'it', 'meet', \"it's\", 'depth', 'is', 'had', 'do', 'you', 'for', 'was', 'rather', 'convince', 'in', 'why', 'last', 'very', 'has', 'i', 'i', 'throughout', 'never', 'keep', 'viewer', 'who', 'of', 'becoming', 'switch', 'and', 'entirely', 'music', 'even', 'interest', 'scene', 'music', 'is', 'far', 'br', 'voice', 'riveting', 'is', 'again', 'something', 'br', 'decent', 'and', 'she', 'this', 'is', 'shoots', 'not', 'director', 'have', 'against', 'people', 'they', 'line', 'cinematography', 'film', 'is', 'couples', 'br', 'and', 'and', 'is', 'and', 'of', 'you', 'it', 'sees', 'hero', \"he's\", 'if', \"can't\", 'is', 'time', 'husband', 'silly', 'and', 'result', 'music', 'image', 'sequences', \"it's\", 'chase', 'music', 'is', 'veteran', 'include', 'and', 'freeman', 'not', 'of', 'and', 'it', 'along', 'are', 'of', 'hearing', 'cutting', 'music', 'his', 'get', 'scene', 'but', 'of', 'fact', 'correct', 'i', 'i', 'means', 'this', 'and', 'this', 'blockbuster', 'as', 'there', 'for', 'disappointed', 'along', 'wrong', 'few', 'has', 'that', 'if', 'his', 'weird', 'way', 'not', 'girl', 'display', 'of', 'love', 'who', 'so', 'friendship', 'in', 'we', 'down', 'it', 'director', 'in', 'situation', 'line', 'has', 'was', 'big', 'why', 'was', 'your', 'supposed', 'last', 'but', 'especially', 'i', 'i', 'of', 'and', 'and', 'of', 'and', 'internet', 'br', 'never', 'give', 'theme', 'rest', 'or', 'really', 'that', 'best', 'and', 'release', 'in', 'for', 'so', 'multi', 'random', 'their', 'even', 'interest', 'is', 'judge', 'once', 'arts', 'like', 'have', 'then', 'own', 'is', 'and', 'has', 'have', 'one', 'is', 'you', 'for', 'off', 'his', 'dutch', 'we', 'they', 'an']\n--- Label ---\n0\n" ], [ "import numpy as np\nnp.max( [ np.max([len( review_int ) for review_int in X_train]), \n np.max([len( review_int ) for review_int in X_test]) ] )", "_____no_output_____" ], [ "np.min([len( review_int ) for review_int in X_train])", "_____no_output_____" ], [ "np.max([len( review_int ) for review_int in X_train])", "_____no_output_____" ] ], [ [ "Unlike our Bag-of-Words approach, where we simply summarized the counts of each word in a document, this representation essentially retains the entire sequence of words (minus punctuation, stopwords, etc.). This is critical for RNNs to function. But it also means that now the features can be of different lengths.\n\n\nMax review length in the training set is 2494 while the minimum is only 11\n\n\nIn order to feed this data into the RNN, all input documents must have the same length. Let's limit the maximum review length to `max_words` by truncating longer reviews and **padding** shorter reviews with a null value (0). This can be done using the [`pad_sequences()`](https://keras.io/preprocessing/sequence/#pad_sequences) function in Keras. For now, we set `max_words` to 500.", "_____no_output_____" ] ], [ [ "from keras.preprocessing import sequence\n\n# Set the maximum number of words per document (for both training and testing)\nmax_words = 500\n\n# TPad sequences in X_train and X_test\nX_train = sequence.pad_sequences(X_train, maxlen = max_words )\nX_test = sequence.pad_sequences(X_test, maxlen = max_words )", "_____no_output_____" ] ], [ [ "### Design an RNN model for sentiment analysis\n\nBuild your model architecture in the code cell below. 
\n\nThe input is a sequence of words (technically, integer word IDs) of maximum length = `max_words`, and the output is a binary sentiment label (0 or 1).", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Embedding, LSTM, Dense, Dropout, Activation\n\n# Design your model\nembedding_size = 50 # The embedding size\nuse_dropout = True\nmodel = Sequential()\nmodel.add( Embedding(vocabulary_size, embedding_size, input_length=max_words) )\nmodel.add( LSTM(50))\n\nif use_dropout:\n model.add(Dropout(0.25))\n# model.add(TimeDistributed(Dense(vocabulary)))\nmodel.add(Dense(1, activation='sigmoid')) #, input_dim=784\n# model.add(Activation('sigmoid'))\nprint(model.summary())", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 500, 50) 250000 \n_________________________________________________________________\nlstm_1 (LSTM) (None, 50) 20200 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 50) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 51 \n=================================================================\nTotal params: 270,251\nTrainable params: 270,251\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "#### Architecture and parameters\n\nThe first layer is an embedding layer of size 32. Then there are two LSTM layers (100 hidden units each). Then a dropout layer and one dense layer of size 1, with a sigmoid activation.\n293,701 trainable parameters.\n\n\n### Train and evaluate model\n", "_____no_output_____" ] ], [ [ "# Compile your model, specifying a loss function, optimizer, and metrics\nmodel.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])", "_____no_output_____" ], [ "# Specify training parameters: batch size and number of epochs\nbatch_size = 64\nnum_epochs = 5\n\n# Reserve/specify some training data for validation (not to be used for training)\nX_valid, y_valid = X_train[:batch_size], y_train[:batch_size] # first batch_size samples\nX_train2, y_train2 = X_train[batch_size:], y_train[batch_size:] # rest for training\n# Train model\nmodel.fit( x=X_train2, y=y_train2, validation_data=(X_valid, y_valid),\n batch_size=batch_size, epochs=num_epochs )", "Train on 24936 samples, validate on 64 samples\nEpoch 1/5\n24936/24936 [==============================] - 277s 11ms/step - loss: 0.4647 - acc: 0.7727 - val_loss: 0.2064 - val_acc: 0.9219\nEpoch 2/5\n24936/24936 [==============================] - 273s 11ms/step - loss: 0.2829 - acc: 0.8863 - val_loss: 0.2024 - val_acc: 0.9062\nEpoch 3/5\n24936/24936 [==============================] - 273s 11ms/step - loss: 0.2416 - acc: 0.9047 - val_loss: 0.2663 - val_acc: 0.9062\nEpoch 4/5\n24936/24936 [==============================] - 273s 11ms/step - loss: 0.1976 - acc: 0.9249 - val_loss: 0.2217 - val_acc: 0.9219\nEpoch 5/5\n24936/24936 [==============================] - 273s 11ms/step - loss: 0.1969 - acc: 0.9237 - val_loss: 0.2887 - val_acc: 0.8906\n" ], [ "# Save your model, so that you can quickly load it in future (and perhaps resume training)\nmodel_file = \"rnn_model.h5\" # HDF5 file\nimport os\ncache_dir = os.path.join(\"cache\", \"sentiment_analysis\") # where to store cache files\nos.makedirs(cache_dir, exist_ok=True) # ensure cache directory 
exists\n\nmodel.save(os.path.join(cache_dir, model_file))\n# Can load it using keras.models.load_model()\n# from keras.models import load_model\n# model = load_model(os.path.join(cache_dir, model_file))", "_____no_output_____" ] ], [ [ "Once you have trained your model, it's time to see how well it performs on unseen test data.", "_____no_output_____" ] ], [ [ "# Evaluate the model on the test set\nscores = model.evaluate(X_test, y_test, verbose=0) # returns loss and other metrics specified in model.compile()\nprint(\"Test accuracy:\", scores[1]) # scores[1] should correspond to accuracy if we passed in metrics=['accuracy']", "Test accuracy: 0.87664\n" ], [ "# n=5\n# print( model.predict(X_test[n:n+1]) ) #just trying out\n# # print([id2word[i] for i in X_test[0]])\n# print([id2word.get(i, \" \") for i in X_test[n]])", "_____no_output_____" ] ], [ [ "#### Comparing RNNs and Traditional Methods\n\nHow well does your RNN model perform compared to the BoW + Gradient-Boosted Decision Trees?\n\n[GaussianNB] Accuracy: train = 0.8198, test = 0.72768\n\n[GradientBoostingClassifier] Accuracy: train = 0.79472, test = 0.79004\n\nRNN Test accuracy: 0.87664\n\n## Extensions\n\nExperimenting with different architectures, layers and parameters. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecd2041b4a298ec237373ea1b9c46a277c119faa
230,352
ipynb
Jupyter Notebook
FPL fiddles 2018.ipynb
linkmic/fantasy
0055be606eabe8a99f020e5eb71bc77daf72fde7
[ "MIT" ]
null
null
null
FPL fiddles 2018.ipynb
linkmic/fantasy
0055be606eabe8a99f020e5eb71bc77daf72fde7
[ "MIT" ]
null
null
null
FPL fiddles 2018.ipynb
linkmic/fantasy
0055be606eabe8a99f020e5eb71bc77daf72fde7
[ "MIT" ]
null
null
null
46.526358
16,140
0.523642
[ [ [ "import pandas as pd\nfrom bs4 import BeautifulSoup\nimport bs4\nimport requests\nimport json\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\nimport datetime\nimport numpy as np\nimport os\nfrom numba import jit\n\n%matplotlib inline\nsns.set(style=\"whitegrid\")\n", "_____no_output_____" ], [ "# Download current premier league table\nteam_dict = {}\n\nurl = 'http://www.espnfc.com/barclays-premier-league/23/table'\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, 'html.parser') \n# Note: html5lib deals better with broken html than lxml\n\nfor td in soup.findAll('td', { 'class' : 'pos' }):\n rank = int(td.text)\n res = [i.text for i in td.next_siblings if isinstance(i, bs4.element.Tag) and i.text!='\\xa0']\n team_name = res[0].strip()\n \n values = []\n for i in res[1:]:\n a = i.replace(u'\\xa0', '0')\n values.append(int(a))\n \n team_dict[team_name] = [rank] + values\n \n\nteam_df = pd.DataFrame.from_dict(team_dict, orient='index')\ncols = ['Pos','P_ov','W_ov','D_ov','L_ov','F_ov','A_ov',\n 'W_hm','D_hm','L_hm','F_hm','A_hm', 'W_aw',\n 'D_aw','L_aw','F_aw','A_aw','GD','PTS']\nteam_df.columns = cols\nteam_df = team_df.sort_values('Pos')\nteam_df['team'] = team_df.index\nteam_df = team_df[['team']+cols]\nteam_df.head(5)\n", "_____no_output_____" ], [ "# Download players from the FPL site's resource API\nurl = \"https://fantasy.premierleague.com/drf/bootstrap-static\"\nr = requests.get(url)\ndata = json.loads(r.text)\n\nall_players = data['elements']\nall_teams = data['teams']\n\nteam_codes = {}\nfor team in all_teams:\n team_codes[int(team['code'])] = team['short_name']\n\nCE = data['current-event'] or 0\nNE = data['next-event'] or 0\nGW = CE\n\nplayer_dict = {}\nposition_dict = {}\ncost_dict = {}\nstatus_dict = {}\nteam_dict = {}\nfor i in all_players:\n player_dict[i['id']] = i['web_name']\n position_dict[i['id']] = i['element_type']\n cost_dict[i['id']] = i['now_cost']\n status_dict[i['id']] = i['status']\n \n # some players get transferred out of PL and have invalid teams\n try:\n team_dict[i['id']] = team_codes[i['team_code']]\n except KeyError:\n team_dict[i['id']] = ''\n print(\"KeyError - player has invalid team: id %d, team code%d\" % (i['id'], i['team_code']))\n \n\n# Download player history\nhistory_dict = {};\ncurrent_dict = {};\nfor i in player_dict: \n url = \"https://fantasy.premierleague.com/drf/element-summary/\" + str(i)\n r = requests.get(url)\n data = json.loads(r.text)\n # data['fixtures']\n history = data['history']\n history_past = data['history_past']\n \n # Dataset with total_points per historic season and calculated average points per season\n points_seasons = np.zeros((13,), dtype=np.int)\n for season in history_past:\n points_seasons[season['season']] = int(season['total_points']) \n \n history_dict[i] = points_seasons\n \n # Dataset with points for current season\n points_this = np.zeros((GW+1,), dtype=np.int)\n for game in history:\n points_this[int(game['round'])-1] = int(game['total_points'])\n \n current_dict[i] = points_this\n \nprint(\"Player data downloaded..\")\n ", "Player data downloaded..\n" ], [ "team_dict", "_____no_output_____" ], [ "# GameWeek\nGW", "_____no_output_____" ], [ "# Next GameWeek\nNE", "_____no_output_____" ], [ "df = pd.DataFrame.from_dict(history_dict, orient='index')\ncols = ['2005/06', '2006/07', '2007/08', '2008/09', '2009/10', '2010/11',\n '2011/12', '2012/13', '2013/14', '2014/15', '2015/16', '2016/17', '2017/18']\ndf.columns = cols\ndf.insert(0, 'Name', player_dict.values())\ndf.insert(1, 
'Position', position_dict.values())\ndf.insert(2, \"Cost\", cost_dict.values())\ndf.insert(3, \"Status\", status_dict.values())\ndf.insert(4, \"Team\", team_dict.values())\ndf = df.replace(0, np.NaN)\ndf2 = df[['2016/17','2017/18']]\ndf['avg2yr'] = df2.mean(axis=1)\ndf['cost-pt'] = df['2017/18']/df['Cost']\ndf = df.sort_values('cost-pt', axis=0, ascending=False)\ndf.drop(['2005/06','2006/07'], axis=1, inplace=True)\ndf.head(20)", "_____no_output_____" ], [ "df1 = pd.DataFrame.from_dict(current_dict, orient='index')\ncols = list(range(1,NE+1))\ndf1.columns = cols", "_____no_output_____" ], [ "df1.insert(0, 'Name', player_dict.values())\ndf1.insert(1, 'Position', position_dict.values())\ndf1.insert(2, \"Cost\", cost_dict.values())\ndf1.insert(3, \"Total_Points\", df1[list(range(1,GW+2))].sum(axis=1))\ndf1.insert(4, \"Points_Cost\", df1[\"Total_Points\"]/df1[\"Cost\"])\ndf1.insert(5, \"Points_Fixture\", df1[\"Total_Points\"]/GW)\ndf1.insert(6, \"Status\", status_dict.values())\ndf1.insert(7, \"Team\", team_dict.values())\ndf1 = df1.replace(0, np.NaN)\ndf1 = df1.sort_values(['Position','Points_Cost'], axis=0, ascending=False)\n\ndf1.head()", "_____no_output_____" ], [ "#Attackers\ndf1_attack = df1[df1['Position']==4]\ndf1_attack.sort_values(['Points_Fixture', 'Cost'], axis=0, ascending=False).head(20)", "_____no_output_____" ], [ "#Goalies\ndf1_gk = df1[df1['Position']==1]\ndf1_gk.sort_values(['Points_Fixture', 'Cost'], axis=0, ascending=False).head(10)", "_____no_output_____" ], [ "#Defs\ndf1_def = df1[df1['Position']==2]\ndf1_def.sort_values(['Points_Fixture', 'Cost'], axis=0, ascending=False).head(10)", "_____no_output_____" ], [ "#Mids\ndf1_mid = df1[df1['Position']==3]\ndf1_mid.sort_values(['Points_Fixture', 'Cost'], axis=0, ascending=False).head(10)", "_____no_output_____" ], [ "# Zero scores where no data\ng = df1_gk.fillna(0)\nd = df1_def.fillna(0)\nm = df1_mid.fillna(0)\na = df1_attack.fillna(0)\n\n# Last 6 match rolling average\n#r6 = list(range(GW-5, GW+1))\n#g['PFA6'] = g[r6].sum(axis=1)/6\n#d['PFA6'] = d[r6].sum(axis=1)/6\n#m['PFA6'] = m[r6].sum(axis=1)/6\n#a['PFA6'] = a[r6].sum(axis=1)/6\n#df1['PFA6'] = df1[r6].sum(axis=1)/6\n\n# Last 3 match rolling average\n#r3 = list(range(GW-2, GW+1))\n#g['PFA3'] = g[r3].sum(axis=1)/6\n#d['PFA3'] = d[r3].sum(axis=1)/6\n#m['PFA3'] = m[r3].sum(axis=1)/6\n#a['PFA3'] = a[r3].sum(axis=1)/6\n#df1['PFA3'] = df1[r3].sum(axis=1)/6\n\n# not injured\ng = g[g['Status']=='a']\nd = d[d['Status']=='a']\nm = m[m['Status']=='a']\na = a[a['Status']=='a']\n", "_____no_output_____" ], [ "# Team constellation\nMAX_NGK = 2\nMAX_NDEF = 5\nMAX_NMID = 5\nMAX_NFWD = 3\n\n#High Ownership Players\n\n# Otamendi (245), Alonso, Davies\nHO_DEF = np.array([])\n\n# Salah, Sterling 247, Mahrez 199\nHO_MID = np.array([199, 234])\n\n# Kane, (Firmino is 235), (Augero 257), Lukaku (285)\n# Kane is injured :(\n# 616 is Aubameyang\nHO_FWD = np.array([])", "_____no_output_____" ], [ "# Filter out DGW34 and DGW37 Teams\n# Removed BHA due to fixtures\n# LIV are there because of Salah / high ownership\n# dgw_teams=['TOT', 'CHE', 'MUN', 'SOU', 'LEI', 'ARS', 'LIV']\n\n#g = g[g['Team'].isin(dgw_teams)]\n#d = d[d['Team'].isin(dgw_teams)]\n#m = m[m['Team'].isin(dgw_teams)]\n#a = a[a['Team'].isin(dgw_teams)]", "_____no_output_____" ], [ "# Only pick from performing players who are not earmarked as High Ownership (HO)\n#np1_gk_f = g[(g['PFA6'] > np.percentile(g['PFA6'],60))].index.values\n#np1_def_f = d[(d['PFA6'] > np.percentile(d['PFA6'],65))].drop(HO_DEF).index.values\n#np1_mid_f = 
m[(m['PFA6'] > np.percentile(m['PFA6'],65))].drop(HO_MID).index.values\n#np1_fwd_f = a[(a['PFA6'] > np.percentile(a['PFA6'],75))].drop(HO_FWD).index.values\n", "_____no_output_____" ], [ "# How many players to pick in each position\npdef = MAX_NDEF - len(HO_DEF)\npmid = MAX_NMID - len(HO_MID)\npfwd = MAX_NFWD - len(HO_FWD)", "_____no_output_____" ], [ "# Max team budget\nMAX_COST = 1000", "_____no_output_____" ], [ "# Department of Brute force\n\nMAX_EPOCHS = 2000000\nINTERVAL = MAX_EPOCHS // 100\n\nepoch = 0\nmax_score = 60\nmax_tm = df1.sample(n=15)\n\ndef TimestampMillisec64():\n return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000) \n\n@jit(nopython=True, cache=True)\ndef pick_n(n, arr):\n rnd = np.random.choice(arr, n, replace=False)\n return rnd\n\n@jit(nopython=True, cache=True)\ndef build_team(gks, defs, mids, mfwd):\n tm = np.concatenate((HO_DEF,defs,HO_MID,mids,HO_FWD,mfwd,gks))\n return tm\n\nt1 = TimestampMillisec64()\nwhile (epoch < MAX_EPOCHS):\n epoch = epoch + 1\n if epoch%INTERVAL == 0:\n t2 = TimestampMillisec64()\n print(\"Time %d, Epoch %d ..\" % (round((t2-t1)/1000), epoch))\n t1 = t2\n \n tm = build_team(pick_n(MAX_NGK, np1_gk_f),\n pick_n(pdef, np1_def_f),\n pick_n(pmid, np1_mid_f),\n pick_n(pfwd, np1_fwd_f)\n )\n \n pd_max_tm = df1.loc[tm]\n tm_cost = pd_max_tm.sum(axis=0)['Cost']\n tm_score = pd_max_tm.sum(axis=0)['PFA6'] \n \n if ((tm_score > max_score) and (tm_cost < MAX_COST)):\n max_score = tm_score\n fname = os.path.join('output', 'DGW_1015_Team_Agg_%d_PFA6_%d.csv' % (max_score, TimestampMillisec64()))\n pd_max_tm.to_csv(path_or_buf=fname)\n print(\"Epoch: %d, PFA6: %d, Cost: %d\" % (epoch, tm_score, tm_cost))\n ", "Time 25, Epoch 20000 ..\nEpoch: 21912, PFA6: 70, Cost: 1014\nTime 25, Epoch 40000 ..\nTime 25, Epoch 60000 ..\nEpoch: 79086, PFA6: 70, Cost: 1014\nTime 25, Epoch 80000 ..\nTime 25, Epoch 100000 ..\nTime 25, Epoch 120000 ..\nEpoch: 121425, PFA6: 70, Cost: 1007\nEpoch: 123969, PFA6: 72, Cost: 1011\nTime 25, Epoch 140000 ..\nTime 25, Epoch 160000 ..\nTime 25, Epoch 180000 ..\nTime 25, Epoch 200000 ..\nTime 25, Epoch 220000 ..\nTime 25, Epoch 240000 ..\nTime 25, Epoch 260000 ..\nTime 25, Epoch 280000 ..\nTime 25, Epoch 300000 ..\nTime 25, Epoch 320000 ..\nTime 26, Epoch 340000 ..\nTime 25, Epoch 360000 ..\nTime 26, Epoch 380000 ..\nTime 25, Epoch 400000 ..\nTime 25, Epoch 420000 ..\nTime 25, Epoch 440000 ..\nTime 26, Epoch 460000 ..\nTime 26, Epoch 480000 ..\nTime 26, Epoch 500000 ..\nTime 25, Epoch 520000 ..\nTime 25, Epoch 540000 ..\nTime 25, Epoch 560000 ..\nTime 25, Epoch 580000 ..\nTime 25, Epoch 600000 ..\nTime 25, Epoch 620000 ..\nTime 25, Epoch 640000 ..\nTime 25, Epoch 660000 ..\nTime 25, Epoch 680000 ..\nTime 25, Epoch 700000 ..\nTime 25, Epoch 720000 ..\nTime 25, Epoch 740000 ..\nTime 25, Epoch 760000 ..\nTime 25, Epoch 780000 ..\nTime 25, Epoch 800000 ..\nTime 25, Epoch 820000 ..\nTime 25, Epoch 840000 ..\nTime 25, Epoch 860000 ..\nTime 25, Epoch 880000 ..\nTime 25, Epoch 900000 ..\nTime 25, Epoch 920000 ..\nTime 25, Epoch 940000 ..\nTime 25, Epoch 960000 ..\nTime 26, Epoch 980000 ..\nTime 26, Epoch 1000000 ..\nTime 26, Epoch 1020000 ..\nTime 26, Epoch 1040000 ..\nTime 26, Epoch 1060000 ..\nTime 26, Epoch 1080000 ..\nTime 25, Epoch 1100000 ..\nTime 25, Epoch 1120000 ..\nTime 26, Epoch 1140000 ..\nTime 26, Epoch 1160000 ..\nTime 26, Epoch 1180000 ..\nTime 26, Epoch 1200000 ..\nTime 26, Epoch 1220000 ..\nTime 26, Epoch 1240000 ..\nTime 26, Epoch 1260000 ..\nTime 26, Epoch 1280000 ..\nTime 25, Epoch 
1300000 ..\nTime 26, Epoch 1320000 ..\nTime 26, Epoch 1340000 ..\nTime 25, Epoch 1360000 ..\nTime 25, Epoch 1380000 ..\nTime 25, Epoch 1400000 ..\nTime 26, Epoch 1420000 ..\nTime 26, Epoch 1440000 ..\nTime 25, Epoch 1460000 ..\nTime 25, Epoch 1480000 ..\nTime 25, Epoch 1500000 ..\nTime 25, Epoch 1520000 ..\nTime 25, Epoch 1540000 ..\nTime 25, Epoch 1560000 ..\nTime 25, Epoch 1580000 ..\nTime 25, Epoch 1600000 ..\nTime 25, Epoch 1620000 ..\nTime 25, Epoch 1640000 ..\nTime 25, Epoch 1660000 ..\nTime 25, Epoch 1680000 ..\nTime 25, Epoch 1700000 ..\nTime 25, Epoch 1720000 ..\nTime 25, Epoch 1740000 ..\nTime 25, Epoch 1760000 ..\nTime 25, Epoch 1780000 ..\nTime 25, Epoch 1800000 ..\nTime 25, Epoch 1820000 ..\nTime 25, Epoch 1840000 ..\nTime 25, Epoch 1860000 ..\nTime 25, Epoch 1880000 ..\nTime 25, Epoch 1900000 ..\nTime 25, Epoch 1920000 ..\nTime 25, Epoch 1940000 ..\nTime 25, Epoch 1960000 ..\nTime 25, Epoch 1980000 ..\nTime 25, Epoch 2000000 ..\n" ], [ "%%timeit\npick_n(pmid, np1_mid_f)", "580 ns ± 4.93 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n" ], [ "%%timeit\nbuild_team(pick_n(MAX_NGK, np1_gk_f),\n pick_n(pdef, np1_def_f),\n pick_n(pmid, np1_mid_f),\n pick_n(pfwd, np1_fwd_f)\n )", "3.06 µs ± 16.2 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n" ], [ "pd_max_tm.sum(axis=0)['Cost']", "_____no_output_____" ], [ "pd_max_tm.sort_values(['Position', 'PFA6', 'Cost'], axis=0, ascending=False)", "_____no_output_____" ], [ "sns.distplot(g['PFA6'])", "_____no_output_____" ], [ "np.percentile(g['PFA6'],80)", "_____no_output_____" ], [ "sns.distplot(d['PFA6'])", "_____no_output_____" ], [ "np.percentile(d['PFA6'],95)", "_____no_output_____" ], [ "sns.distplot(m['PFA6'])", "_____no_output_____" ], [ "np.percentile(m['PFA6'],95)", "_____no_output_____" ], [ "sns.distplot(a['PFA6'])", "_____no_output_____" ], [ "np.percentile(a['PFA6'],95)", "_____no_output_____" ], [ "attributes = ['Points_Cost','Points_Fixture', 'PFA6']\n\ndata_attributes = a[attributes]\n\ndata_attributes.head()", "_____no_output_____" ], [ "from sklearn.cluster import KMeans\nfrom sklearn import metrics\n\n# Create silhouette score dictionary\ns_score_dict = {}\nfor i in range(2,11):\n km = KMeans(n_clusters=i, random_state=1)\n l = km.fit_predict(data_attributes)\n s_s = metrics.silhouette_score(data_attributes, l)\n s_score_dict[i] = [s_s]\n\n# Print out `s_score_dict`\nprint(s_score_dict)", "{2: [0.53395239660382943], 3: [0.53947549024959995], 4: [0.51124320267135925], 5: [0.5006202416221357], 6: [0.49962716968016058], 7: [0.50252860325323301], 8: [0.51645336848634349], 9: [0.51450802491744008], 10: [0.51665883304268334]}\n" ], [ "# Create K-means model and determine euclidian distances for each data point\nkmeans_model = KMeans(n_clusters=4, random_state=1)\ndistances = kmeans_model.fit_transform(data_attributes)\n\n# Create scatter plot using labels from K-means model as color\nlabels = kmeans_model.labels_\n\nplt.scatter(distances[:,0], distances[:,1], c=labels)\nplt.title('Kmeans Clusters')\n\nplt.show()", "_____no_output_____" ], [ "# Add labels from K-means model to `df` DataFrame and attributes list\na['labels'] = labels\nattributes.append('labels')", "_____no_output_____" ], [ "a[a['labels']==3].head(20)", "_____no_output_____" ], [ "a[a['labels']==2].head(20)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd21a4305c28add388a04ac6c233a512ea57472
672,373
ipynb
Jupyter Notebook
prep_data/image_data_guide/04c_pytorch_training.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
5
2019-01-19T23:53:35.000Z
2022-01-29T14:04:31.000Z
prep_data/image_data_guide/04c_pytorch_training.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
2
2021-08-25T16:15:24.000Z
2022-02-10T02:49:50.000Z
prep_data/image_data_guide/04c_pytorch_training.ipynb
P15241328/amazon-sagemaker-examples
00cba545be0822474f070321a62d22865187e09b
[ "Apache-2.0" ]
7
2020-03-04T22:23:51.000Z
2021-07-13T14:05:46.000Z
758.886005
637,516
0.946836
[ [ [ "# PyTorch Training with SageMaker (Part 4/4)", "_____no_output_____" ], [ "Download | Structure | Preprocessing (PyTorch) | **Train Model (PyTorch)** ", "_____no_output_____" ], [ "**Notes**: \n* This notebook should be used with the conda_pytorch_latest_p36 kernel\n* This notebook is part of a series of notebooks beginning with `01_download_data`, `02_structuring_data` and `03_pytorch_preprocessing`.\n* You can also explore preprocessing with SageMaker's built-in algorithms and TensorFlow by running `04a_builtin_training` and `04c_tensorflow_training`, respectively.", "_____no_output_____" ], [ "<pre>\n</pre>", "_____no_output_____" ], [ "In this notebook, you will train a model using the SageMaker SDK's TensorFlow framework on a remote EC2 instance. After training, you will load the trained model for predicting animal labels on your test dataset. ", "_____no_output_____" ], [ "<pre>\n</pre>", "_____no_output_____" ], [ "## Overview\n* #### [Dependencies](#idg4c.1)\n* #### [Algorithm hyperparameters](#idg4c.2)\n* #### [Review the training script](#idg4c.3)\n* #### [Estimator configuration](#idg4c.4)\n* #### [Training on EC2 instances](#idg4c.5)\n* #### [Load trained model and predict](#idg4c.6)", "_____no_output_____" ], [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.1'></a>\n## Dependencies\n___", "_____no_output_____" ], [ "### Import packages and check SageMaker version", "_____no_output_____" ] ], [ [ "import json\nimport torch\nimport tarfile\nimport pickle\nimport matplotlib.pyplot as plt\nimport torchvision as tv\nimport pathlib # Path management tool (standard library)\nimport subprocess # Runs shell commands via Python (standard library)\nimport sagemaker # SageMaker Python SDK\nfrom sagemaker.pytorch import PyTorch # PyTorch Estimator for TensorFlow", "_____no_output_____" ] ], [ [ "### Load S3 bucket name & category labels\nThe `category_labels` file was generated from the first notebook in this series `01_download_data.ipynb`. You will need to run that notebook before running the code here. \n\nAn S3 bucket for this guide was created in Part 3.", "_____no_output_____" ] ], [ [ "with open('pickled_data/category_labels.pickle', 'rb') as f:\n category_labels = pickle.load(f)\n\nwith open('pickled_data/pytorch_bucket_name.pickle', 'rb') as f:\n bucket_name = pickle.load(f)\nprint(f'Using bucket: {bucket_name}')", "_____no_output_____" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.2'></a>\n## Algorithm hyperparameters\n___\nHyperparamters represent the tuning knobs for our algorithm which we set before training begins. Typically they are pre-set to defaults so if we don't specify them we can still run the training algorithm, but they usually need tweaking to get optimal results. What these values should be depend entirely on the dataset. Unfortunately, there's no formula to tell us what the best settings are, we just have to try them ourselves and see what we get, but there are best practices and tips to help guide us in choosing them.\n\n* **Optimizer** - The optimizer refers to the optimization algorithm being used to choose the best weights. For deep learning on image data, SGD or ADAM is typically used.\n\n* **Learning Rate** - After each batch of training we update the model's weights to give us the best possible results for that batch. The learning rate controls by how much we should update the weights. Best practices dictate a value between 0.2 and .001, typically never going higher than 1. 
The higher the learning rate, the faster your training will converge to the optimal weights, but going too fast can lead you to overshoot the target. In this example, we're using the weights from a pre-trained model so we'd want to start with a lower learning rate because the weights have already been optimized and we don't want to move too far away from them.\n\n* **Epochs** - An epoch refers to one cycle through the training set and having more epochs to train means having more opportunities to improve accuracy. Suitable values range from 5 to 25 epochs depending on your time and budget constraints. Ideally, the right number of epochs is right before your validation accuracy plateaus.\n\n* **Batch Size** - Training on batches reduces the amount of data you need to hold in RAM and can speed up the training algorithm. For these reasons the training data is nearly always batched. The optimal batch size will depend on the dataset, how large the images are and how much RAM the training computer has. For a dataset like ours reasonable values would be between 8 and 64 images per batch.\n\n* **Criterion** - This is the type of loss function that will be used by the optimizer to update the model's weights during training. For training on a dataset with more than two classes, the most common loss function is Cross-Entropy Loss.", "_____no_output_____" ], [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.3'></a>\n## Review the training script\n___", "_____no_output_____" ], [ "### The training function\nUnlike other frameworks, PyTorch doesn't use model objects with a `.fit()` method to train them. Instead the user must define their own training function. This adds more code to our training script, but offers more transparency for customizing and debugging the model training. This is one major reason why researchers enjoy using PyTorch. 
In this example we use the training fuction defined in the PyTorch tutorial for transfer learning here: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html", "_____no_output_____" ] ], [ [ "!pygmentize \"training_pytorch/pytorch_train.py\" | sed -n 12,78p", "\u001b[37m# the training fuction is based off the sample training fuction provided\u001b[39;49;00m\n\u001b[37m# by Pytorch in their transfer learning tutorial:\u001b[39;49;00m\n\u001b[37m# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html\u001b[39;49;00m\n\n\u001b[34mdef\u001b[39;49;00m \u001b[32mtrain\u001b[39;49;00m(model, criterion, optimizer, scheduler, epochs=\u001b[34m1\u001b[39;49;00m):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = \u001b[34m0.0\u001b[39;49;00m\n\n \u001b[34mfor\u001b[39;49;00m epoch \u001b[35min\u001b[39;49;00m \u001b[36mrange\u001b[39;49;00m(epochs):\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mEpoch \u001b[39;49;00m\u001b[33m{}\u001b[39;49;00m\u001b[33m/\u001b[39;49;00m\u001b[33m{}\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m.format(epoch, epochs - \u001b[34m1\u001b[39;49;00m))\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33m-\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m * \u001b[34m10\u001b[39;49;00m)\n\n \u001b[37m# Each epoch has a training and validation phase\u001b[39;49;00m\n \u001b[34mfor\u001b[39;49;00m phase \u001b[35min\u001b[39;49;00m [\u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[33m'\u001b[39;49;00m\u001b[33mval\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m]:\n \u001b[34mif\u001b[39;49;00m phase == \u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n model.train() \u001b[37m# Set model to training mode\u001b[39;49;00m\n \u001b[34melse\u001b[39;49;00m:\n model.eval() \u001b[37m# Set model to evaluate mode\u001b[39;49;00m\n\n running_loss = \u001b[34m0.0\u001b[39;49;00m\n running_corrects = \u001b[34m0\u001b[39;49;00m\n\n \u001b[37m# Iterate over data.\u001b[39;49;00m\n \u001b[34mfor\u001b[39;49;00m inputs, labels \u001b[35min\u001b[39;49;00m dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n \u001b[37m# zero the parameter gradients\u001b[39;49;00m\n optimizer.zero_grad()\n\n \u001b[37m# forward\u001b[39;49;00m\n \u001b[37m# track history if only in train\u001b[39;49;00m\n \u001b[34mwith\u001b[39;49;00m torch.set_grad_enabled(phase == \u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m):\n outputs = model(inputs)\n _, preds = torch.max(outputs, \u001b[34m1\u001b[39;49;00m)\n loss = criterion(outputs, labels)\n\n \u001b[37m# backward + optimize only if in training phase\u001b[39;49;00m\n \u001b[34mif\u001b[39;49;00m phase == \u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n loss.backward()\n optimizer.step()\n\n \u001b[37m# statistics\u001b[39;49;00m\n running_loss += loss.item() * inputs.size(\u001b[34m0\u001b[39;49;00m)\n running_corrects += torch.sum(preds == labels.data)\n \u001b[34mif\u001b[39;49;00m phase == \u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n scheduler.step()\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33m{}\u001b[39;49;00m\u001b[33m Loss: 
\u001b[39;49;00m\u001b[33m{:.4f}\u001b[39;49;00m\u001b[33m Acc: \u001b[39;49;00m\u001b[33m{:.4f}\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m.format(\n phase, epoch_loss, epoch_acc))\n\n \u001b[37m# deep copy the model\u001b[39;49;00m\n \u001b[34mif\u001b[39;49;00m phase == \u001b[33m'\u001b[39;49;00m\u001b[33mval\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m \u001b[35mand\u001b[39;49;00m epoch_acc > best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(model.state_dict())\n\n \u001b[36mprint\u001b[39;49;00m()\n\n time_elapsed = time.time() - since\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mTraining complete in \u001b[39;49;00m\u001b[33m{:.0f}\u001b[39;49;00m\u001b[33mm \u001b[39;49;00m\u001b[33m{:.0f}\u001b[39;49;00m\u001b[33ms\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m.format(\n time_elapsed // \u001b[34m60\u001b[39;49;00m, time_elapsed % \u001b[34m60\u001b[39;49;00m))\n \u001b[36mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mBest val Acc: \u001b[39;49;00m\u001b[33m{:4f}\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m.format(best_acc))\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "### Execution safety\nFor safety we wrap the training code in this standard if statement though it is not strictly required", "_____no_output_____" ] ], [ [ "!pygmentize \"training_pytorch/pytorch_train.py\" | sed -n 81p", "\u001b[34mif\u001b[39;49;00m \u001b[31m__name__\u001b[39;49;00m ==\u001b[33m'\u001b[39;49;00m\u001b[33m__main__\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "### Parse argument variables\nThese argument variables are passed via the hyperparameter argument for the estimator configuration.", "_____no_output_____" ] ], [ [ "!pygmentize \"training_pytorch/pytorch_train.py\" | sed -n 83,90p", " parser = argparse.ArgumentParser()\n\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--epochs\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m50\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--batch-size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m4\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--learning-rate\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m0.001\u001b[39;49;00m)\n parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--workers\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m0\u001b[39;49;00m)\n\n args, _ = parser.parse_known_args()\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "### Define data transformations and load data\nThese are the transformations from the pre-processing guide. 
Since the data was resized before it was saved to S3, we don't need to do any resizing except for random cropping of the training dataset and center cropping the valications dataset.", "_____no_output_____" ] ], [ [ "!pygmentize \"training_pytorch/pytorch_train.py\" | sed -n 92,127p", " data_dir = pathlib.Path(\u001b[33m'\u001b[39;49;00m\u001b[33m/opt/ml/input/data\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n\n \u001b[37m# define transformations\u001b[39;49;00m\n data_transforms = {\n \u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m: tv.transforms.Compose([\n tv.transforms.RandomResizedCrop(\u001b[34m224\u001b[39;49;00m),\n tv.transforms.RandomHorizontalFlip(p=\u001b[34m0.5\u001b[39;49;00m),\n tv.transforms.RandomVerticalFlip(p=\u001b[34m0.5\u001b[39;49;00m),\n tv.transforms.ColorJitter(\n brightness=.\u001b[34m2\u001b[39;49;00m, \n contrast=.\u001b[34m2\u001b[39;49;00m, \n saturation=.\u001b[34m2\u001b[39;49;00m, \n hue=.\u001b[34m2\u001b[39;49;00m),\n tv.transforms.ToTensor()\n ]),\n \u001b[33m'\u001b[39;49;00m\u001b[33mval\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m: tv.transforms.Compose([\n tv.transforms.CenterCrop(\u001b[34m224\u001b[39;49;00m),\n tv.transforms.ToTensor()\n ]),\n }\n \n \u001b[37m# create datasets and dataloaders\u001b[39;49;00m\n splits = [\u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[33m'\u001b[39;49;00m\u001b[33mval\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m]\n datasets = {}\n \u001b[34mfor\u001b[39;49;00m s \u001b[35min\u001b[39;49;00m splits:\n datasets[s] = tv.datasets.ImageFolder(\n root = data_dir / s, \n transform = data_transforms[s])\n\n dataloaders = {}\n \u001b[34mfor\u001b[39;49;00m s \u001b[35min\u001b[39;49;00m splits:\n dataloaders[s] = torch.utils.data.DataLoader(\n datasets[s], \n batch_size=args.batch_size, \n shuffle=\u001b[34mTrue\u001b[39;49;00m, \n num_workers=args.workers)\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "### Detect device and create and modify the base model\nThe base model for this guide is a RestNet18 model using pre-trained weights. We need to modify the base model by replacing the fully connected layer with a dense layer to classify our animal images. 
The model is then loaded for the device (GPU or CPU) that our EC@ instance is using.", "_____no_output_____" ] ], [ [ "!pygmentize \"pytorch_train/pytorch_train-revised.py\" | sed -n 128,140p", " dataset_sizes = {x: \u001b[36mlen\u001b[39;49;00m(datasets[x]) \u001b[34mfor\u001b[39;49;00m x \u001b[35min\u001b[39;49;00m [\u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[33m'\u001b[39;49;00m\u001b[33mval\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m]}\n num_classes = \u001b[36mlen\u001b[39;49;00m(datasets[\u001b[33m'\u001b[39;49;00m\u001b[33mtrain\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m].classes)\n device = torch.device(\u001b[33m\"\u001b[39;49;00m\u001b[33mcuda:0\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m \u001b[34mif\u001b[39;49;00m torch.cuda.is_available() \u001b[34melse\u001b[39;49;00m \u001b[33m\"\u001b[39;49;00m\u001b[33mcpu\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m)\n \u001b[36mprint\u001b[39;49;00m(device)\n \n model = tv.models.resnet18(pretrained=\u001b[34mTrue\u001b[39;49;00m)\n \n \u001b[34mfor\u001b[39;49;00m param \u001b[35min\u001b[39;49;00m model.parameters():\n param.requires_grad = \u001b[34mFalse\u001b[39;49;00m\n \n num_ftrs = model.fc.in_features\n model.fc = torch.nn.Linear(num_ftrs, num_classes)\n model = model.to(device)\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "### Define loss criterion, optimization algorithm and train the model\nThe weights for the epoch with the best accuracy are saved so we can load the model after training and make predictions on our test data.", "_____no_output_____" ] ], [ [ "!pygmentize \"pytorch_train/pytorch_train-revised.py\" | sed -n 142,150p", " criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=\u001b[34m0.9\u001b[39;49;00m)\n exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=\u001b[34m7\u001b[39;49;00m, gamma=\u001b[34m0.1\u001b[39;49;00m)\n model = train(model, criterion, optimizer, exp_lr_scheduler, epochs=args.epochs)\n \n model_dir = pathlib.Path(args.model_dir,)\n \n \u001b[34mwith\u001b[39;49;00m \u001b[36mopen\u001b[39;49;00m(model_dir / \u001b[33m'\u001b[39;49;00m\u001b[33mmodel.pth\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[33m'\u001b[39;49;00m\u001b[33mwb\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m) \u001b[34mas\u001b[39;49;00m f:\n torch.save(model.state_dict(), f)\n" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.4'></a>\n## Estimator configuration\n___\n\nThese define the the resources to use for training and how they are configured. Here are some important one to single out:\n\n* **entry_point (str)** – Path (absolute or relative) to the Python source file which should be executed as the entry point to training. If source_dir is specified, then entry_point must point to a file located at the root of source_dir.\n\n* **framework_version (str)** – PyTorch version you want to use for executing your model training code. Defaults to None. Required unless image_uri is provided. List of supported versions: https://github.com/aws/sagemaker-python-sdk#pytorch-sagemaker-estimators.\n\n* **py_version (str)** – Python version you want to use for executing your model training code. One of ‘py2’ or ‘py3’. Defaults to None. Required unless image_uri is provided.\n\n* **source_dir (str)** – Path (absolute, relative or an S3 URI) to a directory with any other training source code dependencies aside from the entry point file (default: None). 
If source_dir is an S3 URI, it must point to a tar.gz file. Structure within this directory are preserved when training on Amazon SageMaker.\n\n* **dependencies (list[str])** – A list of paths to directories (absolute or relative) with any additional libraries that will be exported to the container (default: []). The library folders will be copied to SageMaker in the same folder where the entrypoint is copied. If ‘git_config’ is provided, ‘dependencies’ should be a list of relative locations to directories with any additional libraries needed in the Git repo.\n\n* **git_config (dict[str, str])** – Git configurations used for cloning files, including repo, branch, commit, 2FA_enabled, username, password and token. The repo field is required. All other fields are optional. repo specifies the Git repository where your training script is stored. If you don’t provide branch, the default value ‘master’ is used. If you don’t provide commit, the latest commit in the specified branch is used.\n\n* **role (str)** – An AWS IAM role (either name or full ARN). The Amazon SageMaker training jobs and APIs that create Amazon SageMaker endpoints use this role to access training data and model artifacts. After the endpoint is created, the inference code might use the IAM role, if it needs to access an AWS resource.\n\n* **instance_count (int)** – Number of Amazon EC2 instances to use for training.\n\n* **instance_type (str)** – Type of EC2 instance to use for training, for example, ‘ml.c4.xlarge’.\n\n* **volume_size (int)** – Size in GB of the EBS volume to use for storing input data during training (default: 30). Must be large enough to store training data if File Mode is used (which is the default).\n\n* **model_uri (str)** – URI where a pre-trained model is stored, either locally or in S3 (default: None). If specified, the estimator will create a channel pointing to the model so the training job can download it. This model can be a ‘model.tar.gz’ from a previous training job, or other artifacts coming from a different source. In local mode, this should point to the path in which the model is located and not the file itself, as local Docker containers will try to mount the URI as a volume.\n\n* **output_path (str)** - S3 location for saving the training result (model artifacts and output files). If not specified, results are stored to a default bucket. If the bucket with the specific name does not exist, the estimator creates the bucket during the fit() method execution. file:// urls are used for local mode. 
For example: ‘file://model/’ will save to the model folder in the current directory.", "_____no_output_____" ], [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.5'></a>\n## Training on an EC2 instance\n___\nNow that we've worked out any bugs in our training script we can send the training job to an EC2 instance with a GPU with a larger batch size, number of workers and number of epochs.", "_____no_output_____" ], [ "### Define the hyperparameters for EC2 training", "_____no_output_____" ] ], [ [ "hyperparameters = {\n    'epochs': 10, \n    'batch-size': 64, \n    'learning-rate': 0.001, \n    'workers': 4\n}", "_____no_output_____" ] ], [ [ "### Define the estimator configuration for EC2 training", "_____no_output_____" ] ], [ [ "estimator_config = {\n    'entry_point': 'pytorch_train.py',\n    'source_dir': 'training_pytorch',\n    'framework_version': '1.6.0',\n    'py_version': 'py3',\n    'instance_type': 'ml.p3.2xlarge',\n    'instance_count': 1,\n    'role': sagemaker.get_execution_role(),\n    'output_path': f's3://{bucket_name}',\n    'hyperparameters': hyperparameters\n}", "_____no_output_____" ] ], [ [ "### Create the estimator configured for EC2 training", "_____no_output_____" ] ], [ [ "pytorch_estimator = PyTorch(**estimator_config)", "_____no_output_____" ] ], [ [ "### Define the data channels using the proper S3 URIs", "_____no_output_____" ] ], [ [ "data_channels = {\n    'train': f's3://{bucket_name}/data/train',\n    'val': f's3://{bucket_name}/data/val'\n}", "_____no_output_____" ], [ "pytorch_estimator.fit(data_channels)", "_____no_output_____" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "<a id='idg4c.6'></a>\n## Load the Trained Model and Predict\n___\nAfter training the model and saving its parameters (weights) to S3, we can retrieve the parameters and load them back into PyTorch to generate predictions.", "_____no_output_____" ], [ "### Download the trained weights from S3", "_____no_output_____" ] ], [ [ "sagemaker.s3.S3Downloader().download(pytorch_estimator.model_data, 'training_pytorch')\ntf = tarfile.open('training_pytorch/model.tar.gz')\ntf.extractall('training_pytorch')", "_____no_output_____" ] ], [ [ "### Load the weights back into a PyTorch model\nSince the model was trained on a GPU we need to use the `map_location=torch.device('cpu')` kwarg to load the model on a CPU backed notebook instance.", "_____no_output_____" ] ], [ [ "model = tv.models.resnet18() \nnum_ftrs = model.fc.in_features\nmodel.fc = torch.nn.Linear(num_ftrs, 11)\nmodel.load_state_dict(torch.load('training_pytorch/model.pt', map_location=torch.device('cpu')))\nmodel.eval();", "_____no_output_____" ] ], [ [ "### Link the model predictions (0 to 10) back to original class names (bear to zebra)\nTo map the index number back to the category label, we need to use the category labels created in the first guide of this series (Downloading Data).", "_____no_output_____" ] ], [ [ "category_labels = {idx: name for idx, name in enumerate(sorted(category_labels.values()))}\ncategory_labels", "_____no_output_____" ] ], [ [ "### Load validation images for predictions", "_____no_output_____" ] ], [ [ "test_ds = sample = tv.datasets.ImageFolder(\n    root='data_resized/test', \n    transform=tv.transforms.Compose([\n        tv.transforms.CenterCrop(244),\n        tv.transforms.ToTensor()]\n    )\n) \n\ntest_ds = torch.utils.data.DataLoader(test_ds, batch_size=4, shuffle=True)", "_____no_output_____" ] ], [ [ "### Show validation images with model predictions", "_____no_output_____" ] ], [ [ "rows = 3\ncols = 4\nfig, axs = plt.subplots(rows, cols, 
figsize=(10, 7))\n\nfor row in range(rows):\n batch = next(iter(test_ds))\n images, labels = batch\n _, preds = torch.max(model(images), 1)\n preds = preds.numpy()\n for col, image in enumerate(images):\n ax = axs[row, col]\n ax.imshow(image.permute(1, 2, 0))\n ax.axis('off')\n ax.set_title(f'predicted: {category_labels[preds[col]]}')\n\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "## Rollback to default version of SDK and PyTorch\nOnly do this if you're done with this guide and want to use the same kernel for other notebooks with an incompatible version of the SageMaker SDK or PyTorch.", "_____no_output_____" ] ], [ [ "# print(f'Original version: sagemaker {original_sagemaker_version[0]}, torch {original_pytorch_version[0]}')\n# print(f'Current version: sagemaker {sagemaker.__version__}, torch {torch.__version__}')\n# print('')\n# print(f'Rolling back to sagemaker {original_sagemaker_version[0]}, torch {original_pytorch_version[0]}')\n# print('Restart notebook kernel to use changes.')\n# print('')\n# s = f'sagemaker=={original_sagemaker_version[0]} torch=={original_pytorch_version[0]}'\n# !{sys.executable} -m pip install {s}", "_____no_output_____" ] ], [ [ "<pre>\n</pre>", "_____no_output_____" ], [ "## Next Steps\nThis concludes the Image Data Guide for SageMaker's PyTorch framework. If you'd like to deploy your model and get predictions on your test data, all the info you'll need to get going can be foud here: [Deploy Models for Inference](https://docs.aws.amazon.com/sagemaker/latest/dg/deploy-model.html)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ecd231fe88f4dc194192bc87131c1bf6ac780d7c
79,065
ipynb
Jupyter Notebook
Wikipedia network generator.ipynb
matheusmas132/wikipedia-pages-analysis
643dfbfbb37beeefba1620d24fb36fa96dc228d9
[ "ADSL" ]
null
null
null
Wikipedia network generator.ipynb
matheusmas132/wikipedia-pages-analysis
643dfbfbb37beeefba1620d24fb36fa96dc228d9
[ "ADSL" ]
null
null
null
Wikipedia network generator.ipynb
matheusmas132/wikipedia-pages-analysis
643dfbfbb37beeefba1620d24fb36fa96dc228d9
[ "ADSL" ]
null
null
null
79,065
79,065
0.796623
[ [ [ "# 1.0 Case Study: constructing a network of wikipedia pages", "_____no_output_____" ], [ "## 1.1 Introduction", "_____no_output_____" ], [ "So far we have learned two ways of constructing a complex network:\n- from a CSV file \n- manually\n\nWhat is hard for small networks may be impossible for medium-to-large scale networks; it may be impossible even for small networks if you must repeat the analysis many\ntimes. The case study in this lesson shows you **how to construct a large\nnetwork in an easy way**: \n- automatically collecting node and edge data from the Internet.\n\nThe other goal of this study (aside from mastering new network construction\ntechniques) is quite pragmatic. \n- Wouldn’t you want to know where the **complex\nnetwork analysis** fits in the context of other subjects and disciplines? \n\nAn answer to this question is near at hand: [on Wikipedia](https://en.wikipedia.org/wiki/Complex_network).\n\n\nLet’s start with the Wikipedia page about complex networks—the seed page.\n(Unfortunately, there is no page on complex network analysis itself.) The page\nbody has external links and links to other Wikipedia pages. Those other pages\npresumably are somewhat related to complex networks, or else why would\nthe Wikipedia editors provide them?\nTo build a network out of the seed page and other relevant pages, **let’s treat\nthe pages (and the respective Wikipedia subjects) as the network nodes and\nthe links between the pages as the network edges**. You will use snowball\nsampling (a breadth-first search or [BFS algorithm](https://en.wikipedia.org/wiki/Breadth-first_search)) to discover all the nodes and edges of interest.\n\n<img width=\"200\" src=\"https://drive.google.com/uc?export=view&id=1t9ZXB5Q-wRt_qWbf8eU_1RiTwVGaStHU\">\n\nAs a result, you will have a network of all pages related to complex networks\nand hopefully, you will make some conclusions about it.", "_____no_output_____" ], [ "## 1.2 Get the Data, Build the Network", "_____no_output_____" ], [ "The first half of the project script consists of:\n\n1. the initialization prologue\n2. a heavy-duty loop that retrieves the Wikipedia pages and simultaneously builds the network of nodes and edges.\n\nLet’s first import all necessary modules. We will need:\n\n- the module **wikipedia** for fetching and exploring Wikipedia pages\n- the operator **itemgetter** for sorting a list of tuples\n- and, naturally, **networkx** itself.\n\nTo target the **snowballing process**, define the constant **SEED, the name of the\nstarting page**. \n\n> As a side note, by changing the name of the seed page, you can\napply this analysis to any other subject on Wikipedia.\n\n\nLast but not least, when you start the snowballing, you will eventually (and\nquite soon) bump into the pages describing ISBN and ISSN numbers, the\narXiv, PubMed, and the like. Almost all other Wikipedia pages refer to one or\nmore of those pages. This hyper-connectedness transforms any network into\na collection of almost perfect gigantic stars, making all Wikipedia-based networks\nlook similar. **To avoid the stardom syndrome**, treat the known “star”\npages as stop words in information retrieval—in other words, ignore any links\nto them. \n\nConstructing the **black list of stop words**, STOPS, is a matter of trial\nand error. 
We put thirteen subjects on it; you may want to add more when you\ncome across other “stars.” We also excluded pages whose names begin with\n**\"List of\"**, because they are simply lists of other subjects.", "_____no_output_____" ] ], [ [ "# always check the current version in github\n!pip install networkx==2.6.2", "Requirement already satisfied: networkx==2.6.2 in /usr/local/lib/python3.7/dist-packages (2.6.2)\n" ], [ "!pip install wikipedia", "Collecting wikipedia\n Downloading wikipedia-1.4.0.tar.gz (27 kB)\nRequirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.7/dist-packages (from wikipedia) (4.6.3)\nRequirement already satisfied: requests<3.0.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from wikipedia) (2.23.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (1.24.3)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (2021.5.30)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests<3.0.0,>=2.0.0->wikipedia) (2.10)\nBuilding wheels for collected packages: wikipedia\n Building wheel for wikipedia (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for wikipedia: filename=wikipedia-1.4.0-py3-none-any.whl size=11696 sha256=25e22d9819c75644c6b9784ccb9b7aee7fdd1d1261bd8281c8c3f6ab291be8f4\n Stored in directory: /root/.cache/pip/wheels/15/93/6d/5b2c68b8a64c7a7a04947b4ed6d89fb557dcc6bc27d1d7f3ba\nSuccessfully built wikipedia\nInstalling collected packages: wikipedia\nSuccessfully installed wikipedia-1.4.0\n" ], [ "from operator import itemgetter\nimport networkx as nx\nimport wikipedia", "_____no_output_____" ], [ "nx.__version__", "_____no_output_____" ], [ "#https://en.wikipedia.org/wiki/Pantheon,_Rome\nSEED = \"Pantheon,_Rome\".title()", "_____no_output_____" ] ], [ [ "The next code fragment deals with setting up the **snowballing process**. A\nbreadth-first search, or BFS (sometimes known to computer programmers\nas a snowballing algorithm), must remember which **pages have been already\nprocessed** and which have been **discovered but not yet processed**. \n- the former are stored in the set **done_set**; \n- the latter, in the list **todo_lst** and **todo_set**. \n\nYou need two data structures for the unprocessed pages because you want to\nknow whether a page has been already recorded (an unordered lookup, *todo_set*) and\nwhich page is the next to be processed (an ordered lookup, *todo_lst*). \n\n\nSnowballing an extensive network—and Wikipedia with 6,196,713 articles in\nthe English segment alone can produce a huge network!—takes considerable\ntime.\n\n<img width=\"300\" src=\"https://drive.google.com/uc?export=view&id=1cOpkuhQEHJ3UxQOP3hoCd3KzSlqyf1AG\">\n\nSuppose you start with one seed node, and let’s say it has $N\\approx 100$ neighbors.\nEach of them has $N$ neighbors, too, to the total of $\\approx N+N×N$ nodes. The\nthird round of discovery adds $\\approx N×N×N$ more nodes. The time to shave each\nnext layer of nodes grows exponentially. For this exercise, let’s process only\nthe seed node itself and its immediate neighbors (layers 0 and 1). Processing\nlayer 2 is still feasible, but layer 3 requires $N×N×N×N\\approx 10^8$ page downloads. 
To keep track of the distance from the currently processed node to the seed, store both the layer to which a node belongs and the node name together as a tuple on the **todo_lst** list.", "_____no_output_____" ] ], [ [ "todo_lst = [(0, SEED)] # The SEED is in the layer 0\ntodo_set = set(SEED) # The SEED itself\ndone_set = set() # Nothing is done yet", "_____no_output_____" ] ], [ [ "**The output of the exercise is a NetworkX graph**. The next fragment will create\nan empty directed graph that will later absorb discovered nodes and edges.\n\n> We choose a **directed graph** because the edges that represent HTML links\nare naturally directed: a link from page A to page B does not imply a reciprocal\nlink. \n\nThe same fragment primes the algorithm by extracting the first “to-do” item\n(both its layer and page name) from the namesake list.\n\n", "_____no_output_____" ] ], [ [ "g = nx.DiGraph()\nlayer, page = todo_lst[0]", "_____no_output_____" ] ], [ [ "It may take a fraction of a second to execute the first five lines of the script.\nIt may take the whole next year or longer to finish the next twenty lines\nbecause they contain the main collection/construction loop of the project.", "_____no_output_____" ] ], [ [ "%%time\nwhile layer < 2:\n # Remove the name page of the current page from the todo_lst, \n # and add it to the set of processed pages. \n # If the script encounters this page again, it will skip over it.\n del todo_lst[0]\n done_set.add(page)\n \n # Show progress\n print(layer, page) \n \n # Attempt to download the selected page.\n try:\n wiki = wikipedia.page(page)\n except:\n layer, page = todo_lst[0]\n print(\"Could not load\", page)\n continue\n \n for link in wiki.links:\n link = link.title()\n if not link.startswith(\"List Of\"):\n if link not in todo_set and link not in done_set:\n todo_lst.append((layer + 1, link))\n todo_set.add(link)\n g.add_edge(page, link)\n layer, page = todo_lst[0]", "1 Accademia Nazionale Di Santa Cecilia Musical Instruments Museum\n1 Adiabene\n1 Agostino Rivarola\n1 Alessandro Algardi\n1 Alessandro Specchi\n1 Altar\n" ], [ "print(\"{} nodes, {} edges\".format(len(g), nx.number_of_edges(g)))", "51292 nodes, 240248 edges\n" ], [ "STOPS", "_____no_output_____" ] ], [ [ "The network of interest is now in the variable g. But it is “dirty”: inaccurate, incomplete, and erroneous.", "_____no_output_____" ], [ "## 1.3 Eliminate Duplicates", "_____no_output_____" ], [ "Many Wikipedia pages exist under two or more names. For example, there\nare pages about **Complex Network** and **Complex Networks**. The latter redirects\nto the former, but NetworkX does not know about the redirection.\n\nAccurately merging all duplicate nodes involves **natural language processing\n(NLP) tools that are outside of the scope of this course**. It may suffice to join\nonly those nodes that differ by the presence/absence of the letter s at the end\nor a hyphen in the middle.\n\nStart removing self-loops (pages referring to themselves). The loops don’t change\nthe network properties but affect the correctness of duplicate node elimination.\nNow, you need a list of at least some duplicate nodes. You can build it by\nlooking at each node in g and checking if a node with the same name, but\nwith an s at the end, is also in g. \n\nPass each pair of duplicated node names to the function **nx.contracted_nodes(g,u,v)** that merges node v into node u in the graph g. The function reassigns all edges previously incident to v, to u. 
If you don’t pass the option **self_loops=False**, the function converts an edge from v to u (if any) to a self-loop.", "_____no_output_____" ] ], [ [ "# make a copy of raw graph\noriginal = g.copy()", "_____no_output_____" ], [ "# remove self loops\ng.remove_edges_from(nx.selfloop_edges(g))\n\n# identify duplicates like that: 'network' and 'networks'\nduplicates = [(node, node + \"s\") \n for node in g if node + \"s\" in g\n ]\n\nfor dup in duplicates:\n # *dup is a technique named 'unpacking'\n g = nx.contracted_nodes(g, *dup, self_loops=False)\n\nprint(duplicates)\n\nduplicates = [(x, y) for x, y in \n [(node, node.replace(\"-\", \" \")) for node in g]\n if x != y and y in g]\nprint(duplicates)\n\nfor dup in duplicates:\n g = nx.contracted_nodes(g, *dup, self_loops=False)\n\n# nx.contracted creates a new node/edge attribute called contraction\n# the value of the attribute is a dictionary, but GraphML\n# does not support dictionary attributes\nnx.set_node_attributes(g, 0,\"contraction\")\nnx.set_edge_attributes(g, 0,\"contraction\")", "[('Architect', 'Architects'), ('Architectural Style', 'Architectural Styles'), ('Barrel Vault', 'Barrel Vaults'), ('Dome', 'Domes'), ('Pediment', 'Pediments'), ('Pilaster', 'Pilasters'), ('Roman Catholic', 'Roman Catholics'), ('Servian Wall', 'Servian Walls'), ('Lyre', 'Lyres'), ('Ancient Greek', 'Ancient Greeks'), ('Arab', 'Arabs'), ('Archbishop', 'Archbishops'), ('Sculpture', 'Sculptures'), ('Colonnade', 'Colonnades'), ('Lunette', 'Lunettes'), ('Metaphor', 'Metaphors'), ('Mosaic', 'Mosaics'), ('Relief', 'Reliefs'), ('Roman Province', 'Roman Provinces'), ('Aesthetic', 'Aesthetics'), ('Aurelian Wall', 'Aurelian Walls'), ('Basilica', 'Basilicas'), ('Classical Order', 'Classical Orders'), ('Column', 'Columns'), ('Herodian', 'Herodians'), ('Lighthouse', 'Lighthouses'), ('Obelisk', 'Obelisks'), ('Praetor', 'Praetors'), ('Roman Emperor', 'Roman Emperors'), ('Roman Aqueduct', 'Roman Aqueducts'), ('Roman Bath', 'Roman Baths'), ('Roman Legion', 'Roman Legions'), ('Roman Magistrate', 'Roman Magistrates'), ('Roman Road', 'Roman Roads'), ('Roman Triumph', 'Roman Triumphs'), ('Spandrel', 'Spandrels'), ('Tribune', 'Tribunes'), ('Water Wheel', 'Water Wheels'), ('Gaul', 'Gauls'), ('Jew', 'Jews'), ('Painting', 'Paintings'), ('Apollo', 'Apollos'), ('Archaeologist', 'Archaeologists'), ('Nabataean', 'Nabataeans'), ('Apostolic Constitution', 'Apostolic Constitutions'), ('Papal Conclave', 'Papal Conclaves'), ('Pope', 'Popes'), ('Swiss Guard', 'Swiss Guards'), ('World Heritage Site', 'World Heritage Sites'), ('Ancient Roman', 'Ancient Romans'), ('Corinthian Capital', 'Corinthian Capitals'), ('Entablature', 'Entablatures'), ('Benedictine', 'Benedictines'), ('Building Material', 'Building Materials'), ('Christian Denomination', 'Christian Denominations'), ('Ecumenical Council', 'Ecumenical Councils'), ('Gospel', 'Gospels'), ('Nave', 'Naves'), ('Plebeian', 'Plebeians'), ('Vandal', 'Vandals'), ('Engineer', 'Engineers'), ('Antonine', 'Antonines'), ('Gerano', 'Geranos'), ('Saracen', 'Saracens'), ('Cupid', 'Cupids'), ('City Wall', 'City Walls'), ('Satire', 'Satires'), ('Aztec', 'Aztecs'), ('Gallic War', 'Gallic Wars'), ('Franciscan', 'Franciscans'), ('Marian Apparition', 'Marian Apparitions'), ('Relic', 'Relics'), ('Founding Father', 'Founding Fathers'), ('Christian Martyr', 'Christian Martyrs'), ('Mendicant Order', 'Mendicant Orders'), ('Mithra', 'Mithras'), ('Carthusian', 'Carthusians'), ('Mamluk', 'Mamluks'), ('Bean', 'Beans'), ('Muslim', 'Muslims'), ('Republic', 'Republics'), 
('Brown Bear', 'Brown Bears'), ('Firearm', 'Firearms'), ('Musket', 'Muskets'), ('Art', 'Arts'), ('Anglo-Saxon', 'Anglo-Saxons'), ('Archive', 'Archives'), ('Astrolabe', 'Astrolabes'), ('Balustrade', 'Balustrades'), ('Caryatid', 'Caryatids'), ('Christian', 'Christians'), ('Huguenot', 'Huguenots'), ('Imperial War Museum', 'Imperial War Museums'), ('Lachish Relief', 'Lachish Reliefs'), ('Manuscript', 'Manuscripts'), ('Milton Keynes Hoard', 'Milton Keynes Hoards'), ('Byzantine Greek', 'Byzantine Greeks'), ('Italo-Norman', 'Italo-Normans'), ('Pendentive', 'Pendentives'), ('Pontic Greek', 'Pontic Greeks'), ('Sultan', 'Sultans'), ('Altar Server', 'Altar Servers'), ('Friar', 'Friars'), ('Monk', 'Monks'), ('Saint', 'Saints'), ('Maenad', 'Maenads'), ('Germanic People', 'Germanic Peoples'), ('Catholic School', 'Catholic Schools'), ('Third Order', 'Third Orders'), ('Angel', 'Angels'), ('Fortification', 'Fortifications'), ('Catacomb', 'Catacombs'), ('Carbon Fiber', 'Carbon Fibers'), ('Luthier', 'Luthiers'), ('Musical Instrument', 'Musical Instruments'), ('String Quintet', 'String Quintets'), ('Official Residence', 'Official Residences'), ('Tithe', 'Tithes'), ('Roman Festival', 'Roman Festivals'), ('Tartu', 'Tartus'), ('Cereal', 'Cereals'), ('Corbel', 'Corbels'), ('Hydraulic', 'Hydraulics'), ('Telecommunication', 'Telecommunications'), ('Sarmatian', 'Sarmatians'), ('Sabine', 'Sabines'), ('Dam', 'Dams'), ('Road', 'Roads'), ('Masonic Lodge', 'Masonic Lodges'), ('Festoon', 'Festoons'), ('Solomonic Column', 'Solomonic Columns'), ('Jesuit', 'Jesuits'), ('Political Science', 'Political Sciences'), ('Onion Dome', 'Onion Domes'), ('State Room', 'State Rooms'), ('Benefice', 'Benefices'), ('Indulgence', 'Indulgences'), ('Puritan', 'Puritans'), ('Sacrament', 'Sacraments'), ('Carmelite', 'Carmelites'), ('Copt', 'Copts'), ('Hunter-Gatherer', 'Hunter-Gatherers'), ('Turkic People', 'Turkic Peoples'), ('Plinth', 'Plinths'), ('Fountain Of The Triton', 'Fountain Of The Tritons'), ('Snake', 'Snakes'), ('Corinthian Column', 'Corinthian Columns'), ('Papal State', 'Papal States'), ('Atheist', 'Atheists'), ('Fédéré', 'Fédérés'), ('Marseille', 'Marseilles'), ('Sans-Culotte', 'Sans-Culottes'), ('Niobid', 'Niobids'), ('Greek Muslim', 'Greek Muslims'), ('Romance Language', 'Romance Languages'), ('Dionysia', 'Dionysias'), ('Intergovernmental Organization', 'Intergovernmental Organizations'), ('Gregorian Reform', 'Gregorian Reforms'), ('Religious Congregation', 'Religious Congregations'), ('Blackshirt', 'Blackshirts'), ('Vestal Virgin', 'Vestal Virgins'), ('E-Book', 'E-Books'), ('Ducat', 'Ducats'), ('Monolith', 'Monoliths'), ('Participle', 'Participles'), ('Vernacular Language', 'Vernacular Languages'), ('Mystery Play', 'Mystery Plays'), ('Fatimid', 'Fatimids'), ('Semitic Language', 'Semitic Languages'), ('Vatican Museum', 'Vatican Museums'), ('Evaporite', 'Evaporites'), ('Geodynamic', 'Geodynamics'), ('Merchant', 'Merchants'), ('Rifian Corridor', 'Rifian Corridors'), ('Sea', 'Seas'), ('Tide', 'Tides'), ('Barbarian', 'Barbarians'), ('Cataphract', 'Cataphracts'), ('Siege', 'Sieges'), ('Troubadour', 'Troubadours'), ('Roman College', 'Roman Colleges'), ('Numismatic', 'Numismatics'), ('Bas-Relief', 'Bas-Reliefs'), ('Lost Work', 'Lost Works'), ('Mammal', 'Mammals'), ('Headwater', 'Headwaters'), ('Tectonic', 'Tectonics'), ('Cardinal Direction', 'Cardinal Directions'), ('Trojan War', 'Trojan Wars'), ('Habsburg', 'Habsburgs'), ('Celt', 'Celts'), ('Sassanid', 'Sassanids'), ('Madrigal', 'Madrigals'), ('Capitoline Museum', 'Capitoline 
Museums'), ('Sesterce', 'Sesterces'), ('Traditionalist Catholic', 'Traditionalist Catholics'), ('Phenocryst', 'Phenocrysts'), ('Battlement', 'Battlements'), ('Meditation', 'Meditations'), ('Igneous Rock', 'Igneous Rocks'), ('Borgia Apartment', 'Borgia Apartments'), ('Carbonate Mineral', 'Carbonate Minerals'), ('Genre Painting', 'Genre Paintings'), ('Tented Roof', 'Tented Roofs'), ('Church Father', 'Church Fathers'), ('Jesuit Reduction', 'Jesuit Reductions'), ('Non-Denominational Muslim', 'Non-Denominational Muslims'), ('Umbilicus Urbi', 'Umbilicus Urbis'), ('Psalm', 'Psalms'), ('Roman Numeral', 'Roman Numerals'), ('Campana Relief', 'Campana Reliefs'), ('Decorative Art', 'Decorative Arts'), ('Fayum Mummy Portrait', 'Fayum Mummy Portraits'), ('Ceramic', 'Ceramics'), ('Barbegal Aqueduct And Mill', 'Barbegal Aqueduct And Mills'), ('Basilian Monk', 'Basilian Monks'), ('Ostrogoth', 'Ostrogoths'), ('Cistercian', 'Cistercians'), ('Theatine', 'Theatines'), ('Robert Somercote', 'Robert Somercotes'), ('Arabian', 'Arabians'), ('Putlog Hole', 'Putlog Holes'), ('Papal Basilica', 'Papal Basilicas'), ('Olympiad', 'Olympiads'), ('Modillion', 'Modillions'), ('Greek Terracotta Figurine', 'Greek Terracotta Figurines'), ('Tanagra Figurine', 'Tanagra Figurines'), ('Fine Art', 'Fine Arts'), ('Lime Kiln', 'Lime Kilns'), ('Stalagmite', 'Stalagmites'), ('Hot Spring', 'Hot Springs'), ('Speleothem', 'Speleothems'), ('African American', 'African Americans'), ('Radio Wave', 'Radio Waves'), ('Language', 'Languages'), ('Pavilion', 'Pavilions'), ('Vineyard', 'Vineyards'), ('Signature Tower', 'Signature Towers')]\n[('Cardinal-Deacon', 'Cardinal Deacon'), ('Palazzo Vidoni-Caffarelli', 'Palazzo Vidoni Caffarelli'), ('Neo-Assyrian Empire', 'Neo Assyrian Empire'), ('African-American Vernacular English', 'African American Vernacular English'), ('National Geospatial-Intelligence Agency', 'National Geospatial Intelligence Agency'), ('North-Central American English', 'North Central American English'), ('Fiber-Reinforced Concrete', 'Fiber Reinforced Concrete'), ('Ground Granulated Blast-Furnace Slag', 'Ground Granulated Blast Furnace Slag'), (\"Peter'S-Pence\", \"Peter'S Pence\"), ('Champs-Élysées', 'Champs Élysées'), ('Counter-Reformation', 'Counter Reformation'), ('Francesco Marchetti-Selvaggiani', 'Francesco Marchetti Selvaggiani'), ('Cardinal-Priest', 'Cardinal Priest'), ('Bode-Museum', 'Bode Museum'), ('Pot-De-Fer', 'Pot De Fer'), ('Sound-Hole', 'Sound Hole'), ('Notre-Dame De Paris', 'Notre Dame De Paris'), ('Île-De-France', 'Île De France'), ('High-Relief', 'High Relief'), ('Sharm El-Sheikh', 'Sharm El Sheikh'), ('Jean-Sylvain Bailly', 'Jean Sylvain Bailly'), ('Marie-Antoinette', 'Marie Antoinette'), ('Pierre-Joseph Cambon', 'Pierre Joseph Cambon'), ('Quasi-War', 'Quasi War'), ('Gold-Leaf', 'Gold Leaf'), ('Egg-And-Dart', 'Egg And Dart'), ('City-State', 'City State'), ('Chi-Rho', 'Chi Rho'), ('Ex-Voto', 'Ex Voto'), ('Bas-Relief', 'Bas Relief'), ('Jean-Baptiste Treilhard', 'Jean Baptiste Treilhard'), ('Louis-Antoine De Bougainville', 'Louis Antoine De Bougainville'), ('Saint-Etienne-Du-Mont', 'Saint Etienne Du Mont'), ('Comédie-Française', 'Comédie Française'), ('Parc Des Buttes-Chaumont', 'Parc Des Buttes Chaumont'), (\"Val-D'Oise\", \"Val D'Oise\"), ('Barrel-Vault', 'Barrel Vault'), ('Nation-State', 'Nation State'), ('Terra-Cotta', 'Terra Cotta'), ('Ludovico Flangini-Giovanelli', 'Ludovico Flangini Giovanelli'), ('Lapis-Lazuli', 'Lapis Lazuli'), ('Fleur-De-Lys', 'Fleur De Lys'), ('Prentice-Hall', 'Prentice Hall'), 
('Bava-Beccaris Massacre', 'Bava Beccaris Massacre'), ('Marie-José Of Belgium', 'Marie José Of Belgium')]\n" ], [ "print(\"{} nodes, {} edges\".format(len(g), nx.number_of_edges(g)))", "51009 nodes, 240038 edges\n" ] ], [ [ "## 1.4 Truncate the network", "_____no_output_____" ], [ "Why did you go through all those Wikipedia troubles? First, to construct a\nnetwork of subjects related to complex networks—and here it is. Second, to\nfind other significant topics related to complex networks. **But what is the\nmeasure of significance?**\n\n\nYou will discover a variety of network measures latter in this course. For now, let’s concentrate on a **node indegree**—the number of edges directed into the node. (In the same spirit, the number of edges directed out of the node is called **outdegree**.) The indegree of a node equals the number of HTML links pointing to the respective page. **If a page has a lot of links to it, the topic of the page must be significant.**\n\n\nThe choice of indegree as a yardstick of significance incidentally makes it\npossible to shrink the graph size by almost 75 percent. The extracted graph\nhas 13,401 nodes and 24104 edges—an average of 1.79 edges per node. **Most\nof the nodes have only one connection**. (Interestingly, there are no isolated\nnodes with no connection in the graph. Even if they exist, you will not find\nthem because of the way snowballing works.) **You can remove all nodes with\nonly one incident edge to make the network more compact and less hairy\nwithout hurting the final results**.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.style.use(\"default\")\n# degree sequence\ndegree_sequence = sorted([d for n, d in g.degree()], reverse=True) \n\nfig, ax = plt.subplots(1,2,figsize=(8,4))\n\n# all_data has information about degree_sequence and the width of each bin\nax[0].hist(degree_sequence)\nax[1].hist(degree_sequence,bins=[1,2,3,4,5,6,7,8,9,10])\n\nax[0].set_title(\"Degree Histogram\")\nax[0].set_ylabel(\"Count\")\nax[0].set_xlabel(\"Degree\")\nax[0].set_ylim(0,15000)\n\nax[1].set_title(\"Degree Histogram - Zoom\")\nax[1].set_ylabel(\"Count\")\nax[1].set_xlabel(\"Degree\")\nax[1].set_xlim(0,10)\nax[1].set_ylim(0,15000)\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "# filter nodes with degree greater than or equal to 2\ncore = [node for node, deg in dict(g.degree()).items() if deg >= 2]\n\n# select a subgraph with 'core' nodes\ngsub = nx.subgraph(g, core)\n\nprint(\"{} nodes, {} edges\".format(len(gsub), nx.number_of_edges(gsub)))\n\nnx.write_graphml(gsub, \"cna.graphml\")", "15780 nodes, 204809 edges\n" ], [ "print(\"Nodes removed: {:.2f}%\".format(100*(1 - 3195/13899)))\nprint(\"Edges removed: {:.2f}%\".format(100*(1 - 13899/24105)))\nprint(\"Edges per nodes: {:.2f}\".format(13899/3195))", "Nodes removed: 77.01%\nEdges removed: 42.34%\nEdges per nodes: 4.35\n" ] ], [ [ "As you can see, the following code fragment safely removes 77 percent of the\nnodes and 42 percent of the edges, raising the average number of edges per\nnode to 4.35.", "_____no_output_____" ], [ "## 1.5 Explore the network", "_____no_output_____" ], [ "The following figure is a [Gephi](https://gephi.org/) rendering of **gsub**. Node\nand label font sizes represent the indegrees (a filter was added in gephi just to show only node with indegree greater than 50). The most in-connected, most significant\nnodes are in the upper-left corner of the network. 
What are they?\n\n<img width=\"600\" src=\"https://drive.google.com/uc?export=view&id=1Cemnpe6yBttyZ8Vw2i9ifMiWx9psIV7C\">\n\n\nThe last code fragment of the exercise efficiently calculates the answer by\ncalling the method **gsub.in_degree()**. The method (and its sister method **gsub.out_degree()**)\nare very similar to **gsub.degree()** except that they report different edge counts in the form of objects InDegreeView and OutDegreeView, respectively.", "_____no_output_____" ] ], [ [ "top_indegree = sorted(dict(gsub.in_degree()).items(),\n reverse=True, key=itemgetter(1))[:100]\nprint(\"\\n\".join(map(lambda t: \"{} {}\".format(*reversed(t)), top_indegree)))", "427 Pantheon, Rome\n396 Rome\n388 St. Peter'S Basilica\n383 Vatican City\n371 Colosseum\n370 Roman Art\n368 Sistine Chapel\n367 Basilica Di Santa Maria Maggiore\n366 Archbasilica Of Saint John Lateran\n365 Hadrian'S Villa\n364 Roman Forum\n364 Basilica Of Saint Paul Outside The Walls\n363 Santi Apostoli, Rome\n362 Sant'Apollinare Alle Terme Neroniane-Alessandrine\n362 Lateran Palace\n362 Trajan'S Column\n361 Baths Of Diocletian\n361 Geographic Coordinate System\n361 Ostia Antica\n361 Palazzo Della Cancelleria\n360 Sistine Chapel Ceiling\n360 Palazzo Barberini\n360 Santa Maria Degli Angeli E Dei Martiri\n360 San Nicola In Carcere\n360 Capitoline Museum\n360 Baths Of Caracalla\n359 San Silvestro In Capite\n359 Piazza Navona\n359 Domus Aurea\n359 Forum Of Augustus\n359 Santi Cosma E Damiano\n359 Capitoline Hill\n359 St. Peter'S Square\n359 Palatine Hill\n359 Santa Sabina\n359 Ara Pacis\n359 San Lorenzo In Damaso\n359 Raphael Rooms\n358 Temple Of Hercules Victor\n358 Castel Sant'Angelo\n358 Santa Maria In Trastevere\n358 Santa Cecilia In Trastevere\n358 San Giovanni Dei Fiorentini\n358 Santa Maria In Via Lata\n358 Altare Della Patria\n358 Villa D'Este\n358 Santa Croce In Gerusalemme\n358 Santa Maria Sopra Minerva\n358 Arch Of Septimius Severus\n358 Sant'Andrea Delle Fratte\n358 Galleria Nazionale D'Arte Antica\n358 Quirinal Palace\n357 Basilica Of Sant'Agostino\n357 San Lorenzo In Lucina\n357 Santi Quattro Coronati\n357 Villa Farnese\n357 Baths Of Agrippa\n357 Villa Farnesina\n357 Via Della Conciliazione\n357 Sant'Eugenio\n357 Churches Of Rome\n357 Santa Francesca Romana, Rome\n357 Leonine City\n357 Santa Maria In Cosmedin\n357 Cloaca Maxima\n357 Mausoleum Of Augustus\n357 San Carlo Al Corso\n357 Basilica Ulpia\n357 Trajan'S Market\n357 Santo Stefano Al Monte Celio\n357 Frascati\n357 Arch Of Constantine\n357 Aurelian Wall\n357 Appian Way\n357 Santa Prassede\n357 Temple Of Janus (Forum Holitorium)\n357 Circus Of Nero\n357 Basilica Of Maxentius\n357 Santa Maria Della Vittoria, Rome\n357 Curia Julia\n357 Basilica Of San Clemente\n356 Villa Giulia\n356 San Crisogono\n356 San Pancrazio\n356 Santa Maria In Domnica\n356 Basilica Di Sant'Anastasia Al Palatino\n356 Temple Of Portunus\n356 Forum Boarium\n356 Sacro Cuore Di Maria\n356 San Bartolomeo All'Isola\n356 San Pietro In Vincoli\n356 Santa Sofia A Via Boccea\n356 San Vitale, Rome\n356 Basilica Of Santa Maria Del Popolo\n356 Santa Teresa, Rome\n356 Basilica Of Saint Lawrence Outside The Walls\n356 Theatre Of Marcellus\n356 San Saba, Rome\n356 Santa Maria In Aracoeli\n356 San Sebastiano Fuori Le Mura\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
ecd23b74faf3fc229e49cc2657ede025a6c883d6
75,726
ipynb
Jupyter Notebook
notebook/exploration.ipynb
JamesLoughridge/titanic
0a007c2e581556e89c42ae808a39f7275299e4b8
[ "MIT" ]
null
null
null
notebook/exploration.ipynb
JamesLoughridge/titanic
0a007c2e581556e89c42ae808a39f7275299e4b8
[ "MIT" ]
null
null
null
notebook/exploration.ipynb
JamesLoughridge/titanic
0a007c2e581556e89c42ae808a39f7275299e4b8
[ "MIT" ]
null
null
null
54.557637
17,100
0.651863
[ [ [ "%matplotlib inline\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\n\nos.chdir('/Users/jamesl/Workspace/titanic')\nsns.set()", "_____no_output_____" ] ], [ [ "# Reading the data\nThe first step will be to read in the data from data/raw/train.csv into a pandas dataframe. ", "_____no_output_____" ] ], [ [ "train = pd.read_csv('data/train.csv')\ntest = pd.read_csv('data/test.csv')\ncombine = [train, test]\ntrain.head()", "_____no_output_____" ] ], [ [ "## Looking for Missing Data and Taking a First Look\nNext I will look for any missing data that we may have. Since most of the cabin information is missing, I may want to consider dropping that column. However, the letter infront of the cabin number indicates what deck someone was on in the titanic. I belive there may be a strong correlation between deck and survival and fare. However, I will explore that later. ", "_____no_output_____" ] ], [ [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 12 columns):\nPassengerId 891 non-null int64\nSurvived 891 non-null int64\nPclass 891 non-null int64\nName 891 non-null object\nSex 891 non-null object\nAge 714 non-null float64\nSibSp 891 non-null int64\nParch 891 non-null int64\nTicket 891 non-null object\nFare 891 non-null float64\nCabin 204 non-null object\nEmbarked 889 non-null object\ndtypes: float64(2), int64(5), object(5)\nmemory usage: 83.6+ KB\n" ], [ "train['Name'].sample(n=20)", "_____no_output_____" ] ], [ [ "Every passenger seems to have a title attached to them. Next I will make a separate column containing their titles. Titles like mlle and mme are just french equivalents if ms. There are several titles such as Major or Captain that are rare so we can move all of those into their own rare title class. 
", "_____no_output_____" ] ], [ [ "train.describe(include=['O'])", "_____no_output_____" ], [ "train[['Pclass', 'Survived']].groupby('Pclass').mean()", "_____no_output_____" ], [ "train[['Sex', 'Survived']].groupby('Sex').mean()", "_____no_output_____" ], [ "train[['SibSp', 'Survived']].groupby('SibSp').mean()", "_____no_output_____" ], [ "train[['Parch', 'Survived']].groupby('Parch').mean()", "_____no_output_____" ], [ "g = sns.FacetGrid(train, col='Survived', row='Sex')\ng.map(sns.distplot, 'Age', kde=False)", "_____no_output_____" ], [ "h = sns.FacetGrid(train, col='Survived')\nh.map(sns.distplot, 'Age', kde=False)", "_____no_output_____" ], [ "i = sns.FacetGrid(train, row='Survived', col='Pclass')\ni.map(sns.distplot, 'Age', kde=False)", "_____no_output_____" ] ], [ [ "## Dropping and Adding Features\nWe will be dropping the cabin and the ticket features.", "_____no_output_____" ] ], [ [ "train = train.drop(['Ticket', 'Cabin'], axis=1)\ntest = test.drop(['Ticket', 'Cabin'], axis=1)\ncombine = [train, test]", "_____no_output_____" ], [ "for dataset in combine:\n dataset['Title'] = dataset['Name'].str.extract(' (\\w+)\\.', expand=False)", "_____no_output_____" ], [ "pd.crosstab(train['Title'], train['Sex'])", "_____no_output_____" ], [ "for dataset in combine:\n dataset['Title'] = dataset['Title'].replace(['Capt', 'Col', 'Don', 'Countess', 'Dr', \\\n 'Jonkheer', 'Lady', 'Major', 'Rev', 'Sir'], 'Rare')\n dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Mme', 'Miss')\n dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')\n \ntrain[['Title', 'Survived']].groupby('Title').mean()", "_____no_output_____" ], [ "title_mapping = {\"Mr\": 1, \"Miss\": 2, \"Mrs\": 3, \"Master\": 4, \"Rare\": 5}\nfor dataset in combine:\n dataset['Title'] = dataset['Title'].map(title_mapping)\n dataset['Title'] = dataset['Title'].fillna(0)\n \ntrain.head()", "_____no_output_____" ], [ "train = train.drop(['Name', 'PassengerId'], axis=1)\ntest = test.drop(['Name', 'PassengerId'], axis=1)\ncombine = [train, test]", "_____no_output_____" ], [ "for dataset in combine:\n dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0})\n \ntrain.head()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecd23d05057c791b9ac1ba24a9be09e9758c5104
604,524
ipynb
Jupyter Notebook
Wizualizacje5.ipynb
KonradBier/kBI
a70015c61602cc256ac804c9b5ac1d3547781897
[ "MIT" ]
null
null
null
Wizualizacje5.ipynb
KonradBier/kBI
a70015c61602cc256ac804c9b5ac1d3547781897
[ "MIT" ]
null
null
null
Wizualizacje5.ipynb
KonradBier/kBI
a70015c61602cc256ac804c9b5ac1d3547781897
[ "MIT" ]
null
null
null
1,415.747073
300,866
0.952877
[ [ [ "<a href=\"https://colab.research.google.com/github/KonradBier/kBI/blob/master/Wizualizacje5.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np", "/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "df = sns.load_dataset('iris')\ndf.head(3)", "_____no_output_____" ], [ "sns.set()\nsns.pairplot(data=df)", "_____no_output_____" ], [ "sns.pairplot(data=df, hue='species')", "_____no_output_____" ], [ "g = sns.PairGrid(df, hue='species')\ng.map_diag(sns.kdeplot)\ng.map_offdiag(sns.kdeplot, n_levels=6)", "_____no_output_____" ] ], [ [ "### **REGPLOT**\n\n---\n\n", "_____no_output_____" ] ], [ [ "df = sns.load_dataset('tips')\ndf.head(3)", "_____no_output_____" ], [ "sns.regplot(data= df, x='total_bill', y='tip')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd23f33762305519a391de7e6e4ded4bcac9222
12,379
ipynb
Jupyter Notebook
API/secrets.ipynb
joelvisroman/MSDataPrepDocs
b517e47b7b982809618b6d937a2d8e72f8c2e32b
[ "MIT" ]
null
null
null
API/secrets.ipynb
joelvisroman/MSDataPrepDocs
b517e47b7b982809618b6d937a2d8e72f8c2e32b
[ "MIT" ]
null
null
null
API/secrets.ipynb
joelvisroman/MSDataPrepDocs
b517e47b7b982809618b6d937a2d8e72f8c2e32b
[ "MIT" ]
null
null
null
35.469914
424
0.404475
[ [ [ "# Providing Secrets\nCopyright (c) Microsoft Corporation. All rights reserved.<br>\nLicensed under the MIT License.", "_____no_output_____" ], [ "Currently, secrets are only persisted for the lifetime of the engine process and they are not part of the dprep file. If you started a new session (hence start an engine process) and load a package and try to run a dataflow within that package, you will need to call `use_secrets` to register the required secrets to use during execution, otherwise the execution will fail as the required secrets are not available.\n\nIn this notebook, we will:\n1. Loading a previously saved package\n2. Call `get_missing_secrets` to determine the missing secrets\n3. Call `use_secrets` and pass in the missing secrets to register it with the engine for this session\n4. Call `head` to see the a preview of the data", "_____no_output_____" ] ], [ [ "import azureml.dataprep as dprep\n\nimport os", "_____no_output_____" ] ], [ [ "Let's load the previously saved package.", "_____no_output_____" ] ], [ [ "package = dprep.Package.open(file_path='./data/secrets.dprep')\ndataflow = package.dataflows[0]", "_____no_output_____" ] ], [ [ "Let's call `get_missing_secrets` to see what secrets are required missing in the engine.", "_____no_output_____" ] ], [ [ "dataflow.get_missing_secrets()", "_____no_output_____" ] ], [ [ "Let's now read the secrets from an environment variable and put it in our secret dictionary and call `use_secrets` with the secrets. This will register these secrets in the engine so you don't need to provide them again in this session.\n\n_Note: It is a bad practice to have secrets in files that will be checked into source control._", "_____no_output_____" ] ], [ [ "sas = os.environ['SCENARIOS_SECRETS']\nsecrets = {\n 'https://dpreptestfiles.blob.core.windows.net/testfiles/read_csv_duplicate_headers.csv': sas\n}\ndataflow.use_secrets(secrets=secrets)", "_____no_output_____" ] ], [ [ "We can now call `head` without passing in `secrets` and the engine will happily execute and show us a preview of the data.", "_____no_output_____" ] ], [ [ "dataflow.head(5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd276121499262dd959412179158d901304c114
5,862
ipynb
Jupyter Notebook
examples/lightgbm/README.ipynb
lynnmatrix/MLServer
d43d6685a9fc3a0e7a15ba80c4a3b1228d90b48c
[ "Apache-2.0" ]
null
null
null
examples/lightgbm/README.ipynb
lynnmatrix/MLServer
d43d6685a9fc3a0e7a15ba80c4a3b1228d90b48c
[ "Apache-2.0" ]
null
null
null
examples/lightgbm/README.ipynb
lynnmatrix/MLServer
d43d6685a9fc3a0e7a15ba80c4a3b1228d90b48c
[ "Apache-2.0" ]
null
null
null
26.889908
222
0.551348
[ [ [ "# Serving a `LightGBM` model\nOut of the box, `mlserver` supports the deployment and serving of `lightgbm` models.\nBy default, it will assume that these models have been [serialised using the `bst.save_model()` method](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.Booster.html).\n\nIn this example, we will cover how we can train and serialise a simple model, to then serve it using `mlserver`.", "_____no_output_____" ], [ "## Training\n\nTo test the LightGBM Server, first we need to generate a simple LightGBM model using Python. ", "_____no_output_____" ] ], [ [ "import lightgbm as lgb\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nimport os\n\nmodel_dir = \".\"\nBST_FILE = \"iris-lightgbm.bst\"\n\niris = load_iris()\ny = iris['target']\nX = iris['data']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)\ndtrain = lgb.Dataset(X_train, label=y_train)\n\nparams = {\n 'objective':'multiclass', \n 'metric':'softmax',\n 'num_class': 3\n}\nlgb_model = lgb.train(params=params, train_set=dtrain)\nmodel_file = os.path.join(model_dir, BST_FILE)\nlgb_model.save_model(model_file)", "_____no_output_____" ] ], [ [ "Our model will be persisted as a file named `iris-lightgbm.bst`.", "_____no_output_____" ], [ "## Serving\n\nNow that we have trained and saved our model, the next step will be to serve it using `mlserver`. \nFor that, we will need to create 2 configuration files: \n\n- `settings.json`: holds the configuration of our server (e.g. ports, log level, etc.).\n- `model-settings.json`: holds the configuration of our model (e.g. input type, runtime to use, etc.).", "_____no_output_____" ], [ "### `settings.json`", "_____no_output_____" ] ], [ [ "%%writefile settings.json\n{\n \"debug\": \"true\"\n}", "_____no_output_____" ] ], [ [ "### `model-settings.json`", "_____no_output_____" ] ], [ [ "%%writefile model-settings.json\n{\n \"name\": \"iris-lgb\",\n \"implementation\": \"mlserver_lightgbm.LightGBMModel\",\n \"parameters\": {\n \"uri\": \"./iris-lightgbm.bst\",\n \"version\": \"v0.1.0\"\n }\n}", "_____no_output_____" ] ], [ [ "### Start serving our model\n\nNow that we have our config in-place, we can start the server by running `mlserver start .`. 
This either needs to be run from the same directory where our config files are, or to point to the folder where they are.\n\n```shell\nmlserver start .\n```\n\nSince this command will start the server and block the terminal, waiting for requests, this will need to be run in the background, in a separate terminal.", "_____no_output_____" ], [ "### Send test inference request\n\nWe now have our model being served by `mlserver`.\nTo make sure that everything is working as expected, let's send a request from our test set.\n\nFor that, we can use the Python types that `mlserver` provides out of the box, or we can build our request manually.", "_____no_output_____" ] ], [ [ "import requests\n\nx_0 = X_test[0:1]\ninference_request = {\n \"inputs\": [\n {\n \"name\": \"predict-prob\",\n \"shape\": x_0.shape,\n \"datatype\": \"FP32\",\n \"data\": x_0.tolist()\n }\n ]\n}\n\nendpoint = \"http://localhost:8788/v2/models/iris-lgb/versions/v0.1.0/infer\"\nresponse = requests.post(endpoint, json=inference_request)\n\nresponse.json()", "_____no_output_____" ] ], [ [ "As we can see above, the model predicted the probability for each class, and the probability of class `1` is the highest, close to `0.99`, which matches what's in the test set.", "_____no_output_____" ] ], [ [ "y_test[0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd27a35921033167419aeb0155585ef26598413
950
ipynb
Jupyter Notebook
examples/Intro Folium.ipynb
shakurgds/geo_prof
d2c5150df9d5b5d77986ca76292b11eed3e32c2e
[ "MIT" ]
null
null
null
examples/Intro Folium.ipynb
shakurgds/geo_prof
d2c5150df9d5b5d77986ca76292b11eed3e32c2e
[ "MIT" ]
null
null
null
examples/Intro Folium.ipynb
shakurgds/geo_prof
d2c5150df9d5b5d77986ca76292b11eed3e32c2e
[ "MIT" ]
null
null
null
16.666667
51
0.493684
[ [ [ "import folium\n\n\nm = folium.Map(location=[45.5236, -122.6750])", "_____no_output_____" ], [ "m", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ecd2825a1499cd8bbc6dde2345eddacfe8f5edee
93,858
ipynb
Jupyter Notebook
notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb
davidslater/adversarial-robustness-toolbox
3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd
[ "MIT" ]
2
2019-10-26T08:35:37.000Z
2020-09-02T18:38:00.000Z
notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb
davidslater/adversarial-robustness-toolbox
3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd
[ "MIT" ]
1
2020-03-09T07:04:39.000Z
2020-03-09T07:04:39.000Z
notebooks/fabric_for_deep_learning_adversarial_samples_fashion_mnist.ipynb
davidslater/adversarial-robustness-toolbox
3cfa6de196cb32a3efafab2ff6bbf44247c9ddbd
[ "MIT" ]
1
2021-03-16T11:49:02.000Z
2021-03-16T11:49:02.000Z
65.133935
39,316
0.750112
[ [ [ "# Generate Adversarial Samples for Deep Learning Models with the Adversarial Robustness Toolbox (ART)\n\nThis notebook shows how to use adversarial attack techniques from the [Adversarial Robustness Toolbox (ART)](https://developer.ibm.com/code/open/projects/adversarial-robustness-toolbox/) on Deep Learning models trained with *FfDL*. The *ART* library supports crafting and analyzing various attack and defense methods for deep learning models. \n\nIn this notebook, you will learn how to incorporate one of the attack methods supported by *ART*, the *Fast Gradient Method* (*FGM*), into your training pipeline to generate adversarial samples for the purposes of evaluating the robustness of the trained model. The model is a Convolutional Neural Network (CNN) trained on the *[MNIST handwritten digit data](http://yann.lecun.com/exdb/mnist/)* using [Keras](https://keras.io/) with a [TensorFlow](https://www.tensorflow.org/) backend.\n\nThe *ART* Github repository can be found here - https://github.com/IBM/adversarial-robustness-toolbox\n\nThis notebook uses Python 3.\n\n\n## Contents\n\n1.\t[Set up the environment](#setup)\n2.\t[Create a Keras model](#model)\n3. [Train the model](#train)\n4.\t[Generate adversarial samples for a robustness check](#art)\n5.\t[Summary and next steps](#summary)", "_____no_output_____" ], [ "<a id=\"setup\"></a>\n## 1. Setup\n\nIt is recommended that you run this notebook inside a Python 3 virtual environment. Make sure you have all required libraries installed.\n\nTo store model and training data, this notebook requires access to a Cloud Object Storage (COS) instance. [BlueMix Cloud Object Storage](https://console.bluemix.net/catalog/services/cloud-object-storage) offers a free *lite plan*. Follow [these instructions](https://dataplatform.ibm.com/docs/content/analyze-data/ml_dlaas_object_store.html) to create your COS instance and generate [service credentials](https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials) with [HMAC keys](https://console.bluemix.net/docs/services/cloud-object-storage/hmac/credentials.html#using-hmac-credentials).\n", "_____no_output_____" ], [ "**Enter your cluster and object storage information:**", "_____no_output_____" ] ], [ [ "import os\n\nuser_data = {\n \"ffdl_dir\" : os.environ.get(\"FFDL_DIR\"),\n \"ffdl_cluster_name\" : os.environ.get(\"CLUSTER_NAME\"),\n \"vm_type\" : os.environ.get(\"VM_TYPE\"),\n \"cos_hmac_access_key_id\" : os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n \"cos_hmac_secret_access_key\" : os.environ.get(\"AWS_SECRET_ACCESS_KEY\"),\n \"cos_region_name\" : os.environ.get(\"AWS_DEFAULT_REGION\"),\n \"cos_service_endpoint\" : os.environ.get(\"AWS_ENDPOINT_URL\") \n}", "_____no_output_____" ], [ "unset_vars = [key for (key, value) in user_data.items() if not value]\n\nfor var in unset_vars:\n print(\"Dictionary 'user_data' is missing '%s'\" % var)\n \nassert not unset_vars, \"Enter 'user_data' to run this notebook!\"", "_____no_output_____" ] ], [ [ "### 1.1. 
Verify or Install Required Python Libraries", "_____no_output_____" ] ], [ [ "import sys\n\ndef is_venv():\n return (hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix))\n\ntry:\n import keras, tensorflow, requests, wget, boto3, art\n print(\"All required libraries are installed.\")\n !cat requirements.txt\nexcept ModuleNotFoundError:\n if is_venv():\n print(\"Installing required libraries into virtual environment.\")\n !python -m pip install -r requirements.txt\n else:\n print(\"Please install the required libraries.\")\n !cat requirements.txt", "Using TensorFlow backend.\n" ] ], [ [ "### 1.2. Connect to Cloud Object Storage (COS)", "_____no_output_____" ], [ "Create a `boto3.resource` to interact with the COS instance. The `boto3` library allows Python developers to manage Cloud Object Storage (COS).", "_____no_output_____" ] ], [ [ "cos = boto3.resource(\"s3\", \n aws_access_key_id = user_data[\"cos_hmac_access_key_id\"],\n aws_secret_access_key = user_data[\"cos_hmac_secret_access_key\"],\n endpoint_url = user_data[\"cos_service_endpoint\"],\n region_name = user_data[\"cos_region_name\"]\n)", "_____no_output_____" ], [ "# for bucket in cos.buckets.all():\n# print(bucket.name)", "_____no_output_____" ] ], [ [ "Create two buckets, which you will use to store training data and training results.\n\n**Note:** The bucket names must be unique.", "_____no_output_____" ] ], [ [ "from uuid import uuid4\n\nbucket_uid = str(uuid4())\ntraining_data_bucket = 'training-data-' + bucket_uid\ntraining_result_bucket = 'training-results-' + bucket_uid\n\ndef create_buckets(bucket_names):\n for bucket in bucket_names:\n print('Creating bucket \"{}\" ...'.format(bucket))\n try:\n cos.create_bucket(Bucket=bucket)\n except boto3.exceptions.botocore.client.ClientError as e:\n print('Error: {}.'.format(e.response['Error']['Message']))\n\nbuckets = [training_data_bucket, training_result_bucket]\n\ncreate_buckets(buckets)\n", "Creating bucket \"training-data-c279ded3-c921-4360-9e05-aa086271a009\" ...\nCreating bucket \"training-results-c279ded3-c921-4360-9e05-aa086271a009\" ...\n" ] ], [ [ "Now you should have 2 buckets.", "_____no_output_____" ], [ "### 1.3. 
Download MNIST Training Data and Upload it to the COS Buckets", "_____no_output_____" ], [ "Select a data set (https://keras.io/datasets/):\n- `mnist.npz`\n- `fashion_mnist.npz`", "_____no_output_____" ] ], [ [ "datasets = [\"mnist.npz\", \"fashion_mnist.npz\"]\n\ndataset_filename = datasets[1] # 'fashion_mnist.npz'", "_____no_output_____" ] ], [ [ "Download the training data and upload it to the `training-data` bucket.", "_____no_output_____" ] ], [ [ "from keras.datasets import mnist, fashion_mnist\nimport numpy as np\n\nif \"fashion\" in dataset_filename:\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() \nelse:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \nnp.savez_compressed(dataset_filename, x_train=x_train , y_train=y_train, x_test=x_test, y_test=y_test)\n\nbucket_obj = cos.Bucket(training_data_bucket)\nprint(\"Uploading files to {}:\".format(training_data_bucket))\n\nbucket_obj.upload_file(dataset_filename, dataset_filename)\nprint('- {} was uploaded'.format(dataset_filename)) ", "Uploading files to training-data-c279ded3-c921-4360-9e05-aa086271a009:\n- fashion_mnist.npz was uploaded\n" ] ], [ [ "Have a look at the list of the created buckets and their contents.", "_____no_output_____" ] ], [ [ "def print_bucket_contents(buckets):\n for bucket_name in buckets:\n print(bucket_name)\n bucket_obj = cos.Bucket(bucket_name)\n for obj in bucket_obj.objects.all():\n print(\" File: {}, {:4.2f}kB\".format(obj.key, obj.size/1024))\n\nprint_bucket_contents(buckets)", "training-data-c279ded3-c921-4360-9e05-aa086271a009\n File: fashion_mnist.npz, 30146.33kB\ntraining-results-c279ded3-c921-4360-9e05-aa086271a009\n" ] ], [ [ "You are done with COS, and you are ready to train your model!", "_____no_output_____" ], [ "<a id=\"model\"></a>\n## 2. Create the Keras model\n\nIn this section we:\n\n- [2.1 Package the model definition](#zip)\n- [2.2 Prepare the training definition metadata](#manifest)\n", "_____no_output_____" ], [ "### 2.1. 
Create the Model Zip File <a id=\"zip\"></a>\n\nLet's create the model [`convolutional_keras.py`](../edit/convolutional_keras.py) and add it to a zip file.", "_____no_output_____" ] ], [ [ "script_filename = \"convolutional_keras.py\"\narchive_filename = 'model.zip'", "_____no_output_____" ], [ "%%writefile $script_filename\n\nfrom __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\nimport keras\nimport numpy as np\nimport sys\nimport os\n\nbatch_size = 128\nnum_classes = 10\nepochs = 1\n\nimg_rows, img_cols = 28, 28\n\n\ndef main(argv):\n if len(argv) < 2:\n sys.exit(\"Not enough arguments provided.\")\n global image_path\n i = 1\n while i <= 2:\n arg = str(argv[i])\n if arg == \"--data\":\n image_path = os.path.join(os.environ[\"DATA_DIR\"], str(argv[i+1]))\n i += 2\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n\n# load training and test data from npz file\nf = np.load(image_path)\nx_train = f['x_train']\ny_train = f['y_train']\nx_test = f['x_test']\ny_test = f['y_test']\nf.close()\n\nif K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\nelse:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train /= 255\nx_test /= 255\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# model\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\nmodel.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=0.1)\n\nscore = model.evaluate(x_test, y_test, verbose=0)\n\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\nmodel_wt_path = os.environ[\"RESULT_DIR\"] + \"/keras_original_model.hdf5\"\nmodel.save(model_wt_path)\nprint(\"Model saved to file: %s\" % model_wt_path)\n\nmodel_def_path = os.environ[\"RESULT_DIR\"] + \"/keras_original_model.json\"\nmodel_json = model.to_json()\nwith open(model_def_path, \"w\") as json_file:\n json_file.write(model_json)\nprint(\"Model definition saved to file: %s\" % model_def_path)\n", "Overwriting convolutional_keras.py\n" ], [ "import zipfile\n\nzipfile.ZipFile(archive_filename, mode='w').write(script_filename)", "_____no_output_____" ] ], [ [ "### 2.2. 
Prepare the Training Definition Metadata <a id=\"manifest\"></a>\n- *FfDL* does not have a *Keras* community image so we need to `pip`-install *Keras* prior to running the `training_command` \n- Your COS credentials are referenced in the `data_stores` > `connection` data.", "_____no_output_____" ] ], [ [ "import yaml\n\ntraining_command = \"pip3 install keras; python3 %s --data ${DATA_DIR}/%s\" % (script_filename, dataset_filename)\n\nmanifest = {\n \"name\": \"keras_digit_recognition\",\n \"description\": \"Hand-written Digit Recognition Training\",\n \"version\": \"1.0\",\n \"gpus\": 0,\n \"cpus\": 2,\n \"memory\": \"2Gb\",\n \"data_stores\": [\n {\n \"id\": \"sl-internal-os\",\n \"type\": \"s3_datastore\",\n \"training_data\": {\n \"container\": training_data_bucket\n },\n \"training_results\": {\n \"container\": training_result_bucket\n },\n \"connection\": {\n \"type\": \"s3_datastore\",\n \"auth_url\": user_data[\"cos_service_endpoint\"],\n \"user_name\": user_data[\"cos_hmac_access_key_id\"],\n \"password\": user_data[\"cos_hmac_secret_access_key\"]\n }\n }\n ],\n \"framework\": {\n \"name\": \"tensorflow\",\n \"version\": \"1.5.0-py3\",\n \"command\": training_command\n },\n \"evaluation_metrics\": {\n \"type\": \"tensorboard\",\n \"in\": \"$JOB_STATE_DIR/logs/tb\"\n }\n}\n\nyaml.dump(manifest, open(\"manifest.yml\", \"w\"), default_flow_style=False)", "_____no_output_____" ] ], [ [ "## 3. Train the Model<a id=\"train\"></a>\n\nIn this section, learn how to:\n- [3.1 Setup the command line environment](#cmd_setup)\n- [3.2 Train the model in the background](#backg)\n- [3.3 Monitor the training log](#log)\n- [3.4 Cancel the training](#cancel)", "_____no_output_____" ], [ "### 3.1. Setup the Command Line Environment <a id=\"cmd_setup\"></a>", "_____no_output_____" ], [ "Load the Kubernetes cluster configuration using the [BlueMix CLI](https://console.bluemix.net/docs/cli/index.html#overview). Make sure your machine is logged in with `bx login`.", "_____no_output_____" ] ], [ [ "try:\n %env VM_TYPE {user_data[\"vm_type\"]}\n %env CLUSTER_NAME {user_data[\"ffdl_cluster_name\"]}\n cluster_config = !bx cs cluster-config {user_data[\"ffdl_cluster_name\"]} | grep \"export KUBECONFIG=\"\n %env KUBECONFIG {cluster_config[-1].split(\"=\")[-1]}\nexcept IndexError:\n print(\"The cluster %s could not be found.\" % {user_data[\"ffdl_cluster_name\"]})\n print(\"Run 'bx cs clusters' to list all clusters you have access to.\")\n #!bx cs clusters\n raise", "env: VM_TYPE=ibmcloud\nenv: CLUSTER_NAME=my-ffdl-cluster\nenv: KUBECONFIG=~/.bluemix/plugins/container-service/clusters/my-ffdl-cluster/kube-config-dal12-my-ffdl-cluster.yml\n" ] ], [ [ "Setup the DLaaS URL, username and password", "_____no_output_____" ] ], [ [ "node_ip = !(cd {user_data[\"ffdl_dir\"]} && make --no-print-directory kubernetes-ip)\nrestapi_port = !kubectl get service ffdl-restapi -o jsonpath='{.spec.ports[0].nodePort}'\ndlaas_url = \"http://%s:%s\" % (node_ip[0], restapi_port[0])\n\n%env DLAAS_URL $dlaas_url\n%env DLAAS_USERNAME = test-user\n%env DLAAS_PASSWORD = test", "env: DLAAS_URL=http://169.48.201.210:30020\nenv: DLAAS_USERNAME=test-user\nenv: DLAAS_PASSWORD=test\n" ] ], [ [ "Obtain the correct FfDL CLI for your machine", "_____no_output_____" ] ], [ [ "import platform\n\nffdl = \"%s/cli/bin/ffdl-%s\" % (user_data[\"ffdl_dir\"], \"osx\" if platform.system() == \"Darwin\" else \"linux\")", "_____no_output_____" ] ], [ [ "### 3.2. 
Start the Training Job<a id=\"backg\"></a>\n", "_____no_output_____" ] ], [ [ "out = !{ffdl} train \"manifest.yml\" \"model.zip\"\nout", "_____no_output_____" ] ], [ [ "### 3.3. Monitor the Training Logs<a id=\"log\"></a>", "_____no_output_____" ] ], [ [ "if \"Model ID\" in out[1]:\n model_id = out.fields()[1][-1]\n !{ffdl} logs --follow {model_id}", "Getting model training logs for '\u001b[1;36mtraining-MyCDwcHmg\u001b[0m'...\nStatus: PENDING\nStatus: Not Started\nTraining with training/test data at:\n DATA_DIR: /job/training-data-c279ded3-c921-4360-9e05-aa086271a009\n MODEL_DIR: /job/model-code\n TRAINING_JOB: \n TRAINING_COMMAND: pip3 install keras; python3 convolutional_keras.py --data ${DATA_DIR}/fashion_mnist.npz\nStoring trained model at:\n RESULT_DIR: /job/training-results-c279ded3-c921-4360-9e05-aa086271a009\nContents of $MODEL_DIR\ntotal 12\ndrwxrwxrwx 2 6342627 root 4096 Jun 18 18:04 .\ndrwxrwxrwx 6 root root 4096 Jun 18 18:04 ..\n-rwxrwxrwx 1 6342627 root 2673 Jun 18 11:02 convolutional_keras.py\nContents of $DATA_DIR\ntotal 30156\ndrwxr-xr-x 2 6342627 root 4096 Jun 18 18:04 .\ndrwxrwxrwx 6 root root 4096 Jun 18 18:04 ..\n-rw-r--r-- 1 6342627 root 30869845 Jun 18 18:02 fashion_mnist.npz\nDATA_DIR=/job/training-data-c279ded3-c921-4360-9e05-aa086271a009\nELASTICSEARCH_PORT=tcp://172.21.40.112:9200\nELASTICSEARCH_PORT_9200_TCP=tcp://172.21.40.112:9200\nELASTICSEARCH_PORT_9200_TCP_ADDR=172.21.40.112\nELASTICSEARCH_PORT_9200_TCP_PORT=9200\nELASTICSEARCH_PORT_9200_TCP_PROTO=tcp\nELASTICSEARCH_SERVICE_HOST=172.21.40.112\nELASTICSEARCH_SERVICE_PORT=9200\nELASTICSEARCH_SERVICE_PORT_HTTP=9200\nFFDL_LCM_PORT=tcp://172.21.112.20:80\nFFDL_LCM_PORT_80_TCP=tcp://172.21.112.20:80\nFFDL_LCM_PORT_80_TCP_ADDR=172.21.112.20\nFFDL_LCM_PORT_80_TCP_PORT=80\nFFDL_LCM_PORT_80_TCP_PROTO=tcp\nFFDL_LCM_SERVICE_HOST=172.21.112.20\nFFDL_LCM_SERVICE_PORT=80\nFFDL_LCM_SERVICE_PORT_GRPC=80\nFFDL_RESTAPI_PORT=tcp://172.21.130.217:80\nFFDL_RESTAPI_PORT_80_TCP=tcp://172.21.130.217:80\nFFDL_RESTAPI_PORT_80_TCP_ADDR=172.21.130.217\nFFDL_RESTAPI_PORT_80_TCP_PORT=80\nFFDL_RESTAPI_PORT_80_TCP_PROTO=tcp\nFFDL_RESTAPI_SERVICE_HOST=172.21.130.217\nFFDL_RESTAPI_SERVICE_PORT=80\nFFDL_RESTAPI_SERVICE_PORT_FFDL=80\nFFDL_TRAINER_PORT=tcp://172.21.226.67:80\nFFDL_TRAINER_PORT_80_TCP=tcp://172.21.226.67:80\nFFDL_TRAINER_PORT_80_TCP_ADDR=172.21.226.67\nFFDL_TRAINER_PORT_80_TCP_PORT=80\nFFDL_TRAINER_PORT_80_TCP_PROTO=tcp\nFFDL_TRAINER_SERVICE_HOST=172.21.226.67\nFFDL_TRAINER_SERVICE_PORT=80\nFFDL_TRAINER_SERVICE_PORT_GRPC=80\nFFDL_TRAININGDATA_PORT=tcp://172.21.106.158:80\nFFDL_TRAININGDATA_PORT_80_TCP=tcp://172.21.106.158:80\nFFDL_TRAININGDATA_PORT_80_TCP_ADDR=172.21.106.158\nFFDL_TRAININGDATA_PORT_80_TCP_PORT=80\nFFDL_TRAININGDATA_PORT_80_TCP_PROTO=tcp\nFFDL_TRAININGDATA_SERVICE_HOST=172.21.106.158\nFFDL_TRAININGDATA_SERVICE_PORT=80\nFFDL_TRAININGDATA_SERVICE_PORT_GRPC=80\nFFDL_UI_PORT=tcp://172.21.201.22:80\nFFDL_UI_PORT_80_TCP=tcp://172.21.201.22:80\nFFDL_UI_PORT_80_TCP_ADDR=172.21.201.22\nFFDL_UI_PORT_80_TCP_PORT=80\nFFDL_UI_PORT_80_TCP_PROTO=tcp\nFFDL_UI_SERVICE_HOST=172.21.201.22\nFFDL_UI_SERVICE_PORT=80\nFFDL_UI_SERVICE_PORT_HTTP=80\nGPU_COUNT=0.000000\nHOME=/root\nJOB_STATE_DIR=/job\nLEARNER_ID=1\nLOG_DIR=/job/logs\nMODEL_DIR=/job/model-code\nOLDPWD=/notebooks\nPATH=/usr/local/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\nPROMETHEUS_PORT=tcp://172.21.53.216:9090\nPROMETHEUS_PORT_9090_TCP=tcp://172.21.53.216:9090\nPROMETHEUS_PORT_9090_TCP_ADDR=172.21.53.216\nPROMETHEUS_PORT_9090_TCP_PORT=9090\nPROMETHE
US_PORT_9090_TCP_PROTO=tcp\nPROMETHEUS_SERVICE_HOST=172.21.53.216\nPROMETHEUS_SERVICE_PORT=9090\nPROMETHEUS_SERVICE_PORT_PROMETHEUS=9090\nPWD=/job/model-code\nPYTHONPATH=:/job/model-code\nRESULT_DIR=/job/training-results-c279ded3-c921-4360-9e05-aa086271a009\nS3_PORT=tcp://172.21.95.18:80\nS3_PORT_80_TCP=tcp://172.21.95.18:80\nS3_PORT_80_TCP_ADDR=172.21.95.18\nS3_PORT_80_TCP_PORT=80\nS3_PORT_80_TCP_PROTO=tcp\nS3_SERVICE_HOST=172.21.95.18\nS3_SERVICE_PORT=80\nSHLVL=3\nTRAINING_COMMAND=pip3 install keras; python3 convolutional_keras.py --data ${DATA_DIR}/fashion_mnist.npz\nTRAINING_ID=training-MyCDwcHmg\n_=/usr/bin/env\nMon Jun 18 18:04:22 UTC 2018: Running training job\nCollecting keras\n Downloading https://files.pythonhosted.org/packages/68/12/4cabc5c01451eb3b413d19ea151f36e33026fc0efb932bf51bcaf54acbf5/Keras-2.2.0-py2.py3-none-any.whl (300kB)\nRequirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.5/dist-packages (from keras)\nRequirement already satisfied: h5py in /usr/local/lib/python3.5/dist-packages (from keras)\nRequirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.5/dist-packages (from keras)\nCollecting keras-preprocessing==1.0.1 (from keras)\n Downloading https://files.pythonhosted.org/packages/f8/33/275506afe1d96b221f66f95adba94d1b73f6b6087cfb6132a5655b6fe338/Keras_Preprocessing-1.0.1-py2.py3-none-any.whl\nCollecting keras-applications==1.0.2 (from keras)\n Downloading https://files.pythonhosted.org/packages/e2/60/c557075e586e968d7a9c314aa38c236b37cb3ee6b37e8d57152b1a5e0b47/Keras_Applications-1.0.2-py2.py3-none-any.whl (43kB)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.5/dist-packages (from keras)\nCollecting pyyaml (from keras)\n Downloading https://files.pythonhosted.org/packages/4a/85/db5a2df477072b2902b0eb892feb37d88ac635d36245a72a6a69b23b383a/PyYAML-3.12.tar.gz (253kB)\nBuilding wheels for collected packages: pyyaml\n Running setup.py bdist_wheel for pyyaml: started\n Running setup.py bdist_wheel for pyyaml: finished with status 'done'\n Stored in directory: /root/.cache/pip/wheels/03/05/65/bdc14f2c6e09e82ae3e0f13d021e1b6b2481437ea2f207df3f\nSuccessfully built pyyaml\nInstalling collected packages: keras-preprocessing, keras-applications, pyyaml, keras\nSuccessfully installed keras-2.2.0 keras-applications-1.0.2 keras-preprocessing-1.0.1 pyyaml-3.12\nYou are using pip version 9.0.1, however version 10.0.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\n2018-06-18 18:04:27.811560: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA\nTrain on 54000 samples, validate on 6000 samples\nEpoch 1/1\n\n 128/54000 [..............................] - ETA: 4:54 - loss: 2.3122 - acc: 0.1094\n 256/54000 [..............................] - ETA: 4:26 - loss: 2.2559 - acc: 0.1484\n 384/54000 [..............................] - ETA: 4:09 - loss: 2.2135 - acc: 0.1849\n 512/54000 [..............................] - ETA: 4:05 - loss: 2.1616 - acc: 0.2031\n 640/54000 [..............................] - ETA: 4:00 - loss: 2.1043 - acc: 0.2297\n 768/54000 [..............................] - ETA: 3:55 - loss: 2.0374 - acc: 0.2578\n 896/54000 [..............................] - ETA: 3:54 - loss: 1.9933 - acc: 0.2757\n 1024/54000 [..............................] - ETA: 3:52 - loss: 1.9585 - acc: 0.2930\n 1152/54000 [..............................] 
- ETA: 3:51 - loss: 1.9320 - acc: 0.3047\n 1280/54000 [..............................] - ETA: 3:49 - loss: 1.8973 - acc: 0.3211\n 1408/54000 [..............................] - ETA: 3:47 - loss: 1.8475 - acc: 0.3388\n 1536/54000 [..............................] - ETA: 3:47 - loss: 1.8413 - acc: 0.3405\n 1664/54000 [..............................] - ETA: 3:46 - loss: 1.8464 - acc: 0.3504\n 1792/54000 [..............................] - ETA: 3:46 - loss: 1.8266 - acc: 0.3666\n 1920/54000 [>.............................] - ETA: 3:44 - loss: 1.7821 - acc: 0.3812\n 2048/54000 [>.............................] - ETA: 3:44 - loss: 1.7542 - acc: 0.3892\n 2176/54000 [>.............................] - ETA: 3:43 - loss: 1.7296 - acc: 0.3966\n 2304/54000 [>.............................] - ETA: 3:42 - loss: 1.6966 - acc: 0.4102\n 2432/54000 [>.............................] - ETA: 3:41 - loss: 1.6631 - acc: 0.4219\n 2560/54000 [>.............................] - ETA: 3:40 - loss: 1.6452 - acc: 0.4273\n 2688/54000 [>.............................] - ETA: 3:39 - loss: 1.6376 - acc: 0.4301\n 2816/54000 [>.............................] - ETA: 3:38 - loss: 1.6211 - acc: 0.4332\n 2944/54000 [>.............................] - ETA: 3:37 - loss: 1.6019 - acc: 0.4406\n 3072/54000 [>.............................] - ETA: 3:37 - loss: 1.5782 - acc: 0.4489\n 3200/54000 [>.............................] - ETA: 3:36 - loss: 1.5601 - acc: 0.4541\n 3328/54000 [>.............................] - ETA: 3:36 - loss: 1.5428 - acc: 0.4600\n 3456/54000 [>.............................] - ETA: 3:36 - loss: 1.5245 - acc: 0.4682\n 3584/54000 [>............\n" ] ], [ [ "## 4. Generate Adversarial Samples <a id=\"art\"></a>\n\nIn this section, we learn how to:\n- [4.1 Generate adversarial samples with ART (synchronously in notebook)](#artLocal)\n- [4.2 Generate adversarial samples with ART (asynchronously using FfDL)](#artWithFfDL)", "_____no_output_____" ], [ "### 4.1. Generate Adversarial Samples Locally <a id=\"artLocal\"></a>\n\nThis section shows how to use the ART Fast Gradient Method (FGM) to generate adversarial samples for the model previously trained synchronously in this notebook. \n\nA trained model should have been created in the `training_result_bucket`. Now ART can be used to check the robustness of the trained model. \n\nThe original dataset used to train the model as well as the trained model serve as inputs to the `robustness_check.py` script. 
We can download both from the `training_data_bucket` and the `training_result_bucket` respectively.", "_____no_output_____" ], [ "First, download the original data set and the trained model from Cloud Object Store.", "_____no_output_____" ] ], [ [ "weights_filename = \"keras_original_model.hdf5\"\nnetwork_definition_filename = \"keras_original_model.json\"", "_____no_output_____" ] ], [ [ "Print contents of COS buckets used in the previous training run", "_____no_output_____" ] ], [ [ "print_bucket_contents([training_data_bucket, training_result_bucket])", "training-data-c279ded3-c921-4360-9e05-aa086271a009\n File: fashion_mnist.npz, 30146.33kB\ntraining-results-c279ded3-c921-4360-9e05-aa086271a009\n File: training-MyCDwcHmg/keras_original_model.hdf5, 14092.55kB\n File: training-MyCDwcHmg/keras_original_model.json, 2.75kB\n File: training-MyCDwcHmg/learner-1/load-data.log, 8.32kB\n File: training-MyCDwcHmg/learner-1/load-model.log, 0.42kB\n File: training-MyCDwcHmg/learner-1/training-log.txt, 41.91kB\n" ], [ "# download network definition and weights to current working directory\n\nweights_file_in_cos_bucket = os.path.join(model_id, weights_filename)\nnetwork_definition_file_in_cos_bucket = os.path.join(model_id, network_definition_filename)\n\nbucket_obj = cos.Bucket(training_result_bucket)\n\nbucket_obj.download_file(weights_file_in_cos_bucket, weights_filename)\nprint('Downloaded', weights_filename)\n\nbucket_obj.download_file(network_definition_file_in_cos_bucket, network_definition_filename)\nprint('Downloaded', network_definition_filename)", "Downloaded keras_original_model.hdf5\nDownloaded keras_original_model.json\n" ] ], [ [ "Load & compile the model that we created using `convolutional_keras.py`", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import model_from_json\n\nprint('Network Definition:', network_definition_filename)\nprint('Weights: ', weights_filename)\n\n# load model\njson_file = open(network_definition_filename, 'r')\nmodel_json = json_file.read()\njson_file.close()\n\nmodel = model_from_json(model_json)\nmodel.load_weights(weights_filename)\ncomp_params = {'loss': 'categorical_crossentropy',\n 'optimizer': 'adam',\n 'metrics': ['accuracy']}\nmodel.compile(**comp_params)", "Network Definition: keras_original_model.json\nWeights: keras_original_model.hdf5\n" ] ], [ [ "After loading & compiling the model, the next step is to create a KerasClassifier", "_____no_output_____" ] ], [ [ "# create ART classifier object\nfrom art.classifiers import KerasClassifier\n\nclassifier = KerasClassifier(clip_values=(0, 1), model=model)", "_____no_output_____" ] ], [ [ "Load the test data and labels from `.npz` file", "_____no_output_____" ] ], [ [ "from keras.utils import np_utils\n\nf = np.load(dataset_filename)\nx_original = f['x_test']\ny = f['y_test']\nf.close()", "_____no_output_____" ] ], [ [ "Visualize the original (non-adversarial) sample", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.figure(figsize=(2, 2))\nplt.imshow(x_original[1], cmap='gray')\nprint(y[1])", "2\n" ] ], [ [ "Standardize the Numpy array", "_____no_output_____" ] ], [ [ "# preprocess\nx_original = np.expand_dims(x_original, axis=3)\nx_original = x_original.astype('float32') / 255\ny = np_utils.to_categorical(y, 10)", "_____no_output_____" ] ], [ [ "Evaluate the model and calculated test accuracy", "_____no_output_____" ] ], [ [ "# evaluate\nscores = 
model.evaluate(x_original, y, verbose=0)\nprint('model test loss: ', scores[0]*100)\nprint('model test accuracy:', scores[1]*100)\nmodel_accuracy = scores[1]*100", "model test loss: 38.81626313686371\nmodel test accuracy: 86.06\n" ] ], [ [ "ART exposes many attacks like FGM, NewtonFool, DeepFool, Carlini etc. The code below shows how to use one of ART's attack methods (Fast Gradient Method or FGM) to craft adversarial samples based on x_test", "_____no_output_____" ] ], [ [ "from art.attacks import FastGradientMethod\n\n# configuration\nepsilon = 0.2\n\n# create crafter object\ncrafter = FastGradientMethod(classifier, eps=epsilon)\n\n# craft samples on x_test (stored in variable x_original)\nx_adv_samples = crafter.generate(x_original)\n\nadv_samples_filename = \"adv_samples.npz\"\nnp.savez(adv_samples_filename, x_original=x_original, x_adversarial=x_adv_samples, y=y)\n\nprint(\"Number of adversarial samples crafted:\", len(x_adv_samples))\nprint(\"adversarial samples saved to:\", adv_samples_filename)", "Number of adversarial samples crafted: 10000\nadversarial samples saved to: adv_samples.npz\n" ] ], [ [ "The following functions can be used for gathering metrics like model robustness, confidence metric, perturbation metric", "_____no_output_____" ] ], [ [ "import numpy.linalg as la\nimport json\n\n\ndef get_metrics(model, x_original, x_adv_samples, y):\n scores = model.evaluate(x_original, y, verbose=0)\n model_accuracy_on_non_adversarial_samples = scores[1] * 100\n\n y_pred = model.predict(x_original, verbose=0)\n y_pred_adv = model.predict(x_adv_samples, verbose=0)\n\n scores = model.evaluate(x_adv_samples, y, verbose=0)\n model_accuracy_on_adversarial_samples = scores[1] * 100\n\n pert_metric = get_perturbation_metric(x_original, x_adv_samples, y_pred, y_pred_adv, ord=2)\n conf_metric = get_confidence_metric(y_pred, y_pred_adv)\n\n data = {\n \"model accuracy on test data:\": model_accuracy_on_non_adversarial_samples,\n \"model accuracy on adversarial samples\": model_accuracy_on_adversarial_samples,\n \"reduction in confidence\": conf_metric * 100,\n \"average perturbation\": pert_metric * 100\n }\n return data\n\n\ndef get_perturbation_metric(x_original, x_adv, y_pred, y_pred_adv, ord=2):\n\n idxs = (np.argmax(y_pred_adv, axis=1) != np.argmax(y_pred, axis=1))\n\n if np.sum(idxs) == 0.0:\n return 0\n\n perts_norm = la.norm((x_adv - x_original).reshape(x_original.shape[0], -1), ord, axis=1)\n perts_norm = perts_norm[idxs]\n\n return np.mean(perts_norm / la.norm(x_original[idxs].reshape(np.sum(idxs), -1), ord, axis=1))\n\n\n# This computes the change in confidence for all images in the test set\ndef get_confidence_metric(y_pred, y_pred_adv):\n\n y_classidx = np.argmax(y_pred, axis=1)\n y_classconf = y_pred[np.arange(y_pred.shape[0]), y_classidx]\n\n y_adv_classidx = np.argmax(y_pred_adv, axis=1)\n y_adv_classconf = y_pred_adv[np.arange(y_pred_adv.shape[0]), y_adv_classidx]\n\n idxs = (y_classidx == y_adv_classidx)\n\n if np.sum(idxs) == 0.0:\n return 0\n\n idxnonzero = y_classconf != 0\n idxs = idxs & idxnonzero\n\n return np.mean((y_classconf[idxs] - y_adv_classconf[idxs]) / y_classconf[idxs])", "_____no_output_____" ] ], [ [ "Display the robustness check metrics\n\n1. Model accuracy on test data\n2. Model robustness on adversarial samples\n3. Reduction in confidence\n4. 
Perturbation metric", "_____no_output_____" ] ], [ [ "result = get_metrics(model, x_original, x_adv_samples, y)\n\nprint(json.dumps(result, indent=4, sort_keys=False))", "{\n \"model accuracy on test data:\": 86.06,\n \"model accuracy on adversarial samples\": 17.09,\n \"reduction in confidence\": 42.64292120933533,\n \"average perturbation\": 44.387608766555786\n}\n" ] ], [ [ "Compare original images with adversarial samples and test model predictions", "_____no_output_____" ] ], [ [ "# https://keras.io/datasets/#fashion-mnist-database-of-fashion-articles\n\nfashion_labels = {\n 0: \"T-shirt/top\",\n 1: \"Trouser\",\n 2: \"Pullover\",\n 3: \"Dress\",\n 4: \"Coat\",\n 5: \"Sandal\",\n 6: \"Shirt\",\n 7: \"Sneaker\",\n 8: \"Bag\",\n 9: \"Ankle boot\"\n}\n\ndef get_label(y):\n if \"fashion\" in dataset_filename:\n return fashion_labels[y]\n else:\n return \"Predict: %i\" % y", "_____no_output_____" ], [ "# x_adv_samples = np.load(\"adv_samples_from_cos.npz\")\n# x_original = x_adv_samples[\"x_original\"]\n# x_adversarial = x_adv_samples[\"x_adversarial\"]\n# y = x_adv_samples[\"y\"]\n\nx_adversarial = x_adv_samples\n\nx_orig = ((x_original ) * 255).astype('int')[:, :, :, 0]\nx_adv = ((x_adversarial) * 255).astype('int')[:, :, :, 0]\n\ny_pred_orig = model.predict(x_original, verbose=0)\ny_pred_adv = model.predict(x_adversarial, verbose=0)\n\nfig = plt.figure(figsize=(15, 3))\ncols = 10\nrows = 2\nimages = list(x_orig[:cols]) + list(x_adv[:cols])\npreds = list(y_pred_orig[:cols]) + list(y_pred_adv[:cols])\nlabels = list(y[:cols]) + list(y[:cols])\n\nfor i in range(0, len(images)):\n ax = fig.add_subplot(rows, cols, i+1)\n y_pred = np.argmax(preds[i])\n y_orig = np.argmax(labels[i])\n ax.set_xlabel(get_label(y_pred),\n color = \"green\" if y_pred == y_orig else \"red\")\n ax.tick_params(axis='both', which='both',\n bottom=False, top=False,\n right=False, left=False,\n labelbottom=False, labelleft=False)\n plt.imshow(images[i], cmap='gray')\n\nplt.show()", "_____no_output_____" ] ], [ [ "## 5. Summary and Next Steps <a id=\"summary\"></a>\n\nThis notebook only looked at one adversarial robustness technique (FGM). The *ART* library contains many more attacks, metrics and defenses to help you understand and improve your model's robustness. You can use this notebook as a template to experiment with all aspects of *ART*. Find more state-of-the-art methods for attacking and defending classifiers here:\n\nhttps://github.com/IBM/adversarial-robustness-toolbox", "_____no_output_____" ], [ "## Acknowledgements\n\nSpecial thanks to [Anupama-Murthi](https://github.ibm.com/Anupama-Murthi) and [Vijay Arya](https://github.ibm.com/vijay-arya) who created the original notebook which we modified here to showcase how to use *ART* with *FfDL*. If you would like to try *[Watson Machine Learning (WML) Service](https://console.bluemix.net/catalog/services/machine-learning)* with *ART* check out Anupama and Vijay's notebook here:\n\n[https://github.ibm.com/robust-dlaas/ART-in-WML/Use ART to check robustness of deep learning models.ipynb](https://github.ibm.com/robust-dlaas/ART-in-WML/blob/master/Use%20ART%20to%20check%20robustness%20of%20deep%20learning%20models.ipynb)", "_____no_output_____" ], [ "Copyright © 2017, 2018 IBM. This notebook and its source code are released under the terms of the MIT License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
ecd28719819fd0cdb2b8dde127cf5c4a6b4c98a8
104,439
ipynb
Jupyter Notebook
examples/example_01.ipynb
jtguibas/arc
e9df473ce5051f2b9f3981ef219b6a02076bdb42
[ "MIT" ]
null
null
null
examples/example_01.ipynb
jtguibas/arc
e9df473ce5051f2b9f3981ef219b6a02076bdb42
[ "MIT" ]
null
null
null
examples/example_01.ipynb
jtguibas/arc
e9df473ce5051f2b9f3981ef219b6a02076bdb42
[ "MIT" ]
null
null
null
37.77179
440
0.467488
[ [ [ "# Introduction to Adaptive and Reliable Classification\n\nThis notebook provides an introductory usage example of the Adaptive and Reliable Classification (ARC) Python package.\n\nAccompanying paper:\n - \"Classification with Valid and Adaptive Coverage\", Y. Romano, M. Sesia, E. Candès, 2020.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\nimport skgarden\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'svg'\n\nimport sys\nsys.path.insert(0, '..')\nimport arc\nfrom arc import models\nfrom arc import methods\nfrom arc import black_boxes\nfrom arc import others\nfrom arc import coverage", "/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:30: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n method='lar', copy_X=True, eps=np.finfo(np.float).eps,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:167: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n method='lar', copy_X=True, eps=np.finfo(np.float).eps,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:284: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_Gram=True, verbose=0,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:862: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1101: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, fit_path=True,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1127: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, positive=False):\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1362: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1602: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n max_n_alphas=1000, n_jobs=None, eps=np.finfo(np.float).eps,\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/linear_model/least_angle.py:1738: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n eps=np.finfo(np.float).eps, copy_X=True, positive=False):\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/decomposition/online_lda.py:29: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n EPS = np.finfo(np.float).eps\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/ensemble/gradient_boosting.py:32: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.bool_` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n from ._gradient_boosting import predict_stages\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/ensemble/gradient_boosting.py:32: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\nDeprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n from ._gradient_boosting import predict_stages\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/externals/joblib/__init__.py:15: DeprecationWarning: sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. Please import this functionality directly from joblib, which can be installed with: pip install joblib. If this warning is raised when loading pickled models, you may need to re-serialize those models with scikit-learn 0.21+.\n warnings.warn(msg, category=DeprecationWarning)\n/future/u/alexder/anaconda3/envs/CCAPS/lib/python3.7/site-packages/sklearn/externals/six.py:31: DeprecationWarning: The module is deprecated in version 0.21 and will be removed in version 0.23 since we've dropped support for Python 2.7. Please rely on the official version of six (https://pypi.org/project/six/).\n \"(https://pypi.org/project/six/).\", DeprecationWarning)\n" ], [ "np.random.seed(2020) # Make this notebook reproducible", "_____no_output_____" ] ], [ [ "## Data\n\nWe generate data from a toy model with 10 explanatory variables and a qualitative label for each sample, which is designed to mimic \"heteroschedasticity\" in a classification setting.\nMore precisely, the first variable controls the \"noise level\" in the label: small values of $X_0$ mean that all labels are more or less equally likely; large values of $X_0$ mean that one label is much more likely than the others (which one is determined by the other features).\nTo clarify, we visualize below the distribution of the true label probabilities (for one value of the label) as a function of $X_0$, which here can take only two possible values for simplicity. 
", "_____no_output_____" ] ], [ [ "p = 10 # Number of features\nK = 10 # Number of possible labels\ndata_model = models.Model_Ex1(K,p) # Data generating model\n\nn = 10000 # Number of data samples\nX = data_model.sample_X(n) # Generate the data features\nY = data_model.sample_Y(X) # Generate the data labels conditional on the features\n\nn_test = 2000 # Number of test samples\nX_test = data_model.sample_X(n_test) # Generate independent test data\nY_test = data_model.sample_Y(X_test)", "_____no_output_____" ] ], [ [ "## Evaluating prediction sets\n\nWe will evaluate prediction sets in terms of marginal coverage, estimated worst-slice conditional coverage, size, and size conditional on coverage.", "_____no_output_____" ] ], [ [ "def evaluate_predictions(S, X, y):\n marg_coverage = np.mean([y[i] in S[i] for i in range(len(y))])\n wsc_coverage = coverage.wsc_unbiased(X, y, S)\n length = np.mean([len(S[i]) for i in range(len(y))])\n idx_cover = np.where([y[i] in S[i] for i in range(len(y))])[0]\n length_cover = np.mean([len(S[i]) for i in idx_cover])\n print('Marginal coverage: {:2.3%}'.format(marg_coverage))\n print('WS conditional coverage: {:2.3%}'.format(wsc_coverage))\n print('Average size: {:2.3f}'.format(length))\n print('Average size | coverage: {:2.3f}'.format(length_cover))\n \n k = np.amax(y) + 1\n cc_coverage = np.zeros(k)\n cc_size = np.zeros(k)\n for i in range(k):\n idxs = np.argwhere(y == i)[:, 0]\n for idx in idxs:\n if y[idx] in S[idx]:\n cc_coverage[i] += 1\n cc_size[i] += len(S[idx])\n cc_coverage[i] /= len(idxs)\n cc_size[i] /= len(idxs)\n print(\"CC Coverage\")\n print(cc_coverage)\n print(\"AVG CC Coverage\")\n print(cc_coverage.mean())\n print(\"CC Size\")\n print(cc_size)\n ", "_____no_output_____" ] ], [ [ "## Oracle predictions\n\nSince in this example we know the true data generating model, we can compute optimal prediction sets with an oracle.", "_____no_output_____" ] ], [ [ "alpha = 0.1 # Nominal coverage: 1-alpha \noracle = others.Oracle(data_model, alpha) # Classification oracle for this data model\nS_oracle = oracle.predict(X_test) # Oracle prediction sets", "_____no_output_____" ], [ "evaluate_predictions(S_oracle, X_test, Y_test) # Evaluate prediction sets", "Marginal coverage: 91.100%\nWS conditional coverage: 93.056%\nAverage size: 1.569\nAverage size | coverage: 1.637\nCC Coverage\n[0.92110092 1. 0.89494163 0.66666667 0.75757576 0.89181287\n 0.96551724 0.90909091 0.97560976 0.93103448]\nAVG CC Coverage\n0.8913350230738339\nCC Size\n[1.19266055 3.11428571 1.6459144 3. 
3.18181818 1.47953216\n 2.84482759 3.09090909 2.63414634 3.03448276]\n" ] ], [ [ "Note that the oracle prediction sets have inhomogeneous sizes; some samples are more difficult to classify than others.", "_____no_output_____" ] ], [ [ "S_oracle_sizes = np.array([ len(s) for s in S_oracle ])\nplt.hist(S_oracle_sizes, bins=100); plt.show()", "_____no_output_____" ] ], [ [ "It is easy to verify that the value of $X_0$ controls the difficulty of the classification problem.", "_____no_output_____" ] ], [ [ "ax = sns.boxplot(x=X_test[:,0], y=S_oracle_sizes)\nax.set(xlabel='X_0', ylabel='Size of oracle prediction sets')\nplt.show()", "_____no_output_____" ] ], [ [ "## ARC methods\n\nFirst, we need to choose a black-box machine learning classifier to estimate the class probabilities, since the oracle normally does not exist because the true data-generating model is unknown.\nSome wrappers for commonly used classifiers are provided in this package; others can be easily incorporated in the same style For example, we can use a support vector classifier (SVC), as implemented in [sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html). ", "_____no_output_____" ] ], [ [ "black_box = black_boxes.SVC(clip_proba_factor = 1e-5)\nmethod_sc = methods.SplitConformal(X, Y, black_box, alpha)\nS_sc = method_sc.predict(X_test)\nevaluate_predictions(S_sc, X_test, Y_test)", "Marginal coverage: 90.900%\nWS conditional coverage: 91.489%\nAverage size: 1.654\nAverage size | coverage: 1.728\nCC Coverage\n[0.92110092 0.97142857 0.86381323 0.73333333 0.75757576 0.9005848\n 0.94827586 0.95454545 0.98780488 0.86206897]\nAVG CC Coverage\n0.890053176484292\nCC Size\n[1.19908257 3.51428571 1.73929961 3.6 3.51515152 1.49122807\n 3.39655172 3.56818182 2.96341463 3.44827586]\n" ] ], [ [ "### ARC with split-conformal calibration", "_____no_output_____" ] ], [ [ "black_box = black_boxes.SVC(clip_proba_factor = 1e-5)\nmethod_sc = methods.SplitConformalCC(X, Y, black_box, alpha, gamma=1)\nS_sc = method_sc.predict(X_test)\nevaluate_predictions(S_sc, X_test, Y_test)", "_____no_output_____" ] ], [ [ "### ARC with CV+ calibration", "_____no_output_____" ] ], [ [ "method_cv = methods.CVPlus(X, Y, black_box, alpha, n_folds=10)\nS_cv = method_cv.predict(X_test)", "_____no_output_____" ], [ "evaluate_predictions(S_cv, X_test, Y_test)", "_____no_output_____" ] ], [ [ "### ARC with Jackknife+ calibration", "_____no_output_____" ] ], [ [ "method_jk = methods.JackknifePlus(X, Y, black_box, alpha)\nS_jk = method_jk.predict(X_test)", "_____no_output_____" ], [ "evaluate_predictions(S_jk, X_test, Y_test)", "_____no_output_____" ] ], [ [ "### ARC with oracle black-box", "_____no_output_____" ], [ "As an example, we can also apply ARC with an oracle black-box that knows the true conditional probabilities (in combination with any choice of calibration method).", "_____no_output_____" ] ], [ [ "oracle_black_box = black_boxes.Oracle(data_model)\nmethod_oracle_cv = methods.CVPlus(X, Y, oracle_black_box, alpha)\nS_oracle_cv = method_oracle_cv.predict(X_test)", "_____no_output_____" ], [ "evaluate_predictions(S_oracle_cv, X_test, Y_test)", "_____no_output_____" ] ], [ [ "## Other conformal classification methods\n\nFinally, we have also implemented some benchmark alternative methods.", "_____no_output_____" ], [ "### Homogeneous split-conformal classification", "_____no_output_____" ] ], [ [ "method_oracle_hcc = others.SplitConformalHomogeneous(X, Y, oracle_black_box, alpha)\nS_oracle_hcc = 
method_oracle_hcc.predict(X_test)\nevaluate_predictions(S_oracle_hcc, X_test, Y_test)", "_____no_output_____" ] ], [ [ "### Conformal quantile classification", "_____no_output_____" ] ], [ [ "method_oracle_cqc = others.CQC(X, Y, oracle_black_box, alpha)\nS_oracle_cqc = method_oracle_cqc.predict(X_test)\nevaluate_predictions(S_oracle_cqc, X_test, Y_test)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd29060bc4e72495fdb37a19c76cf44e1b812ce
155,404
ipynb
Jupyter Notebook
courses/dl1/planet-AH.ipynb
holmesal/fastai
cbd2a0c91d01842fb2e780072aed510b1325d1e5
[ "Apache-2.0" ]
null
null
null
courses/dl1/planet-AH.ipynb
holmesal/fastai
cbd2a0c91d01842fb2e780072aed510b1325d1e5
[ "Apache-2.0" ]
null
null
null
courses/dl1/planet-AH.ipynb
holmesal/fastai
cbd2a0c91d01842fb2e780072aed510b1325d1e5
[ "Apache-2.0" ]
null
null
null
222.961263
117,532
0.910273
[ [ [ "# Planet Labs Kaggle Competition\n\nhttps://www.kaggle.com/c/planet-understanding-the-amazon-from-space/submit", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "from fastai.imports import *\nfrom fastai.torch_imports import *\nfrom fastai.transforms import *\nfrom fastai.conv_learner import *\nfrom fastai.model import *\nfrom fastai.dataset import *\nfrom fastai.sgdr import *\nfrom fastai.plots import *\nfrom planet import *", "_____no_output_____" ], [ "torch.cuda.set_device(0)", "_____no_output_____" ], [ "PATH = Path(\"data/planet\")\nsz = 224\narch = resnet34\nbs = 58", "_____no_output_____" ], [ "label_csv = PATH/\"train_v2.csv\"\nn = len(list(open(label_csv))) - 1\nvalidation_idxs = get_cv_idxs(n) # default - save 20% for validation", "_____no_output_____" ], [ "!ls {PATH}", "sample_submission_v2.csv test-jpg-additional\t train-jpg\r\ntest-jpg\t\t test_v2_file_mapping.csv train_v2.csv\r\n" ], [ "label_df = pd.read_csv(label_csv)", "_____no_output_____" ], [ "label_df.head()", "_____no_output_____" ], [ "# how to group on individual tags instead of combination?\n# label_df.pivot_table(index='tags', aggfunc=len).sort_values('image_name', ascending=False)", "_____no_output_____" ] ], [ [ "# Data augmentation", "_____no_output_____" ] ], [ [ "tfms = tfms_from_model(arch, sz, aug_tfms=transforms_top_down, max_zoom=1.1)\ndata = ImageClassifierData.from_csv(PATH, \n folder='train-jpg', \n test_name='test-jpg',\n val_idxs=validation_idxs,\n csv_fname=label_csv, \n bs=bs, \n tfms=tfms, \n suffix='.jpg')", "_____no_output_____" ], [ "data.classes", "_____no_output_____" ], [ "i = 5\nfn = PATH/data.trn_ds.fnames[i]; print(fn)\ncl = data.trn_ds.get_y(i); \nprint(np.array(data.classes)[np.where(cl)[0]])", "data/planet/train-jpg/train_8.jpg\n['agriculture' 'clear' 'cultivation' 'primary']\n" ], [ "img = PIL.Image.open(fn).convert('RGB'); img", "_____no_output_____" ], [ "img.size", "_____no_output_____" ] ], [ [ "# Precompute", "_____no_output_____" ] ], [ [ "learn = ConvLearner.pretrained(arch, data, metrics=[f2], precompute=True)", "100%|██████████| 559/559 [02:20<00:00, 3.98it/s]\n100%|██████████| 140/140 [00:35<00:00, 4.74it/s]\n100%|██████████| 702/702 [02:46<00:00, 4.21it/s]\n" ], [ "learn.fit(0.01, 3)", "_____no_output_____" ], [ "lrf = learn.lr_find()", "_____no_output_____" ], [ "lr = 0.01", "_____no_output_____" ], [ "learn.sched.plot()", "_____no_output_____" ], [ "learn.fit(lr, 3, cycle_len=1)", "_____no_output_____" ], [ "learn.precompute = False", "_____no_output_____" ], [ "learn.fit(lr, 3, cycle_len=1)", "_____no_output_____" ], [ "learn.sched.plot_lr()", "_____no_output_____" ] ], [ [ "# Checkpoint\n\nThese increases are getting slower and slower, so this seems like a good spot to save and load", "_____no_output_____" ] ], [ [ "learn.save('planet_224_lastlayer')\nlearn.load('planet_224_lastlayer')", "_____no_output_____" ] ], [ [ "# Deeper training", "_____no_output_____" ] ], [ [ "learn.unfreeze()", "_____no_output_____" ], [ "lrs = np.array([lr/100, lr/10, lr])", "_____no_output_____" ], [ "learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)", "_____no_output_____" ], [ "learn.save('planet_224_deeper')\nlearn.load('planet_224_deeper')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ecd290af13220914924f33bd92f1c883ff33f67b
8,343
ipynb
Jupyter Notebook
notebooks/05-Pandas_Plotting_and_SQL.ipynb
carlosfaham/jupyter-best-practices
740e90fb16f224e59c3a0aae301e0513798fee50
[ "MIT" ]
1
2021-01-27T16:22:44.000Z
2021-01-27T16:22:44.000Z
notebooks/05-Pandas_Plotting_and_SQL.ipynb
pierrewinter/jupyter-best-practices
740e90fb16f224e59c3a0aae301e0513798fee50
[ "MIT" ]
null
null
null
notebooks/05-Pandas_Plotting_and_SQL.ipynb
pierrewinter/jupyter-best-practices
740e90fb16f224e59c3a0aae301e0513798fee50
[ "MIT" ]
1
2020-11-24T03:03:18.000Z
2020-11-24T03:03:18.000Z
22.307487
313
0.553038
[ [ [ "# Pandas -- Love and Hate\n\nIf you've never used `pandas` before, it's amazing. It will also frustrate you to tears. \n\nHigh level tip -- try to represent data in the proper format: floats as floats; ints as ints; etc. Especially if you have dates, or timestamps, or datetimestamps, keep them in that format. The temptation to operate on them like strings may be overwhelming, but resist! In the long run you might prevail. :\\", "_____no_output_____" ] ], [ [ "# %install_ext http://raw.github.com/jrjohansson/version_information/master/version_information.py\n%load_ext version_information\n%reload_ext version_information\n%version_information numpy, scipy, matplotlib, pandas", "_____no_output_____" ], [ "# Doing this in python 2.7 code allows for most of the code to be python 3 portable.\n# But you have to write your print functions: print(\"Hello world.\")\n# from __future__ import division, absolute_import, print_function, unicode_literals\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('talk')\nsns.set_style('darkgrid') \nplt.rcParams['figure.figsize'] = 12, 8 # plotsize \n\n\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ] ], [ [ "### Note\n\nUsing cleaned data from [Data Cleaning](Data%20Cleaning.ipynb) Notebook. See Notebook for details.", "_____no_output_____" ] ], [ [ "dframe = pd.read_csv(\"../data/coal_prod_cleaned.csv\")", "_____no_output_____" ] ], [ [ "## Notebook Extensions -- qgrid", "_____no_output_____" ] ], [ [ "# Check out http://nbviewer.ipython.org/github/quantopian/qgrid/blob/master/qgrid_demo.ipynb for more (including demo)\nimport qgrid # Best practices is to put imports at the top of the Notebook.\nqgrid.nbinstall(overwrite=True)", "_____no_output_____" ], [ "dframe.head()", "_____no_output_____" ], [ "qgrid.show_grid(dframe[['MSHA_ID', 'Year', 'Mine_Name', 'Mine_State', 'Mine_County']], remote_js=True)", "_____no_output_____" ], [ "plt.scatter(dframe.Average_Employees, dframe.Labor_Hours)\nplt.xlabel(\"Number of Employees\")\nplt.ylabel(\"Total Hours Worked\")", "_____no_output_____" ], [ "plt.scatter(dframe.Labor_Hours, dframe.Production_short_tons, )\nplt.xlabel(\"Total Hours Worked\")\nplt.ylabel(\"Total Amount Produced\")", "_____no_output_____" ], [ "colors = sns.color_palette(n_colors=11)", "_____no_output_____" ], [ "color_dict = {key: value for key, value in zip(sorted(dframe.Year.unique()), colors)}", "_____no_output_____" ], [ "color_dict", "_____no_output_____" ], [ "for year in sorted(dframe.Year.unique()[[0,2, 5, -1]]):\n plt.scatter(dframe[dframe.Year == year].Labor_Hours,\n dframe[dframe.Year == year].Production_short_tons, \n c=color_dict[year],\n s=50,\n label=year,\n )\nplt.xlabel(\"Total Hours Worked\")\nplt.ylabel(\"Total Amount Produced\")\nplt.legend()\nplt.savefig(\"ex1.png\")", "_____no_output_____" ], [ "# facet grid", "_____no_output_____" ], [ "for col in dframe.columns:\n print col", "_____no_output_____" ] ], [ [ "# SQL connections\n\nYou will often use and interact with databases of some kind or another. Having the queries you ran to create the dataframes in a Notebook is great for future reference. There are many python/IPython connections to databases of all kinds: sqlite, mysql, impala, etc. 
", "_____no_output_____" ] ], [ [ "# An updated implementation from Christian Perez at SVDS https://github.com/cfperez/ipython-sql\n%load_ext sql\n%reload_ext sql", "_____no_output_____" ], [ "coalproduction = dframe.copy()", "_____no_output_____" ], [ "%config SqlMagic.autopandas=True", "_____no_output_____" ], [ "%%sql sqlite://\nPERSIST coalproduction", "_____no_output_____" ], [ "%%sql sqlite://\nSELECT DISTINCT company_type FROM coalproduction \nWHERE msha_id = 5000030", "_____no_output_____" ], [ "dbtest = %sql SELECT * FROM coalproduction", "_____no_output_____" ], [ "type(dbtest)", "_____no_output_____" ], [ "dbtest.head()", "_____no_output_____" ] ], [ [ "## Use Cases for the Jupyter Notebook\n\n - Use Case 1: Teaching (Some basics to start!)\n - Use Case 2: Exploratory Data Analysis\n - Use Case 3: Running remotely (server)\n - Use Case 4: Sharing results\n - Use Case 5: Presentations\n", "_____no_output_____" ] ], [ [ "FileLink(\"Overview.ipynb\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecd2945dcb5b8f59c8cf5be1c3d1654d35084d32
49,266
ipynb
Jupyter Notebook
Case 1.ipynb
tuomasst/neural-networks-for-health-tech-apps
7604b2f3d0bf7d3f0d7d401cdb7cdf23448558ff
[ "MIT" ]
null
null
null
Case 1.ipynb
tuomasst/neural-networks-for-health-tech-apps
7604b2f3d0bf7d3f0d7d401cdb7cdf23448558ff
[ "MIT" ]
null
null
null
Case 1.ipynb
tuomasst/neural-networks-for-health-tech-apps
7604b2f3d0bf7d3f0d7d401cdb7cdf23448558ff
[ "MIT" ]
null
null
null
84.649485
14,992
0.735355
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Case-X.-Template\" data-toc-modified-id=\"Case-X.-Template-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Case X. Template</a></span></li><li><span><a href=\"#Background\" data-toc-modified-id=\"Background-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>Background</a></span></li><li><span><a href=\"#Data\" data-toc-modified-id=\"Data-3\"><span class=\"toc-item-num\">3&nbsp;&nbsp;</span>Data</a></span></li><li><span><a href=\"#Modes-and-training\" data-toc-modified-id=\"Modes-and-training-4\"><span class=\"toc-item-num\">4&nbsp;&nbsp;</span>Modes and training</a></span></li><li><span><a href=\"#Results-and-Discussion\" data-toc-modified-id=\"Results-and-Discussion-5\"><span class=\"toc-item-num\">5&nbsp;&nbsp;</span>Results and Discussion</a></span></li><li><span><a href=\"#Conclusions\" data-toc-modified-id=\"Conclusions-6\"><span class=\"toc-item-num\">6&nbsp;&nbsp;</span>Conclusions</a></span></li></ul></div>", "_____no_output_____" ], [ "# Case 1. Template\nTuomas Tilli<br>\nLast edited: 02.02.2020<br>\nNeural Networks for Health Technology Applications<br>\n[Helsinki Metropolia University of Applied Sciences](http://www.metropolia.fi/en/)<br>", "_____no_output_____" ], [ "# Background", "_____no_output_____" ], [ "The aim of this Notebook is to train a neural network based on known data to predict heart disease incidence. The dataset used here was collected from Cleveland Clinic Foundation and is called processed.cleveland.data.", "_____no_output_____" ], [ "# Data", "_____no_output_____" ] ], [ [ "%pylab inline\nimport pandas as pd\n\nurl = 'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data'\ndf = pd.read_csv(url,\n index_col = None,\n header = None,\n na_values = '?')\ndf.describe()", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "The data contained 14 raw attributes and the amount of instances was 303. 6 values are missing and they are distinguished with value \"?\". The last attribute is the predicted attribute and represents the diagnosis of disease. This class variable had values of 0, 1, 2, 3 or 4. Value 0 represents a healthy sample and a value of 1 or higher represents >50% narrowing of arterial diameter.\n\nAttribute Information:\n 1. (age) \n 2. (sex) \n 3. (cp) \n 4. (trestbps) \n 5. (chol) \n 6. (fbs) \n 7. (restecg) \n 8. (thalach) \n 9. (exang) \n 10. (oldpeak) \n 11. (slope) \n 12. (ca) \n 13. (thal) \n 14. (num) (the predicted attribute)", "_____no_output_____" ], [ "The missing values were filled with values of 0. Data was split into input variables (first 12 columns) and predicted output variables (column 13). All output values of 1 or higher were edited into value 1 to represent class \"not healthy\" while 0 represented class \"healthy\".\n\nThe data was normalized and split for training (60%), validation (20%) and testing (20%). 
", "_____no_output_____" ] ], [ [ "df = df.fillna(0)\n\ndata = df.loc[:, 0:12]\ntargets = 1.0*(df.loc[:, 13] > 0)\n\ndata = np.asarray(data)\ntargets = np.asarray(targets)\n\nfrom tensorflow.keras.utils import to_categorical\ntargets = to_categorical(targets)\n\n# Split data into training data and test data\ntrain_data = data[:241]\ntrain_targets = targets[:241]\ntest_data = data[241:]\ntest_targets = targets[241:]\n\n# Normalize training data\nmean = train_data.mean(axis=0)\ntrain_data -= mean\nstd = train_data.std(axis=0)\ntrain_data /= std\n\n# Normalize test data\ntest_data -= mean\ntest_data /= std\n\n# Split training data into training data and validation data\npartial_train_data = train_data[:181]\npartial_train_targets = train_targets[:181]\nval_data = train_data[181:]\nval_targets = train_targets[181:]", "_____no_output_____" ] ], [ [ "# Models and training", "_____no_output_____" ], [ "ReLU activation function was used for the hidden layers of every model tested. The chosen activation function\nfor the final output layer was sigmoid. The chosen loss function was binary_crossentropy and rmsprop was used as the optimizer. Plotting was used to evaluate model performance regarding loss and accuracy.<br>\n\nThe following models were tried out:<br><br>\nModel 1: 100 epochs, 1 hidden layer (64 neurons), batch_size 16<br>\nModel 2: 40 epochs, 1 hidden layer (64 neurons), batch_size 8<br>\nModel 3: 40 epochs, 1 hidden layer (64 neurons), batch_size 16<br>\nModel 4: 40 epochs, 1 hidden layer (64 neurons), batch_size 32<br>\n<br>\nModel 5: 40 epochs, 1 hidden layer (32 neurons), batch_size 32<br>\nModel 6: 40 epochs, 1 hidden layer (64 neurons), batch_size 48<br>\n<br>\nModel 7: 40 epochs, 2 hidden layers (64+64 neurons), batch_size 8<br>\nModel 8: 40 epochs, 2 hidden layers (64+64 neurons), batch_size 16<br>\nModel 9: 40 epochs, 2 hidden layers (64+64 neurons), batch_size 32<br>\nModel 10: 40 epochs, 3 hidden layers (64+64+32 neurons), batch_size 16<br>\n<br>\nModel 11: 40 epochs, 1 hidden layer (64 neurons), batch_size 32 [with regularizer l2 (0.05)]<br>\nModel 12: 40 epochs, 1 hidden layer (64 neurons), batch_size 32 [with dropout of 20%]<br>", "_____no_output_____" ] ], [ [ "# Model\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras import regularizers\nimport matplotlib.pyplot as plt\n\nmodel = Sequential()\nmodel.add(Dense(64, activation='relu', input_shape=(train_data[0].shape)))\n\n# Regularization\n\n#model.add(Dense(64, kernel_regularizer=regularizers.l2(0.05), activation='relu', input_shape=(train_data[0].shape)))\n\n# Dropout\n\n#model.add(Dense(64, activation='relu', input_shape=(train_data[0].shape)))\n#model.add(Dropout(0.2))\n\n\nmodel.add(Dense(2, activation='sigmoid'))\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n\nhistory = model.fit(partial_train_data, partial_train_targets,\n epochs=40, batch_size=32, verbose=0,\n validation_data = (val_data, val_targets))", "_____no_output_____" ], [ "# Plot the loss score and accuracy for both training and validation sets\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nmae = history.history['accuracy']\nval_mae = history.history['val_accuracy']\n\ntime = range(1,len(loss)+1)\n\nplt.plot(time, loss, 'b-')\nplt.plot(time, val_loss, 'r-')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.show()\n\n# Take out first 10 to get a nicer graph\nshort_acc = 
mae[10:]\nshort_val_acc = val_mae[10:]\n\nplt.plot(range(1,len(short_acc)+1), short_acc, 'b-')\nplt.plot(range(1,len(short_val_acc)+1), short_val_acc, 'r-')\nplt.xlabel('Epochs')\nplt.ylabel('Acc')\nplt.show()\n\n# LOSS/ACC = BLUE GRAPH\n# VALIDATION LOSS/ACC = RED GRAPH", "_____no_output_____" ], [ "# Final evaluation with test data.\n\nmodel = Sequential()\nmodel.add(Dense(64, activation='relu', input_shape=(train_data[0].shape)))\n\nmodel.add(Dense(2, activation='sigmoid'))\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])\n\nmodel.fit(train_data, train_targets, epochs=20, batch_size=32, verbose=0)\n\ntest_acc_score, test_acc_score = model.evaluate(test_data, test_targets)", "\r62/1 [====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================] - 0s 2ms/sample - loss: 0.4971 - accuracy: 0.7581\n" ] ], [ [ "# Results and Discussion", "_____no_output_____" ], [ "The final model consisted of only one layer with 64 neurons, a batch size of 32 and an epoch number of 20. This model was sufficient with more complex models providing no further increases in performance.\n\nAfter 20 epochs the overfitting of the model became apparent. While training accuracy kept increasing the validation accuracy started to go down. Regularization and dropout were briefly tested and were found to reduce overfitting but in the end had no meaningful impact on accuracy.\n\nResults from evaluation with test data:\nloss: 0.4971 - accuracy: 0.7581", "_____no_output_____" ], [ "# Conclusions", "_____no_output_____" ], [ "It was possible to build a neural network to predict the presence of heart disease using a series of patient data. The accuracy of the model was 75 % when evaluated with test data. 
The model used was relatively simple, as more complex setups proved no more accurate. To achieve higher prediction accuracy, and to benefit from a more sophisticated model, a much larger dataset would be needed.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
ecd29ad14a1a791cc9e538463125837e53364f07
7,809
ipynb
Jupyter Notebook
1_5_CNN_Layers/2. Pool Visualization.ipynb
jay-thakur/CV_Projects
3d36d9bd371400ef3904b754fbe3a30a14894320
[ "MIT" ]
null
null
null
1_5_CNN_Layers/2. Pool Visualization.ipynb
jay-thakur/CV_Projects
3d36d9bd371400ef3904b754fbe3a30a14894320
[ "MIT" ]
4
2021-06-08T22:50:39.000Z
2022-03-12T00:31:24.000Z
1_5_CNN_Layers/2. Pool Visualization.ipynb
jay-thakur/CV_Projects
3d36d9bd371400ef3904b754fbe3a30a14894320
[ "MIT" ]
null
null
null
31.236
314
0.54847
[ [ [ "## Pooling Layer\n\nIn this notebook, we add and visualize the output of a maxpooling layer in a CNN.", "_____no_output_____" ], [ "### Import the image", "_____no_output_____" ] ], [ [ "import cv2\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# TODO: Feel free to try out your own images here by changing img_path\n# to a file path to another image on your computer!\nimg_path = 'images/udacity_sdc.png'\n\n# load color image \nbgr_img = cv2.imread(img_path)\n# convert to grayscale\ngray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)\n\n# normalize, rescale entries to lie in [0,1]\ngray_img = gray_img.astype(\"float32\")/255\n\n# plot image\nplt.imshow(gray_img, cmap='gray')\nplt.show()", "_____no_output_____" ] ], [ [ "### Define and visualize the filters", "_____no_output_____" ] ], [ [ "import numpy as np\n\n## TODO: Feel free to modify the numbers here, to try out another filter!\nfilter_vals = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])\n\nprint('Filter shape: ', filter_vals.shape)\n", "_____no_output_____" ], [ "# Defining four different filters, \n# all of which are linear combinations of the `filter_vals` defined above\n\n# define four filters\nfilter_1 = filter_vals\nfilter_2 = -filter_1\nfilter_3 = filter_1.T\nfilter_4 = -filter_3\nfilters = np.array([filter_1, filter_2, filter_3, filter_4])\n\n# For an example, print out the values of filter 1\nprint('Filter 1: \\n', filter_1)", "_____no_output_____" ] ], [ [ "### Define convolutional and pooling layers\n\nInitialize a convolutional layer so that it contains all your created filters. Then add a maxpooling layer, [documented here](http://pytorch.org/docs/master/_modules/torch/nn/modules/pooling.html), with a kernel size of (4x4) so you can really see that the image resolution has been reduced after this step!", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n \n# define a neural network with a convolutional layer with four filters\n# AND a pooling layer of size (4, 4)\nclass Net(nn.Module):\n \n def __init__(self, weight):\n super(Net, self).__init__()\n # initializes the weights of the convolutional layer to be the weights of the 4 defined filters\n k_height, k_width = weight.shape[2:]\n # assumes there are 4 grayscale filters\n self.conv = nn.Conv2d(1, 4, kernel_size=(k_height, k_width), bias=False)\n self.conv.weight = torch.nn.Parameter(weight)\n # define a pooling layer\n self.pool = nn.MaxPool2d(4, 4)\n\n def forward(self, x):\n # calculates the output of a convolutional layer\n # pre- and post-activation\n conv_x = self.conv(x)\n activated_x = F.relu(conv_x)\n \n # applies pooling layer\n pooled_x = self.pool(activated_x)\n \n # returns all layers\n return conv_x, activated_x, pooled_x\n \n# instantiate the model and set the weights\nweight = torch.from_numpy(filters).unsqueeze(1).type(torch.FloatTensor)\nmodel = Net(weight)\n\n# print out the layer in the network\nprint(model)", "_____no_output_____" ] ], [ [ "### Visualize the output of each filter\n\nFirst, we'll define a helper function, `viz_layer` that takes in a specific layer and number of filters (optional argument), and displays the output of that layer once an image has been passed through.", "_____no_output_____" ] ], [ [ "# helper function for visualizing the output of a given layer\n# default number of filters is 4\ndef viz_layer(layer, n_filters= 4):\n fig = plt.figure(figsize=(20, 20))\n \n for i in range(n_filters):\n ax = fig.add_subplot(1, n_filters, i+1, 
xticks=[], yticks=[])\n # grab layer outputs\n ax.imshow(np.squeeze(layer[0,i].data.numpy()), cmap='gray')\n ax.set_title('Output %s' % str(i+1))", "_____no_output_____" ] ], [ [ "Let's look at the output of a convolutional layer after a ReLu activation function is applied.", "_____no_output_____" ] ], [ [ "# plot original image\nplt.imshow(gray_img, cmap='gray')\n\n# visualize all filters\nfig = plt.figure(figsize=(12, 6))\nfig.subplots_adjust(left=0, right=1.5, bottom=0.8, top=1, hspace=0.05, wspace=0.05)\nfor i in range(4):\n ax = fig.add_subplot(1, 4, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i], cmap='gray')\n ax.set_title('Filter %s' % str(i+1))\n\n \n# convert the image into an input Tensor\ngray_img_tensor = torch.from_numpy(gray_img).unsqueeze(0).unsqueeze(1)\n\n# get all the layers \nconv_layer, activated_layer, pooled_layer = model(gray_img_tensor)\n\n# visualize the output of the activated conv layer\nviz_layer(activated_layer)", "_____no_output_____" ] ], [ [ "### Visualize the output of the pooling layer\n\nThen, take a look at the output of a pooling layer. The pooling layer takes as input the feature maps pictured above and reduces the dimensionality of those maps, by some pooling factor, by constructing a new, smaller image of only the maximum (brightest) values in a given kernel area.\n\n", "_____no_output_____" ] ], [ [ "# visualize the output of the pooling layer\nviz_layer(pooled_layer)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd2a3fcb99100e55fc01312013396e9dc494eee
87,757
ipynb
Jupyter Notebook
support/notebooks/Plots for paper.ipynb
glwhart/autoGR
4c031eaa6ade736418ad9136d77004c258ef6fb4
[ "MIT" ]
17
2019-12-17T11:46:32.000Z
2022-03-21T19:06:11.000Z
support/notebooks/Plots for paper.ipynb
glwhart/autoGR
4c031eaa6ade736418ad9136d77004c258ef6fb4
[ "MIT" ]
13
2019-10-28T14:37:16.000Z
2022-03-09T00:19:56.000Z
support/notebooks/Plots for paper.ipynb
glwhart/autoGR
4c031eaa6ade736418ad9136d77004c258ef6fb4
[ "MIT" ]
9
2019-12-17T11:48:28.000Z
2022-03-23T19:40:43.000Z
231.548813
46,478
0.893399
[ [ [ "import numpy as np \nimport matplotlib.pyplot as plt\n\nfrom opf_python.sc import sc_3\nfrom opf_python.hx import hex_12\nfrom opf_python.stet import stet_11\nfrom opf_python.rhom import rhom_4_2\nfrom opf_python.so import so_32\nfrom opf_python.sm import sm_33", "_____no_output_____" ], [ "def primes(n):\n primfac = {}\n d = 2\n while d*d <= n:\n while (n % d) == 0:\n if d not in primfac.keys():\n primfac[d] =1 # supposing you want multiple factors repeated\n else: \n primfac[d] += 1\n n //= d\n d += 1\n if n > 1:\n primfac[n] = 1\n return primfac", "_____no_output_____" ], [ "def n_HNFs(n):\n total = 1\n ps = primes(n)\n for k in ps:\n temp = (k**(ps[k]+2)-1)*(k**(ps[k]+1)-1)/((k+1)*(k-1)**2)\n total *= temp\n \n return total ", "_____no_output_____" ], [ "all_HNFs = []\ncubic_HNFs = []\nhex_HNFs = []\nrhom_HNFs = []\ntet_HNFs = []\north_HNFs = []\nmono_HNFs = []\nxs = []\nfor i in range(10000):\n if i%1000==0:\n print(\"i\",i)\n if i < 10:\n all_HNFs.append(n_HNFs(i))\n cubic_HNFs.append(len(sc_3(i)))\n hex_HNFs.append(len(hex_12(i)))\n rhom_HNFs.append(len(rhom_4_2(i)))\n tet_HNFs.append(len(stet_11(i)))\n orth_HNFs.append(len(so_32(i)))\n mono_HNFs.append(len(sm_33(i)))\n xs.append(i)\n elif i < 100 and i%10==0:\n all_HNFs.append(n_HNFs(i))\n cubic_HNFs.append(len(sc_3(i)))\n hex_HNFs.append(len(hex_12(i)))\n rhom_HNFs.append(len(rhom_4_2(i)))\n tet_HNFs.append(len(stet_11(i)))\n orth_HNFs.append(len(so_32(i)))\n mono_HNFs.append(len(sm_33(i)))\n xs.append(i)\n elif i>=100 and i<1000 and i%100==0:\n all_HNFs.append(n_HNFs(i))\n cubic_HNFs.append(len(sc_3(i)))\n hex_HNFs.append(len(hex_12(i)))\n rhom_HNFs.append(len(rhom_4_2(i)))\n tet_HNFs.append(len(stet_11(i)))\n orth_HNFs.append(len(so_32(i)))\n mono_HNFs.append(len(sm_33(i)))\n xs.append(i)\n elif i>=1000 and i<=10000 and i%1000==0:\n all_HNFs.append(n_HNFs(i))\n cubic_HNFs.append(len(sc_3(i)))\n hex_HNFs.append(len(hex_12(i)))\n rhom_HNFs.append(len(rhom_4_2(i)))\n tet_HNFs.append(len(stet_11(i)))\n orth_HNFs.append(len(so_32(i)))\n mono_HNFs.append(len(sm_33(i))) \n xs.append(i)", "('i', 0)\n('i', 1000)\n('i', 2000)\n('i', 3000)\n('i', 4000)\n('i', 5000)\n('i', 6000)\n('i', 7000)\n('i', 8000)\n('i', 9000)\n" ], [ "plt.plot(np.log10(xs), np.log10(all_HNFs),label=\"All\")\nplt.plot(np.log10(xs), np.log10(mono_HNFs),label=\"monoclinic\")\nplt.plot(np.log10(xs), np.log10(orth_HNFs),label=\"orthorhombic\")\nplt.plot(np.log10(xs), np.log10(tet_HNFs),label=\"tetragonal\")\nplt.plot(np.log10(xs), np.log10(rhom_HNFs),label=\"rhombohedral\")\nplt.plot(np.log10(xs), np.log10(hex_HNFs),label=\"hexagonal\")\n\nl = plt.gca().figure.subplotpars.left\nr = plt.gca().figure.subplotpars.right\nt = plt.gca().figure.subplotpars.top\nb = plt.gca().figure.subplotpars.bottom\nfigw = float(4)/(r-l)\nfigh = float(4)/(t-b)\nplt.gca().figure.set_size_inches(figw, figh)\n\nxticks = np.arange(0, 5, 1)\nxticklabels = [r\"$10^{}$\".format(tick) for tick in xticks]\nplt.xticks(xticks, xticklabels)\nyticks = np.arange(1, 11, 2)\nyticklabels = [r\"$10^{}$\".format(tick) for tick in yticks]\nplt.yticks(yticks, yticklabels)\n#plt.plot(xs, cubic_HNFs,label=\"cubic\")\n#plt.title(\"Number of Supercells for a Volume\")\nplt.xlabel(\"Volume Factor\")\nplt.ylabel(\"Number of Supercells\")\nplt.legend()\n#plt.axes().set_aspect('equal')\n#plt.loglog()\nplt.savefig(\"../plots/N_HNFs_per_vol_sparse.pdf\")\nplt.show()", "/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:1: RuntimeWarning: divide by zero encountered in log10\n if 
__name__ == '__main__':\n/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:2: RuntimeWarning: divide by zero encountered in log10\n from ipykernel import kernelapp as app\n/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:3: RuntimeWarning: divide by zero encountered in log10\n app.launch_new_instance()\n/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:4: RuntimeWarning: divide by zero encountered in log10\n/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:5: RuntimeWarning: divide by zero encountered in log10\n/Users/wileymorgan/.virtualenvs/kpoints/lib/python2.7/site-packages/ipykernel/__main__.py:6: RuntimeWarning: divide by zero encountered in log10\n" ], [ "max_fold = {\"pos_1\": 48, \"pos_2\": 16, \"pos_4\": 8,#24, \n \"pos_6\": 8, \"pos_5\": 12, \"pos_3\": 4}\ntitles = {\"pos_1\": \"cubic\", \"pos_2\": \"tetragonal\", \"pos_3\": \"monoclinic\", \n \"pos_4\": \"hexagonal\", \"pos_5\": \"rhombohedral\", \"pos_6\": \"orthorhombic\"}\ndata_dir = \"data/\"\nopts = [\"gamma\", \"shift\"]", "_____no_output_____" ], [ "from os import listdir, path\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}'] #for \\text command\n\nn = 1\nfig = plt.figure(figsize=(10,10))\n#ax = fig.add_subplot(1,1,1)\nfor case, fold in max_fold.items():\n s_total = []\n s_reduced = []\n g_total = []\n g_reduced = []\n for op in opts:\n temp_dir = \"{0}/{1}/{2}\".format(data_dir, op, case)\n files = listdir(temp_dir)\n g_eff = []\n s_eff = []\n for f_in in files:\n if op == \"gamma\":\n with open(path.join(temp_dir, f_in), \"r\") as f:\n for line in f:\n if \"total\" in f_in:\n g_total.append(int(line.strip()))\n else:\n g_reduced.append(int(line.strip()))\n elif op == \"shift\":\n with open(path.join(temp_dir, f_in), \"r\") as f:\n for line in f:\n if \"total\" in f_in:\n s_total.append(int(line.strip()))\n else:\n s_reduced.append(int(line.strip()))\n \n for i in range(len(g_total)):\n g_eff.append((float(g_total[i])/float(g_reduced[i]))/float(fold))\n for i in range(len(s_total)):\n s_eff.append((float(s_total[i])/float(s_reduced[i]))/float(fold))\n \n plt.subplot(3, 2, n)\n plt.scatter(g_total, g_eff, label=\"no offset\", s=3)\n plt.scatter(s_total, s_eff, label=\"offset\",s=3)\n \n l = plt.gca().figure.subplotpars.left\n r = plt.gca().figure.subplotpars.right\n t = plt.gca().figure.subplotpars.top\n b = plt.gca().figure.subplotpars.bottom\n figw = float(4)/(r-l)\n figh = float(4)/(t-b)\n plt.gca().figure.set_size_inches(figw, figh)\n\n plt.title(titles[case])\n plt.ylim(0,1)\n plt.xscale('log')\n #plt.rc('text', usetex=True)\n #plt.xlabel(r'Total $\\textbf{k}$-points')\n #plt.ylabel(\"Folding Efficiency\")\n plt.legend()\n n += 1\n#plt.subplots_adjust(hspace=0.75)\nfig.text(0.5, 0.03, r'Total $\\textbf{k}$-points', ha='center', va='center')\nfig.text(0.03, 0.5, \"Folding Efficiency\", ha=\"center\", va=\"center\", rotation=\"vertical\")\nfig.tight_layout(rect=[0,0,1,1])\nfig.subplots_adjust(0.1,0.1,.9,.9)\n\n#plt.savefig(\"../plots/shift_gamma_comp.pdf\")\nplt.show()", "_____no_output_____" ], [ "n_HNFs(3000)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd2a840a8f0cf9e2851c4f5258f43d743574729
21,494
ipynb
Jupyter Notebook
Notebooks/other-attempts/spaCy/data_preparation/.ipynb_checkpoints/lemmatizer-checkpoint.ipynb
daorse/RedditGenderClassification
a1443b8781e767d3c04ff45f1721717a1f7416ba
[ "MIT" ]
9
2021-01-09T06:51:10.000Z
2022-03-30T20:16:51.000Z
Notebooks/other-attempts/spaCy/data_preparation/.ipynb_checkpoints/lemmatizer-checkpoint.ipynb
daorse/RedditGenderClassification
a1443b8781e767d3c04ff45f1721717a1f7416ba
[ "MIT" ]
null
null
null
Notebooks/other-attempts/spaCy/data_preparation/.ipynb_checkpoints/lemmatizer-checkpoint.ipynb
daorse/RedditGenderClassification
a1443b8781e767d3c04ff45f1721717a1f7416ba
[ "MIT" ]
null
null
null
31.890208
167
0.512887
[ [ [ "# Lemmatizer\n\nThis notebook is the first il logical order. It takes the challenge data as input and produces various preprocessings/lemmatizations.", "_____no_output_____" ] ], [ [ "import spacy\nimport pandas as pd\nfrom progressbar import ProgressBar, Bar, Percentage\nimport numpy as np\nimport re \nimport string as stri", "_____no_output_____" ] ], [ [ "It can be verified that lg and bert models hve the same stop_words", "_____no_output_____" ] ], [ [ "# load the model\nnlplg = spacy.load(r\"Q:\\anaconda\\Lib\\site-packages\\en_core_web_lg\\en_core_web_lg-2.2.5\", disable = [\"parser\", \"ner\"])", "_____no_output_____" ], [ "# load data\ntrain_data_full = pd.read_csv(r\"train_data.csv\")\ntrain_target_full = pd.read_csv(r\"train_target.csv\")", "_____no_output_____" ], [ "#lowercase subreddit column\ntrain_data_full[\"subreddit\"] = list(map(lambda x: x.lower() , train_data_full[\"subreddit\"].tolist()))", "_____no_output_____" ], [ "# function to process subreddits\ndef proc_subs(l):\n s = set(l)\n return \" \".join([st.lower() for st in s])", "_____no_output_____" ], [ "# create merged df\ntrain_data_full_agg = train_data_full.groupby([\"author\"], as_index = False).agg({'subreddit': proc_subs, \"body\": \" \".join})\n\n# bodies to be preprocessed/lemmatized, both in indivdual and aggregate form\nto_be_lemmed = train_data_full[\"body\"].tolist()\nto_be_lemmed_agg = train_data_full_agg[\"body\"].tolist()", "_____no_output_____" ], [ "# three different function for simple preprocesisng\n# substitutes !,? and . with . to enable sentencizing, and removes punctuation and numbers \ndef preprocessPunctNumSent(tex):\n tex = tex.replace(\"\\r\",\"\")\n tex = tex.replace(\"\\\\n\",\".\") #\n tex = tex.replace(\"\\n\",\".\")\n tex = tex.translate(str.maketrans(\"\",\"\", '\"#$%&\\'()*+,-/:<=>@[\\\\]^_`{|}~')).translate(str.maketrans('','','1234567890')).lower().strip()\n tex = re.sub(r\"\\!+\", \".\", tex)\n tex = re.sub(r\";+\", \".\", tex)\n tex = re.sub(r\"\\?+\", \".\", tex)\n tex = re.sub(r\"(\\.+\\s*)+\", \". 
\", tex)\n tex = \" \".join(tex.split())\n return tex\n\n\n# removes punctuation and numbers\ndef preprocessPunctNum(tex):\n tex = tex.replace(\"\\r\",\"\")\n tex = tex.replace(\"\\\\n\",\"\") #\n tex = tex.replace(\"\\n\",\"\")\n tex = tex.translate(str.maketrans(\"\",\"\", stri.punctuation)).translate(str.maketrans('','','1234567890')).lower().strip()\n tex = \" \".join(tex.split())\n return tex\n\n\n\n\n# only removes punctuation and \\ characters\ndef preprocessPunct(tex):\n tex = tex.replace(\"\\r\",\"\")\n tex = tex.replace(\"\\\\n\",\"\")\n tex = tex.replace(\"\\n\",\"\")\n tex = tex.translate(str.maketrans(\"\",\"\", stri.punctuation)).lower().strip()\n tex = \" \".join(tex.split())\n return tex", "_____no_output_____" ] ], [ [ "Tokenize text:", "_____no_output_____" ] ], [ [ "docs = []\nj = 0\ntexts = to_be_lemmed\npbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(texts)).start()\nfor tex in texts:\n docs.append(nlplg(preprocessPunctNum(tex)))\n j += 1\n pbar.update(j)\npbar.finish()\n\ndocs_agg = []\nj = 0\ntexts = to_be_lemmed_agg\npbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(texts)).start()\nfor tex in texts:\n docs.append(nlplg(preprocessPunctNum(tex)))\n j += 1\n pbar.update(j)\npbar.finish()\n\n\n", "100%|#########################################################################|\n" ] ], [ [ "Different functions for different lemmatizations/ further preprocessing:", "_____no_output_____" ] ], [ [ "# only removes punctuation and \\ characters and oov words\ndef lemmatize0(texts):\n final_l = []\n print(\"lemmatize0\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(texts)).start()\n j = 0\n for tex in texts:\n final_l.append(preprocessPunct(tex))\n pbar.update(j)\n j += 1\n pbar.finish()\n return final_l\n ", "_____no_output_____" ], [ "# like lemmatize0 but also remove numbers\ndef lemmatize1(docs):\n final_l = []\n print(\"lemmatize1\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if token.pos_ not in [\"SPACE\"] and token.lemma_ not in [\"-PRON-\"]:\n final += token.lemma_ + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l\n ", "_____no_output_____" ], [ "#like lemmatize 1 but also remove oov \ndef lemmatize11(docs):\n final_l = []\n print(\"lemmatize11\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if not token.is_oov and token.pos_ not in [\"SPACE\"] and token.lemma_ not in [\"-PRON-\"]:\n final += token.lemma_ + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# like lemmatize1 but alse remove stopwords\ndef lemmatize2(docs):\n final_l = []\n print(\" lemmatize2\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if not token.is_stop and token.pos_ not in [\"SPACE\"]:\n final += token.lemma_ + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# lik lemmetize2 but alse remove oov\ndef lemmatize21(docs):\n final_l = []\n print(\"lemmatize21\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if not token.is_oov and token.pos_ not in [\"SPACE\"] and not 
token.is_stop :\n final += token.text + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# lik lemmetize21 but alse remove non alpha but laves \".\" for sentencizer (optional, see blow)\ndef lemmatize22(docs):\n final_l = []\n print(\"lemmatize22\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if not token.is_oov and token.pos_ not in [\"SPACE\"] and not token.is_stop and token.is_alpha:# add this to enable Sent: or token.text == \".\" \n if token.text != \".\":\n final += token.lemma_ + \" \"\n else:\n final = final.rstrip()\n final += \". \"\n final_l.append(final.strip()) # add this to enable Sent: re.sub(r\"(\\.+\\s*)+\", \". \", final).strip()\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# like lemmatize2 , but no longer removes stopwrds. It removes all pronouns, auxliliaries, words like \"how, what, why etc\"\ndef lemmatize3(docs):\n final_l = []\n print(\"lemmatize3\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if token.pos_ not in [\"SPACE\",\"PRON\",\"DET\",\"AUX\",\"ADP\"] and token.dep_ not in [\"WRB\"]:\n final += token.text + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# like lemmatize2 , but no longer removes stopwrds. It removes all pronouns, auxliliaries, words like \"how, what, why etc\" and misspelled words\ndef lemmatize31(docs):\n final_l = []\n print(\"lemmatize31\")\n pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docs)).start()\n i = 0\n for doc in docs:\n final = \"\"\n for token in doc:\n if token.pos_ not in [\"SPACE\",\"PRON\",\"DET\",\"AUX\",\"ADP\"] and token.dep_ not in [\"WRB\"] and not token.is_oov: \n final += token.text + \" \"\n final_l.append(final.strip())\n i += 1\n pbar.update(i)\n pbar.finish()\n return final_l", "_____no_output_____" ], [ "# this way one may understand spaCy abrreviations\nprint(spacy.explain(\"WRB\"))\nprint(spacy.explain(\"advmod\"))\nprint(spacy.explain(\"RB\"))\nprint(spacy.explain(\"PRP$\"))\nprint(spacy.explain(\"INTJ\"))\nprint(spacy.explain(\"X\"))\nprint(spacy.explain(\"SPACE\"))\nprint(spacy.explain(\"NN\"))\nprint(spacy.explain(\"VBN\"))\nprint(spacy.explain(\"NNP\"))\nprint(spacy.explain(\"PROPN\"))\nprint(spacy.explain(\"NFP\"))\nprint(spacy.explain(\"pobj\"))\nprint(spacy.explain(\"dobj\"))\nprint(spacy.explain(\"pcomp\"))\nprint(spacy.explain(\"ADP\"))\nprint(spacy.explain(\"JJ\"))\nprint(spacy.explain(\"compound\"))\nprint(spacy.explain(\"PART\"))", "wh-adverb\nadverbial modifier\nadverb\npronoun, possessive\ninterjection\nother\nspace\nnoun, singular or mass\nverb, past participle\nnoun, proper singular\nproper noun\nsuperfluous punctuation\nobject of preposition\ndirect object\ncomplement of preposition\nadposition\nadjective\ncompound\nparticle\n" ], [ "# create lists of interest\nauthors = train_data_full[\"author\"].tolist()\nauthors_agg = train_data_full_agg[\"author\"].tolist()\n\nsubreddits = train_data_full[\"subreddit\"].tolist()\nsubreddits_agg = train_data_full_agg[\"subreddit\"].tolist()", "_____no_output_____" ] ], [ [ "Produce all lemmatizations", "_____no_output_____" ] ], [ [ "lemmatizer0\n\n\nlemmed = lemmatize0(to_be_lemmed )\n\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": 
authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunct.csv\", index=False)\nprint(\"done\")\n \nlemmed_agg = lemmatize0(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmatizer1\nlemmed = lemmatize1(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumLem.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize1(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumLemAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmed = lemmatize11(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumLemOov.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize11(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumLemOovAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmatizer2\nlemmed = lemmatize2(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumStop.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize2(nlplg,to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumStopAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmatize21\nlemmed = lemmatize21(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumStopLemOov.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize21(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumStopOovAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmatize22\nlemmed = lemmatize22(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumStopLemOovAlphaSent.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize22(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\csv\\lPunctNumStopLemOovAlphaAgg.csv\", index=False)\nprint(\"done\")", " 0%| |\r" ], [ "lemmatize3\nlemmed = lemmatize3(to_be_lemmed 
)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumPers.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize3(to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumPersAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ], [ "lemmatize31\nlemmed = lemmatize3(to_be_lemmed )\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors , \"subreddit\" : subreddits, \"body\" : lemmed})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumPersOov.csv\", index=False)\nprint(\"done\")\n\nlemmed_agg = lemmatize3(nlplg,to_be_lemmed_agg)\nprint(\"writing to file\")\ndf = pd.DataFrame({\"author\": authors_agg , \"subreddit\" : subreddits_agg, \"body\" : lemmed_agg})\ndf.to_csv(r\"Q:\\\\tooBigToDrive\\data-mining\\kaggle\\data\\lPunctNumPersOovAgg.csv\", index=False)\nprint(\"done\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd2adf50a33765e8c052e05f84fd0504f36d128
16,600
ipynb
Jupyter Notebook
week7/BinaryCounter.ipynb
code-lab-org/sys611
3b8c46788dee629a9f2d6b7f84373e041b918ff0
[ "MIT" ]
3
2021-04-07T03:52:07.000Z
2022-03-04T18:16:16.000Z
week7/BinaryCounter.ipynb
code-lab-org/sys611
3b8c46788dee629a9f2d6b7f84373e041b918ff0
[ "MIT" ]
null
null
null
week7/BinaryCounter.ipynb
code-lab-org/sys611
3b8c46788dee629a9f2d6b7f84373e041b918ff0
[ "MIT" ]
6
2021-02-12T01:57:23.000Z
2022-03-04T18:05:27.000Z
87.368421
11,164
0.838976
[ [ [ "# SYS 611: Binary Counter (Discrete Time Simulation)\n\nPaul T. Grogan <[email protected]>\n\nThis example shows how to construct a discrete time simulation of a simple binary counter system which has the amazing ability of counting to 2. The binary counter system has one binary state variable (q), one binary input variable (x), and one binary output variable (y). The output function is defined by the logical operation y(t)=q(t) and x(t). The state transition function is defined by the logical operation q(t+1)=q(t) xor x(t). The expected behavior is to emit an output of 1 for every second input value of 1.\n\n## Dependencies\n\nThis example is compatible with Python 2 environments through use of the `__future__` library function. Additionally, this example uses the `matplotlib.pyplot` library for plotting.", "_____no_output_____" ] ], [ [ "# import the python3 behavior for importing, division, and printing in python2\nfrom __future__ import absolute_import, division, print_function\n\n# import the matplotlib pyplot package and refer to it as `plt`\n# see http://matplotlib.org/api/pyplot_api.html for documentation\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Transition and Output Functions\n\nDefine functions for the state transition function (delta) and output function (lambda). Note that `lambda` is a reserved word in Python (for lambda functions), so both Greek letters are prefixed by an underscore.", "_____no_output_____" ] ], [ [ "# define the state transition function\ndef _delta(q, x):\n # note that xor is equivalent to != in Python\n return q != x\n \n# define the output function\ndef _lambda(q, x): \n return q and x", "_____no_output_____" ] ], [ [ "## Input, State, and Output Trajectories\n\nDefine lists to store the input trajectory and store the computed output and state trajectories (initialize with zero value).", "_____no_output_____" ] ], [ [ "# define the input trajectory\nx = [1,1,0,0,1,0,0,0,1]\n \n# define the output and state trajectories (zero initial value)\ny = [0,0,0,0,0,0,0,0,0]\nq = [0,0,0,0,0,0,0,0,0,0]", "_____no_output_____" ] ], [ [ "## Discrete Time Simulation Logic\n\nThe discrete event simulation should start by setting the initial time and state. Then, enter a loop to:\n1. Compute the current output\n2. Compute the next state\n3. Update the time", "_____no_output_____" ] ], [ [ "# initialize the simulation\nt = 0\nq[0] = 0\n\n# execute the simulation\nwhile t <= 8:\n # record output value\n y[t] = _lambda(q[t], x[t])\n # record state update\n q[t+1] = _delta(q[t], x[t])\n # advance time\n t += 1", "_____no_output_____" ] ], [ [ "## Visualize Outcomes\n\nUse bar plots in `matplotlib` to plot the input, state, and output trajectories.", "_____no_output_____" ] ], [ [ "plt.figure()\n# create three subplots that all share a common x-axis\nf, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\n# plot the input trajectory on the first subplot\nax1.bar(range(9), x, color='k')\nax1.set_ylabel('Input ($x$)')\n# plot the state trajectory on the second subplot (up to second-to-last value)\nax2.bar(range(9), q[:-1], color='k')\nax2.set_ylabel('State ($q$)')\n# plot the output trajectory on the third subplot\nax3.bar(range(9), y, color='k')\nax3.set_ylabel('Output ($y$)')\n# add labels and display\nplt.xlabel('Time (ticks)')\nplt.suptitle('Binary Counter Model')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd2c5833dfa357c3a24ec33e975a8bdbf9026b9
2,303
ipynb
Jupyter Notebook
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/01_rates/000107_calc_swap_risk_historically.ipynb
webclinic017/gs-quant
ebb8ee5e1d954ab362aa567293906ce51818cfa8
[ "Apache-2.0" ]
4
2021-05-11T14:35:53.000Z
2022-03-14T03:52:34.000Z
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/01_rates/000107_calc_swap_risk_historically.ipynb
webclinic017/gs-quant
ebb8ee5e1d954ab362aa567293906ce51818cfa8
[ "Apache-2.0" ]
null
null
null
gs_quant/documentation/02_pricing_and_risk/00_instruments_and_measures/examples/01_rates/000107_calc_swap_risk_historically.ipynb
webclinic017/gs-quant
ebb8ee5e1d954ab362aa567293906ce51818cfa8
[ "Apache-2.0" ]
null
null
null
23.742268
125
0.591403
[ [ [ "from gs_quant.common import PayReceive, Currency\nfrom gs_quant.instrument import IRSwap\nfrom gs_quant.session import Environment, GsSession\nfrom gs_quant.risk import DollarPrice, IRDelta\nfrom gs_quant.common import AggregationLevel\nfrom gs_quant.markets import HistoricalPricingContext\nfrom datetime import date", "_____no_output_____" ], [ "# external users should substitute their client id and secret; please skip this step if using internal jupyterhub\nGsSession.use(Environment.PROD, client_id = None, client_secret=None, scopes=('run_analytics',))", "_____no_output_____" ], [ "swap_10bps = IRSwap(PayReceive.Receive, '5y', Currency.EUR, fixed_rate='atm+10')", "_____no_output_____" ], [ "with HistoricalPricingContext(date(2020, 3, 2), date(2020, 4, 1), show_progress=True):\n res_f = swap_10bps.calc((DollarPrice, IRDelta, IRDelta(aggregation_level=AggregationLevel.Type, currency='local')))", "_____no_output_____" ], [ "print(res_f.result()) # retrieve all results", "_____no_output_____" ], [ "print(res_f[DollarPrice]) # retrieve historical prices", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
ecd2c74d2bf6c3d651acc0989f3efd78f6a900d2
57,187
ipynb
Jupyter Notebook
FinBert_DataAnalysis_(Pie_Charts)(_12th_Jan_2022_).ipynb
Gaurav7004/NEWS_ARTICLES_DEPLOYMENT
4b566918940b882808d499a49d125bb2f8d8ad16
[ "Apache-2.0" ]
null
null
null
FinBert_DataAnalysis_(Pie_Charts)(_12th_Jan_2022_).ipynb
Gaurav7004/NEWS_ARTICLES_DEPLOYMENT
4b566918940b882808d499a49d125bb2f8d8ad16
[ "Apache-2.0" ]
null
null
null
FinBert_DataAnalysis_(Pie_Charts)(_12th_Jan_2022_).ipynb
Gaurav7004/NEWS_ARTICLES_DEPLOYMENT
4b566918940b882808d499a49d125bb2f8d8ad16
[ "Apache-2.0" ]
null
null
null
117.427105
40,022
0.800916
[ [ [ "<a href=\"https://colab.research.google.com/github/Gaurav7004/NEWS_ARTICLES_DEPLOYMENT/blob/main/FinBert_DataAnalysis_(Pie_Charts)(_12th_Jan_2022_).ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import table", "_____no_output_____" ], [ "df = pd.read_csv('FinBERT_Predictions.csv', encoding='utf-8')", "_____no_output_____" ], [ "df.head(3)", "_____no_output_____" ], [ "keywords = ['GST', 'taxes', 'Stock', 'Share market', 'shares', 'GDP', 'gst', 'stock', 'Taxes', 'RBI', 'Reserve Bank of India', 'inflation', \n 'Inflation', 'India Business', 'Indian Economy', 'Export and Import', 'Industrial output', 'EXIM', 'Profit and Loss', \n 'Commodities prices', 'price rise', 'Loans', 'Borrowings', 'financial aid', 'mortgage', 'microfinance', 'Banking regulations', \n 'domestic production', 'GNP', 'non industrial output', 'employment', 'unemployment', 'consumption expenditure', 'allowances', 'pay-cuts', 'recession',\n 'Forecasting', 'predictions', 'world bank', 'IMF', 'International Monetary Fund', 'Market volatility', 'Indian currency', 'exchange rates']\n\n## news to list\nnews = df['Headline'].tolist()\n\n## list to fill required news sentences\nlst_reqdWords = []\n\n## Extract required news\nfor i in range(len(keywords)):\n for j in range(len(news)):\n if keywords[i] in news[j]:\n lst_reqdWords.append(news[j])\n if keywords[i] in news[j]:\n lst_reqdWords.append(news[j])\n", "_____no_output_____" ], [ "## Separated and Total rows\nlen(lst_reqdWords), df.shape[0]", "_____no_output_____" ], [ "## ONLY for GST ##\n\n## Extract required news having 'gst' or 'GST' keyword\ndf_gst = df[df['Headline'].str.lower().str.contains('gst') == True]\n\n## Dataframes having gst haing positive, negative, and neutral values\ndf_gst_pos = df_gst[df_gst['Positive'] >= 0.5]\ndf_gst_neg = df_gst[df_gst['Negative'] >= 0.5]\ndf_gst_neu = df_gst[df_gst['Neutral'] >= 0.5]\n\n## Counts +ve, -ve , neutral\ncnt_pos = df_gst_pos.shape[0]\ncnt_neg = df_gst_neg.shape[0]\ncnt_neu = df_gst.shape[0] - (cnt_pos+ cnt_neg)\n\nprint(df_gst.shape[0])\nprint(cnt_pos + cnt_neg + cnt_neu)\n\n## Raw data dictionary\nraw_data = {'sentiments': ['Positive', 'Negative', 'Neutral'],\n 'results': [cnt_pos, cnt_neg, cnt_neu]}\n\n## Plot Dataframe\ndf_for_Plot = pd.DataFrame(raw_data, columns = ['sentiments', 'results'])\n\n#colors = ['red', 'gold', 'yellowgreen', 'blue', 'lightcoral', 'lightskyblue']\ncolors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\", \"#8c564b\"]\n\n# explode 1st slice\nexplode = (0.1, 0, 0) \n\nplt.figure(figsize=(16,8))\n# plot chart\nax1 = plt.subplot(121, aspect='equal')\ndf_for_Plot.plot(kind='pie', y = 'results', ax=ax1, autopct='%1.1f%%', explode=explode, colors=colors,\n startangle=90, shadow=False, labels=df_for_Plot['sentiments'], legend = False, fontsize=14)\n\n# plot table\nax2 = plt.subplot(122)\nplt.axis('off')\ntbl = table(ax2, df_for_Plot, loc='center')\ntbl.auto_set_font_size(False)\ntbl.set_fontsize(14)\nplt.title(\"Sentiments for the word - GST\", bbox={'facecolor':'0.8', 'pad':5})\nplt.show()\n", "4355\n4355\n" ], [ "df_gst.columns", "_____no_output_____" ], [ "# ### List to append required sentences\n# req_list_of_df_row = []\n\n# for i in range(0, len(df.iloc[i])):\n# for j in range(0, len(keywords)):\n# if string_lst[j] in x[i]:\n# # print('x List: ', x[i])\n# # 
print('string_lst : ', string_lst[j])\n# req_list_of_df_row.append(x[i])\n# else:\n# pass", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "''' EXAMPLE '''\n###############\n\nimport re\n\n## Our keywords\nstring_lst = ['fun of me', 'dum', 'sun', 'gum fun']\n\n## Our Row \nx = [\"I love to have fun of me.\", \"dum dfg\", \"sun rise\", \"gum fun wan\", \"this sun\", \"dum fun\", \"asdf\", \"qwert\"]\n\n# print(re.findall(r\"(?=(\"+'|'.join(string_lst)+r\"))\", x))\n\n### List to append required sentences\nreq_list_of_df_row = []\n\nfor i in range(0, len(x)):\n for j in range(0, len(string_lst)):\n if string_lst[j] in x[i]:\n # print('x List: ', x[i])\n # print('string_lst : ', string_lst[j])\n req_list_of_df_row.append(x[i])\n else:\n pass\n\nprint(req_list_of_df_row)", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd2cd26fef784c4cb59483a64bda3d35ef21b02
4,767
ipynb
Jupyter Notebook
notebooks/make_io_files.ipynb
lamsoa729/simcore
daf7056cb0c17563ed0f6bdee343fa1f6cd59729
[ "MIT" ]
4
2020-05-23T18:56:31.000Z
2022-01-13T04:06:50.000Z
notebooks/make_io_files.ipynb
lamsoa729/simcore
daf7056cb0c17563ed0f6bdee343fa1f6cd59729
[ "MIT" ]
18
2019-12-16T16:27:47.000Z
2020-04-21T21:17:09.000Z
notebooks/make_io_files.ipynb
lamsoa729/simcore
daf7056cb0c17563ed0f6bdee343fa1f6cd59729
[ "MIT" ]
2
2019-10-09T21:04:28.000Z
2020-05-22T19:46:22.000Z
24.321429
113
0.473044
[ [ [ "import numpy as np\nn_angles = 25\nn_dists = 10\nr = 50\nangle_names = np.linspace(np.pi/2-.1, np.pi-.1, n_angles+1)\nangles = np.linspace(-np.pi/2, 0, n_angles+1)\nangle_names = angle_names[1:]\nangles = angles[1:]\ndists = np.linspace(0, n_dists, n_dists+1)\n#alphas\n#alphas\n#n_runs = 40\n#angles = np.linspace(0.1,np.pi/2-.1, n_runs)\n", "_____no_output_____" ], [ "cm = open('copy_me.txt', 'w')\nfor theta_name, theta in zip(angle_names, angles):\n cm.write(\"[\")\n for alpha in dists:\n x = -r*np.cos(theta)\n y = r*np.sin(theta)\n yp = -(r+alpha)\n f = open(\"io_fil_th_{0:.2f}\".format(theta_name) + \"_a_{0:02d}.yaml\".format(int(alpha)), 'w')\n f.write('filament:\\n')\n f.write(' - [[0, {}, 0], [0, 1, 0]]\\n'.format(yp))\n f.write(' - [[{}, {}, 0], [{}, {}, 0]]\\n'.format(x, y, np.cos(theta), -np.sin(theta)))\n f.close()\n if alpha != dists[-1]:\n cm.write(\"io_fil_th_{0:.2f}\".format(theta) + \"_a_{0:02d}.yaml, \".format(int(alpha)))\n else:\n cm.write(\"io_fil_th_{0:.2f}\".format(theta) + \"_a_{0:02d}.yaml]\\n\".format(int(alpha)))\ncm.close()\n# for i,j in enumerate(angles):\n# f = open('io_fil_{0:02d}.yaml'.format(i), 'w')\n# f.write('filament:\\n')\n# f.write(' - [[-30, -60, 0], [{}, {}, 0]]\\n'.format(math.cos(j),math.sin(j)))\n# f.write(' - [[30, -59.5, 0], [-{}, {}, 0]]\\n'.format(math.cos(j),math.sin(j)))\n# f.close()", "_____no_output_____" ], [ "import sys", "_____no_output_____" ], [ "f = open('copy_me.txt', 'r')\nf.readline()\nprint(f.readline())\nf.close()", "_____no_output_____" ], [ "import os, glob\nfrom shutil import copyfile\nimport re", "_____no_output_____" ], [ "os.chdir(os.getcwd()+'/in_out_params')", "_____no_output_____" ], [ "files = glob.glob(\"io_fil_set*params.yaml\")\nfiles.sort()", "_____no_output_____" ], [ "new_files = []\nfor i, file in enumerate(files):\n new_file = re.sub(\"set_\\d+_\", \"set_{0:02d}_\".format(25+i), file)\n print(new_file)\n copyfile(file, new_file)\n new_files.append(new_file)", "_____no_output_____" ], [ "for i, (file, theta) in enumerate(zip(new_files, angles)):\n f = open(file, 'r')\n content = f.read()\n f.close()\n content = re.sub('_set_\\d+', '_set_{}'.format(25+i), content)\n content = re.sub('_th_\\d.\\d+', '_th_{0:.2f}'.format(theta), content)\n \n f = open(file, 'w')\n f.write(content)\n f.close()", "_____no_output_____" ], [ "L = [1,2,3]\nK = [4,5,6]\nfor i,(j,k) in enumerate(zip(L,K)):\n print(i,j,k)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd2eaa14ddf8d544add1744f3a7b0072e352344
90,823
ipynb
Jupyter Notebook
linear-regression/linear-regression-part-4.ipynb
U34rAli/machine-learning-tutorials
bdff13ef0c96306430ef5a1e976b52db3dd71496
[ "MIT" ]
null
null
null
linear-regression/linear-regression-part-4.ipynb
U34rAli/machine-learning-tutorials
bdff13ef0c96306430ef5a1e976b52db3dd71496
[ "MIT" ]
null
null
null
linear-regression/linear-regression-part-4.ipynb
U34rAli/machine-learning-tutorials
bdff13ef0c96306430ef5a1e976b52db3dd71496
[ "MIT" ]
null
null
null
550.442424
32,304
0.947656
[ [ [ "import pandas as pd # data handeling\nimport numpy as np # numeriacal computing\nimport matplotlib.pyplot as plt # plotting core\nimport seaborn as sns # higher level plotting tools\n%matplotlib inline\nsns.set()", "_____no_output_____" ], [ "def f(x) : # A parabola\n f = x**2\n return f\ndef Df(x) : # The derivative (gradient)\n Df = 2*x\n return Df\ndef xp1(x,alpha) : # update\n xp1 = x - alpha * Df(x)\n return xp1", "_____no_output_____" ], [ "def plot_steps( guess, alpha, nsteps) :\n fig, ax = plt.subplots()\n x = np.linspace(-3,3,100)\n ax.plot(x, f(x))\n x = guess\n ax.plot(x,f(x), 'o', label='start x=%.2f' %x )\n for i in range(nsteps):\n xold = x\n x = xp1(x,alpha)\n ax.plot(x,f(x), 'o', label='x = %.2f' %x)\n ax.plot([xold,x],[f(xold),f(x)], '-')\n plt.legend();", "_____no_output_____" ], [ "plot_steps( -2.5, 0.02, 10 )", "_____no_output_____" ], [ "plot_steps( -2.5, 0.9, 10 )", "_____no_output_____" ], [ "plot_steps( -2.5, 1.05, 10 )", "_____no_output_____" ], [ "from sklearn.preprocessing import StandardScaler", "_____no_output_____" ], [ "scaler = StandardScaler()\nx = scaler.fit([[1,10],])", "_____no_output_____" ], [ "x = scaler.transform([[1,100],[13,42]])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd2ec4aff5dc2c00742579e99fc4bc75b771f82
11,315
ipynb
Jupyter Notebook
21. cifar10/cifar10.ipynb
yaminikonka/ML-Notebook
90dc76676b35429badaa739fde7f0a0e5a0bd8c6
[ "MIT" ]
18
2019-01-30T16:32:05.000Z
2022-01-24T13:07:52.000Z
21. cifar10/cifar10.ipynb
getmrinal/MLcognizance
de0b2713f27ab18a2f4a0aef60e986b1c2f8e3f4
[ "MIT" ]
null
null
null
21. cifar10/cifar10.ipynb
getmrinal/MLcognizance
de0b2713f27ab18a2f4a0aef60e986b1c2f8e3f4
[ "MIT" ]
16
2018-06-22T12:22:40.000Z
2021-02-15T08:12:11.000Z
24.229122
177
0.459479
[ [ [ "[View in Colaboratory](https://colab.research.google.com/github/getmrinal/ML-Notebook/blob/master/21.%20cifar10/cifar10.ipynb)", "_____no_output_____" ], [ "**Loading Required Libraries and Files**", "_____no_output_____" ] ], [ [ "import cifar10\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom sklearn.decomposition import PCA\n\nfrom sklearn import svm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "cifar10.data_path = \"data/CIFAR-10/\"", "_____no_output_____" ], [ "cifar10.maybe_download_and_extract()", "_____no_output_____" ], [ "class_names = cifar10.load_class_names()\nclass_names", "_____no_output_____" ], [ "images_train, cls_train, labels_train = cifar10.load_training_data()\nimages_test, cls_test, labels_test = cifar10.load_test_data()", "_____no_output_____" ], [ "x_train = images_train.reshape(images_train.shape[0],-1)\nx_test = images_test.reshape(images_test.shape[0], -1)", "_____no_output_____" ], [ "y_train = cls_train \ny_test = cls_test", "_____no_output_____" ] ], [ [ "**Applied PCA to the Features of Cifar**", "_____no_output_____" ] ], [ [ "pca = PCA()\npca.fit_transform(x_train)", "_____no_output_____" ], [ "pca.explained_variance_.shape", "_____no_output_____" ] ], [ [ "**the optimal value of K decided **", "_____no_output_____" ] ], [ [ "# Calculating optimal k\n\nk = 0\ntotal = sum(pca.explained_variance_)\ncurrent_sum = 0\n\nwhile(current_sum / total < 0.98):\n current_sum += pca.explained_variance_[k]\n k += 1\nk", "_____no_output_____" ], [ "## Applying PCA with k calcuated above\n\npca = PCA(n_components=k, whiten=True, svd_solver='randomized')\n\nx_train_pca = pca.fit_transform(x_train)\nx_test_pca = pca.transform(x_test)", "_____no_output_____" ] ], [ [ "### Using Randomforest Classifier on the Cifar data !", "_____no_output_____" ] ], [ [ "rf = RandomForestClassifier()\nrf.fit(x_train_pca, y_train)\n\ny_pred_rf = rf.predict(x_test_pca)\n\nrandom_forest_score = accuracy_score(y_test, y_pred_rf)\nprint(random_forest_score)", "_____no_output_____" ] ], [ [ "### Using LogisticsRegression classifier", "_____no_output_____" ] ], [ [ "## Training ## Train \nlr = LogisticRegression()\nlr.fit(x_train_pca, y_train)", "_____no_output_____" ], [ "## Predicting## Predi \ny_pred_lr = lr.predict(x_test_pca)\nlogistic_regression_score = accuracy_score(y_test, y_pred_lr)\nlogistic_regression_score", "_____no_output_____" ] ], [ [ "### Using Support Vector Machine", "_____no_output_____" ] ], [ [ "## Training\nsvc = svm.SVC()\nsvc.fit(x_train_pca, y_train)", "_____no_output_____" ], [ "# Predi \ny_pred_svm = svc.predict(x_test_pca)\nsvc_score = accuracy_score(y_test, y_pred_svm)\nsvc_score", "_____no_output_____" ] ], [ [ "### Using K-neighbours clf", "_____no_output_____" ] ], [ [ "# Train \nknn = KNeighborsClassifier()\nknn.fit(x_train_pca, y_train)", "_____no_output_____" ], [ "# Predi \ny_pred_knn = knn.predict(x_test_pca)\n\nknn_score = accuracy_score(y_test, y_pred_knn)\nknn_score", "_____no_output_____" ] ], [ [ "## Comparing All the classifier", "_____no_output_____" ] ], [ [ "print(\"RandomForest \", random_forest_score)\nprint(\"SVM : \", svc_score)\nprint(\"KNN \", knn_score)\nprint(\"Logistic Regression : \", logistic_regression_score)", "_____no_output_____" ], [ "name = []\nfor i in 
range(len(y_pred_svm)):\n name.append(class_names[y_pred_svm[i]])", "_____no_output_____" ], [ "np.savetxt(\"svmPred.csv\", name, fmt = '%s')", "_____no_output_____" ], [ "# memory footprint support libraries/code\n!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi\n!pip install gputil\n!pip install psutil\n!pip install humanize\nimport psutil\nimport humanize\nimport os\nimport GPUtil as GPU\nGPUs = GPU.getGPUs()\n# XXX: only one GPU on Colab and isn’t guaranteed\ngpu = GPUs[0]\ndef printm():\n process = psutil.Process(os.getpid())\n print(\"Gen RAM Free: \" + humanize.naturalsize( psutil.virtual_memory().available ), \" I Proc size: \" + humanize.naturalsize( process.memory_info().rss))\n print(\"GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB\".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))\nprintm()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ecd30c8d0c9fd3ce645c4e8d1ea0f09e8a65ac7d
451,028
ipynb
Jupyter Notebook
examples/filtering.ipynb
oneconcern/stompy
d2cb86e7d1a2de698701b8d1b391e27e1ee935c0
[ "MIT" ]
17
2017-10-12T14:53:25.000Z
2022-02-26T01:24:52.000Z
examples/filtering.ipynb
oneconcern/stompy
d2cb86e7d1a2de698701b8d1b391e27e1ee935c0
[ "MIT" ]
6
2018-03-12T12:43:14.000Z
2021-09-04T17:44:31.000Z
examples/filtering.ipynb
rustychris/stompy
4efb78824804edc68555bced275e37842f98ba1f
[ "MIT" ]
6
2017-09-29T21:20:11.000Z
2020-09-28T21:29:23.000Z
241.579004
276,755
0.884992
[ [ [ "Comparison of Filtering Methods\n===\n\nThis notebook tests/demonstrates three lowpass filtering methods. The\ntypical use is for time series filtering, but there is no reason these\nmethods cannot be used to filter coordinate series, or the lowpass\nresult subtracted from the original signal to get a highpass series.\n\nThe text here is high level, and belies a dim recollection of proper\nsignal processing. DSP experts will be offended, but they are not\nthe intended audience.\n\n** FIR **\n\nFinite impulse response, which is a glorified moving average. Compared\nto a standard \"boxcar\" moving average, we choose a Hanning window for\na smoother response. This method deals well with NaNs (NaNs should be left\nin so that the input has an evenly spaced timebase). The cutoff period\nis where the frequency response falls to 0.5 of the DC response. A tidal\nfilter should probably have a cutoff of about 72 hours to be sure that very\nlittle diurnal signal gets through.\n\n** IIR **\n\nInfinite impulse response, where each output point is calculated as a weighted\nsum of recent output points and recent input points. This method allows\nfor fast filtering and sharply defined frequency responses. The \"order\" of\nthe method defines how many \"recent\" inputs and outputs are considered. Higher\norder allows for sharper cutoffs between pass frequencies and stop frequencies,\nat the expense of possible numerical stability issues. Note that the cutoff \nperiod for the IIR method here is not the same as for FIR. The response \nfalls to 0.5 at twice the cutoff period. A tidal filter can reasonably have\na cutoff of 36 hours, which means that very little energy gets through at 36\nhours, and only half of the energy at 72 hours get through. \n\nFor example, an FIR filter with a cutoff at 36 hours will\nstill pass half of the input signal at a 36-hour period. The IIR code would \nrequire a \"cutoff\" of 18 hours to get the same half-pass effect at 36 hours.\nThis may change in the future, but will require a new function since\nthere is code that depends on the current implementation.\n\n** Godin **\n\nThis is an old-school moving average filter for removing tides from time series.\nIt is intended to be applied to hourly data, though the implementation here \nwill approximate a Godin filter on time series with arbitrary (but constant!)\ntime steps.\n\nAll of the methods preserve the length of the input data, but generally produce\nunusable results near the start and end of the output. 
", "_____no_output_____" ] ], [ [ "from stompy import filters, utils\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib notebook", "_____no_output_____" ], [ "# Sample data -- all times in hours\ndt=0.1\nx=np.arange(0,100,dt)\ny=np.random.random(len(x))\n\ntarget_cutoff=36.0\n\ny_fir=filters.lowpass_fir(y,int(target_cutoff/dt))\ny_iir=filters.lowpass(y,dt=dt,cutoff=target_cutoff/2.)\ny_godin=filters.lowpass_godin(y,in_t_days=x/24.)\n", "_____no_output_____" ] ], [ [ "Construct a noise signal and plot the result of applying each method.", "_____no_output_____" ] ], [ [ "fig,ax=plt.subplots()\nax.plot(x,y,label='Original',lw=0.2)\nax.plot(x,y_fir,label='FIR')\nax.plot(x,y_iir,label='IIR')\nax.plot(x,y_godin,label='Godin')\nax.legend(loc='upper right')", "_____no_output_____" ] ], [ [ "Frequency Response\n---\n\nThis is a brute-force approach to frequency response to \ndemonstrate the details of what each method does to \nincoming frequencies.\n\nEach filter is applied to a collection of sine-curve \ninputs of varying frequencies. For each frequency,\nthe gain is computed by comparing the RMS magnitude\nof the input and output waveforms.\n\n", "_____no_output_____" ] ], [ [ "periods=10**(np.linspace(np.log10(1),np.log10(400),150))\nfreqs=1./periods\n\n# A single time base that's good enough for the full range\nx=np.arange(0,4*periods[-1],periods[0]/4.)\ndt=np.median(np.diff(x))\n\ntarget_cutoff=36.0\n\nfreq=freqs[0]\n\ny=np.cos(2*np.pi*freq*x)\nwin=np.hanning(len(y))\n\ndef fir36hour(y):\n return filters.lowpass_fir(y,int(2*target_cutoff/dt))\n\ndef iir36hour(y):\n return filters.lowpass(y,dt=dt,cutoff=target_cutoff,order=4)\n\ndef godin(y):\n return filters.lowpass_godin(y,in_t_days=x/24.)\n\ndef scan(f):\n gains=[]\n for freq in freqs:\n y=np.cos(2*np.pi*freq*x)\n y_filt=f(y)\n mag_in=utils.rms( win*y )\n mag_out=utils.rms( win*y_filt)\n gains.append( (freq,mag_out/mag_in) ) \n return np.array(gains)\n\nfir_gains=scan(fir36hour)\niir_gains=scan(iir36hour)\ngodin_gains=scan(godin)", "_____no_output_____" ], [ "fig,ax=plt.subplots()\nax.loglog(iir_gains[:,0],iir_gains[:,1],label='IIR 4th order')\nax.loglog(fir_gains[:,0],fir_gains[:,1],label='FIR')\nax.loglog(godin_gains[:,0],godin_gains[:,1],label='Godin')\n\nax.axvline(1./target_cutoff,label='36h',color='k',lw=0.8,zorder=-1)\nax.axvline(1./24,label='__nolabel__',color='0.6',lw=0.8,zorder=-1)\nax.axvline(1./24.84,label='__nolabel__',color='0.6',lw=0.8,zorder=-1)\nax.axvline(1./12,label='__nolabel__',color='0.6',lw=0.8,zorder=-1)\nax.axvline(1./12.42,label='__nolabel__',color='0.6',lw=0.8,zorder=-1)\n\n\nax.axhline(0.5,label='__nolabel__',color='k',lw=0.8,zorder=-1)\n\n\nax.set_xlabel('Freq (1/h)')\nax.set_ylabel('Gain')\nax.legend(loc='lower left')", "_____no_output_____" ] ], [ [ "Discussion of Response\n---\n\nThe plot shows that IIR has the fastest rolloff above the pass band, consistent\nwith it being a 4th order filter. A 2nd order IIR filter would show a rolloff\nsimilar to but smoother than the FIR and Godin filters. It may be useful to\nnote that the Godin filter is really an FIR filter where the window is implicitly\ndefined by the moving averages as opposed to a closed form like the Hanning\nwindow. Both Godin and FIR have 2nd order rolloff above the passband.\n\nAn interesting feature of the Godin filter, and a good argument for its use,\nis that it has deep notches at the dominant tidal frequencies, near 24h and\n12h. 
For that reason it is actually better at rejecting tidal-band energy\nthan either of the other two methods.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecd335839d668219df2f8240f43e6393fd1032ba
121,521
ipynb
Jupyter Notebook
ETL Pipeline Preparation.ipynb
Apucs/Disaster-response-pipeline
fd8599166a39dae9987d1c6e55a29715eda3e051
[ "MIT" ]
null
null
null
ETL Pipeline Preparation.ipynb
Apucs/Disaster-response-pipeline
fd8599166a39dae9987d1c6e55a29715eda3e051
[ "MIT" ]
5
2021-06-08T22:10:18.000Z
2022-03-12T00:43:57.000Z
ETL Pipeline Preparation.ipynb
Apucs/Disaster-response-pipeline
fd8599166a39dae9987d1c6e55a29715eda3e051
[ "MIT" ]
null
null
null
43.72832
847
0.376355
[ [ [ "# ETL Pipeline Preparation\nFollow the instructions below to help you create your ETL pipeline.\n### 1. Import libraries and load datasets.\n- Import Python libraries\n- Load `messages.csv` into a dataframe and inspect the first few lines.\n- Load `categories.csv` into a dataframe and inspect the first few lines.", "_____no_output_____" ] ], [ [ "!pip install sqlalchemy", "Collecting sqlalchemy\n Downloading SQLAlchemy-1.3.18-cp37-cp37m-manylinux2010_x86_64.whl (1.3 MB)\n\u001b[K |████████████████████████████████| 1.3 MB 374 kB/s eta 0:00:01\n\u001b[?25hInstalling collected packages: sqlalchemy\nSuccessfully installed sqlalchemy-1.3.18\n" ], [ "# import libraries\nimport pandas as pd\nimport numpy as np\n\nfrom sqlalchemy import create_engine", "_____no_output_____" ], [ "# load messages dataset\nmessages = pd.read_csv('messages.csv')\nprint(messages.shape)\nmessages.head()", "(26248, 4)\n" ], [ "# load categories dataset\ncategories = pd.read_csv(\"categories.csv\")\nprint(categories.shape)\ncategories.head()", "(26248, 2)\n" ] ], [ [ "### 2. Merge datasets.\n- Merge the messages and categories datasets using the common id\n- Assign this combined dataset to `df`, which will be cleaned in the following steps", "_____no_output_____" ] ], [ [ "# merge datasets\ndf = pd.merge(messages, categories, how='outer', on='id')\nprint(df.shape)\ndf.head()", "(26386, 5)\n" ] ], [ [ "### 3. Split `categories` into separate category columns.\n- Split the values in the `categories` column on the `;` character so that each value becomes a separate column. You'll find [this method](https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.Series.str.split.html) very helpful! Make sure to set `expand=True`.\n- Use the first row of categories dataframe to create column names for the categories data.\n- Rename columns of `categories` with new column names.", "_____no_output_____" ] ], [ [ "# create a dataframe of the 36 individual category columns\ncategories = df[\"categories\"].str.split(\";\", expand=True)\ncategories.head()", "_____no_output_____" ], [ "# select the first row of the categories dataframe\nrow = categories.iloc[1]\n#for r in row:\n # print(r)\n \n#type(row)\n# use this row to extract a list of new column names for categories.\n# one way is to apply a lambda function that takes everything \n# up to the second to last character of each string with slicing\ncategory_colnames = row.apply(lambda x: x.split('-')[0])\nprint(category_colnames)", "0 related\n1 request\n2 offer\n3 aid_related\n4 medical_help\n5 medical_products\n6 search_and_rescue\n7 security\n8 military\n9 child_alone\n10 water\n11 food\n12 shelter\n13 clothing\n14 money\n15 missing_people\n16 refugees\n17 death\n18 other_aid\n19 infrastructure_related\n20 transport\n21 buildings\n22 electricity\n23 tools\n24 hospitals\n25 shops\n26 aid_centers\n27 other_infrastructure\n28 weather_related\n29 floods\n30 storm\n31 fire\n32 earthquake\n33 cold\n34 other_weather\n35 direct_report\nName: 1, dtype: object\n" ], [ "# rename the columns of `categories`\ncategories.columns = category_colnames\ncategories.head()", "_____no_output_____" ] ], [ [ "### 4. Convert category values to just numbers 0 or 1.\n- Iterate through the category columns in df to keep only the last character of each string (the 1 or 0). For example, `related-0` becomes `0`, `related-1` becomes `1`. 
Convert the string to a numeric value.\n- You can perform [normal string actions on Pandas Series](https://pandas.pydata.org/pandas-docs/stable/text.html#indexing-with-str), like indexing, by including `.str` after the Series. You may need to first convert the Series to be of type string, which you can do with `astype(str)`.", "_____no_output_____" ] ], [ [ "for column in categories:\n print(categories.groupby(categories[column]).count())", "1 request offer aid_related medical_help medical_products \\\nrelated \nrelated-0 6140 6140 6140 6140 6140 \nrelated-1 20042 20042 20042 20042 20042 \nrelated-2 204 204 204 204 204 \n\n1 search_and_rescue security military child_alone water ... \\\nrelated ... \nrelated-0 6140 6140 6140 6140 6140 ... \nrelated-1 20042 20042 20042 20042 20042 ... \nrelated-2 204 204 204 204 204 ... \n\n1 aid_centers other_infrastructure weather_related floods storm \\\nrelated \nrelated-0 6140 6140 6140 6140 6140 \nrelated-1 20042 20042 20042 20042 20042 \nrelated-2 204 204 204 204 204 \n\n1 fire earthquake cold other_weather direct_report \nrelated \nrelated-0 6140 6140 6140 6140 6140 \nrelated-1 20042 20042 20042 20042 20042 \nrelated-2 204 204 204 204 204 \n\n[3 rows x 35 columns]\n1 related offer aid_related medical_help medical_products \\\nrequest \nrequest-0 21873 21873 21873 21873 21873 \nrequest-1 4513 4513 4513 4513 4513 \n\n1 search_and_rescue security military child_alone water ... \\\nrequest ... \nrequest-0 21873 21873 21873 21873 21873 ... \nrequest-1 4513 4513 4513 4513 4513 ... \n\n1 aid_centers other_infrastructure weather_related floods storm \\\nrequest \nrequest-0 21873 21873 21873 21873 21873 \nrequest-1 4513 4513 4513 4513 4513 \n\n1 fire earthquake cold other_weather direct_report \nrequest \nrequest-0 21873 21873 21873 21873 21873 \nrequest-1 4513 4513 4513 4513 4513 \n\n[2 rows x 35 columns]\n1 related request aid_related medical_help medical_products \\\noffer \noffer-0 26265 26265 26265 26265 26265 \noffer-1 121 121 121 121 121 \n\n1 search_and_rescue security military child_alone water ... \\\noffer ... \noffer-0 26265 26265 26265 26265 26265 ... \noffer-1 121 121 121 121 121 ... \n\n1 aid_centers other_infrastructure weather_related floods storm \\\noffer \noffer-0 26265 26265 26265 26265 26265 \noffer-1 121 121 121 121 121 \n\n1 fire earthquake cold other_weather direct_report \noffer \noffer-0 26265 26265 26265 26265 26265 \noffer-1 121 121 121 121 121 \n\n[2 rows x 35 columns]\n1 related request offer medical_help medical_products \\\naid_related \naid_related-0 15432 15432 15432 15432 15432 \naid_related-1 10954 10954 10954 10954 10954 \n\n1 search_and_rescue security military child_alone water ... \\\naid_related ... \naid_related-0 15432 15432 15432 15432 15432 ... \naid_related-1 10954 10954 10954 10954 10954 ... \n\n1 aid_centers other_infrastructure weather_related floods \\\naid_related \naid_related-0 15432 15432 15432 15432 \naid_related-1 10954 10954 10954 10954 \n\n1 storm fire earthquake cold other_weather direct_report \naid_related \naid_related-0 15432 15432 15432 15432 15432 15432 \naid_related-1 10954 10954 10954 10954 10954 10954 \n\n[2 rows x 35 columns]\n1 related request offer aid_related medical_products \\\nmedical_help \nmedical_help-0 24287 24287 24287 24287 24287 \nmedical_help-1 2099 2099 2099 2099 2099 \n\n1 search_and_rescue security military child_alone water \\\nmedical_help \nmedical_help-0 24287 24287 24287 24287 24287 \nmedical_help-1 2099 2099 2099 2099 2099 \n\n1 ... 
aid_centers other_infrastructure weather_related \\\nmedical_help ... \nmedical_help-0 ... 24287 24287 24287 \nmedical_help-1 ... 2099 2099 2099 \n\n1 floods storm fire earthquake cold other_weather \\\nmedical_help \nmedical_help-0 24287 24287 24287 24287 24287 24287 \nmedical_help-1 2099 2099 2099 2099 2099 2099 \n\n1 direct_report \nmedical_help \nmedical_help-0 24287 \nmedical_help-1 2099 \n\n[2 rows x 35 columns]\n1 related request offer aid_related medical_help \\\nmedical_products \nmedical_products-0 25067 25067 25067 25067 25067 \nmedical_products-1 1319 1319 1319 1319 1319 \n\n1 search_and_rescue security military child_alone water \\\nmedical_products \nmedical_products-0 25067 25067 25067 25067 25067 \nmedical_products-1 1319 1319 1319 1319 1319 \n\n1 ... aid_centers other_infrastructure weather_related \\\nmedical_products ... \nmedical_products-0 ... 25067 25067 25067 \nmedical_products-1 ... 1319 1319 1319 \n\n1 floods storm fire earthquake cold other_weather \\\nmedical_products \nmedical_products-0 25067 25067 25067 25067 25067 25067 \nmedical_products-1 1319 1319 1319 1319 1319 1319 \n\n1 direct_report \nmedical_products \nmedical_products-0 25067 \nmedical_products-1 1319 \n\n[2 rows x 35 columns]\n1 related request offer aid_related medical_help \\\nsearch_and_rescue \nsearch_and_rescue-0 25661 25661 25661 25661 25661 \nsearch_and_rescue-1 725 725 725 725 725 \n\n1 medical_products security military child_alone water \\\nsearch_and_rescue \nsearch_and_rescue-0 25661 25661 25661 25661 25661 \nsearch_and_rescue-1 725 725 725 725 725 \n\n1 ... aid_centers other_infrastructure weather_related \\\nsearch_and_rescue ... \nsearch_and_rescue-0 ... 25661 25661 25661 \nsearch_and_rescue-1 ... 725 725 725 \n\n1 floods storm fire earthquake cold other_weather \\\nsearch_and_rescue \nsearch_and_rescue-0 25661 25661 25661 25661 25661 25661 \nsearch_and_rescue-1 725 725 725 725 725 725 \n\n1 direct_report \nsearch_and_rescue \nsearch_and_rescue-0 25661 \nsearch_and_rescue-1 725 \n\n[2 rows x 35 columns]\n1 related request offer aid_related medical_help \\\nsecurity \nsecurity-0 25915 25915 25915 25915 25915 \nsecurity-1 471 471 471 471 471 \n\n1 medical_products search_and_rescue military child_alone water \\\nsecurity \nsecurity-0 25915 25915 25915 25915 25915 \nsecurity-1 471 471 471 471 471 \n\n1 ... aid_centers other_infrastructure weather_related floods \\\nsecurity ... \nsecurity-0 ... 25915 25915 25915 25915 \nsecurity-1 ... 471 471 471 471 \n\n1 storm fire earthquake cold other_weather direct_report \nsecurity \nsecurity-0 25915 25915 25915 25915 25915 25915 \nsecurity-1 471 471 471 471 471 471 \n\n[2 rows x 35 columns]\n" ], [ "for column in categories:\n # set each value to be the last character of the string\n #print(column)\n categories[column] = categories[column].apply(lambda x: int(x.split(\"-\")[1]))\n \n # convert column from string to numeric\n #categories[column] = \ncategories['related'] = categories['related'].replace(2, 1)\ncategories.head()", "_____no_output_____" ], [ "categories.shape", "_____no_output_____" ] ], [ [ "### 5. 
Replace `categories` column in `df` with new category columns.\n- Drop the categories column from the df dataframe since it is no longer needed.\n- Concatenate df and categories data frames.", "_____no_output_____" ] ], [ [ "# drop the original categories column from `df`\ndf.drop(['categories'],axis=1, inplace=True)\n\ndf.head()", "_____no_output_____" ], [ "# concatenate the original dataframe with the new `categories` dataframe\ndf = pd.concat([df, categories], axis=1)\ndf.head()", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "### 6. Remove duplicates.\n- Check how many duplicates are in this dataset.\n- Drop the duplicates.\n- Confirm duplicates were removed.", "_____no_output_____" ] ], [ [ "# check number of duplicates\nsum(df.duplicated())", "_____no_output_____" ], [ "# drop duplicates\ndf = df.drop_duplicates()", "_____no_output_____" ], [ "# check number of duplicates\nsum(df.duplicated())", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ] ], [ [ "## Checking for null value\n\n> __If there're null value in the dataset then fill it with `ffill()` and `bfill()` method__\n", "_____no_output_____" ] ], [ [ "df.isnull().sum()", "_____no_output_____" ] ], [ [ "> __It looks there're quite a null value that we need to fill up__", "_____no_output_____" ] ], [ [ "df = df.fillna(method = 'ffill').fillna(method = 'bfill')\ndf.isnull().sum()", "_____no_output_____" ], [ "from collections import Counter \n\na = Counter(df['text_length'])\nb = a.most_common()\nx = []\ny = []\nfor val1, val2 in b:\n x.append(val1)\n y.append(val2)\n \nprint(x)\nprint(y)", "[10, 12, 14, 16, 13, 18, 15, 20, 19, 22, 11, 17, 21, 9, 23, 24, 8, 25, 26, 7, 27, 28, 30, 29, 32, 31, 6, 34, 33, 35, 36, 37, 38, 5, 39, 41, 40, 42, 44, 45, 43, 46, 47, 49, 48, 4, 51, 53, 50, 52, 54, 55, 56, 57, 60, 58, 61, 59, 64, 62, 65, 3, 63, 1, 66, 68, 67, 70, 69, 73, 72, 81, 74, 75, 76, 2, 80, 78, 97, 91, 89, 82, 71, 119, 79, 102, 84, 262, 93, 133, 0, 77, 173, 177, 162, 387, 147, 226, 146, 121, 96, 86, 125, 114, 157, 134, 104, 152, 356, 627, 277, 995, 141, 224, 143, 663, 355, 540, 483, 172, 150, 805, 196, 165, 362, 647, 470, 263, 759, 272, 1683, 468, 211, 130, 539, 390, 670, 231, 229, 435, 634, 924, 374, 369, 185, 421, 88, 401, 392, 174, 1071, 367, 168, 116, 145, 934, 434, 629, 737, 1686, 778, 357, 521, 440, 95, 538, 164, 582, 120, 494, 159, 209, 496, 396, 641, 144, 482, 225, 372, 194, 984, 83, 848, 345, 1076, 246, 242]\n[945, 910, 899, 898, 896, 886, 874, 873, 870, 868, 866, 863, 855, 842, 821, 816, 811, 762, 741, 683, 667, 639, 603, 592, 561, 534, 463, 457, 448, 409, 366, 332, 303, 259, 254, 227, 218, 175, 159, 146, 137, 122, 104, 82, 79, 74, 54, 54, 52, 47, 41, 38, 35, 30, 29, 26, 23, 21, 20, 19, 17, 16, 15, 15, 14, 14, 13, 12, 12, 11, 11, 10, 6, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n" ], [ "df[df['text_length'] < 100].groupby('text_length').count()['id']", "_____no_output_____" ], [ "t=df.drop(columns=['id','message','original','genre', 'text_length']).sum().sort_values(ascending=False).head(5)\nt.values", "_____no_output_____" ] ], [ [ "### 7. 
Save the clean dataset into an sqlite database.\nYou can do this with pandas [`to_sql` method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_sql.html) combined with the SQLAlchemy library. Remember to import SQLAlchemy's `create_engine` in the first cell of this notebook to use it below.", "_____no_output_____" ] ], [ [ "engine = create_engine('sqlite:///DisasterResponse.db')\ndf.to_sql('DisasterResponse', engine, index=False, if_exists='replace')", "_____no_output_____" ] ], [ [ "### 8. Use this notebook to complete `etl_pipeline.py`\nUse the template file attached in the Resources folder to write a script that runs the steps above to create a database based on new datasets specified by the user. Alternatively, you can complete `etl_pipeline.py` in the classroom on the `Project Workspace IDE` coming later.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecd337248ec24e90d0f44939f2cfd632c6d12a39
7,874
ipynb
Jupyter Notebook
Execution_of_the_bundling_layer_2.ipynb
inpicksys/Neural_networks_and_computer_vision_SamsungResearch_Open_Education
ab1f475659096c116c4d59558faf4e253ffdc89a
[ "MIT" ]
null
null
null
Execution_of_the_bundling_layer_2.ipynb
inpicksys/Neural_networks_and_computer_vision_SamsungResearch_Open_Education
ab1f475659096c116c4d59558faf4e253ffdc89a
[ "MIT" ]
null
null
null
Execution_of_the_bundling_layer_2.ipynb
inpicksys/Neural_networks_and_computer_vision_SamsungResearch_Open_Education
ab1f475659096c116c4d59558faf4e253ffdc89a
[ "MIT" ]
null
null
null
36.119266
137
0.466726
[ [ [ "import torch\nfrom abc import ABC, abstractmethod\nimport numpy as np", "_____no_output_____" ], [ "def calc_out_shape(input_matrix_shape, out_channels, kernel_size, stride, padding):\n batch_size, channels_count, input_height, input_width = input_matrix_shape\n output_height = (input_height + 2 * padding - (kernel_size - 1) - 1) // stride + 1\n output_width = (input_width + 2 * padding - (kernel_size - 1) - 1) // stride + 1\n\n return batch_size, out_channels, output_height, output_width", "_____no_output_____" ], [ "class ABCConv2d(ABC):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n\n def set_kernel(self, kernel):\n self.kernel = kernel\n\n @abstractmethod\n def __call__(self, input_tensor):\n pass", "_____no_output_____" ], [ "class Conv2d(ABCConv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size,\n stride, padding=0, bias=False)\n\n def set_kernel(self, kernel):\n self.conv2d.weight.data = kernel\n\n def __call__(self, input_tensor):\n return self.conv2d(input_tensor)", "_____no_output_____" ], [ "def create_and_call_conv2d_layer(conv2d_layer_class, stride, kernel, input_matrix):\n out_channels = kernel.shape[0]\n in_channels = kernel.shape[1]\n kernel_size = kernel.shape[2]\n\n layer = conv2d_layer_class(in_channels, out_channels, kernel_size, stride)\n layer.set_kernel(kernel)\n\n return layer(input_matrix)\n", "_____no_output_____" ], [ "def test_conv2d_layer(conv2d_layer_class, batch_size=2,\n input_height=4, input_width=4, stride=2):\n kernel = torch.tensor(\n [[[[0., 1, 0],\n [1, 2, 1],\n [0, 1, 0]],\n\n [[1, 2, 1],\n [0, 3, 3],\n [0, 1, 10]],\n\n [[10, 11, 12],\n [13, 14, 15],\n [16, 17, 18]]]])\n\n in_channels = kernel.shape[1]\n\n input_tensor = torch.arange(0, batch_size * in_channels *\n input_height * input_width,\n out=torch.FloatTensor()) \\\n .reshape(batch_size, in_channels, input_height, input_width)\n\n custom_conv2d_out = create_and_call_conv2d_layer(\n conv2d_layer_class, stride, kernel, input_tensor)\n conv2d_out = create_and_call_conv2d_layer(\n Conv2d, stride, kernel, input_tensor)\n\n return torch.allclose(custom_conv2d_out, conv2d_out) \\\n and (custom_conv2d_out.shape == conv2d_out.shape)\n", "_____no_output_____" ], [ "class Conv2dMatrix(ABCConv2d):\n # Функция преобразование кернела в матрицу нужного вида.\n def _unsqueeze_kernel(self, torch_input, output_height, output_width):\n zero_matrix = np.zeros((torch_input.size()[1], torch_input.size()[2] * torch_input.size()[3]))\n j = 0\n for core in self.kernel[0].numpy():\n k = 0\n for i in range(len(core)):\n zero_matrix[j, k:k+len(core[i])] = core[i]\n k += torch_input.size()[2]\n j+=1\n MATRIX = zero_matrix.reshape((output_height, torch_input.size()[1] * torch_input.size()[2] * torch_input.size()[3]))\n kernel_unsqueezed = torch.from_numpy(MATRIX).float()# Реализуйте функцию, возвращающую преобразованный кернел.\n return kernel_unsqueezed\n\n def __call__(self, torch_input):\n batch_size, out_channels, output_height, output_width\\\n = calc_out_shape(\n input_matrix_shape=torch_input.shape,\n out_channels=self.kernel.shape[0],\n kernel_size=self.kernel.shape[2],\n stride=self.stride,\n padding=0)\n\n kernel_unsqueezed = self._unsqueeze_kernel(torch_input, output_height, output_width)\n result = kernel_unsqueezed @ torch_input.view((batch_size, -1)).permute(1, 
0)\n return result.permute(1, 0).view((batch_size, self.out_channels,\n output_height, output_width))\n\n# Проверка происходит автоматически вызовом следующего кода\n# (раскомментируйте для самостоятельной проверки,\n# в коде для сдачи задания должно быть закомментировано):\nprint(test_conv2d_layer(Conv2dMatrix))", "torch.Size([2, 3, 4, 4])\n(3, 16)\nTrue\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecd33b28d1196d61e0b0af56023f6feab928f314
1,041
ipynb
Jupyter Notebook
simple-gist-link.ipynb
anasir514/colab
fc75014432ae608ce1afa9b595cbaa1cb74e21e6
[ "MIT" ]
null
null
null
simple-gist-link.ipynb
anasir514/colab
fc75014432ae608ce1afa9b595cbaa1cb74e21e6
[ "MIT" ]
null
null
null
simple-gist-link.ipynb
anasir514/colab
fc75014432ae608ce1afa9b595cbaa1cb74e21e6
[ "MIT" ]
null
null
null
26.692308
228
0.558117
[ [ [ "<a href=\"https://colab.research.google.com/github/anasir514/colab/blob/main/simple-gist-link.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "https://gist.githubusercontent.com/anasir514/a74833596df99c4cb9baf484874c7cf8/raw/21d80aa1890251e9bf1bdff15d3717e959425ce2/index.html", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
ecd355aa767f221a1c9be18e811e5c6ecf45f190
17,018
ipynb
Jupyter Notebook
site/en-snapshot/guide/migrate/fault_tolerance.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
491
2020-01-27T19:05:32.000Z
2022-03-31T08:50:44.000Z
site/en-snapshot/guide/migrate/fault_tolerance.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
511
2020-01-27T22:40:05.000Z
2022-03-21T08:40:55.000Z
site/en-snapshot/guide/migrate/fault_tolerance.ipynb
phoenix-fork-tensorflow/docs-l10n
2287738c22e3e67177555e8a41a0904edfcf1544
[ "Apache-2.0" ]
627
2020-01-27T21:49:52.000Z
2022-03-28T18:11:50.000Z
35.380457
309
0.537784
[ [ [ "##### Copyright 2021 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Migrate the fault tolerance mechanism\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/migrate/fault_tolerance\">\n <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />\n View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/migrate/fault_tolerance.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/fault_tolerance.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/migrate/fault_tolerance.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "Fault tolerance refers to a mechanism of periodically saving the states of trackable objects, such as parameters and models. This enables you to recover them in the event of a program/machine failure during training.\n\nThis guide first demonstrates how to add fault tolerance to training with `tf.estimator.Estimator` in TensorFlow 1 by specifying metric saving with `tf.estimator.RunConfig`. 
Then, you will learn how to implement fault tolerance for training in Tensorflow 2 in two ways:\n\n- If you use the Keras `Model.fit` API, you can pass the `tf.keras.callbacks.experimental.BackupAndRestore` callback to it.\n- If you use a custom training loop (with `tf.GradientTape`), you can arbitrarily save checkpoints using the `tf.train.Checkpoint` and `tf.train.CheckpointManager` APIs.\n\nBoth of these methods will back up and restore the training states in [checkpoint](../../guide/checkpoint.ipynb) files.\n", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ] ], [ [ "import tensorflow.compat.v1 as tf1\nimport tensorflow as tf\nimport numpy as np\nimport tempfile\nimport time", "_____no_output_____" ], [ "mnist = tf.keras.datasets.mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0", "_____no_output_____" ] ], [ [ "## TensorFlow 1: Save checkpoints with tf.estimator.RunConfig\n\nIn TensorFlow 1, you can configure a `tf.estimator` to save checkpoints every step by configuring `tf.estimator.RunConfig`.\n\nIn this example, start by writing a hook that artificially throws an error during the fifth checkpoint:", "_____no_output_____" ] ], [ [ "class InterruptHook(tf1.train.SessionRunHook):\n # A hook for artificially interrupting training.\n def begin(self):\n self._step = -1\n\n def before_run(self, run_context):\n self._step += 1\n\n def after_run(self, run_context, run_values):\n if self._step == 5:\n raise RuntimeError('Interruption')", "_____no_output_____" ] ], [ [ "Next, configure `tf.estimator.Estimator` to save every checkpoint and use the MNIST dataset:", "_____no_output_____" ] ], [ [ "feature_columns = [tf1.feature_column.numeric_column(\"x\", shape=[28, 28])]\nconfig = tf1.estimator.RunConfig(save_summary_steps=1,\n save_checkpoints_steps=1)\n\npath = tempfile.mkdtemp()\n\nclassifier = tf1.estimator.DNNClassifier(\n feature_columns=feature_columns,\n hidden_units=[256, 32],\n optimizer=tf1.train.AdamOptimizer(0.001),\n n_classes=10,\n dropout=0.2,\n model_dir=path,\n config = config\n)\n\ntrain_input_fn = tf1.estimator.inputs.numpy_input_fn(\n x={\"x\": x_train},\n y=y_train.astype(np.int32),\n num_epochs=10,\n batch_size=50,\n shuffle=True,\n)", "_____no_output_____" ] ], [ [ "Begin training the model. 
An artificial exception will be raised by the hook you defined earlier.", "_____no_output_____" ] ], [ [ "try:\n classifier.train(input_fn=train_input_fn,\n hooks=[InterruptHook()],\n max_steps=10)\nexcept Exception as e:\n print(f'{type(e).__name__}:{e}')", "_____no_output_____" ] ], [ [ "Rebuild the `tf.estimator.Estimator` using the last saved checkpoint and continue training:", "_____no_output_____" ] ], [ [ "classifier = tf1.estimator.DNNClassifier(\n feature_columns=feature_columns,\n hidden_units=[256, 32],\n optimizer=tf1.train.AdamOptimizer(0.001),\n n_classes=10,\n dropout=0.2,\n model_dir=path,\n config = config\n)\nclassifier.train(input_fn=train_input_fn,\n max_steps = 10)", "_____no_output_____" ] ], [ [ "## TensorFlow 2: Back up and restore with a callback and Model.fit\n\nIn TensorFlow 2, if you use the Keras `Model.fit` API for training, you can provide the `tf.keras.callbacks.experimental.BackupAndRestore` callback to add the fault tolerance functionality.\n\nTo help demonstrate this, let's first start by defining a callback class that artificially throws an error during the fifth checkpoint:\n", "_____no_output_____" ] ], [ [ "class InterruptingCallback(tf.keras.callbacks.Callback):\n # A callback for artificially interrupting training.\n def on_epoch_end(self, epoch, log=None):\n if epoch == 4:\n raise RuntimeError('Interruption')", "_____no_output_____" ] ], [ [ "Then, define and instantiate a simple Keras model, define the loss function, call `Model.compile`, and set up a `tf.keras.callbacks.experimental.BackupAndRestore` callback that will save the checkpoints in a temporary directory:", "_____no_output_____" ] ], [ [ "def create_model():\n return tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10)\n ])\n\nloss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\nmodel = create_model()\nmodel.compile(optimizer='adam',\n loss=loss,\n metrics=['accuracy'],\n steps_per_execution=10)\n\nlog_dir = tempfile.mkdtemp()\n\nbackup_restore_callback = tf.keras.callbacks.experimental.BackupAndRestore(\n backup_dir = log_dir\n)", "_____no_output_____" ] ], [ [ "Now, start training the model with `Model.fit`. 
During training, checkpoints will be saved thanks to the `backup_restore_callback` defined above, while the `InterruptingCallback` will raise an artificial exception to simulate a failure.", "_____no_output_____" ] ], [ [ "try:\n model.fit(x=x_train,\n y=y_train,\n epochs=10,\n validation_data=(x_test, y_test),\n callbacks=[backup_restore_callback, InterruptingCallback()])\nexcept Exception as e:\n print(f'{type(e).__name__}:{e}')", "_____no_output_____" ] ], [ [ "Next, instantiate the Keras model, call `Model.compile`, and continue training the model with `Model.fit` from a previously saved checkpoint:", "_____no_output_____" ] ], [ [ "model = create_model()\nmodel.compile(optimizer='adam',\n loss=loss,\n metrics=['accuracy'],\n steps_per_execution=10)\nmodel.fit(x=x_train,\n y=y_train,\n epochs=10,\n validation_data=(x_test, y_test),\n callbacks=[backup_restore_callback])", "_____no_output_____" ] ], [ [ "## TensorFlow 2: Write manual checkpoints with a custom training loop\n\nIf you use a custom training loop in TensorFlow 2, you can implement a fault tolerance mechanism with the `tf.train.Checkpoint` and `tf.train.CheckpointManager` APIs.\n\nThis example demonstrates how to:\n\n- Use a `tf.train.Checkpoint` object to manually create a checkpoint, where the trackable objects you want to save are set as attributes.\n- Use a `tf.train.CheckpointManager` to manage multiple checkpoints.\n\nStart by defining and instantiating the Keras model, the optimizer, and the loss function. Then, create a `Checkpoint` that manages two objects with trackable states (the model and the optimizer), as well as a `CheckpointManager` for logging and keeping several checkpoints in a temporary directory.", "_____no_output_____" ] ], [ [ "model = create_model()\noptimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\nloss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\nlog_dir = tempfile.mkdtemp()\nepochs = 5\nsteps_per_epoch = 5\n\ncheckpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\ncheckpoint_manager = tf.train.CheckpointManager(\n checkpoint, log_dir, max_to_keep=2)", "_____no_output_____" ] ], [ [ "Now, implement a custom training loop where after the first epoch every time a new epoch starts the last checkpoint is loaded:", "_____no_output_____" ] ], [ [ "for epoch in range(epochs):\n if epoch > 0:\n tf.train.load_checkpoint(save_path)\n print(f\"\\nStart of epoch {epoch}\")\n\n for step in range(steps_per_epoch):\n with tf.GradientTape() as tape:\n\n logits = model(x_train, training=True)\n loss_value = loss_fn(y_train, logits)\n\n grads = tape.gradient(loss_value, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n\n save_path = checkpoint_manager.save()\n print(f\"Checkpoint saved to {save_path}\")\n print(f\"Training loss at step {step}: {loss_value}\")", "_____no_output_____" ] ], [ [ "## Next steps\n\nTo learn more about fault tolerance and checkpointing in TensorFlow 2, consider the following documentation:\n\n- The `tf.keras.callbacks.experimental.BackupAndRestore` callback API docs.\n- The `tf.train.Checkpoint` and `tf.train.CheckpointManager` API docs.\n- The [Training checkpoints](../../guide/checkpoint.ipynb) guide, including the _Writing checkpoints_ section.\n\nYou may also find the following material related to [distributed training](../..guide/distributed_training.ipynb) useful:\n\n- The _Fault tolerance_ section in the [Multi-worker training with Keras](../../tutorials/distribute/multi_worker_with_keras.ipynb) 
tutorial.\n- The _Handing task failure_ section in the [Parameter server training](../../tutorials/distribute/parameter_server_training.ipynb) tutorial.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecd356e5ac6c6e064da5b4e50dbd86934a4d8f72
2,625
ipynb
Jupyter Notebook
Peer-graded_Assignment_Capstone_Project_The Battle of Neighborhoods_W1_Introduction_Business Problem.ipynb
anjana-dodampe/Coursera_Capstone
d20e49d6299add737885aadf181036e9120af5d9
[ "Apache-2.0" ]
null
null
null
Peer-graded_Assignment_Capstone_Project_The Battle of Neighborhoods_W1_Introduction_Business Problem.ipynb
anjana-dodampe/Coursera_Capstone
d20e49d6299add737885aadf181036e9120af5d9
[ "Apache-2.0" ]
null
null
null
Peer-graded_Assignment_Capstone_Project_The Battle of Neighborhoods_W1_Introduction_Business Problem.ipynb
anjana-dodampe/Coursera_Capstone
d20e49d6299add737885aadf181036e9120af5d9
[ "Apache-2.0" ]
null
null
null
36.458333
526
0.657905
[ [ [ "<h3>Peer-graded_Assignment_Capstone_Project_The Battle of Neighborhoods_W1_Introduction_Business Problem</h3>", "_____no_output_____" ], [ "<h3>1.\tIntroduction </h3>\n\n<b>1.1 Background </b>\n<br>\n<br>\n Tourism, the act and process of spending time away from home in pursuit of recreation, relaxation, and pleasure has been a trending sector across the globe. Human as a social animal, always fonds of travelling, exploring new adventures. No matter which country you’re from, you can always come across a group of people who always like travelling places. Tourism plays a significant role in developing economy of a country and it brings a particular country to a prominent place in global standing. \n\n\n", "_____no_output_____" ], [ "Tourism industry is important in many aspects since it creates demand and growth for many more industries. It plays an important role in generating more employments, revenues and contributing in empowering livelihood of many locals. Sri Lanka, the pearl of the Indian Ocean is a country that is immensely benefited from tourism industry. \n\n<b>1.2 Problem</b>\n<br>\n<br>\nAll the benefits of tourism tend to reflect on the employment opportunities that it provides to the people of that country. The objective of this project is to analyse tourist places across districts of Sri Lanka and try to recommend the best location where they can open a business to make the best use of the opportunity. \n The target audience for this project includes the people who are interested in opening a business that associates with tourism industry. This also recommends tourists, tourist attraction hotspots in a particular district in Sri Lanka.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
ecd374e30a3f9e5133b71e8c906e8d3c4896ecd7
2,265
ipynb
Jupyter Notebook
notebooks/21_TorchGPUTest.ipynb
txn2/gpu-lab
341a91ca60ebb39a9074d84e40028bab6609ab4d
[ "MIT" ]
1
2020-10-29T00:39:26.000Z
2020-10-29T00:39:26.000Z
notebooks/21_TorchGPUTest.ipynb
txn2/gpu-lab
341a91ca60ebb39a9074d84e40028bab6609ab4d
[ "MIT" ]
null
null
null
notebooks/21_TorchGPUTest.ipynb
txn2/gpu-lab
341a91ca60ebb39a9074d84e40028bab6609ab4d
[ "MIT" ]
2
2020-10-29T00:33:01.000Z
2020-10-29T00:39:32.000Z
17.030075
52
0.461369
[ [ [ "# PyTorch CUDA Test", "_____no_output_____" ] ], [ [ "import torch", "_____no_output_____" ], [ "torch.cuda.is_available()", "_____no_output_____" ], [ "torch.cuda.device_count()", "_____no_output_____" ], [ "device = torch.device(\"cuda\")\nx = torch.rand(10000, 10000, device=device)\ny = x.to(\"cpu\", torch.double)\nprint(x[0:5, 0:5])\nprint(y[0:5, 0:5])", "_____no_output_____" ] ], [ [ "# CPU Test", "_____no_output_____" ] ], [ [ "%%time\nfor i in range(0,100):\n ym = y * .5\n \nprint(ym[0:5, 0:5])", "_____no_output_____" ] ], [ [ "# GPU Test", "_____no_output_____" ] ], [ [ "%%time\nfor i in range(0,100):\n xm = x * .5\n\nprint(xm[0:5, 0:5])", "_____no_output_____" ] ], [ [ "# Long Running GPU Test", "_____no_output_____" ] ], [ [ "%%time\nfor i in range(0,90000):\n xm = x * .5\n\nprint(xm[0:5, 0:5])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd3968cae35956422e338f6eb234cabe4694173
252,344
ipynb
Jupyter Notebook
LSTM_RNN/mask_with_impurities_prediction.ipynb
adderbyte/Dynamic-Behaviour-Modelling-With-LSTM-.-AIR-POLLUTION-As-case-study
ee111508b76e7a7ded6419b74ea6909dc8b12c3c
[ "MIT" ]
3
2018-12-05T14:37:34.000Z
2021-03-01T16:53:45.000Z
LSTM_RNN/mask_with_impurities_prediction.ipynb
adderbyte/Dynamic-Behaviour-Modelling-With-LSTM-.-AIR-POLLUTION-As-case-study
ee111508b76e7a7ded6419b74ea6909dc8b12c3c
[ "MIT" ]
null
null
null
LSTM_RNN/mask_with_impurities_prediction.ipynb
adderbyte/Dynamic-Behaviour-Modelling-With-LSTM-.-AIR-POLLUTION-As-case-study
ee111508b76e7a7ded6419b74ea6909dc8b12c3c
[ "MIT" ]
1
2017-06-13T10:49:55.000Z
2017-06-13T10:49:55.000Z
79.628905
60,206
0.797518
[ [ [ "import tensorflow as tf\nprint(tf.__version__)", "0.11.0\n" ], [ "%matplotlib inline\nimport numpy.ma as ma\nfrom numpy import isfinite\n\n\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n#from tensorflow.nn.rnn import *\nfrom tensorflow.python.ops import *\n\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow.python.framework import dtypes\n\nimport seaborn as sns", "_____no_output_____" ], [ "cd /", "/\n" ], [ "cd Users/Seychelles/Desktop/GoogleTensorflow/Data_Collector/CSV_file_from_2007_to_2017/", "/Users/Seychelles/Desktop/GoogleTensorflow/Data_Collector/CSV_file_from_2007_to_2017\n" ], [ "DataKarpos = pd.read_csv('Target.csv') # Only Karpos Location is used here . Other locations are zero", "_____no_output_____" ], [ "print (\"Number of columns present : \");DataKarpos.columns.values.size", "Number of columns present : \n" ], [ "DataKarpos.columns", "_____no_output_____" ], [ "DataKarpos.describe() # Describe data input ", "//anaconda/envs/seychelles/lib/python3.5/site-packages/numpy/lib/function_base.py:3834: RuntimeWarning: Invalid value encountered in percentile\n RuntimeWarning)\n" ], [ "DataKarpos.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 89500 entries, 0 to 89499\nData columns (total 27 columns):\nUnnamed: 0 89500 non-null int64\ndate 89500 non-null object\nPM10 60629 non-null float64\nNAME 89500 non-null object\nPM10_null_pointers 89500 non-null int64\nCO 57342 non-null float64\nCO_null_pointers 89500 non-null int64\nNO2 42360 non-null float64\nNO2_null_pointers 89500 non-null int64\nO3 50553 non-null float64\nO3_null_pointers 89500 non-null int64\nPM25 36143 non-null float64\nPM25_null_pointers 89500 non-null int64\ntime 89500 non-null object\nmonth 89500 non-null int64\nday 89500 non-null int64\nhour 89500 non-null int64\ndaysInterval 89500 non-null object\ndays_interval 89500 non-null int64\nhour_interval 89500 non-null int64\nCentar 89500 non-null float64\nGazi Baba 89500 non-null float64\nKarpos 89500 non-null float64\nLisice 89500 non-null float64\nMiladinovci 89500 non-null float64\nMrsevci 89500 non-null float64\nRektorat 89500 non-null float64\ndtypes: float64(12), int64(11), object(4)\nmemory usage: 18.4+ MB\n" ] ], [ [ "## Extract Data And Feature set", "_____no_output_____" ] ], [ [ "TargetKarpos = DataKarpos[['PM10']].copy(deep=True) # copy PM10 data out from data sets", "_____no_output_____" ], [ "features_set = DataKarpos.copy(deep=True)", "_____no_output_____" ], [ "del features_set['PM10'] # delete feature set from data set", "_____no_output_____" ], [ "features_set.drop(['Unnamed: 0','date','day', 'time','Lisice', 'Miladinovci', 'Mrsevci',\n 'Rektorat','daysInterval','NAME', 'hour_interval', 'days_interval','Centar', 'Gazi Baba'],axis=1,inplace=True)", "_____no_output_____" ], [ "print (\"Number of columns present in Future set : \");features_set.columns.values.size", "Number of columns present in Future set : \n" ], [ "features_set.columns", "_____no_output_____" ], [ "features_set[1:5] # Our feature set now comes out neat.", "_____no_output_____" ] ], [ [ "# Analysis Of Mising Values For PM10", "_____no_output_____" ] ], [ [ "Target = DataKarpos[['PM10']].copy(deep=True)", "_____no_output_____" ], [ "Target.shape", "_____no_output_____" ], [ "null_data_PM10 = Target[Target.isnull().any(axis=1)] # extract data sets with null values", "_____no_output_____" ], [ "null_data_PM10.describe()", 
"//anaconda/envs/seychelles/lib/python3.5/site-packages/numpy/lib/function_base.py:3834: RuntimeWarning: Invalid value encountered in percentile\n RuntimeWarning)\n" ], [ "null_data_PM10.info() # Info about non-null", "<class 'pandas.core.frame.DataFrame'>\nInt64Index: 28871 entries, 168 to 86692\nData columns (total 1 columns):\nPM10 0 non-null float64\ndtypes: float64(1)\nmemory usage: 451.1 KB\n" ], [ "Target.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 89500 entries, 0 to 89499\nData columns (total 1 columns):\nPM10 60629 non-null float64\ndtypes: float64(1)\nmemory usage: 699.3 KB\n" ], [ "Target.describe()", "//anaconda/envs/seychelles/lib/python3.5/site-packages/numpy/lib/function_base.py:3834: RuntimeWarning: Invalid value encountered in percentile\n RuntimeWarning)\n" ], [ "def getPctMissing(series):\n '''\n Returns percentage of Missing data in a data set.\n Input : Pandas series\n Output: Percentage of missing data in data set\n '''\n num = series.isnull().sum()\n Total_data = len(series)\n return 100*(num/Total_data)", "_____no_output_____" ], [ "print(\"Number of null values in PM10 : \");Target.isnull().sum()", "Number of null values in PM10 : \n" ], [ "print(\"Number of non null values in PM10 : \");Target.count()", "Number of non null values in PM10 : \n" ], [ "print(\"Percentage of Missing Value: \"); getPctMissing(Target)", "Percentage of Missing Value: \n" ] ], [ [ "# Data After Cleaning", "_____no_output_____" ] ], [ [ "Target.describe()", "//anaconda/envs/seychelles/lib/python3.5/site-packages/numpy/lib/function_base.py:3834: RuntimeWarning: Invalid value encountered in percentile\n RuntimeWarning)\n" ], [ "Target = Target.reset_index(drop=True)", "_____no_output_____" ], [ "Target.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 89500 entries, 0 to 89499\nData columns (total 1 columns):\nPM10 60629 non-null float64\ndtypes: float64(1)\nmemory usage: 699.3 KB\n" ], [ "print(\"Number of null values in PM10 : \");Target.isnull().sum()", "Number of null values in PM10 : \n" ], [ "print(\"Number of non null values in PM10 : \");Target.count()", "Number of non null values in PM10 : \n" ], [ "Target.head()", "_____no_output_____" ] ], [ [ "# Clean data", "_____no_output_____" ] ], [ [ "Target.drop(Target.index[168:215],inplace=True);# 16 consecutive entries missing\nTarget.drop(Target.index[337:361],inplace=True);# \nTarget.drop(Target.index[961:984],inplace=True);\nTarget.drop(Target.index[1082:1105],inplace=True)\nTarget.drop(Target.index[1088:1106],inplace=True)\nTarget.drop(Target.index[1569:1593],inplace=True)\nTarget.drop(Target.index[1689:1705],inplace=True)\nTarget.drop(Target.index[2272:2296],inplace=True)\nTarget.drop(Target.index[3470:3592],inplace=True)\nTarget.drop(Target.index[16707:35423],inplace=True)", "_____no_output_____" ], [ "features_set.drop(features_set.index[168:215],inplace=True);# 16 consecutive entries missin\nfeatures_set.drop(features_set.index[337:361],inplace=True);# \nfeatures_set.drop(features_set.index[961:984],inplace=True);\nfeatures_set.drop(features_set.index[1082:1105],inplace=True)\nfeatures_set.drop(features_set.index[1088:1106],inplace=True)\nfeatures_set.drop(features_set.index[1569:1593],inplace=True)\nfeatures_set.drop(features_set.index[1689:1705],inplace=True)\nfeatures_set.drop(features_set.index[2272:2296],inplace=True)\nfeatures_set.drop(features_set.index[3470:3592],inplace=True)\nfeatures_set.drop(features_set.index[16707:35423],inplace=True)", "_____no_output_____" ], [ "# Reset index\nTarget = 
Target.reset_index(drop=True)\nfeatures_set = features_set.reset_index(drop=True)", "_____no_output_____" ], [ "print(\"Number of null values in PM10 : \");Target.isnull().sum()", "Number of null values in PM10 : \n" ], [ "print(\"Percentage of Missing Value After cleaning: \"); getPctMissing(Target)", "Percentage of Missing Value After cleaning: \n" ] ], [ [ "# Prepare Data ", "_____no_output_____" ] ], [ [ "column_size = features_set.columns.values.size # this is size of list element size\nTarget_list = Target.values.tolist() # create a list of values for use in data model\nfeature_list= features_set.values.tolist()", "_____no_output_____" ], [ "counter=0 # for tracking each input of target\ndef dataGetter(datalist):\n \"\"\"\n Function for moving the sliding window across the target\n Appends a taget variable on each call.\n \n Returns a single PM10 value.\n Input: PM10 Data list named Target_list above\n Output: single PM10 value\n \"\"\"\n global counter;\n assert type(datalist) == list;\n \n \n value = datalist[counter]\n #print(counter)\n\n\n counter = counter +1\n \n return np.array([value])", "_____no_output_____" ], [ "counter_t=0 # for tracking each input of feature set\ndef dataGetter_target(datalist):\n \"\"\"\n Function move sliding window across the feature set\n Appends a new feature set on each call\n \n Input: Feature set Data list named Feature_list above\n Output: single PM10 value\n \"\"\"\n global counter_t;\n assert type(datalist) == list;\n \n \n value = datalist[counter_t]\n #print(counter)\n\n\n counter_t = counter_t +1\n \n return np.array([value])", "_____no_output_____" ], [ "# Define window size or learning\nsliding_window = []\nlag=25 # 24 hours time step from 0 to 24 plus the window_step_move (25) which is to be predicted\nwindow_move_step = 1\nfor i in range(lag - window_move_step):\n sliding_window.append(dataGetter(Target_list))", "_____no_output_____" ], [ "# sliding window for feature set \n\nsliding_window_featureset = []\nlag=25 # 24 hours time step from 0 to 24 plus the window_step_move (25) which is to be predicted\nwindow_move_step = 1\nfor i in range(lag - window_move_step):\n sliding_window_featureset.append(dataGetter_target(feature_list))", "_____no_output_____" ], [ "def get_pair(Target,Target2):\n \"\"\"\n Function for moving the slifing window.\n It calls datagetter and dataGetter_target seperately to \n get the feature set and target separately.\n \n At each call it drops the the first element in \n the feature set.\n \n In the target, it only uses the 25th value since the aim \n is to predict the 25th value given the past feature set for the last 24 hours.\n \n \n \n \"\"\"\n \n global sliding_window # sliding window for target \n global sliding_window_featureset # Sliding window feature set\n sliding_window.append(dataGetter(Target)) \n #print(len(sliding_window))\n sliding_window_featureset.append(dataGetter_target(Target2))\n \n input_value = sliding_window_featureset[0:24]\n \n \n input_value=np.array(input_value)\n \n #print(input_value.shape)\n input_value=np.reshape(input_value,(1,len(input_value)*column_size))\n mx = np.ma.masked_invalid(input_value)\n \n ##print(input_value)\n output_value = sliding_window[-1]\n my = np.ma.masked_invalid(output_value)\n ####remove last value from sliding_window\n _ = sliding_window_featureset[-1]\n ##print(output_value)\n sliding_window = sliding_window[1:]\n sliding_window_featureset = sliding_window_featureset[1:]\n #print(len(sliding_window))\n # Process output\n mask_x = ~mx.mask\n mask_float 
= mask_x\n ## y output\n mask_y =~my.mask\n mask_yfloat = mask_y\n return mx.data,mask_x.astype(float), my.data,mask_y.astype(float)", "_____no_output_____" ] ], [ [ "# Build Model", "_____no_output_____" ] ], [ [ "#Imports\nimport tensorflow as tf\n#from tensorflow.nn.rnn import *\nfrom tensorflow.python.ops import *\n", "_____no_output_____" ], [ "#Input Params\nNUMBER=0; # replacement for nan\nwith tf.name_scope(\"input_target_placeholders\"): \n input_dim = 1\n ##The Input Layer as a Placeholder\n #Since we will provide data sequentially, the 'batch size'\n #is 1.\n input_layer = tf.placeholder(tf.float32, [1, input_dim*288],name=\"input_data\")\n correct_output = tf.placeholder(tf.float32, [1, input_dim],name=\"target_data\")\n mask_x = tf.placeholder(tf.float32, [1, input_dim*288],name=\"inputmask\")\n mask_y = tf.placeholder(tf.float32, [1, input_dim],name=\"target_mask\")\n features_with_nans = tf.mul(input_layer, mask_x)\n y_input_with_nans = tf.mul(mask_y,correct_output)\n y_input = tf.select(tf.is_nan(y_input_with_nans), tf.ones_like(y_input_with_nans) * NUMBER, y_input_with_nans);\n features = tf.select(tf.is_nan(features_with_nans), tf.ones_like(features_with_nans) * NUMBER, features_with_nans);", "_____no_output_____" ], [ "###inistate = tf.Variable(lstm_cell_with_dropout.zero_state(BATCH_SIZE, tf.float32), trainable=False)", "_____no_output_____" ], [ "with tf.name_scope(\"lstmLayer\"): \n lstm_layer1 = rnn_cell.BasicLSTMCell(input_dim*288,state_is_tuple=False)\n #The LSTM state as a Variable initialized to zeroes\n lstm_state1 = tf.Variable(tf.zeros([1, lstm_layer1.state_size]),trainable=False,name=\"initial_state\")\n #lstm_state1 = tf.Variable(lstm_layer1.zero_state(1,lstm_layer1.state_size[-1] ), trainable=False)\n #Connect the input layer and initial LSTM state to the LSTM cell\n lstm_output1, lstm_state_output1 = lstm_layer1(features, lstm_state1)\n #The LSTM state will get updated\n lstm_update_op1 = lstm_state1.assign(lstm_state_output1)", "WARNING:tensorflow:<tensorflow.python.ops.rnn_cell.BasicLSTMCell object at 0x103179828>: Using a concatenated state is slower and will soon be deprecated. 
Use state_is_tuple=True.\n" ], [ "lstm_state_output1.get_shape() # verify shape", "_____no_output_____" ], [ "with tf.name_scope(\"weight_Bias_learning_rate\"):\n global_step = tf.Variable(0, trainable=False,name=\"global_step\")\n starter_learning_rate = 0.009\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n 1000, 0.8, staircase=False,name =\"Exponential_decay\")\n ##The Regression-Output Layer\n #The Weights and Biases matrices first\n output_W1 = tf.Variable(tf.truncated_normal([input_dim,input_dim*288]),name=\"weight\")\n output_b1 = tf.Variable(tf.zeros([input_dim]),name=\"bias\")\n selector=tf.mul(mask_x,output_W1)\n selector_2 = tf.transpose(selector)\n ", "_____no_output_____" ], [ "with tf.name_scope(\"prediction\"):\n #Compute the output\n \n final_output = tf.matmul(lstm_output1, selector_2) + output_b1", "_____no_output_____" ], [ "lambda_l2_reg=0.2\nl2 = lambda_l2_reg * sum(\n tf.nn.l2_loss(tf_var)\n for tf_var in tf.trainable_variables()\n if not (\"noreg\" in tf_var.name or \"bias\" in tf_var.name)\n)\n#loss += l2", "_____no_output_____" ], [ "final_output.get_shape() # verify output shape", "_____no_output_____" ], [ "output_W1 # verify weight shape", "_____no_output_____" ], [ "with tf.name_scope(\"RMS_error\"):\n ##Calculate the Sum-of-Squares Error\n \n error = tf.pow(tf.sub(final_output,y_input), 2)+l2", "_____no_output_____" ], [ "with tf.name_scope(\"optimizer\"):\n ##The Optimizer\n #Adam works best\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(error)", "_____no_output_____" ], [ "# Create a summary to monitor MSE\nmse=tf.summary.tensor_summary(\"errors_Summary\",error)\n# Create a summary to monitor predictions\nprediction=tf.summary.tensor_summary(\"predictions_Summmary\", final_output)\n# Create a summary to monitor bias\nbias_vec=tf.summary.tensor_summary(\"bias\", output_b1)\n# create sumary\n#rate_vec=tf.summary.scalar(\"rate\", learning_rate)\n\n\n#histogram plot\n\nerror_stats=tf.histogram_summary(\"errors_Histogram\",error)\nweight_stats=tf.histogram_summary(\"weights_Histogram\",output_W1)\nbias_stats=tf.histogram_summary(\"biases_Histogram\",output_b1)\n#learning_stats=tf.histogram_summary(\"biases_Histogram\",learning_rate)\n\n\n#merged_summary_op = tf.merge_all_summaries()\nmerged_summary_op = tf.merge_summary([mse,prediction,bias_vec,error_stats,weight_stats,bias_stats])", "_____no_output_____" ], [ "##Session\nsess = tf.Session()\n#Initialize all Variables\nsess.run(tf.initialize_all_variables())\n", "_____no_output_____" ] ], [ [ "# Evaluation Data set", "_____no_output_____" ] ], [ [ "logs_path = '/Users/Seychelles/Desktop/GoogleTensorflow/finals/tensorboardData/local_median/' # for tensorfboard", "_____no_output_____" ], [ "##Training Parameters\nn_iter = 70415\ninner_iter = 6000\nactual_output1 = []\nnetwork_output1 = []\n", "_____no_output_____" ], [ "len(Target_list)", "_____no_output_____" ] ], [ [ "# Feature and target Validation and test set", "_____no_output_____" ] ], [ [ "import copy\n#new_list = copy.deepcopy(old_list)\nTest_eval=copy.deepcopy(Target_list[n_iter:])", "_____no_output_____" ], [ "Feature_eval=copy.deepcopy(feature_list[n_iter:]) # copy out feature set for validation", "_____no_output_____" ], [ "len(Feature_eval)", "_____no_output_____" ], [ "len(Test_eval)", "_____no_output_____" ] ], [ [ "# Errors Plot", "_____no_output_____" ] ], [ [ "writer = tf.train.SummaryWriter(logs_path, graph= tf.get_default_graph())", "_____no_output_____" ], [ "init = 
tf.initialize_all_variables()\nerror_=[]\n\nsess.run(init)\ncounter =0 \ncounter_t=0\nassert counter==0\n\nfor i in range(n_iter):\n input_v,input_mask ,output_v,output_mask = get_pair(Target_list,feature_list)\n y_inputz = sess.run([y_input],feed_dict={correct_output: output_v,mask_y:output_mask})\n \n #print(input_v.shape)\n #print (dtype(input_v))\n #print(input_mask.shape)\n #print(output_mask.shape)\n #print(output_v.shape)\n _, _, network_output,errors,summary = sess.run([lstm_update_op1,\n train_step,\n final_output,error,merged_summary_op],\n feed_dict = {\n input_layer:input_v,mask_x:input_mask,\n correct_output: output_v,mask_y:output_mask})\n writer.add_summary(summary)\n error_.append(errors)\n if i%inner_iter==0 & i!=0:\n assert i!=0;\n # reset state\n # Every once in a while we reset the initial state to zero.\n # This is to facilitate learning and prevent overfitting.\n # The model has been initially configured to propagate previous state to the initial \n # state in the next iteration. \n sess.run(lstm_state1.assign(tf.zeros([1, lstm_layer1.state_size])))\n \n actual_output1.append(y_inputz)\n #actual_output2.append(output_v[0][1])\n network_output1.append(network_output)\n #network_output2.append(network_output[0][1])\n #x_axis.append(i)\n ", "_____no_output_____" ], [ "errorplot = np.array(error_)\nerrorplot = errorplot.reshape(n_iter,1)", "_____no_output_____" ], [ "import matplotlib.mlab as mlab\n\n#fig, ax = plt.subplots()\nn, bins, patches=plt.hist(errorplot,60,normed=1,facecolor='green',alpha=0.9)\nmu= np.mean(errorplot);sigma=np.std(errorplot);\ny = mlab.normpdf( bins,mu,sigma)\n#y = mlab.normpdf( bins, mu, sigma)\nplt.plot(bins, y, 'r--', linewidth=2)\n#ax.set_yscale('log')\n#ax.set_xscale('log')\nplt.title(\"Histogram plot for error\")\nplt.ylabel(\"Frequemcy\")\nplt.xlabel(\"Error Values\")\nplt.show()", "_____no_output_____" ], [ "import matplotlib.mlab as mlab\n\nfig, ax = plt.subplots()\nplt.hist(errorplot,bins=200,normed=1,facecolor='olivedrab')\n\nax.set_yscale('log')\n#ax.set_xscale('log')\nplt.title(\"Error Plot\")\nplt.ylabel(\"Error Values\")\nplt.xlabel(\"Errors\")\nplt.show()", "_____no_output_____" ] ], [ [ "# Training Prediction plot", "_____no_output_____" ] ], [ [ "from matplotlib.dates import date2num\nData = pd.read_csv('ItemListWithDummy.csv')", "_____no_output_____" ], [ "pd.to_datetime(Data.date);", "_____no_output_____" ], [ "x_axis = Data['date']", "_____no_output_____" ], [ "x_axis = pd.to_datetime(x_axis)", "_____no_output_____" ], [ "#80000-len(network_output1)", "_____no_output_____" ], [ "network = np.array(network_output1) # convert out put to ", "_____no_output_____" ], [ "actual= np.array(actual_output1)", "_____no_output_____" ], [ "actual.shape[0]", "_____no_output_____" ], [ "import matplotlib\nplt.rcParams[\"figure.figsize\"] = (8,5)\nfig = plt.figure(figsize=(30, 2))\n\nfig, ax = plt.subplots()\nax.plot(x_axis[0:48], network.reshape(actual.shape[0],1)[0:48], 'r-',color='deepskyblue',label= 'Prediction')\nax.plot_date(x_axis[0:48], actual.reshape(actual.shape[0],1)[0:48], 'b-',color='olivedrab',label='Actual Value')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize='x-large')\n#ax.xaxis.set_minor_locator(dates.MonthLocator())\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M:%S'))\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%d\\n%a'))\n#ax.xaxis.grid(True, 
which=\"minor\")\n#ax.yaxis.grid()\n#ax.xaxis.set_major_locator(dates.DayLocator())\n#ax.xaxis.set_major_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\nax.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%Y-%b-%a %H:%M:%S'))\nax.xaxis.grid(True, which=\"minor\")\n\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n\n\nplt.xticks( rotation=25 )\nplt.tight_layout()\nplt.ylabel(\"PM10 Values\",fontweight='bold')\nplt.xlabel(\"Time/hr \",fontweight='bold')\nplt.title(\"PM10 Prediction for Small Time interval During Training\",fontweight='bold')\nplt.show()", "_____no_output_____" ] ], [ [ "# Plots predictions during Training", "_____no_output_____" ] ], [ [ "fig = plt.figure(figsize=(30, 2))\n\nfig, ax = plt.subplots()\nax.plot(x_axis[0:actual.shape[0]], network.reshape(actual.shape[0],1)[0:actual.shape[0]], 'r-',color='deepskyblue',label='Training Prediction')\nax.plot_date(x_axis[0:actual.shape[0]], actual.reshape(actual.shape[0],1)[0:actual.shape[0]], 'b-',color='olivedrab',label='Actual Values')\n\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize='x-large')\n#ax.xaxis.set_minor_locator(dates.MonthLocator())\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M:%S'))\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%d\\n%a'))\n#ax.xaxis.grid(True, which=\"minor\")\n#ax.yaxis.grid()\n#ax.xaxis.set_major_locator(dates.DayLocator())\n#ax.xaxis.set_major_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\nax.xaxis.set_major_formatter( matplotlib.dates.DateFormatter('%Y-%b-%a %H:%M:%S'))\nax.xaxis.grid(True, which=\"minor\")\n\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n\n\nplt.xticks( rotation=25 )\nplt.tight_layout()\nplt.ylabel(\"PM10 values\",fontweight='bold')\nplt.xlabel('Date', fontweight='bold')\nplt.title(\"PM10 Training prediction and Actual Value Plots\",fontweight='bold')\nplt.show()", "_____no_output_____" ] ], [ [ "# Prediction", "_____no_output_____" ] ], [ [ "sess.run(lstm_state1.assign(tf.zeros([1, lstm_layer1.state_size])));", "_____no_output_____" ], [ "Target_list[-48:-24];", "_____no_output_____" ], [ "# Testing plot\ncounter =0 # reset the counter for target\ncounter_t=0 # reset the counter for feature lis\nactual_output_test = []\nnetwork_output_test = []\n\n \nfor i in range(len(Test_eval[-48:-24])):\n \n input_v,input_mask ,output_v,output_mask = get_pair(Target_list[-48:-24],Feature_eval[-48:-24])\n y_inputz_test = sess.run([y_input],feed_dict={correct_output: output_v,mask_y:output_mask})\n _, network_output = sess.run([lstm_update_op1,\n final_output],\n feed_dict = {\n input_layer:input_v,mask_x:input_mask,\n correct_output: output_v})\n \n \n actual_output_test.append(y_inputz_test)\n \n network_output_test.append(network_output)\n ", "_____no_output_____" ], [ "4+5", "_____no_output_____" ], [ "import matplotlib.pyplot \n\n\nfig = plt.figure(figsize=(30, 2))\n\nfig, ax = plt.subplots()\nax.plot( np.array(network_output_test).reshape(len(network_output_test),1), 'r-',color='deepskyblue',label=\"Test prediction\")\nax.plot(np.array(Target_list[-24:]).reshape(len(Target_list[-24:]),1), 'b-',color='olivedrab',label=\"Actual values\")\n\n#ax.xaxis.set_minor_locator(dates.MonthLocator())\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M:%S'))\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%d\\n%a'))\n#ax.xaxis.grid(True, 
which=\"minor\")\n#ax.yaxis.grid()\n#ax.xaxis.set_major_locator(dates.DayLocator())\n#ax.xaxis.set_major_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n#ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%b-%a %H:%M:%S'))\n#ax.xaxis.grid(True, which=\"minor\")\nplt.title(\"PM10 Test prediction and Actual Value Plots (24hr Time Frame)\",fontweight='bold')\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\nplt.ylabel(\"PM10 values\",fontweight='bold')\nplt.xlabel(\"Time/hr for Test Sample\",fontweight='bold')\nplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,fontsize='x-large')\n\n\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n\n\n#plt.xticks( rotation=25 )\n#plt.tight_layout()\n\nplt.show()", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error", "_____no_output_____" ], [ "mean_squared_error(np.array(network_output_test).reshape(len(network_output_test),1), np.array(Target_list[-24:]).reshape(len(Target_list[-24:]),1))**0.5 ", "_____no_output_____" ], [ "import scipy\ndef rsquared(x, y):\n \"\"\" Return R^2 where x and y are array-like.\"\"\"\n \n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)\n return r_value**2", "_____no_output_____" ], [ "import scipy\ndef rsquared(x, y):\n \"\"\" Return R^2 where x and y are array-like.\"\"\"\n \n slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)\n return r_value**2", "_____no_output_____" ], [ "rsquared(np.array(network_output_test).reshape(len(Target_list[-24:]),),\n np.array(Target_list[-24:]).reshape(len(network_output_test),) ) ", "_____no_output_____" ], [ "actual_output_test = np.array(actual_output_test)", "_____no_output_____" ], [ "import matplotlib.pyplot \n\n\nfig = plt.figure(figsize=(30, 2))\n\nfig, ax = plt.subplots()\nax.plot( actual_output_test.reshape(48,1), 'b-')\n#ax.xaxis.set_minor_locator(dates.MonthLocator())\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M:%S'))\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%d\\n%a'))\n#ax.xaxis.grid(True, which=\"minor\")\n#ax.yaxis.grid()\n#ax.xaxis.set_major_locator(dates.DayLocator())\n#ax.xaxis.set_major_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n#ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%b-%a %H:%M:%S'))\n#ax.xaxis.grid(True, which=\"minor\")\n\n\n\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n\n\n#plt.xticks( rotation=25 )\n#plt.tight_layout()\n\nplt.show()", "_____no_output_____" ], [ "import matplotlib.pyplot \n\n\nfig = plt.figure(figsize=(30, 2))\n\nfig, ax = plt.subplots()\nax.plot( network_output_test.reshape(48,1), 'r-')\n#ax.xaxis.set_minor_locator(dates.MonthLocator())\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%H:%M:%S'))\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('%d\\n%a'))\n#ax.xaxis.grid(True, which=\"minor\")\n#ax.yaxis.grid()\n#ax.xaxis.set_major_locator(dates.DayLocator())\n#ax.xaxis.set_major_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n#ax.xaxis.set_major_formatter(dates.DateFormatter('%Y-%b-%a %H:%M:%S'))\n#ax.xaxis.grid(True, which=\"minor\")\n\n\n\n#ax.xaxis.set_minor_formatter(dates.DateFormatter('\\n\\n%a\\%b\\%Y'))\n\n\n#plt.xticks( rotation=25 )\n#plt.tight_layout()\n\nplt.show()", "_____no_output_____" ], [ "\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd3a8077e1f24abf606feaa87d68af5222696b3
158,043
ipynb
Jupyter Notebook
Perceptron Networks/Color/Color Net.ipynb
SidSata/Information-in-Language
36f7b2fa79cb0c7fe03d757d7870040aa29db419
[ "MIT" ]
1
2021-10-05T17:29:51.000Z
2021-10-05T17:29:51.000Z
Perceptron Networks/Color/Color Net.ipynb
SidSata/Information-in-Language
36f7b2fa79cb0c7fe03d757d7870040aa29db419
[ "MIT" ]
null
null
null
Perceptron Networks/Color/Color Net.ipynb
SidSata/Information-in-Language
36f7b2fa79cb0c7fe03d757d7870040aa29db419
[ "MIT" ]
null
null
null
49.636621
12,844
0.614662
[ [ [ "import sys\nimport math\nimport numpy as np\nimport json\nsys.path.insert(0, '..')\nfrom net_framework import *\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport pandas as pd", "_____no_output_____" ] ], [ [ "### Formatting Data", "_____no_output_____" ] ], [ [ "term_data = pd.read_csv('term.txt', sep=\"\\t\", header=None)\nterm_data.columns = [\"#Lnum\", \"#snum\", \"#cnum\", \"Term Abbrev\"]\nterm_data.head()", "_____no_output_____" ], [ "term_data['#Lnum'].unique()", "_____no_output_____" ], [ "cnum_data = pd.read_csv('cnum-vhcm-lab-new.txt', sep=\"\\t\")\nlocations = cnum_data[['#cnum']]\nlocations['Normalized-L'] = (cnum_data['L*'] - cnum_data['L*'].mean())/(cnum_data['L*'] - cnum_data['L*'].mean()).std() * 1/2\nlocations['Normalized-a'] = (cnum_data['a*'] - cnum_data['a*'].mean())/(cnum_data['a*'] - cnum_data['a*'].mean()).std() * 1/2\nlocations['Normalized-b'] = (cnum_data['b*'] - cnum_data['b*'].mean())/(cnum_data['b*'] - cnum_data['b*'].mean()).std() * 1/2\ndisplay(locations)", "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n after removing the cwd from sys.path.\nC:\\ProgramData\\Anaconda3\\lib\\site-packages\\ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n" ], [ "# debug1 = cnum_data.loc[:, ['#cnum', 'L*', 'a*', 'b*']]\n# debug1['L*'] = (debug1['L*'] - debug1['L*'].mean()) / debug1['L*'].std()\n# debug1", "_____no_output_____" ], [ "locations = locations.sort_values('#cnum')\nchip_num = list(locations['#cnum'])\nlab_norm = [[row[2], row[3], row[4]] for row in locations.itertuples()]\n# print(lab_norm)", "_____no_output_____" ], [ "# #Neural Network Shape Test\n# NNtest = Neural_Network(inputSize = 3, outputSize = 9, hiddenSize = [3,3,3] , learning_rate = 0.001)\n# NNtest(torch.FloatTensor([[1, 1, 1], [1, 1, 1]]))\n", "_____no_output_____" ], [ "language_num=5\nl1 = term_data[term_data.get('#Lnum').eq(language_num)]\nunique_symbols = list(l1['Term Abbrev'].unique())\nl1_grouped = l1.groupby('#cnum')['Term Abbrev'].apply(list)\nl1_chip_abbrev_percentage = [[(l1_grouped[i + 1].count(abbrev) / len(l1_grouped[i + 1])) \\\n for abbrev in unique_symbols] for i in range(len(l1_grouped))]\n#display(l1_chip_abbrev_percentage)", "_____no_output_____" ], [ "l1_result = pd.DataFrame(l1_chip_abbrev_percentage)\nl1_result.index += 1\nl1_result.index.name = '#cnum'\nl1_result.columns = unique_symbols\nprint(l1_result)\nchip_norm = []\n#pull the percentage for each cnum\nfor x in chip_num:\n# 
chip_norm.append((l1_result.loc[l1[\"#cnum\"]==x]).values.tolist()[0])\n chip_norm.append(l1_result.loc[x,:].values.tolist())\n#display(chip_norm)", " 7 2 4 6 9 1 5 3 \\\n#cnum \n1 0.833333 0.166667 0.0 0.000000 0.000000 0.000000 0.000000 0.0 \n2 0.333333 0.666667 0.0 0.000000 0.000000 0.000000 0.000000 0.0 \n3 0.000000 0.000000 1.0 0.000000 0.000000 0.000000 0.000000 0.0 \n4 0.000000 0.000000 0.0 1.000000 0.000000 0.000000 0.000000 0.0 \n5 0.333333 0.666667 0.0 0.000000 0.000000 0.000000 0.000000 0.0 \n... ... ... ... ... ... ... ... ... \n326 0.000000 0.000000 0.0 0.000000 0.000000 0.000000 1.000000 0.0 \n327 0.166667 0.000000 0.0 0.000000 0.666667 0.000000 0.166667 0.0 \n328 0.166667 0.666667 0.0 0.166667 0.000000 0.000000 0.000000 0.0 \n329 0.000000 0.000000 0.0 0.333333 0.000000 0.166667 0.500000 0.0 \n330 0.166667 0.000000 0.0 0.000000 0.833333 0.000000 0.000000 0.0 \n\n 8 \n#cnum \n1 0.0 \n2 0.0 \n3 0.0 \n4 0.0 \n5 0.0 \n... ... \n326 0.0 \n327 0.0 \n328 0.0 \n329 0.0 \n330 0.0 \n\n[330 rows x 9 columns]\n" ] ], [ [ "### Defining Network Shapes", "_____no_output_____" ] ], [ [ "node_num = range(1,25)\nlayer_num = range(1,4)\n\n\nshape_collection = []\nfor node in node_num:\n if node < 3:\n shape_collection.append([node])\n\ndef trickle(arr, iteration_left, check):\n if iteration_left == 0:\n global shape_collection\n #running the int fxn to make sure we don't have floats\n mp = map(int, arr)\n x = list(mp)\n if check == sum(x):\n shape_collection.append(x)\n else:\n new_arr = [0]+ arr + [0]\n #recursively expanding the list symmetrically\n while new_arr[0] < new_arr[1]-2 and new_arr[-1] < new_arr[-2]-2:\n new_arr[0] += 1\n new_arr[1] -= 1\n new_arr[-1] += 1\n new_arr[-2] -= 1\n trickle(new_arr, iteration_left - 1, check)\n\nfor node in node_num:\n for layer in layer_num:\n if node//layer < 3:\n continue\n if layer%2 == 0:\n trickle([node/2, node/2], (layer-2)/2, node)\n else:\n trickle([node], (layer-1)/2, node) \n\nprint(shape_collection)\n", "[[1], [2], [3], [4], [5], [6], [3, 3], [7], [8], [4, 4], [9], [3, 3, 3], [10], [5, 5], [3, 4, 3], [11], [3, 5, 3], [12], [6, 6], [4, 4, 4], [13], [4, 5, 4], [14], [7, 7], [4, 6, 4], [15], [5, 5, 5], [16], [8, 8], [5, 6, 5], [17], [5, 7, 5], [18], [9, 9], [6, 6, 6], [19], [6, 7, 6], [20], [10, 10], [6, 8, 6], [21], [7, 7, 7], [22], [11, 11], [7, 8, 7], [23], [7, 9, 7], [24], [12, 12], [8, 8, 8]]\n" ] ], [ [ "### Training Network", "_____no_output_____" ] ], [ [ "\n#Number of training iterations\nnum_iters = 1000\n\n#Listing out the shapes of each model\ncolors_num = len(chip_norm[0])\ninput_size = 3\n\nnetwork_shapes = []\nfor s in shape_collection:\n network_shapes.append((input_size,s,colors_num))\n\n#Learning rate of the network\nrate = 0.001\n\n#Generating Training Data\ndef shuffle(lab_norm, chip_norm):\n '''\n Applying train-test split\n '''\n lab_train, lab_test, chip_train, chip_test = train_test_split(lab_norm, chip_norm, test_size=0.2, shuffle = True)\n input_train = torch.FloatTensor(lab_train)\n output_train = torch.FloatTensor(chip_train)\n input_test= torch.FloatTensor(lab_test)\n output_test = torch.FloatTensor(chip_test)\n return input_train, output_train, input_test, output_test\n\nprint(network_shapes)", "[(3, [1], 9), (3, [2], 9), (3, [3], 9), (3, [4], 9), (3, [5], 9), (3, [6], 9), (3, [3, 3], 9), (3, [7], 9), (3, [8], 9), (3, [4, 4], 9), (3, [9], 9), (3, [3, 3, 3], 9), (3, [10], 9), (3, [5, 5], 9), (3, [3, 4, 3], 9), (3, [11], 9), (3, [3, 5, 3], 9), (3, [12], 9), (3, [6, 6], 9), (3, [4, 4, 4], 9), (3, [13], 9), (3, [4, 5, 
4], 9), (3, [14], 9), (3, [7, 7], 9), (3, [4, 6, 4], 9), (3, [15], 9), (3, [5, 5, 5], 9), (3, [16], 9), (3, [8, 8], 9), (3, [5, 6, 5], 9), (3, [17], 9), (3, [5, 7, 5], 9), (3, [18], 9), (3, [9, 9], 9), (3, [6, 6, 6], 9), (3, [19], 9), (3, [6, 7, 6], 9), (3, [20], 9), (3, [10, 10], 9), (3, [6, 8, 6], 9), (3, [21], 9), (3, [7, 7, 7], 9), (3, [22], 9), (3, [11, 11], 9), (3, [7, 8, 7], 9), (3, [23], 9), (3, [7, 9, 7], 9), (3, [24], 9), (3, [12, 12], 9), (3, [8, 8, 8], 9)]\n" ], [ "#Array of losses over training period for each network\nnum_average = 10\noutput_file = {}\nfor n in node_num:\n output_file[n] = {}\n \n\nfor net_num, shape in enumerate(network_shapes):\n print(\"Training: \",shape)\n net_error_arr = []\n for j in range(num_average):\n print('Run ' + str(j+1))\n NN = Neural_Network(inputSize = shape[0], outputSize = shape[2],\n hiddenSize = shape[1] , learning_rate = rate)\n error_arr = []\n prev_error = 0\n strike = 0\n\n input_train, output_train, input_test, output_test = shuffle(lab_norm, chip_norm)\n\n for i in range(num_iters): \n NN.train(input_train, output_train)\n validation_error = NN.l1error(output_test, NN(input_test))\n #Printing error\n if i == 0: \n dh = display(\"#\" + str(i) + \" Validation Error: \" + str(validation_error), display_id=True)\n else:\n dh.update(\"#\" + str(i) + \" Validation Error: \" + str(validation_error))\n \n #zero small error change\n if i == 0:\n strike = 0\n #adding error to array\n error_arr.append(validation_error)\n #waiting for number 'too small' decreases or increases in validation error before ending training\n if (prev_error < validation_error) and i > 100:\n if strike > 5:\n print(\"Complete at iteration \", i, \"\\nFinal error: \", np.min(error_arr), \"\\n\")\n break\n else:\n strike += 1\n prev_error = validation_error\n net_error_arr.append(np.min(error_arr))\n output_file[sum(shape[1])][len(shape[1])] = [np.mean(net_error_arr), np.std(net_error_arr)]", "_____no_output_____" ], [ "with open('validation_errors_{0}.json'.format(language_num), 'w') as f:\n json.dump(output_file, f)", "_____no_output_____" ] ], [ [ "### Looking at minimum size of networks for each threshold value", "_____no_output_____" ] ], [ [ "with open('validation_errors_{0}.json'.format(language_num)) as f:\n output_file=json.load(f)", "_____no_output_____" ], [ "errors = []\nfor size in node_num:\n out_dict_for_size = output_file[str(size)]\n vals = list(out_dict_for_size.values())\n vals = np.array(vals)\n vals = vals[:,0]\n errors.append(np.min(vals))\nerrors = np.array(errors)\n\nthresholds = np.arange(.001, 1, .001)\n\nmin_sizes = []\nfor threshold in thresholds:\n idx = 0\n for err in errors:\n if err <= threshold:\n break\n idx += 1\n if idx < len(node_num):\n min_sizes.append(node_num[idx])\n else:\n min_sizes.append(max(node_num))\n \nplt.title('Threshold Plot for Language {0}'.format(language_num))\nplt.plot(thresholds, min_sizes)\nplt.xlabel('Error Treshold Value')\nplt.ylabel('Minimum Network Size')\nplt.xlim(0,0.2)", "_____no_output_____" ] ], [ [ "**Calculating Complexity in bits**", "_____no_output_____" ] ], [ [ "def language_complexity(lnum):\n language_data = term_data[term_data.get('#Lnum').eq(lnum)]\n unique_terms = list(language_data['Term Abbrev'].unique())\n l1_grouped = language_data.groupby('#cnum')['Term Abbrev'].apply(list)\n display(l1_grouped)\n l1_chip_abbrev_percentage = [[(l1_grouped[i + 1].count(abbrev) / len(l1_grouped[i + 1])) \\\n for abbrev in unique_terms] for i in range(len(l1_grouped))]\n l1_result = 
pd.DataFrame(l1_chip_abbrev_percentage)\n l1_result.index += 1\n l1_result.index.name = '#cnum'\n l1_result.columns = unique_terms\n\n chip_norm = []\n\n for x in chip_num:\n chip_norm.append(l1_result.loc[x,:].values.tolist())\n\n terms = unique_terms\n chips = list(language_data['#cnum'].unique())\n\n complexity = 0\n prior_m = 1 / len(chips)\n for w in terms:\n word_prob = 0\n for m in chips:\n word_prob += prior_m * l1_result.at[m, w]\n for m in chips:\n encoder_prob = l1_result.at[m, w]\n if encoder_prob != 0:\n mutual_information = prior_m * encoder_prob * np.log2(encoder_prob / word_prob)\n complexity += mutual_information\n\n return complexity", "_____no_output_____" ], [ "result = language_complexity(language_num)\nprint(result)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd3b15aa9b7f40c73d26531dded773f36a237ec
56,040
ipynb
Jupyter Notebook
MachineLearning/04.SupportVectorMachines/Trees and SVMs Lab.ipynb
LuGeorgiev/Python-SoftUni
545daa4684b7a333f78dd958f8e9d13263575ddf
[ "MIT" ]
null
null
null
MachineLearning/04.SupportVectorMachines/Trees and SVMs Lab.ipynb
LuGeorgiev/Python-SoftUni
545daa4684b7a333f78dd958f8e9d13263575ddf
[ "MIT" ]
null
null
null
MachineLearning/04.SupportVectorMachines/Trees and SVMs Lab.ipynb
LuGeorgiev/Python-SoftUni
545daa4684b7a333f78dd958f8e9d13263575ddf
[ "MIT" ]
null
null
null
38.044807
334
0.509154
[ [ [ "%matplotlib inline", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom nose.tools import *\n\nnp.random.seed(24680)", "_____no_output_____" ] ], [ [ "Write your imports in the cell below.", "_____no_output_____" ] ], [ [ "from sklearn import preprocessing\nfrom sklearn.metrics import f1_score\n\nfrom sklearn.preprocessing import PolynomialFeatures\n\nfrom sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV\n\nfrom sklearn.linear_model import LogisticRegression, ElasticNet\n\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier", "_____no_output_____" ] ], [ [ "# Ensemble Models and Support Vector Machines Lab\n## Training and comparing different algorithms", "_____no_output_____" ], [ "Once again, we'll work with the bank dataset. This time, the data preprocessing steps have been done for you.\n\nThe goal is to try and improve our predictions (if they can be improved at all) using different types of algorithms.", "_____no_output_____" ], [ "### 1. Read the data (1 point)\nThis time you only need to read the data. The indicator variables have been separated out for you.\n\nRead the dataset and save it in the variable `bank_data`. The target column is `y`. Use the variables `bank_attributes` and `bank_labels` to save the attributes (explanatory variables, features, predictors), and labels (`y`).", "_____no_output_____" ] ], [ [ "bank_data = pd.read_csv(\"data/bank.csv\")\nbank_attributes = bank_data.drop(\"y\",axis = 1)\nbank_labels = bank_data[\"y\"]", "_____no_output_____" ], [ "assert_is_not_none(bank_data)\nassert_is_not_none(bank_attributes)\nassert_is_not_none(bank_labels)", "_____no_output_____" ], [ "bank_attributes.head()", "_____no_output_____" ] ], [ [ "### 2. Normalize the data (1 point)\nBecause both forests and SVMs are sensitive to non-scaled data, we need to normalize our dataset first.\n\nRescale all columns in `bank_attributes` so they have mean = 0 and variance = 1. You can either look at the `sklearn` docs or do this yourself. When you're ready, overwrite the `bank_attributes` column. Make sure that you don't lose the column names in the process.", "_____no_output_____" ] ], [ [ "scale = preprocessing.StandardScaler()\nbank_attributes[bank_attributes.columns] = scale.fit_transform(bank_attributes)\nbank_attributes", "_____no_output_____" ], [ "assert_is_not_none(bank_attributes)", "_____no_output_____" ] ], [ [ "### 3. Split the data (1 point)\nUse the standard 70% / 30% split. Since this is a classification problem, be sure to stratify the split according to the `bank_labels`.", "_____no_output_____" ] ], [ [ "bank_attributes_train, bank_attributes_test, bank_labels_train, bank_labels_test = train_test_split(\n bank_attributes, bank_labels, train_size = 0.7, stratify = bank_labels)", "_____no_output_____" ], [ "assert_is_not_none(bank_attributes_train)\nassert_is_not_none(bank_labels_train)\n\nassert_is_not_none(bank_attributes_test)\nassert_is_not_none(bank_labels_test)", "_____no_output_____" ] ], [ [ "### 4. Prepare the cross-validation folds (1 point)\nUse a stratified k-fold cross-validation split, with $k = 5$. Fit it to the train data. Save the trained cross-validator to the variable `k_fold`.\n\nThe data should already be shuffled. 
There's no need to shuffle it again.", "_____no_output_____" ] ], [ [ "k_fold = StratifiedKFold(n_splits = 5)", "_____no_output_____" ], [ "assert_is_not_none(k_fold)", "_____no_output_____" ] ], [ [ "### 5. Decision Tree (2 points)\nUse cross-validation to train and optimize the hyperparameters for a decision tree classifier.\n\nUse grid search with the following grid:\n* `max_depth`: 1, 5, 7, 15, 20\n* `min_samples_leaf`: 2, 5, 10, 12\n* `max_leaf_nodes`: 5, 10, 20\n\nUse the most appropriate scoring metric (remember that accuracy doesn't work in this case because the data is highly imbalanced; we need something which combines precision and recall). Use the cross-validation splits you just created.\n\nSave the grid results in `grid_search`. Save the best classifier in `tree_classifier`.\n\nOptionally, you can print and / or visualize the cross-validation results and the best chosen parameters.", "_____no_output_____" ] ], [ [ "partameters = {\n \"max_depth\":[1, 5, 7, 15, 20],\n \"min_samples_leaf\": [ 2, 5, 10, 12],\n \"max_leaf_nodes\": [5, 10, 20]\n}\ntree_classifier = DecisionTreeClassifier()\ngrid_search = GridSearchCV(tree_classifier, param_grid = partameters,scoring = \"f1\", cv = k_fold)\ngrid_search.fit(bank_attributes_train, bank_labels_train)\ntree_classifier = grid_search.best_estimator_\n\nprint(\"Decision tree; best score:\", grid_search.best_score_)", "Decision tree; best score: 0.5073561954209913\n" ], [ "grid_search.best_params_", "_____no_output_____" ], [ "assert_is_not_none(grid_search)\nassert_is_not_none(tree_classifier)", "_____no_output_____" ] ], [ [ "### 5. Random Forest (1 point)\nUse cross-validation to train and optimize the hyperparameters for a random forest classifier. Use the same technique as before.\n\nUse the following grid:\n* `n_estimators`: 100, 200, 300 \n* `max_depth`: 20, 50, 100\n\nNote that this grid is on the small side but this is mainly due to performance reasons. Also note that the training will take some time.\n\nSave the grid results in `grid_search`. Save the best classifier in `forest_classifier`.\n\nOptionally, you can print and / or visualize the cross-validation results and the best chosen parameters.\n\nDue to the relatively slow training, we've chosen low values for the parameters. The performance of the random forest will be worse than the decision tree. This is not necessarily the case in general, it's due to the parameters we've chosen to try.", "_____no_output_____" ] ], [ [ "partameters = {\n \"n_estimators\": [100, 200, 300] ,\n \"max_depth\": [20, 50, 100]\n}\nforest_classifier = RandomForestClassifier()\ngrid_search = GridSearchCV(forest_classifier, param_grid=partameters,scoring = \"f1\", cv=k_fold)\ngrid_search.fit(bank_attributes_train, bank_labels_train)\nforest_classifier = grid_search.best_estimator_\n\nprint(\"Random forest best params:\", grid_search.best_params_)\nprint(\"Random forest; best score:\", grid_search.best_score_)", "Random forest best params: {'max_depth': 20, 'n_estimators': 200}\nRandom forest; best score: 0.3489575280108656\n" ], [ "assert_is_not_none(grid_search)\nassert_is_not_none(forest_classifier)", "_____no_output_____" ] ], [ [ "### 6. Linear SVM (1 point)\nUse cross-validation to train and optimize the hyperparameters for a linear support vector machine. Use the same technique as before.\n\nUse the following grid:\n* `C`: 0.1, 0.5, 0.8, 1, 1.5, 2, 6, 10, 15, 20\n\nNote that we're choosing relatively small values for `C`. 
This is allowed because our data is normalized.\n\nSave the grid results in `grid_search`. Save the best classifier in `linear_svm_classifier`. There are many ways to create a linear SVM classifier. Look up the `sklearn` docs to choose the fastest one (in terms of performance).\n\nOptionally, you can print and / or visualize the cross-validation results and the best chosen parameters.", "_____no_output_____" ] ], [ [ "partameters = {\n \"C\" : [0.1, 0.5, 0.8, 1, 1.5, 2, 6, 10, 15, 20]\n}\n\nlinear_svm_classifier = LinearSVC()\ngrid_search = GridSearchCV(linear_svm_classifier, param_grid=partameters,scoring = \"f1\", cv=k_fold)\ngrid_search.fit(bank_attributes_train, bank_labels_train)\nlinear_svm_classifier = grid_search.best_estimator_\n\nprint(\"Liners SVM, best params\", grid_search.best_params_)\nprint(\"Linear SVM; best score:\", grid_search.best_score_)", "C:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the 
number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: 
ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\nC:\\Users\\LyubomirGeorgiev\\anaconda3\\lib\\site-packages\\sklearn\\svm\\_base.py:947: ConvergenceWarning: Liblinear failed to converge, increase the number of iterations.\n \"the number of iterations.\", ConvergenceWarning)\n" ], [ "assert_is_not_none(grid_search)\nassert_is_not_none(linear_svm_classifier)", "_____no_output_____" ] ], [ [ "### 7. Gaussian SVM (1 point)\nUse cross-validation to train and optimize the hyperparameters for an SVM with a Gaussian kernel. Use the same technique as before.\n\nUse the following grid:\n* `C`: 10, 15, 20, 50, 200\n* `gamma`: 0.001, 0.01, 0.1, 0.2\n\nNote that this time we give larger values of `C` because the governing parameter here is `gamma`.\n\nSave the grid results in `grid_search`. 
Save the best classifier in `gaussian_svm_classifier`.\n\nOptionally, you can print and / or visualize the cross-validation results and the best chosen parameters.", "_____no_output_____" ] ], [ [ "partameters = {\n \"C\" : [10, 15, 20, 50, 200],\n \"gamma\": [0.001, 0.01, 0.1, 0.2]\n}\ngaussian_svm_classifier = SVC()\ngrid_search = GridSearchCV(gaussian_svm_classifier, param_grid=partameters,scoring = \"f1\", cv=k_fold)\ngrid_search.fit(bank_attributes_train, bank_labels_train)\ngaussian_svm_classifier = grid_search.best_estimator_\n\nprint(\"Gaussian SVM; best score:\", grid_search.best_score_)\nprint(\"Gaussian SVM; best params:\", grid_search.best_params_)", "Gaussian SVM; best score: 0.40780648495320576\nGaussian SVM; best params: {'C': 200, 'gamma': 0.01}\n" ], [ "assert_is_not_none(grid_search)\nassert_is_not_none(gaussian_svm_classifier)", "_____no_output_____" ] ], [ [ "### 9. Compare performance on the testing data (1 point)\nNow that you've trained all your models, you've got to select the best one. This should be done on the testing data.\n\nUse the appropriate scoring metric to get the testing scores for all your models. Don't forget to pass the **testing**, not the training data. Save all scores.\n\nChoose the best classifier, based on these scores (the one with the highest test score). Of course, this is not enough. We need to look at ROC curves, track performance through other measures, debug the sources of variance in testing results, try more hyperparameters, etc. However, this is enough for an introductory lab :).\n\nOptionally, you can think of combining them into a boosted model but this is out of the scope of this lab.", "_____no_output_____" ] ], [ [ "tree_classifier_score = f1_score(bank_labels_test, tree_classifier.predict(bank_attributes_test))\nforest_classifier_score = f1_score(bank_labels_test, forest_classifier.predict(bank_attributes_test))\nlinear_svm_classifier_score = f1_score(bank_labels_test, linear_svm_classifier.predict(bank_attributes_test))\ngaussian_svm_classifier_score = f1_score(bank_labels_test, gaussian_svm_classifier.predict(bank_attributes_test))\n\nprint(\"Testing scores:\")\nprint(\"Decision tree:\", tree_classifier_score)\nprint(\"Random forest:\", forest_classifier_score)\nprint(\"Linear SVM:\", linear_svm_classifier_score)\nprint(\"Gaussian SVM:\", gaussian_svm_classifier_score)", "Testing scores:\nDecision tree: 0.4545454545454546\nRandom forest: 0.2898550724637681\nLinear SVM: 0.40366972477064217\nGaussian SVM: 0.46621621621621623\n" ], [ "best_classifier = \"gaussian SVM\" # Replace empty string with \"tree\", \"forest\", \"linear SVM\" or \"gaussian SVM\"\n", "_____no_output_____" ], [ "assert_not_equal(best_classifier, \"\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecd3c30037662a6e946683d5eb1fdedc41cdcc61
290,575
ipynb
Jupyter Notebook
dev/csv-files/gcamp-csv.ipynb
shengwanhui/Lab-Analysis
fddccbdcdc0845893c6033b421cb60b6b2d219e6
[ "MIT" ]
4
2021-03-29T18:10:25.000Z
2021-04-19T02:44:33.000Z
dev/csv-files/gcamp-csv.ipynb
shengwanhui/Lab-Analysis
fddccbdcdc0845893c6033b421cb60b6b2d219e6
[ "MIT" ]
4
2021-02-25T00:17:11.000Z
2021-03-17T14:25:12.000Z
dev/csv-files/gcamp-csv.ipynb
shengwanhui/Lab-Analysis
fddccbdcdc0845893c6033b421cb60b6b2d219e6
[ "MIT" ]
null
null
null
988.35034
40,350
0.715172
[ [ [ "# Read ImageJ CSV/XLS Files with Python", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "filePath = \"../../data/025um.xls\"", "_____no_output_____" ] ], [ [ "## Read CSV with Numpy\n\nNumpy is simple and returns unlabeled 2D arrays.", "_____no_output_____" ] ], [ [ "np.loadtxt(filePath, skiprows=1, delimiter=\"\\t\")", "_____no_output_____" ] ], [ [ "## Read CSV with Pandas\nPandas is fancier and uses _data frames_ (2D arrays with labeled rows and columns)", "_____no_output_____" ] ], [ [ "pd.read_csv(filePath, delimiter=\"\\t\")", "_____no_output_____" ], [ "df = pd.read_csv(filePath, delimiter=\"\\t\")\n\n# Correct times and convert units to minutes\nframePeriodSeconds = 5.0\nframesPerMinute = 60 / framePeriodSeconds\ntimes = df['Time'].values / framesPerMinute\n\nroiNames = df.columns[1:] # skip the first column (Time)\nfor roiName in roiNames[:5]: # plot the first 5 ROIs\n plt.figure(figsize=(4, 3))\n afu = df[roiName].values\n plt.plot(times, afu, '.-')\n plt.title(roiName)\n plt.xlabel(\"Time (minutes)\")\n plt.ylabel(\"AFU\")\n plt.show()\n plt.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd3cb634ea7dcec26d6803e3e7f9dd5c4afa482
347,535
ipynb
Jupyter Notebook
Visualizing the Data.ipynb
SamuelaAnastasi/Day_Night_Image_Classifier
c782e558fc2f6de58606af20304ad41587b14b32
[ "MIT" ]
null
null
null
Visualizing the Data.ipynb
SamuelaAnastasi/Day_Night_Image_Classifier
c782e558fc2f6de58606af20304ad41587b14b32
[ "MIT" ]
null
null
null
Visualizing the Data.ipynb
SamuelaAnastasi/Day_Night_Image_Classifier
c782e558fc2f6de58606af20304ad41587b14b32
[ "MIT" ]
null
null
null
909.777487
180,860
0.954373
[ [ [ "# Day and Night Image Classifier\n---\n\nThe day/night image dataset consists of 200 RGB color images in two categories: day and night. There are equal numbers of each example: 100 day images and 100 night images.\n\nWe'd like to build a classifier that can accurately label these images as day or night, and that relies on finding distinguishing features between the two types of images!\n\n*Note: All images come from the [AMOS dataset](http://cs.uky.edu/~jacobs/datasets/amos/) (Archive of Many Outdoor Scenes).*\n", "_____no_output_____" ], [ "### Import resources\n\nBefore you get started on the project code, import the libraries and resources that you'll need.", "_____no_output_____" ] ], [ [ "import cv2 # computer vision library\nimport helpers\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Training and Testing Data\nThe 200 day/night images are separated into training and testing datasets. \n\n* 60% of these images are training images, for you to use as you create a classifier.\n* 40% are test images, which will be used to test the accuracy of your classifier.\n\nFirst, we set some variables to keep track of some where our images are stored:\n\n image_dir_training: the directory where our training image data is stored\n image_dir_test: the directory where our test image data is stored", "_____no_output_____" ] ], [ [ "# Image data directories\nimage_dir_training = \"day_night_images/training/\"\nimage_dir_test = \"day_night_images/test/\"", "_____no_output_____" ] ], [ [ "## Load the datasets\n\nThese first few lines of code will load the training day/night images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label (\"day\" or \"night\"). \n\nFor example, the first image-label pair in `IMAGE_LIST` can be accessed by index: \n``` IMAGE_LIST[0][:]```.\n", "_____no_output_____" ] ], [ [ "# Using the load_dataset function in helpers.py\n# Load training data\nIMAGE_LIST = helpers.load_dataset(image_dir_training)\n", "_____no_output_____" ] ], [ [ "---\n# 1. Visualize the input images\n", "_____no_output_____" ] ], [ [ "# Select an image and its label by list index\nimage_index = 0\nselected_image = IMAGE_LIST[image_index][0]\nselected_label = IMAGE_LIST[image_index][1]\n\n## TODO: Print out 1. The shape of the image and 2. The image's label `selected_label`\nprint(selected_image.shape)\nprint(selected_label)\nplt.imshow(selected_image)\n", "(372, 640, 3)\nday\n" ], [ "hsv_day = cv2.cvtColor(selected_image, cv2.COLOR_RGB2HSV)\nh = hsv_day[:,:,0]\ns = hsv_day[:,:,1]\nv = hsv_day[:,:,2]", "_____no_output_____" ], [ "h", "_____no_output_____" ], [ "s", "_____no_output_____" ], [ "v", "_____no_output_____" ], [ "v_mean = np.mean(v)\nv_mean", "_____no_output_____" ], [ "def is_night_img(img, threshhold=80): \n hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n v = hsv[:,:,2]\n return np.mean(v) < threshhold", "_____no_output_____" ], [ "## TODO: Display a night image\n# Note the differences between the day and night images\n# Any measurable differences can be used to classify these images\nnight_img = None\nlabel = None\nfor im, l in IMAGE_LIST:\n if is_night_img(im):\n night_img, label = im, l\n break\nprint(label)\nplt.imshow(night_img)", "night\n" ], [ "# h_p = night_img[:,:,0]\n# s_p = night_img[:,:,1]\nv_p = night_img[:,:,2]", "_____no_output_____" ], [ "np.mean(v_p)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd3e2a544f8e8300bd26566c09c52ba3edb9c44
688,327
ipynb
Jupyter Notebook
coursera/FundamentalsOfReinforcementLearning/Assignment1.ipynb
MrSquanchee/ProblemSolving
309160f6a2fb43ae7673210b01957ffca9247d0d
[ "MIT" ]
null
null
null
coursera/FundamentalsOfReinforcementLearning/Assignment1.ipynb
MrSquanchee/ProblemSolving
309160f6a2fb43ae7673210b01957ffca9247d0d
[ "MIT" ]
null
null
null
coursera/FundamentalsOfReinforcementLearning/Assignment1.ipynb
MrSquanchee/ProblemSolving
309160f6a2fb43ae7673210b01957ffca9247d0d
[ "MIT" ]
null
null
null
383.469081
199,060
0.930606
[ [ [ "# Assignment 1: Bandits and Exploration/Exploitation", "_____no_output_____" ], [ "Welcome to Assignment 1. This notebook will:\n- Help you create your first bandit algorithm\n- Help you understand the effect of epsilon on exploration and learn about the exploration/exploitation tradeoff\n- Introduce you to some of the reinforcement learning software we are going to use for this specialization\n\nThis class uses RL-Glue to implement most of our experiments. It was originally designed by Adam White, Brian Tanner, and Rich Sutton. This library will give you a solid framework to understand how reinforcement learning experiments work and how to run your own. If it feels a little confusing at first, don't worry - we are going to walk you through it slowly and introduce you to more and more parts as you progress through the specialization.\n\nWe are assuming that you have used a Jupyter notebook before. But if not, it is quite simple. Simply press the run button, or shift+enter to run each of the cells. The places in the code that you need to fill in will be clearly marked for you.", "_____no_output_____" ], [ "## Section 0: Preliminaries", "_____no_output_____" ] ], [ [ "# Import necessary libraries\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport time\n\nfrom rlglue.rl_glue import RLGlue\nimport main_agent\nimport ten_arm_env\nimport test_env", "_____no_output_____" ] ], [ [ "In the above cell, we import the libraries we need for this assignment. We use numpy throughout the course and occasionally provide hints for which methods to use in numpy. Other than that we mostly use vanilla python and the occasional other library, such as matplotlib for making plots.\n\nYou might have noticed that we import ten_arm_env. This is the __10-armed Testbed__ introduced in [section 2.3](http://www.incompleteideas.net/book/RLbook2018.pdf) of the textbook. We use this throughout this notebook to test our bandit agents. It has 10 arms, which are the actions the agent can take. Pulling an arm generates a stochastic reward from a Gaussian distribution with unit-variance. For each action, the expected value of that action is randomly sampled from a normal distribution, at the start of each run. If you are unfamiliar with the 10-armed Testbed please review it in the textbook before continuing.\n\n__DO NOT IMPORT OTHER LIBRARIES as this will break the autograder.__\n\n__DO NOT SET A RANDOM SEED as this will break the autograder.__", "_____no_output_____" ], [ "## Section 1: Greedy Agent", "_____no_output_____" ], [ "We want to create an agent that will find the action with the highest expected reward. One way an agent could operate is to always choose the action with the highest value based on the agent’s current estimates. This is called a greedy agent as it greedily chooses the action that it thinks has the highest value. Let's look at what happens in this case.\n\nFirst we are going to implement the argmax function, which takes in a list of action values and returns an action with the highest value. Why are we implementing our own instead of using the argmax function that numpy uses? Numpy's argmax function returns the first instance of the highest value. We do not want that to happen as it biases the agent to choose a specific action in the case of ties. Instead we want to break ties between the highest values randomly. So we are going to implement our own argmax function. 
You may want to look at [np.random.choice](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.choice.html) to randomly select from a list of values.", "_____no_output_____" ] ], [ [ "# -----------\n# Graded Cell\n# -----------\ndef argmax(q_values):\n \"\"\"\n Takes in a list of q_values and returns the index of the item \n with the highest value. Breaks ties randomly.\n returns: int - the index of the highest value in q_values\n \"\"\"\n top_value = float(\"-inf\")\n ties = []\n \n for i in range(len(q_values)):\n # if a value in q_values is greater than the highest value update top and reset ties to zero\n # if a value is equal to top value add the index to ties\n # return a random selection from ties.\n # YOUR CODE HERE\n if q_values[i] > top_value:\n top_value = q_values[i]\n ties = [i]\n elif q_values[i] == top_value:\n ties.append(i)\n\n return np.random.choice(ties)", "_____no_output_____" ], [ "# --------------\n# Debugging Cell\n# --------------\n# Feel free to make any changes to this cell to debug your code\n\ntest_array = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]\nassert argmax(test_array) == 8, \"Check your argmax implementation returns the index of the largest value\"\n\n# make sure np.random.choice is called correctly\nnp.random.seed(0)\ntest_array = [1, 0, 0, 1]\n\nassert argmax(test_array) == 0", "_____no_output_____" ], [ "# -----------\n# Tested Cell\n# -----------\n# The contents of the cell will be tested by the autograder.\n# If they do not pass here, they will not pass there.\n\ntest_array = [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]\nassert argmax(test_array) == 8, \"Check your argmax implementation returns the index of the largest value\"\n\n# set random seed so results are deterministic\nnp.random.seed(0)\ntest_array = [1, 0, 0, 1]\n\ncounts = [0, 0, 0, 0]\nfor _ in range(100):\n a = argmax(test_array)\n counts[a] += 1\n\n# make sure argmax does not always choose first entry\nassert counts[0] != 100, \"Make sure your argmax implementation randomly choooses among the largest values.\"\n\n# make sure argmax does not always choose last entry\nassert counts[3] != 100, \"Make sure your argmax implementation randomly choooses among the largest values.\"\n\n# make sure the random number generator is called exactly once whenver `argmax` is called\nexpected = [44, 0, 0, 56] # <-- notice not perfectly uniform due to randomness\nassert counts == expected", "_____no_output_____" ] ], [ [ "Now we introduce the first part of an RL-Glue agent that you will implement. Here we are going to create a GreedyAgent and implement the agent_step method. This method gets called each time the agent takes a step. The method has to return the action selected by the agent. This method also ensures the agent’s estimates are updated based on the signals it gets from the environment.\n\nFill in the code below to implement a greedy agent.", "_____no_output_____" ] ], [ [ "# -----------\n# Graded Cell\n# -----------\nclass GreedyAgent(main_agent.Agent):\n def agent_step(self, reward, observation):\n \"\"\"\n Takes one step for the agent. It takes in a reward and observation and \n returns the action the agent chooses at that time step.\n\n Arguments:\n reward -- float, the reward the agent recieved from the environment after taking the last action.\n observation -- float, the observed state the agent is in. 
Do not worry about this as you will not use it\n until future lessons\n Returns:\n current_action -- int, the action chosen by the agent at the current time step.\n \"\"\"\n ### Useful Class Variables ###\n # self.q_values : An array with what the agent believes each of the values of the arm are.\n # self.arm_count : An array with a count of the number of times each arm has been pulled.\n # self.last_action : The action that the agent took on the previous time step\n #######################\n\n # Update Q values Hint: Look at the algorithm in section 2.4 of the textbook.\n # increment the counter in self.arm_count for the action from the previous time step\n # update the step size using self.arm_count\n # update self.q_values for the action from the previous time step\n\n # YOUR CODE HERE\n self.arm_count[self.last_action] += 1\n self.q_values[self.last_action] += (reward - self.q_values[self.last_action]) / self.arm_count[self.last_action]\n\n # current action = ? # Use the argmax function you created above\n # YOUR CODE HERE\n current_action = argmax(self.q_values)\n\n self.last_action = current_action\n\n return current_action\n ", "_____no_output_____" ], [ "# --------------\n# Debugging Cell\n# --------------\n# Feel free to make any changes to this cell to debug your code\n\n# build a fake agent for testing and set some initial conditions\nnp.random.seed(1)\ngreedy_agent = GreedyAgent()\ngreedy_agent.q_values = [0, 0, 0.5, 0, 0]\ngreedy_agent.arm_count = [0, 1, 0, 0, 0]\ngreedy_agent.last_action = 1\n\naction = greedy_agent.agent_step(reward=1, observation=0)\n\n# make sure the q_values were updated correctly\nassert greedy_agent.q_values == [0, 0.5, 0.5, 0, 0]\n\n# make sure the agent is using the argmax that breaks ties randomly\nassert action == 2", "_____no_output_____" ], [ "# lock\n# -----------\n# Tested Cell\n# -----------\n# The contents of the cell will be tested by the autograder.\n# If they do not pass here, they will not pass there.\n\n# build a fake agent for testing and set some initial conditions\ngreedy_agent = GreedyAgent()\ngreedy_agent.q_values = [0, 0, 1.0, 0, 0]\ngreedy_agent.arm_count = [0, 1, 0, 0, 0]\ngreedy_agent.last_action = 1\n\n# take a fake agent step\naction = greedy_agent.agent_step(reward=1, observation=0)\n\n# make sure agent took greedy action\nassert action == 2\n\n# make sure q_values were updated correctly\nassert greedy_agent.q_values == [0, 0.5, 1.0, 0, 0]", "_____no_output_____" ] ], [ [ "Let's visualize the result. Here we run an experiment using RL-Glue to test our agent. For now, we will set up the experiment code; in future lessons, we will walk you through running experiments so that you can create your own.", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\n\nnum_runs = 200 # The number of times we run the experiment\nnum_steps = 1000 # The number of pulls of each arm the agent takes\nenv = ten_arm_env.Environment # We set what environment we want to use to test\nagent = GreedyAgent # We choose what agent we want to use\nagent_info = {\"num_actions\": 10} # We pass the agent the information it needs. Here how many arms there are.\nenv_info = {} # We pass the environment the information it needs. 
In this case nothing.\n\nall_averages = []\n\naverage_best = 0\nfor run in tqdm(range(num_runs)): # tqdm is what creates the progress bar below\n np.random.seed(run)\n \n rl_glue = RLGlue(env, agent) # Creates a new RLGlue experiment with the env and agent we chose above\n rl_glue.rl_init(agent_info, env_info) # We pass RLGlue what it needs to initialize the agent and environment\n rl_glue.rl_start() # We start the experiment\n\n average_best += np.max(rl_glue.environment.arms)\n \n scores = [0]\n averages = []\n \n for i in range(num_steps):\n reward, _, action, _ = rl_glue.rl_step() # The environment and agent take a step and return\n # the reward, and action taken.\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n all_averages.append(averages)\n\nplt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\nplt.plot([average_best / num_runs for _ in range(num_steps)], linestyle=\"--\")\nplt.plot(np.mean(all_averages, axis=0))\nplt.legend([\"Best Possible\", \"Greedy\"])\nplt.title(\"Average Reward of Greedy Agent\")\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Average reward\")\nplt.show()\ngreedy_scores = np.mean(all_averages, axis=0)", "100%|██████████| 200/200 [00:02<00:00, 67.09it/s]\n" ] ], [ [ "How did our agent do? Is it possible for it to do better?", "_____no_output_____" ], [ "## Section 2: Epsilon-Greedy Agent", "_____no_output_____" ], [ "We learned about [another way for an agent to operate](https://www.coursera.org/learn/fundamentals-of-reinforcement-learning/lecture/tHDck/what-is-the-trade-off), where it does not always take the greedy action. Instead, sometimes it takes an exploratory action. It does this so that it can find out what the best action really is. If we always choose what we think is the current best action is, we may miss out on taking the true best action, because we haven't explored enough times to find that best action.\n\nImplement an epsilon-greedy agent below. Hint: we are implementing the algorithm from [section 2.4](http://www.incompleteideas.net/book/RLbook2018.pdf#page=52) of the textbook. You may want to use your greedy code from above and look at [np.random.random](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.random.html), as well as [np.random.randint](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.randint.html), to help you select random actions. ", "_____no_output_____" ] ], [ [ "# -----------\n# Graded Cell\n# -----------\nclass EpsilonGreedyAgent(main_agent.Agent):\n def agent_step(self, reward, observation):\n \"\"\"\n Takes one step for the agent. It takes in a reward and observation and \n returns the action the agent chooses at that time step.\n \n Arguments:\n reward -- float, the reward the agent recieved from the environment after taking the last action.\n observation -- float, the observed state the agent is in. 
Do not worry about this as you will not use it\n until future lessons\n Returns:\n current_action -- int, the action chosen by the agent at the current time step.\n \"\"\"\n \n ### Useful Class Variables ###\n # self.q_values : An array with what the agent believes each of the values of the arm are.\n # self.arm_count : An array with a count of the number of times each arm has been pulled.\n # self.last_action : The action that the agent took on the previous time step\n # self.epsilon : The probability an epsilon greedy agent will explore (ranges between 0 and 1)\n #######################\n \n # Update Q values - this should be the same update as your greedy agent above\n # YOUR CODE HERE\n self.arm_count[self.last_action] += 1\n self.q_values[self.last_action] += (reward - self.q_values[self.last_action]) / self.arm_count[self.last_action]\n \n # Choose action using epsilon greedy\n # Randomly choose a number between 0 and 1 and see if it's less than self.epsilon\n # (hint: look at np.random.random()). If it is, set current_action to a random action.\n # otherwise choose current_action greedily as you did above.\n # YOUR CODE HERE\n probability = np.random.random() < self.epsilon\n if probability:\n current_action = np.random.randint(len(self.q_values))\n else:\n current_action = argmax(self.q_values)\n\n self.last_action = current_action\n\n return current_action", "_____no_output_____" ], [ "# --------------\n# Debugging Cell\n# --------------\n# Feel free to make any changes to this cell to debug your code\n\n# build a fake agent for testing and set some initial conditions\nnp.random.seed(0)\ne_greedy_agent = EpsilonGreedyAgent()\ne_greedy_agent.q_values = [0, 0.0, 0.5, 0, 0]\ne_greedy_agent.arm_count = [0, 1, 0, 0, 0]\ne_greedy_agent.num_actions = 5\ne_greedy_agent.last_action = 1\ne_greedy_agent.epsilon = 0.5\n\n# given this random seed, we should see a greedy action (action 2) here\naction = e_greedy_agent.agent_step(reward=1, observation=0)\n\n# -----------------------------------------------\n# we'll try to guess a few of the trickier places\n# -----------------------------------------------\n\n# make sure to update for the *last_action* not the current action\nassert e_greedy_agent.q_values != [0, 0.5, 1.0, 0, 0], \"A\"\n\n# make sure the stepsize is based on the *last_action* not the current action\nassert e_greedy_agent.q_values != [0, 1, 0.5, 0, 0], \"B\"\n\n# make sure the agent is using the argmax that breaks ties randomly\nassert action == 2, \"C\"\n\n# -----------------------------------------------\n\n# let's see what happens for another action\nnp.random.seed(1)\ne_greedy_agent = EpsilonGreedyAgent()\ne_greedy_agent.q_values = [0, 0.5, 0.5, 0, 0]\ne_greedy_agent.arm_count = [0, 1, 0, 0, 0]\ne_greedy_agent.num_actions = 5\ne_greedy_agent.last_action = 1\ne_greedy_agent.epsilon = 0.5\n\n# given this random seed, we should see a random action (action 4) here\naction = e_greedy_agent.agent_step(reward=1, observation=0)\n\n# The agent saw a reward of 1, so should increase the value for *last_action*\nassert e_greedy_agent.q_values == [0, 0.75, 0.5, 0, 0], \"D\"\n\n# the agent should have picked a random action for this particular random seed\nassert action == 4, \"E\"\n", "_____no_output_____" ], [ "# -----------\n# Tested Cell\n# -----------\n# The contents of the cell will be tested by the autograder.\n# If they do not pass here, they will not pass there.\n\nnp.random.seed(0)\ne_greedy_agent = EpsilonGreedyAgent()\ne_greedy_agent.q_values = [0, 0, 1.0, 0, 
0]\ne_greedy_agent.arm_count = [0, 1, 0, 0, 0]\ne_greedy_agent.num_actions = 5\ne_greedy_agent.last_action = 1\ne_greedy_agent.epsilon = 0.5\naction = e_greedy_agent.agent_step(reward=1, observation=0)\n\nassert e_greedy_agent.q_values == [0, 0.5, 1.0, 0, 0]\n\n# manipulate the random seed so the agent takes a random action\nnp.random.seed(1)\naction = e_greedy_agent.agent_step(reward=0, observation=0)\n\nassert action == 4\n\n# check to make sure we update value for action 4\naction = e_greedy_agent.agent_step(reward=1, observation=0)\nassert e_greedy_agent.q_values == [0, 0.5, 0.0, 0, 1.0]", "_____no_output_____" ] ], [ [ "Now that we have our epsilon greedy agent created. Let's compare it against the greedy agent with epsilon of 0.1.", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\n\n# Plot Epsilon greedy results and greedy results\nnum_runs = 200\nnum_steps = 1000\nepsilon = 0.1\nagent = EpsilonGreedyAgent\nenv = ten_arm_env.Environment\nagent_info = {\"num_actions\": 10, \"epsilon\": epsilon}\nenv_info = {}\nall_averages = []\n\nfor run in tqdm(range(num_runs)):\n np.random.seed(run)\n \n rl_glue = RLGlue(env, agent)\n rl_glue.rl_init(agent_info, env_info)\n rl_glue.rl_start()\n\n scores = [0]\n averages = []\n for i in range(num_steps):\n reward, _, action, _ = rl_glue.rl_step() # The environment and agent take a step and return\n # the reward, and action taken.\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n all_averages.append(averages)\n\nplt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\nplt.plot([1.55 for _ in range(num_steps)], linestyle=\"--\")\nplt.plot(greedy_scores)\nplt.title(\"Average Reward of Greedy Agent vs. E-Greedy Agent\")\nplt.plot(np.mean(all_averages, axis=0))\nplt.legend((\"Best Possible\", \"Greedy\", \"Epsilon: 0.1\"))\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Average reward\")\nplt.show()", "100%|██████████| 200/200 [00:03<00:00, 64.01it/s]\n" ] ], [ [ "Notice how much better the epsilon-greedy agent did. Because we occasionally choose a random action we were able to find a better long term policy. By acting greedily before our value estimates are accurate, we risk settling on a suboptimal action.", "_____no_output_____" ], [ "## Section 2.1 Averaging Multiple Runs", "_____no_output_____" ], [ "Did you notice that we averaged over 2000 runs? Why did we do that?\n\nTo get some insight, let's look at the results of two individual runs by the same agent.", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\n\n# Plot runs of e-greedy agent\nagent = EpsilonGreedyAgent\nenv = ten_arm_env.Environment\nagent_info = {\"num_actions\": 10, \"epsilon\": 0.1}\nenv_info = {}\nall_averages = []\nplt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\nnum_steps = 1000\n\nfor run in (0, 1):\n np.random.seed(run) # Here we set the seed so that we can compare two different runs\n averages = []\n rl_glue = RLGlue(env, agent)\n rl_glue.rl_init(agent_info, env_info)\n rl_glue.rl_start()\n\n scores = [0]\n for i in range(num_steps):\n reward, state, action, is_terminal = rl_glue.rl_step()\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n \n plt.plot(averages)\n\nplt.title(\"Comparing two independent runs\")\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Average reward\")\nplt.show()", "_____no_output_____" ] ], [ [ "Notice how the two runs were different? 
But, if this is the exact same algorithm, why does it behave differently in these two runs?\n\nThe answer is that it is due to randomness in the environment and in the agent. Depending on what action the agent randomly starts with, or when it randomly chooses to explore, it can change the results of the runs. And even if the agent chooses the same action, the reward from the environment is randomly sampled from a Gaussian. The agent could get lucky, and see larger rewards for the best action early on and so settle on the best action faster. Or, it could get unlucky and see smaller rewards for best action early on and so take longer to recognize that it is in fact the best action.\n\nTo be more concrete, let’s look at how many times an exploratory action is taken, for different seeds. ", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\nprint(\"Random Seed 1\")\nnp.random.seed(1)\nfor _ in range(15):\n if np.random.random() < 0.1:\n print(\"Exploratory Action\")\n \n\nprint()\nprint()\n\nprint(\"Random Seed 2\")\nnp.random.seed(2)\nfor _ in range(15):\n if np.random.random() < 0.1:\n print(\"Exploratory Action\")", "Random Seed 1\nExploratory Action\nExploratory Action\nExploratory Action\n\n\nRandom Seed 2\nExploratory Action\n" ] ], [ [ "With the first seed, we take an exploratory action three times out of 15, but with the second, we only take an exploratory action once. This can significantly affect the performance of our agent because the amount of exploration has changed significantly.\n\nTo compare algorithms, we therefore report performance averaged across many runs. We do this to ensure that we are not simply reporting a result that is due to stochasticity, as explained [in the lectures](https://www.coursera.org/learn/fundamentals-of-reinforcement-learning/lecture/PtVBs/sequential-decision-making-with-evaluative-feedback). Rather, we want statistically significant outcomes. We will not use statistical significance tests in this course. Instead, because we have access to simulators for our experiments, we use the simpler strategy of running for a large number of runs and ensuring that the confidence intervals do not overlap. ", "_____no_output_____" ], [ "## Section 3: Comparing values of epsilon", "_____no_output_____" ], [ "Can we do better than an epsilon of 0.1? Let's try several different values for epsilon and see how they perform. 
We try different settings of key performance parameters to understand how the agent might perform under different conditions.\n\nBelow we run an experiment where we sweep over different values for epsilon:", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\n\n# Experiment code for different e-greedy\nepsilons = [0.0, 0.01, 0.1, 0.4]\n\nplt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\nplt.plot([1.55 for _ in range(num_steps)], linestyle=\"--\")\n\nn_q_values = []\nn_averages = []\nn_best_actions = []\n\nnum_runs = 200\n\nfor epsilon in epsilons:\n all_averages = []\n for run in tqdm(range(num_runs)):\n agent = EpsilonGreedyAgent\n agent_info = {\"num_actions\": 10, \"epsilon\": epsilon}\n env_info = {\"random_seed\": run}\n\n rl_glue = RLGlue(env, agent)\n rl_glue.rl_init(agent_info, env_info)\n rl_glue.rl_start()\n \n best_arm = np.argmax(rl_glue.environment.arms)\n\n scores = [0]\n averages = []\n best_action_chosen = []\n \n for i in range(num_steps):\n reward, state, action, is_terminal = rl_glue.rl_step()\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n if action == best_arm:\n best_action_chosen.append(1)\n else:\n best_action_chosen.append(0)\n if epsilon == 0.1 and run == 0:\n n_q_values.append(np.copy(rl_glue.agent.q_values))\n if epsilon == 0.1:\n n_averages.append(averages)\n n_best_actions.append(best_action_chosen)\n all_averages.append(averages)\n \n plt.plot(np.mean(all_averages, axis=0))\n\nplt.legend([\"Best Possible\"] + epsilons)\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Average reward\")\nplt.show()", "100%|██████████| 200/200 [00:03<00:00, 62.04it/s]\n100%|██████████| 200/200 [00:03<00:00, 63.36it/s]\n100%|██████████| 200/200 [00:03<00:00, 64.23it/s]\n100%|██████████| 200/200 [00:02<00:00, 73.70it/s]\n" ] ], [ [ "Why did 0.1 perform better than 0.01?\n\nIf exploration helps why did 0.4 perform worse that 0.0 (the greedy agent)?\n\nThink about these and how you would answer these questions. They are questions in the practice quiz. If you still have questions about it, retake the practice quiz.", "_____no_output_____" ], [ "## Section 4: The Effect of Step Size", "_____no_output_____" ], [ "In Section 1 of this assignment, we decayed the step size over time based on action-selection counts. The step-size was 1/N(A), where N(A) is the number of times action A was selected. This is the same as computing a sample average. We could also set the step size to be a constant value, such as 0.1. What would be the effect of doing that? And is it better to use a constant or the sample average method? \n\nTo investigate this question, let’s start by creating a new agent that has a constant step size. This will be nearly identical to the agent created above. You will use the same code to select the epsilon-greedy action. You will change the update to have a constant step size instead of using the 1/N(A) update.", "_____no_output_____" ] ], [ [ "# -----------\n# Graded Cell\n# -----------\nclass EpsilonGreedyAgentConstantStepsize(main_agent.Agent):\n def agent_step(self, reward, observation):\n \"\"\"\n Takes one step for the agent. It takes in a reward and observation and \n returns the action the agent chooses at that time step.\n \n Arguments:\n reward -- float, the reward the agent recieved from the environment after taking the last action.\n observation -- float, the observed state the agent is in. 
Do not worry about this as you will not use it\n until future lessons\n Returns:\n current_action -- int, the action chosen by the agent at the current time step.\n \"\"\"\n \n ### Useful Class Variables ###\n # self.q_values : An array with what the agent believes each of the values of the arm are.\n # self.arm_count : An array with a count of the number of times each arm has been pulled.\n # self.last_action : An int of the action that the agent took on the previous time step.\n # self.step_size : A float which is the current step size for the agent.\n # self.epsilon : The probability an epsilon greedy agent will explore (ranges between 0 and 1)\n #######################\n \n # Update q_values for action taken at previous time step \n # using self.step_size intead of using self.arm_count\n # YOUR CODE HERE\n self.arm_count[self.last_action] += 1\n self.q_values[self.last_action] += self.step_size * (reward - self.q_values[self.last_action])\n \n # Choose action using epsilon greedy. This is the same as you implemented above.\n # YOUR CODE HERE\n probability = np.random.random() < self.epsilon\n if probability:\n current_action = np.random.randint(len(self.q_values))\n else:\n current_action = argmax(self.q_values)\n \n self.last_action = current_action\n \n return current_action", "_____no_output_____" ], [ "# --------------\n# Debugging Cell\n# --------------\n# Feel free to make any changes to this cell to debug your code\n\nfor step_size in [0.01, 0.1, 0.5, 1.0]:\n e_greedy_agent = EpsilonGreedyAgentConstantStepsize()\n e_greedy_agent.q_values = [0, 0, 1.0, 0, 0]\n e_greedy_agent.num_actions = 5\n e_greedy_agent.last_action = 1\n e_greedy_agent.epsilon = 0.0\n e_greedy_agent.step_size = step_size\n action = e_greedy_agent.agent_step(1, 0)\n assert e_greedy_agent.q_values == [0, step_size, 1.0, 0, 0], \"Check that you are updating q_values correctly using the stepsize.\"", "_____no_output_____" ], [ "# -----------\n# Tested Cell\n# -----------\n# The contents of the cell will be tested by the autograder.\n# If they do not pass here, they will not pass there.\n\nnp.random.seed(0)\n# Check Epsilon Greedy with Different Constant Stepsizes\nfor step_size in [0.01, 0.1, 0.5, 1.0]:\n e_greedy_agent = EpsilonGreedyAgentConstantStepsize()\n e_greedy_agent.q_values = [0, 0, 1.0, 0, 0]\n e_greedy_agent.num_actions = 5\n e_greedy_agent.last_action = 1\n e_greedy_agent.epsilon = 0.0\n e_greedy_agent.step_size = step_size\n \n action = e_greedy_agent.agent_step(1, 0)\n \n assert e_greedy_agent.q_values == [0, step_size, 1.0, 0, 0] ", "_____no_output_____" ], [ "# ---------------\n# Discussion Cell\n# ---------------\n\n# Experiment code for different step sizes\nstep_sizes = [0.01, 0.1, 0.5, 1.0, '1/N(A)']\n\nepsilon = 0.1\nnum_steps = 1000\nnum_runs = 200\n\nfig, ax = plt.subplots(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\n\nq_values = {step_size: [] for step_size in step_sizes}\ntrue_values = {step_size: None for step_size in step_sizes}\nbest_actions = {step_size: [] for step_size in step_sizes}\n\nfor step_size in step_sizes:\n all_averages = []\n for run in tqdm(range(num_runs)):\n np.random.seed(run)\n agent = EpsilonGreedyAgentConstantStepsize if step_size != '1/N(A)' else EpsilonGreedyAgent\n agent_info = {\"num_actions\": 10, \"epsilon\": epsilon, \"step_size\": step_size, \"initial_value\": 0.0}\n env_info = {}\n\n rl_glue = RLGlue(env, agent)\n rl_glue.rl_init(agent_info, env_info)\n rl_glue.rl_start()\n \n best_arm = np.argmax(rl_glue.environment.arms)\n\n scores = [0]\n 
averages = []\n \n if run == 0:\n true_values[step_size] = np.copy(rl_glue.environment.arms)\n \n best_action_chosen = []\n for i in range(num_steps):\n reward, state, action, is_terminal = rl_glue.rl_step()\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n if action == best_arm:\n best_action_chosen.append(1)\n else:\n best_action_chosen.append(0)\n if run == 0:\n q_values[step_size].append(np.copy(rl_glue.agent.q_values))\n best_actions[step_size].append(best_action_chosen)\n ax.plot(np.mean(best_actions[step_size], axis=0))\n\nplt.legend(step_sizes)\nplt.title(\"% Best Arm Pulled\")\nplt.xlabel(\"Steps\")\nplt.ylabel(\"% Best Arm Pulled\")\nvals = ax.get_yticks()\nax.set_yticklabels(['{:,.2%}'.format(x) for x in vals])\nplt.show()", "100%|██████████| 200/200 [00:03<00:00, 64.18it/s]\n100%|██████████| 200/200 [00:03<00:00, 64.20it/s]\n100%|██████████| 200/200 [00:03<00:00, 63.98it/s]\n100%|██████████| 200/200 [00:03<00:00, 62.08it/s]\n100%|██████████| 200/200 [00:03<00:00, 64.04it/s]\n" ] ], [ [ "Notice first that we are now plotting the amount of time that the best action is taken rather than the average reward. To better understand the performance of an agent, it can be useful to measure specific behaviors, beyond just how much reward is accumulated. This measure indicates how close the agent’s behaviour is to optimal.\n\nIt seems as though 1/N(A) performed better than the others, in that it reaches a solution where it takes the best action most frequently. Now why might this be? Why did a step size of 0.5 start out better but end up performing worse? Why did a step size of 0.01 perform so poorly?\n\nLet's dig into this further below. Let’s plot how well each agent tracks the true value, where each agent has a different step size method. You do not have to enter any code here, just follow along.", "_____no_output_____" ] ], [ [ "# lock\n# ---------------\n# Discussion Cell\n# ---------------\nlargest = 0\nnum_steps = 1000\nfor step_size in step_sizes:\n plt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\n largest = np.argmax(true_values[step_size])\n plt.plot([true_values[step_size][largest] for _ in range(num_steps)], linestyle=\"--\")\n plt.title(\"Step Size: {}\".format(step_size))\n plt.plot(np.array(q_values[step_size])[:, largest])\n plt.legend([\"True Expected Value\", \"Estimated Value\"])\n plt.xlabel(\"Steps\")\n plt.ylabel(\"Value\")\n plt.show()", "_____no_output_____" ] ], [ [ "These plots help clarify the performance differences between the different step sizes. A step size of 0.01 makes such small updates that the agent’s value estimate of the best action does not get close to the actual value. Step sizes of 0.5 and 1.0 both get close to the true value quickly, but are very susceptible to stochasticity in the rewards. The updates overcorrect too much towards recent rewards, and so oscillate around the true value. This means that on many steps, the action that pulls the best arm may seem worse than it actually is. A step size of 0.1 updates fairly quickly to the true value, and does not oscillate as widely around the true values as 0.5 and 1.0. This is one of the reasons that 0.1 performs quite well. Finally we see why 1/N(A) performed well. Early on while the step size is still reasonably high it moves quickly to the true expected value, but as it gets pulled more its step size is reduced which makes it less susceptible to the stochasticity of the rewards.\n\nDoes this mean that 1/N(A) is always the best? When might it not be? 
One possible setting where it might not be as effective is in non-stationary problems. You learned about non-stationarity in the lessons. Non-stationarity means that the environment may change over time. This could manifest itself as continual change over time of the environment, or a sudden change in the environment.\n\nLet's look at how a sudden change in the reward distributions affects a step size like 1/N(A). This time we will run the environment for 2000 steps, and after 1000 steps we will randomly change the expected value of all of the arms. We compare two agents, both using epsilon-greedy with epsilon = 0.1. One uses a constant step size of 0.1, the other a step size of 1/N(A) that reduces over time. ", "_____no_output_____" ] ], [ [ "# ---------------\n# Discussion Cell\n# ---------------\nepsilon = 0.1\nnum_steps = 2000\nnum_runs = 200\nstep_size = 0.1\n\nplt.figure(figsize=(15, 5), dpi= 80, facecolor='w', edgecolor='k')\nplt.plot([1.55 for _ in range(num_steps)], linestyle=\"--\")\n\nfor agent in [EpsilonGreedyAgent, EpsilonGreedyAgentConstantStepsize]:\n all_averages = []\n for run in tqdm(range(num_runs)):\n agent_info = {\"num_actions\": 10, \"epsilon\": epsilon, \"step_size\": step_size}\n np.random.seed(run)\n \n rl_glue = RLGlue(env, agent)\n rl_glue.rl_init(agent_info, env_info)\n rl_glue.rl_start()\n\n scores = [0]\n averages = []\n \n for i in range(num_steps):\n reward, state, action, is_terminal = rl_glue.rl_step()\n scores.append(scores[-1] + reward)\n averages.append(scores[-1] / (i + 1))\n if i == 1000:\n rl_glue.environment.arms = np.random.randn(10)\n all_averages.append(averages)\n \n plt.plot(np.mean(all_averages, axis=0))\nplt.legend([\"Best Possible\", \"1/N(A)\", \"0.1\"])\nplt.xlabel(\"Steps\")\nplt.ylabel(\"Average reward\")\nplt.show()", "100%|██████████| 200/200 [00:06<00:00, 30.97it/s]\n100%|██████████| 200/200 [00:06<00:00, 31.17it/s]\n" ] ], [ [ "Now the agent with a step size of 1/N(A) performed better at the start but then performed worse when the environment changed! What happened?\n\nThink about what the step size would be after 1000 steps. Let's say the best action gets chosen 500 times. That means the step size for that action is 1/500 or 0.002. At each step when we update the value of the action and the value is going to move only 0.002 * the error. That is a very tiny adjustment and it will take a long time for it to get to the true value.\n\nThe agent with step size 0.1, however, will always update in 1/10th of the direction of the error. This means that on average it will take ten steps for it to update its value to the sample mean.\n\nThese are the types of tradeoffs we have to think about in reinforcement learning. A larger step size moves us more quickly toward the true value, but can make our estimated values oscillate around the expected value. A step size that reduces over time can converge to close to the expected value, without oscillating. On the other hand, such a decaying stepsize is not able to adapt to changes in the environment. Nonstationarity---and the related concept of partial observability---is a common feature of reinforcement learning problems and when learning online. ", "_____no_output_____" ], [ "## Section 5: Conclusion", "_____no_output_____" ], [ "Great work! 
You have:\n- Implemented your first agent\n- Learned about the effect of epsilon, an exploration parameter, on the performance of an agent\n- Learned about the effect of step size on the performance of the agent\n- Learned about a good experimental practice of averaging across multiple runs", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ecd3e70197a3a62073ba0adc1d2b4f8b7c985fc9
424,044
ipynb
Jupyter Notebook
my_notebooks/gradient_descent_explained.ipynb
awcrosby/deep_learning
993e339bf0ef6107d65d70ceb11908107212f2b9
[ "MIT" ]
null
null
null
my_notebooks/gradient_descent_explained.ipynb
awcrosby/deep_learning
993e339bf0ef6107d65d70ceb11908107212f2b9
[ "MIT" ]
7
2021-11-10T20:17:25.000Z
2021-11-10T20:17:27.000Z
my_notebooks/gradient_descent_explained.ipynb
awcrosby/deep_learning
993e339bf0ef6107d65d70ceb11908107212f2b9
[ "MIT" ]
null
null
null
769.589837
323,152
0.947284
[ [ [ "### Gradient Descent Example 1\nExplained. Only 1 input feature.", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# Input data\nx = np.array([1, 2, 3, 4])\n\n# Target\ny = np.array(0.5)\n\n# Initial weights (input to output weights)\nw = np.array([0.5, -0.5, 0.3, 0.1])\n\n# ### Activation Function ###\n# Activation function = f(h)\n# Result of f(h) is y_hat (prediction from 0 to 1)\n# Activation function is sometimes, and in this case, the sigmoid\n# y_hat AKA neural network output: nn_output\n\n# Defining the sigmoid activation function\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n# Derivative of the sigmoid function\ndef sigmoid_prime(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\n# the linear combination performed by the node (h in f(h) and f'(h)), AKA input to the output unit\nh = np.dot(x, w)\n\n# The neural network output (y_hat)\nnn_output = sigmoid(h)\n\n# Error of neural network (y - y_hat)\nerror = y - nn_output\n\n# Output gradient (f'(h))\noutput_grad = sigmoid_prime(h)\n\n# Error term (lowercase delta)\nerror_term = error * output_grad\n\n# The learning rate, eta in the weight step equation\nlearnrate = 0.5\n\n# Gradient descent step, AKA change in weights (uppercase delta of w)\ndel_w = learnrate * error_term * x\n\n\nprint('Neural Network output:')\nprint(nn_output)\nprint('Amount of Error:')\nprint(error)\nprint('Change in Weights:')\nprint(del_w)", "Neural Network output:\n0.6899744811276125\nAmount of Error:\n-0.1899744811276125\nChange in Weights:\n[-0.02031869 -0.04063738 -0.06095608 -0.08127477]\n" ] ], [ [ "### Hidden Layer, forward pass only, Example 1", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1/(1+np.exp(-x))\n\n# Network size\nN_input = 4\nN_hidden = 3\nN_output = 2\n\nnp.random.seed(42)\n# Make some fake data\nX = np.random.randn(4)\n\nweights_input_to_hidden = np.random.normal(0, scale=0.1, size=(N_input, N_hidden))\nweights_hidden_to_output = np.random.normal(0, scale=0.1, size=(N_hidden, N_output))\n\n\n################\nN_input = 3\nN_hidden = 2\nN_output = 1\nX = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\nweights_input_to_hidden = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\nweights_hidden_to_output = np.array([[0.3],\n [-0.1]])\n################\n\n\n\n# ### TODO: Make a forward pass through the network ###\n# dot of inputs(vector) and weights(matrix), result is vector\nhidden_layer_in = np.dot(X, weights_input_to_hidden)\nhidden_layer_out = sigmoid(hidden_layer_in)\n\nprint('Hidden-layer Output:')\nprint(hidden_layer_out)\n\n# dot of hidden_out(vector) and weights(matrix), result is vector\noutput_layer_in = np.dot(hidden_layer_out, weights_hidden_to_output)\noutput_layer_out = sigmoid(output_layer_in)\n\nprint('Output-layer Output:')\nprint(output_layer_out)", "Hidden-layer Output:\n[[0.4850045 0.45512111]]\nOutput-layer Output:\n[[0.5249765]]\n" ] ], [ [ "![hidden_layers.jpg](attachment:hidden_layers.jpg)", "_____no_output_____" ], [ "### Backpropagation Example", "_____no_output_____" ] ], [ [ "import numpy as np\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\nx = np.array([0.5, 0.1, -0.2])\ntarget = 0.6\nlearnrate = 0.5\n\nweights_input_hidden = np.array([[0.5, -0.6],\n [0.1, -0.2],\n [0.1, 0.7]])\n\nweights_hidden_output = np.array([0.1, -0.3]) # TODO maybe make (2,1)\n\n\n################\nx = np.array([[0.5, -0.2, 0.1]])\ntarget = np.array([[0.4]])\nweights_input_hidden = np.array([[0.1, 
-0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\nweights_hidden_output = np.array([0.3, -0.1])\n################\n\n\n## Forward pass\nhidden_layer_input = np.dot(x, weights_input_hidden)\nhidden_layer_output = sigmoid(hidden_layer_input)\nprint('hidden_layer_output', hidden_layer_output, hidden_layer_output.shape)\n\noutput_layer_in = np.dot(hidden_layer_output, weights_hidden_output)\noutput = sigmoid(output_layer_in)\nprint('output', output)\n\n## Backwards pass\n## TODO: Calculate output error\nerror = target - output\nprint('error', error, error.shape)\n\n# TODO: Calculate error term for output layer\noutput_gradient = output * (1-output) # sigmoid prime\noutput_error_term = error * output_gradient\nprint('output_error_term', output_error_term, output_error_term.shape)\n\n# TODO: Calculate error term for hidden layer\n# equation = weights * output error * f'(h)\nhidden_gradient = hidden_layer_output * (1 - hidden_layer_output)\nprint('hidden_gradient', hidden_gradient, hidden_gradient.shape)\nhidden_error_term = weights_hidden_output * output_error_term * hidden_gradient\nprint('weights_hidden_output', weights_hidden_output, weights_hidden_output.shape)\nprint(\"hidden_error_term\", hidden_error_term, hidden_error_term.shape)\n\n# TODO: Calculate change in weights for hidden layer to output layer\n# equation = learning rate * output error * f(h)\ndelta_w_h_o = learnrate * output_error_term * hidden_layer_output\nprint('delta_w_h_o', delta_w_h_o, delta_w_h_o.shape)\n\n# TODO: Calculate change in weights for input layer to hidden layer\n# equation = learning rate * hidden error * x_i\nflipped_hidden_error = hidden_error_term[:,None].T # from (2,) to (1,2)\nmaxtrix_x = x[:,None] # from (3,) to (3,1)\ndelta_w_i_h = learnrate * maxtrix_x * flipped_hidden_error\n\nprint('Change in weights for hidden layer to output layer:')\nprint(delta_w_h_o)\nprint('Change in weights for input layer to hidden layer:')\nprint(delta_w_i_h)", "hidden_layer_output [[0.4850045 0.45512111]] (1, 2)\noutput [0.5249765]\nerror [[-0.1249765]] (1, 1)\noutput_error_term [[-0.03116616]] (1, 1)\nhidden_gradient [[0.24977513 0.24798589]] (1, 2)\nweights_hidden_output [ 0.3 -0.1] (2,)\nhidden_error_term [[-0.00233536 0.00077288]] (1, 2)\ndelta_w_h_o [[-0.00755786 -0.00709219]] (1, 2)\nChange in weights for hidden layer to output layer:\n[[-0.00755786 -0.00709219]]\nChange in weights for input layer to hidden layer:\n[[[-5.83839932e-04 2.33535973e-04 -1.16767986e-04]]\n\n [[ 1.93219209e-04 -7.72876837e-05 3.86438418e-05]]]\n" ] ], [ [ "![backprop.png](attachment:backprop.png)", "_____no_output_____" ], [ "### Forward Pass & Backward Propagation\npasted quiz example:", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\nnp.random.seed(21)\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n\n# Hyperparameters\nn_hidden = 2 # number of hidden units\nepochs = 900\nlearnrate = 0.005\n\nn_records, n_features = features.shape\nlast_loss = None\n# Initialize weights\nweights_input_hidden = np.random.normal(scale=1 / n_features ** .5,\n size=(n_features, n_hidden))\nweights_hidden_output = np.random.normal(scale=1 / n_features ** .5,\n size=n_hidden)\n\nfor e in range(epochs):\n del_w_input_hidden = np.zeros(weights_input_hidden.shape)\n del_w_hidden_output = np.zeros(weights_hidden_output.shape)\n for x, y in zip(features.values, targets):\n ## Forward pass ##\n # TODO: Calculate the output\n hidden_input = np.dot(x, 
weights_input_hidden)\n hidden_output = sigmoid(hidden_input)\n out_layer_in = np.dot(hidden_output, weights_hidden_output)\n output = sigmoid(out_layer_in)\n # print('x:', x, x.shape)\n # print('weights_input_hidden:', weights_input_hidden, weights_input_hidden.shape)\n # print('hidden_input', hidden_input, hidden_input.shape)\n # print('hidden_output', hidden_output, hidden_output.shape)\n # print('weights_hidden_output', weights_hidden_output, weights_hidden_output.shape)\n # print('output', output, output.shape)\n\n ## Backward pass ##\n # TODO: Calculate the network's prediction error\n error = y - output\n\n # TODO: Calculate error term for the output unit\n output_error_term = error * output * (1 - output)\n # print('output_error_term', output_error_term, output_error_term.shape)\n\n ## propagate errors to hidden layer\n\n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = weights_hidden_output * output_error_term\n # print('hidden_error', hidden_error, hidden_error.shape)\n \n # TODO: Calculate the error term for the hidden layer\n hidden_error_term = hidden_error * hidden_output * (1 - hidden_output)\n # print('hidden_error_term', hidden_error_term, hidden_error_term.shape)\n \n # TODO: Update the change in weights\n del_w_hidden_output += output_error_term * hidden_output\n \n # change x (6,) to (6,1), hidden_error_term (2,) to (1,2)\n del_w_input_hidden += (x[:,None] *\n hidden_error_term[:,None].T)\n # print('del_w_hidden_output', del_w_hidden_output, del_w_hidden_output.shape)\n # print('del_w_input_hidden', del_w_input_hidden, del_w_input_hidden.shape)\n \n # break #TEMP\n\n # TODO: Update weights\n weights_input_hidden += learnrate * del_w_input_hidden / n_records\n weights_hidden_output += learnrate * del_w_hidden_output / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n hidden_output = sigmoid(np.dot(x, weights_input_hidden))\n out = sigmoid(np.dot(hidden_output,\n weights_hidden_output))\n loss = np.mean((out - targets) ** 2)\n\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n# Calculate accuracy on test data\nhidden = sigmoid(np.dot(features_test, weights_input_hidden))\nout = sigmoid(np.dot(hidden, weights_hidden_output))\npredictions = out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))\n", "_____no_output_____" ] ], [ [ "### Gradient Descent Example 2\nmultiple input features", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_prep import features, targets, features_test, targets_test\n\n\ndef sigmoid(x):\n \"\"\"\n Calculate sigmoid\n \"\"\"\n return 1 / (1 + np.exp(-x))\n\n# TODO: We haven't provided the sigmoid_prime function like we did in\n# the previous lesson to encourage you to come up with a more\n# efficient solution. 
If you need a hint, check out the comments\n# in solution.py from the previous lecture.\n\n# Use to same seed to make debugging easier\nnp.random.seed(42)\n\nn_records, n_features = features.shape\nlast_loss = None\n\n# Initialize weights\nweights = np.random.normal(scale=1 / n_features**.5, size=n_features)\n\n# Neural Network hyperparameters\nepochs = 1000\nlearnrate = 0.5\n\nfor e in range(epochs):\n del_w = np.zeros(weights.shape)\n for x, y in zip(features.values, targets):\n # Loop through all records, x is the input, y is the target\n\n # Note: We haven't included the h variable from the previous\n # lesson. You can add it if you want, or you can calculate\n # the h together with the output\n\n # TODO: Calculate the output\n output = sigmoid(np.dot(x, weights))\n output_gradient = output * (1-output)\n\n # TODO: Calculate the error\n error = y - output\n\n # TODO: Calculate the error term\n error_term = error * output_gradient\n\n # TODO: Calculate the change in weights for this sample\n # and add it to the total weight change\n del_w += error_term * x\n\n # TODO: Update weights using the learning rate and the average change in weights\n weights += learnrate * del_w / n_records\n\n # Printing out the mean square error on the training set\n if e % (epochs / 10) == 0:\n out = sigmoid(np.dot(features, weights))\n loss = np.mean((out - targets) ** 2)\n if last_loss and last_loss < loss:\n print(\"Train loss: \", loss, \" WARNING - Loss Increasing\")\n else:\n print(\"Train loss: \", loss)\n last_loss = loss\n\n\n# Calculate accuracy on test data\ntes_out = sigmoid(np.dot(features_test, weights))\npredictions = tes_out > 0.5\naccuracy = np.mean(predictions == targets_test)\nprint(\"Prediction accuracy: {:.3f}\".format(accuracy))", "_____no_output_____" ] ], [ [ "#### data_prep.py for example 2", "_____no_output_____" ] ], [ [ "# data_prep.py\nimport numpy as np\nimport pandas as pd\n\nadmissions = pd.read_csv('binary.csv')\n\n# Make dummy variables for rank\ndata = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)\ndata = data.drop('rank', axis=1)\n\n# Standarize features\nfor field in ['gre', 'gpa']:\n mean, std = data[field].mean(), data[field].std()\n data.loc[:,field] = (data[field]-mean)/std\n \n# Split off random 10% of the data for testing\nnp.random.seed(42)\nsample = np.random.choice(data.index, size=int(len(data)*0.9), replace=False)\ndata, test_data = data.ix[sample], data.drop(sample)\n\n# Split into features and targets\nfeatures, targets = data.drop('admit', axis=1), data['admit']\nfeatures_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd3ef7170c8fa9142e54a1e62975bc62c878479
1,669
ipynb
Jupyter Notebook
bump_anaconda_key.ipynb
menpo/menpo-admin
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
[ "BSD-3-Clause" ]
null
null
null
bump_anaconda_key.ipynb
menpo/menpo-admin
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
[ "BSD-3-Clause" ]
1
2017-01-24T11:17:03.000Z
2017-01-24T11:17:03.000Z
bump_anaconda_key.ipynb
menpo/menpo-admin
41cb5ab9aa56c3df26e7bfbf43a56a0cbd5f9674
[ "BSD-3-Clause" ]
null
null
null
23.180556
153
0.56441
[ [ [ "For this to work you'll need to have the travis CLI installed and have already run login (cd to a menpo dir before running login for auto magic)\n```\n> sudo gem install travis\n```\nTo bump on Jenkins, visit here:\nhttps://jenkins.menpo.org/credentials/", "_____no_output_____" ] ], [ [ "import os\nimport subprocess\nfrom utils import apply_to_all_projects\n\nworking_dir = '~/_bump_anaconda_key'\nnew_key = 'not a chance'\n\ndef bump_repo(repo_dir, restart_travis=False):\n os.chdir(repo_dir)\n print(subprocess.check_output(['travis', 'env', 'set', 'BINSTAR_KEY', new_key]))\n if restart_travis:\n print(subprocess.check_output(['travis', 'restart']))", "_____no_output_____" ], [ "apply_to_all_projects(working_dir, bump_repo)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ] ]
ecd3fc4a663bd799b78fd0688efc61fdf6a1e1ea
589,042
ipynb
Jupyter Notebook
notebooks/Debugging scikit-learn text classification pipeline.ipynb
susantamoh84/eli5
7218e0da76cd952d768cdec0986b24eaeaaabb3e
[ "MIT" ]
null
null
null
notebooks/Debugging scikit-learn text classification pipeline.ipynb
susantamoh84/eli5
7218e0da76cd952d768cdec0986b24eaeaaabb3e
[ "MIT" ]
null
null
null
notebooks/Debugging scikit-learn text classification pipeline.ipynb
susantamoh84/eli5
7218e0da76cd952d768cdec0986b24eaeaaabb3e
[ "MIT" ]
null
null
null
94.731747
13,828
0.530392
[ [ [ "# Debugging scikit-learn text classification pipeline\n\nscikit-learn docs provide a nice text classification [tutorial](http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html). Make sure to read it first. We'll be doing something similar to it, while taking more detailed look at classifier weights and predictions.\n\n## 1. Baseline model\n\nFirst, we need some data. Let's load 20 Newsgroups data, keeping only 4 categories:", "_____no_output_____" ] ], [ [ "from sklearn.datasets import fetch_20newsgroups\n\ncategories = ['alt.atheism', 'soc.religion.christian', \n 'comp.graphics', 'sci.med']\ntwenty_train = fetch_20newsgroups(\n subset='train',\n categories=categories,\n shuffle=True,\n random_state=42\n)\ntwenty_test = fetch_20newsgroups(\n subset='test',\n categories=categories,\n shuffle=True,\n random_state=42\n)", "_____no_output_____" ] ], [ [ "A basic text processing pipeline - bag of words features and Logistic Regression as a classifier:", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.pipeline import make_pipeline\n\nvec = CountVectorizer()\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target);", "_____no_output_____" ] ], [ [ "We're using LogisticRegressionCV here to adjust regularization\nparameter C automatically. It allows to compare different\nvectorizers - optimal C value could be different for different input\nfeatures (e.g. for bigrams or for character-level input). An alternative\nwould be to use GridSearchCV or RandomizedSearchCV.\n\n\nLet's check quality of this pipeline:", "_____no_output_____" ] ], [ [ "from sklearn import metrics\n\ndef print_report(pipe):\n y_test = twenty_test.target\n y_pred = pipe.predict(twenty_test.data)\n report = metrics.classification_report(y_test, y_pred, \n target_names=twenty_test.target_names)\n print(report)\n print(\"accuracy: {:0.3f}\".format(metrics.accuracy_score(y_test, y_pred)))\n \nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.93 0.80 0.86 319\n comp.graphics 0.87 0.96 0.91 389\n sci.med 0.94 0.81 0.87 396\nsoc.religion.christian 0.85 0.98 0.91 398\n\n avg / total 0.90 0.89 0.89 1502\n\naccuracy: 0.891\n" ] ], [ [ "Not bad. We can try other classifiers and preprocessing methods, but let's check first what the model learned using eli5.show_weights function:", "_____no_output_____" ] ], [ [ "import eli5\neli5.show_weights(clf, top=10)", "_____no_output_____" ] ], [ [ "The table above doesn't make any sense; the problem is that eli5 was not able to get feature and class names from the classifier object alone. We can provide feature and target names explicitly:", "_____no_output_____" ] ], [ [ "# eli5.show_weights(clf, \n# feature_names=vec.get_feature_names(), \n# target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "The code above works, but a better way is to provide vectorizer instead and let eli5 figure out the details automatically:", "_____no_output_____" ] ], [ [ "eli5.show_weights(clf, vec=vec, top=10, \n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "This starts to make more sense. Columns are target classes. In each column there are features and their weights. Intercept (bias) feature is shown as ``<BIAS>`` in the same table. 
We can inspect features and weights because we're using a bag-of-words vectorizer and a linear classifier (so there is a direct mapping between individual words and classifier coefficients). For other classifiers features can be harder to inspect.\n\nSome features look good, but some don't. It seems model learned some names specific to a dataset (email parts, etc.) though, instead of learning topic-specific words. Let's check prediction results on an example:", "_____no_output_____" ] ], [ [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "What can be highlighted in text is highlighted in text. There is also a separate table for features which can't be highlighted in text - ``<BIAS>`` in this case. If you hover mouse on a highlighted word it shows you a weight of this word in a title. Words are colored according to their weights.\n\n## 2. Baseline model, improved data\n\nAha, from the highlighting above it can be seen that a classifier learned some non-interesting stuff indeed, e.g. it remembered parts of email addresses. We should probably clean the data first to make it more interesting; improving model (trying different classifiers, etc.) doesn't make sense at this point - it may just learn to leverage these email addresses better. \n\nIn practice we'd have to do cleaning yourselves; in this example 20 newsgroups dataset provides an option to remove footers and headers from the messages. Nice. Let's clean up the data and re-train a classifier.", "_____no_output_____" ] ], [ [ "twenty_train = fetch_20newsgroups(\n subset='train',\n categories=categories,\n shuffle=True,\n random_state=42,\n remove=['headers', 'footers'],\n)\ntwenty_test = fetch_20newsgroups(\n subset='test',\n categories=categories,\n shuffle=True,\n random_state=42,\n remove=['headers', 'footers'],\n)\n\nvec = CountVectorizer()\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target);", "_____no_output_____" ] ], [ [ "We just made the task harder and more realistic for a classifier.", "_____no_output_____" ] ], [ [ "print_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.83 0.78 0.80 319\n comp.graphics 0.82 0.96 0.88 389\n sci.med 0.89 0.80 0.84 396\nsoc.religion.christian 0.88 0.86 0.87 398\n\n avg / total 0.85 0.85 0.85 1502\n\naccuracy: 0.852\n" ] ], [ [ "A great result - we just made quality worse! Does it mean pipeline is worse now? No, likely it has a better quality on unseen messages. It is evaluation which is more fair now. Inspecting features used by classifier allowed us to notice a problem with the data and made a good change, despite of numbers which told us not to do that.\n\nInstead of removing headers and footers we could have improved evaluation setup directly, using e.g. GroupKFold from scikit-learn. Then quality of old model would have dropped, we could have removed headers/footers and see increased accuracy, so the numbers would have told us to remove headers and footers. It is not obvious how to split data though, what groups to use with GroupKFold.\n\nSo, what have the updated classifier learned? 
(output is less verbose because only a subset of classes is shown - see \"targets\" argument):", "_____no_output_____" ] ], [ [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names,\n targets=['sci.med'])", "_____no_output_____" ] ], [ [ "Hm, it no longer uses email addresses, but it still doesn't look good: classifier assigns high weights to seemingly unrelated words like 'do' or 'my'. These words appear in many texts, so maybe classifier uses them as a proxy for bias. Or maybe some of them are more common in some of classes. \n\n## 3. Pipeline improvements\n\nTo help classifier we may filter out stop words:", "_____no_output_____" ] ], [ [ "vec = CountVectorizer(stop_words='english')\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.87 0.76 0.81 319\n comp.graphics 0.85 0.95 0.90 389\n sci.med 0.93 0.85 0.89 396\nsoc.religion.christian 0.85 0.89 0.87 398\n\n avg / total 0.87 0.87 0.87 1502\n\naccuracy: 0.871\n" ], [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names,\n targets=['sci.med'])", "_____no_output_____" ] ], [ [ "Looks better, isn't it? \n\nAlternatively, we can use TF\\*IDF scheme; it should give a somewhat similar effect. \n\nNote that we're cross-validating LogisticRegression regularisation parameter here, like in other examples (LogisticRegressionCV, not LogisticRegression). TF\\*IDF values are different from word count values, so optimal C value can be different. We could draw a wrong conclusion if a classifier with fixed regularization strength is used - the chosen C value could have worked better for one kind of data.", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfVectorizer\n\nvec = TfidfVectorizer()\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.91 0.79 0.85 319\n comp.graphics 0.83 0.97 0.90 389\n sci.med 0.95 0.87 0.91 396\nsoc.religion.christian 0.90 0.91 0.91 398\n\n avg / total 0.90 0.89 0.89 1502\n\naccuracy: 0.892\n" ], [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names,\n targets=['sci.med'])", "_____no_output_____" ] ], [ [ "It helped, but didn't have quite the same effect. Why not do both?", "_____no_output_____" ] ], [ [ "vec = TfidfVectorizer(stop_words='english')\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.93 0.77 0.84 319\n comp.graphics 0.84 0.97 0.90 389\n sci.med 0.95 0.89 0.92 396\nsoc.religion.christian 0.88 0.92 0.90 398\n\n avg / total 0.90 0.89 0.89 1502\n\naccuracy: 0.893\n" ], [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names,\n targets=['sci.med'])", "_____no_output_____" ] ], [ [ "This starts to look good! \n\n\n## 4. Char-based pipeline\n\nMaybe we can get somewhat better quality by choosing a different classifier, but let's skip it for now. 
Let's try other analysers instead - use char n-grams instead of words:", "_____no_output_____" ] ], [ [ "vec = TfidfVectorizer(stop_words='english', analyzer='char', \n ngram_range=(3,5))\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.93 0.79 0.85 319\n comp.graphics 0.81 0.97 0.89 389\n sci.med 0.95 0.86 0.90 396\nsoc.religion.christian 0.89 0.91 0.90 398\n\n avg / total 0.89 0.89 0.89 1502\n\naccuracy: 0.888\n" ], [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "It works, but quality is a bit worse. Also, it takes ages to train. \n\nIt looks like stop_words have no effect now - in fact, this is documented in scikit-learn docs, so our stop_words='english' was useless. But at least it is now more obvious how the text looks like for a char ngram-based classifier. Grab a cup of tea and see how char_wb looks like:", "_____no_output_____" ] ], [ [ "vec = TfidfVectorizer(analyzer='char_wb', ngram_range=(3,5))\nclf = LogisticRegressionCV()\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.93 0.79 0.85 319\n comp.graphics 0.87 0.96 0.91 389\n sci.med 0.91 0.90 0.90 396\nsoc.religion.christian 0.89 0.91 0.90 398\n\n avg / total 0.90 0.89 0.89 1502\n\naccuracy: 0.894\n" ], [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "The result is similar, with some minor changes. Quality is better for unknown reason; maybe cross-word dependencies are not that important. \n\n## 5. Debugging HashingVectorizer\n\nTo check that we can try fitting word n-grams instead of char n-grams. But let's deal with efficiency first.\nTo handle large vocabularies we can use HashingVectorizer from scikit-learn; to make training faster we can employ SGDCLassifier:", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\n\nvec = HashingVectorizer(stop_words='english', ngram_range=(1,2))\nclf = SGDClassifier(n_iter=10, random_state=42)\npipe = make_pipeline(vec, clf)\npipe.fit(twenty_train.data, twenty_train.target)\n\nprint_report(pipe)", " precision recall f1-score support\n\n alt.atheism 0.90 0.80 0.85 319\n comp.graphics 0.88 0.96 0.92 389\n sci.med 0.93 0.90 0.92 396\nsoc.religion.christian 0.89 0.91 0.90 398\n\n avg / total 0.90 0.90 0.90 1502\n\naccuracy: 0.899\n" ] ], [ [ "It was super-fast! We're not choosing regularization parameter using cross-validation though. \nLet's check what model learned:", "_____no_output_____" ] ], [ [ "eli5.show_prediction(clf, twenty_test.data[0], vec=vec, \n target_names=twenty_test.target_names,\n targets=['sci.med'])", "_____no_output_____" ] ], [ [ "Result looks similar to CountVectorizer. But with HashingVectorizer we don't even have a vocabulary! Why does it work?", "_____no_output_____" ] ], [ [ "eli5.show_weights(clf, vec=vec, top=10,\n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "Ok, we don't have a vocabulary, so we don't have feature names. Are we out of luck? Nope, eli5 has an answer for that: InvertableHashingVectorizer. It can be used to get feature names for HahshingVectorizer without fitiing a huge vocabulary. 
It still needs some data to learn words -> hashes mapping though; we can use a random subset of data to fit it.", "_____no_output_____" ] ], [ [ "from eli5.sklearn import InvertableHashingVectorizer\nimport numpy as np", "_____no_output_____" ], [ "ivec = InvertableHashingVectorizer(vec)\nsample_size = len(twenty_train.data) // 10\nX_sample = np.random.choice(twenty_train.data, size=sample_size)\nivec.fit(X_sample);", "_____no_output_____" ], [ "eli5.show_weights(clf, vec=ivec, top=20,\n target_names=twenty_test.target_names)", "_____no_output_____" ] ], [ [ "There are collisions (hover mouse over features with \"...\"), and there are important features which were not seen in the random sample (FEATURE[...]), but overall it looks fine. \n\n\"rutgers edu\" bigram feature is suspicious though, it looks like a part of URL.", "_____no_output_____" ] ], [ [ "rutgers_example = [x for x in twenty_train.data if 'rutgers' in x.lower()][0]\nprint(rutgers_example)", "In article <[email protected]> [email protected] writes:\n>In article <[email protected]> [email protected]\n>Matt. 22:9-14 'Go therefore to the main highways, and as many as you find\n>there, invite to the wedding feast.'...\n\n>hmmmmmm. Sounds like your theology and Christ's are at odds. Which one am I \n>to believe?\n" ] ], [ [ "Yep, it looks like model learned this address instead of learning something useful.", "_____no_output_____" ] ], [ [ "eli5.show_prediction(clf, rutgers_example, vec=vec, \n target_names=twenty_test.target_names, \n targets=['soc.religion.christian'])", "_____no_output_____" ] ], [ [ "Quoted text makes it too easy for model to classify some of the messages; that won't generalize to new messages. So to improve the model next step could be to process the data further, e.g. remove quoted text or replace email addresses with a special token. \n\nYou get the idea: looking at features helps to understand how classifier works. Maybe even more importantly, it helps to notice preprocessing bugs, data leaks, issues with task specification - all these nasty problems you get in a real world.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
ecd406c8a26baebf89581ba8063ae7befcf8d310
77,169
ipynb
Jupyter Notebook
pipelining/pdp-exp1/pdp-exp1_cslg-rand-200_plotting.ipynb
ZeruiW/s2search
cb0539b9594d7afe12e64c0b4ada4fb29c793060
[ "Apache-2.0" ]
2
2022-02-07T16:08:04.000Z
2022-03-27T19:29:33.000Z
pipelining/pdp-exp1/pdp-exp1_cslg-rand-200_plotting.ipynb
youyinnn/s2search
f965a595386b24ffab0385b860a1028e209fde86
[ "Apache-2.0" ]
1
2022-03-30T17:50:32.000Z
2022-03-30T17:50:32.000Z
pipelining/pdp-exp1/pdp-exp1_cslg-rand-200_plotting.ipynb
ZeruiW/s2search
cb0539b9594d7afe12e64c0b4ada4fb29c793060
[ "Apache-2.0" ]
1
2022-03-14T19:44:47.000Z
2022-03-14T19:44:47.000Z
272.681979
39,818
0.910003
[ [ [ "<a href=\"https://colab.research.google.com/github/DingLi23/s2search/blob/pipelining/pipelining/pdp-exp1/pdp-exp1_cslg-rand-200_plotting.ipynb\" target=\"_blank\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "### Experiment Description\n\nProduce PDP for a randomly picked data from cslg.\n\n> This notebook is for experiment \\<pdp-exp1\\> and data sample \\<cslg-rand-200\\>.", "_____no_output_____" ], [ "### Initialization", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2\nimport numpy as np, sys, os\nin_colab = 'google.colab' in sys.modules\n# fetching code and data(if you are using colab\nif in_colab:\n !rm -rf s2search\n !git clone --branch pipelining https://github.com/youyinnn/s2search.git\n sys.path.insert(1, './s2search')\n %cd s2search/pipelining/pdp-exp1/\n\npic_dir = os.path.join('.', 'plot')\nif not os.path.exists(pic_dir):\n os.mkdir(pic_dir)\n", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ] ], [ [ "### Loading data", "_____no_output_____" ] ], [ [ "sys.path.insert(1, '../../')\nimport numpy as np, sys, os, pandas as pd\nfrom s2search_score_pdp import pdp_based_importance, apply_order\n\nsample_name = 'cslg-rand-200'\n\nf_list = ['title', 'abstract', 'venue', 'authors', 'year', 'n_citations']\n\npdp_xy = {}\npdp_metric = pd.DataFrame(columns=['feature_name', 'pdp_range', 'pdp_importance'])\n\nfor f in f_list:\n file = os.path.join('.', 'scores', f'{sample_name}_pdp_{f}.npz')\n if os.path.exists(file):\n data = np.load(file)\n sorted_pdp_data = apply_order(data)\n feature_pdp_data = [np.mean(pdps) for pdps in sorted_pdp_data]\n \n pdp_xy[f] = {\n 'y': feature_pdp_data,\n 'numerical': True\n }\n if f == 'year' or f == 'n_citations':\n pdp_xy[f]['x'] = np.sort(data['arr_1'])\n else:\n pdp_xy[f]['y'] = feature_pdp_data\n pdp_xy[f]['x'] = list(range(len(feature_pdp_data)))\n pdp_xy[f]['numerical'] = False\n \n pdp_metric.loc[len(pdp_metric.index)] = [f, np.max(feature_pdp_data) - np.min(feature_pdp_data), pdp_based_importance(feature_pdp_data, f)]\n \n pdp_xy[f]['weird'] = feature_pdp_data[len(feature_pdp_data) - 1] > 30\n \n\nprint(pdp_metric.sort_values(by=['pdp_importance'], ascending=False))\n", " feature_name pdp_range pdp_importance\n1 abstract 17.614836 6.195750\n0 title 15.939687 3.486766\n2 venue 12.615256 0.892033\n4 year 1.984472 0.445873\n5 n_citations 0.817997 0.191021\n3 authors 0.000000 0.000000\n" ] ], [ [ "### PDP", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\ncategorical_plot_conf = [\n {\n 'xlabel': 'Title',\n 'ylabel': 'Scores',\n 'pdp_xy': pdp_xy['title']\n },\n {\n 'xlabel': 'Abstract',\n 'pdp_xy': pdp_xy['abstract']\n }, \n {\n 'xlabel': 'Authors',\n 'pdp_xy': pdp_xy['authors']\n },\n {\n 'xlabel': 'Venue',\n 'pdp_xy': pdp_xy['venue'],\n # 'zoom': {\n # 'inset_axes': [0.15, 0.45, 0.47, 0.47],\n # 'x_limit': [950, 1010],\n # 'y_limit': [-9, 7],\n # 'connects': [True, True, False, False]\n # }\n },\n]\n\nnumerical_plot_conf = [\n {\n 'xlabel': 'Year',\n 'ylabel': 'Scores',\n 'pdp_xy': pdp_xy['year']\n },\n {\n 'xlabel': 'Citation Count',\n 'pdp_xy': pdp_xy['n_citations'],\n # 'zoom': {\n # 'inset_axes': [0.5, 0.2, 0.47, 0.47],\n # 'x_limit': [-100, 1000],\n # 'y_limit': [-7.3, -6.2],\n # 'connects': [False, False, True, True]\n # }\n }\n]\n\ndef pdp_plot(confs, title):\n fig, axes = plt.subplots(nrows=1, ncols=len(confs), figsize=(20, 5), dpi=100)\n subplot_idx = 0\n # 
plt.suptitle(title, fontsize=20, fontweight='bold')\n # plt.autoscale(False)\n for conf in confs:\n axess = axes if len(confs) == 1 else axes[subplot_idx]\n\n axess.plot(conf['pdp_xy']['x'], conf['pdp_xy']['y'])\n axess.grid(alpha = 0.4)\n\n if ('ylabel' in conf):\n axess.set_ylabel(conf.get('ylabel'), fontsize=20, labelpad=10)\n \n axess.set_xlabel(conf['xlabel'], fontsize=16, labelpad=10)\n \n if not (conf['pdp_xy']['weird']):\n if (conf['pdp_xy']['numerical']):\n axess.set_ylim([-9, -6.5])\n pass\n else:\n axess.set_ylim([-15, 10])\n pass\n \n if 'zoom' in conf:\n axins = axess.inset_axes(conf['zoom']['inset_axes']) \n axins.plot(conf['pdp_xy']['x'], conf['pdp_xy']['y'])\n axins.set_xlim(conf['zoom']['x_limit'])\n axins.set_ylim(conf['zoom']['y_limit'])\n axins.grid(alpha=0.3)\n rectpatch, connects = axess.indicate_inset_zoom(axins)\n connects[0].set_visible(conf['zoom']['connects'][0])\n connects[1].set_visible(conf['zoom']['connects'][1])\n connects[2].set_visible(conf['zoom']['connects'][2])\n connects[3].set_visible(conf['zoom']['connects'][3])\n \n subplot_idx += 1\n\npdp_plot(categorical_plot_conf, \"PDPs for four categorical features\")\nplt.savefig(os.path.join('.', 'plot', f'{sample_name}-categorical.png'), facecolor='white', transparent=False, bbox_inches='tight')\n\n# second fig\npdp_plot(numerical_plot_conf, \"PDPs for two numerical features\")\nplt.savefig(os.path.join('.', 'plot', f'{sample_name}-numerical.png'), facecolor='white', transparent=False, bbox_inches='tight')\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd413d7c92d283ab7acb1f746fdb59bb95ac019
234,133
ipynb
Jupyter Notebook
Notebooks/EDA.ipynb
crystal-ctrl/engineering_project
8a7d03f4a81bb8d5f9de4377bd522549ce66bdf9
[ "MIT" ]
5
2021-07-24T12:53:25.000Z
2022-03-23T02:17:34.000Z
Notebooks/EDA.ipynb
crystal-ctrl/engineering_project
8a7d03f4a81bb8d5f9de4377bd522549ce66bdf9
[ "MIT" ]
null
null
null
Notebooks/EDA.ipynb
crystal-ctrl/engineering_project
8a7d03f4a81bb8d5f9de4377bd522549ce66bdf9
[ "MIT" ]
3
2021-07-24T12:53:09.000Z
2022-03-13T18:58:53.000Z
157.136242
61,520
0.85967
[ [ [ "# EDA", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "## 1. COVID-19 Report", "_____no_output_____" ] ], [ [ "# Accessing data from database\nengine = create_engine('sqlite:///covid.db')\ndf = pd.read_sql('SELECT * FROM case_data;',engine)\n\ndf['report_date'] = pd.to_datetime(df.report_date)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 61047 entries, 0 to 61046\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 report_date 61047 non-null datetime64[ns]\n 1 fips 61047 non-null object \n 2 locality 61047 non-null object \n 3 vdh_health_district 61047 non-null object \n 4 total_cases 61047 non-null object \n 5 hospitalizations 61047 non-null object \n 6 deaths 61047 non-null object \ndtypes: datetime64[ns](1), object(6)\nmemory usage: 3.3+ MB\n" ], [ "df['total_cases'] = df.total_cases.astype(int)\ndf['hospitalizations'] = df.hospitalizations.astype(int)\ndf['deaths'] = df.deaths.astype(int)", "_____no_output_____" ], [ "# group by district, county and date\n(df\n.groupby(['vdh_health_district','locality','report_date'])\n.total_cases.sum()\n.reset_index()\n.sort_values('total_cases', ascending=False)).head()", "_____no_output_____" ], [ "df.sort_values(['vdh_health_district', 'locality','report_date'],\n inplace=True,ascending=True)\ndf.head(10)", "_____no_output_____" ], [ "county_list = list(pd.unique(df.locality.values))\nprint(len(county_list))\nprint(county_list)", "133\n['Alexandria', 'Alleghany', 'Botetourt', 'Covington', 'Craig', 'Roanoke County', 'Salem', 'Arlington', 'Albemarle', 'Charlottesville', 'Fluvanna', 'Greene', 'Louisa', 'Nelson', 'Augusta', 'Bath', 'Buena Vista City', 'Harrisonburg', 'Highland', 'Lexington', 'Rockbridge', 'Rockingham', 'Staunton', 'Waynesboro', 'Amherst', 'Appomattox', 'Bedford', 'Campbell', 'Lynchburg', 'Chesapeake', 'Chesterfield', 'Colonial Heights', 'Powhatan', 'Charles City', 'Goochland', 'Hanover', 'New Kent', 'Dinwiddie', 'Emporia', 'Greensville', 'Hopewell', 'Petersburg', 'Prince George', 'Surry', 'Sussex', 'Buchanan', 'Dickenson', 'Russell', 'Tazewell', 'Accomack', 'Northampton', 'Fairfax', 'Fairfax City', 'Falls Church', 'Hampton', 'Henrico', 'Lee', 'Norton', 'Scott', 'Wise', 'Clarke', 'Frederick', 'Page', 'Shenandoah', 'Warren', 'Winchester', 'Loudoun', 'Bland', 'Bristol', 'Carroll', 'Galax', 'Grayson', 'Smyth', 'Washington', 'Wythe', 'Floyd', 'Giles', 'Montgomery', 'Pulaski', 'Radford', 'Norfolk', 'James City', 'Newport News', 'Poquoson', 'Williamsburg', 'York', 'Amelia', 'Buckingham', 'Charlotte', 'Cumberland', 'Lunenburg', 'Nottoway', 'Prince Edward', 'Danville', 'Pittsylvania', 'Portsmouth', 'Manassas City', 'Manassas Park', 'Prince William', 'Caroline', 'Fredericksburg', 'King George', 'Spotsylvania', 'Stafford', 'Culpeper', 'Fauquier', 'Madison', 'Orange', 'Rappahannock', 'Richmond City', 'Roanoke City', 'Brunswick', 'Halifax', 'Mecklenburg', 'Essex', 'Gloucester', 'King William', 'King and Queen', 'Lancaster', 'Mathews', 'Middlesex', 'Northumberland', 'Richmond County', 'Westmoreland', 'Virginia Beach', 'Franklin County', 'Henry', 'Martinsville', 'Patrick', 'Franklin City', 'Isle of Wight', 'Southampton', 'Suffolk']\n" ], [ "district_list = list(pd.unique(df.vdh_health_district.values))\nprint(len(district_list))\nprint(district_list)", 
"36\n['Alexandria', 'Alleghany', 'Arlington', 'Blue Ridge', 'Central Shenandoah', 'Central Virginia', 'Chesapeake', 'Chesterfield', 'Chickahominy', 'Crater', 'Cumberland Plateau', 'Eastern Shore', 'Fairfax', 'Hampton', 'Henrico', 'Lenowisco', 'Lord Fairfax', 'Loudoun', 'Mount Rogers', 'New River', 'Norfolk', 'Peninsula', 'Piedmont', 'Pittsylvania-Danville', 'Portsmouth', 'Prince William', 'Rappahannock', 'Rappahannock Rapidan', 'Richmond', 'Roanoke', 'Southside', 'Thomas Jefferson', 'Three Rivers', 'Virginia Beach', 'West Piedmont', 'Western Tidewater']\n" ], [ "# VDH rename Thomas Jefferson district to Blue Ridge\ndf.replace({'vdh_health_district':\"Thomas Jefferson\"}, \"Blue Ridge\", inplace=True)", "_____no_output_____" ], [ "district_list = list(pd.unique(df.vdh_health_district.values))\nprint(len(district_list))", "35\n" ], [ "# cumulative counts\ncumulative_df = (df\n.groupby(['report_date'])\n['total_cases','hospitalizations','deaths'].sum()\n.reset_index()\n.sort_values('total_cases', ascending=False))", "_____no_output_____" ], [ "cumulative_df.head()", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nplt.plot(cumulative_df.report_date,cumulative_df.total_cases, label='cases')\nplt.plot(cumulative_df.report_date,cumulative_df.hospitalizations, label='hospitalizations')\nplt.plot(cumulative_df.report_date,cumulative_df.deaths, label='deaths')\nplt.legend();", "_____no_output_____" ], [ "print('total cases: ', cumulative_df.total_cases.max())\nprint('total hospitalizations: ', cumulative_df.hospitalizations.max())\nprint('total deaths: ', cumulative_df.deaths.max())", "total cases: 678506\ntotal hospitalizations: 30291\ntotal deaths: 11342\n" ], [ "print(pd.unique(df.fips.values))", "['51510' '51005' '51023' '51580' '51045' '51161' '51775' '51013' '51003'\n '51540' '51065' '51079' '51109' '51125' '51015' '51017' '51530' '51660'\n '51091' '51678' '51163' '51165' '51790' '51820' '51009' '51011' '51019'\n '51031' '51680' '51550' '51041' '51570' '51145' '51036' '51075' '51085'\n '51127' '51053' '51595' '51081' '51670' '51730' '51149' '51181' '51183'\n '51027' '51051' '51167' '51185' '51001' '51131' '51059' '51600' '51610'\n '51650' '51087' '51105' '51720' '51169' '51195' '51043' '51069' '51139'\n '51171' '51187' '51840' '51107' '51021' '51520' '51035' '51640' '51077'\n '51173' '51191' '51197' '51063' '51071' '51121' '51155' '51750' '51710'\n '51095' '51700' '51735' '51830' '51199' '51007' '51029' '51037' '51049'\n '51111' '51135' '51147' '51590' '51143' '51740' '51683' '51685' '51153'\n '51033' '51630' '51099' '51177' '51179' '51047' '51061' '51113' '51137'\n '51157' '51760' '51770' '51025' '51083' '51117' '51057' '51073' '51101'\n '51097' '51103' '51115' '51119' '51133' '51159' '51193' '51810' '51067'\n '51089' '51690' '51141' '51620' '51093' '51175' '51800']\n" ], [ "df[['prev_date','prev_total_cases','prev_hospitalization','prev_death']] = (df\n .groupby(['vdh_health_district','locality'])\\\n ['report_date','total_cases','hospitalizations','deaths']\n .apply(lambda grp: grp.shift(1)))", "_____no_output_____" ], [ "df.fillna(0, inplace=True)", "_____no_output_____" ], [ "def get_case_count(row):\n counter = (row['total_cases'] - row['prev_total_cases'])\n if counter < 0:\n return 0\n return counter\ndef get_hos_count(row):\n counter = (row['hospitalizations'] - row['prev_hospitalization'])\n if counter < 0:\n return 0\n return counter\ndef get_death_count(row):\n counter = (row['deaths'] - row['prev_death'])\n if counter < 0:\n return 0\n return counter\n\ndf['new_case'] 
= df.apply(lambda row: get_case_count(row), axis=1 )\ndf['new_hospitalization'] = df.apply(lambda row: get_hos_count(row), axis=1 )\ndf['new_death'] = df.apply(lambda row: get_death_count(row), axis=1 )", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "va_daily_count = df.groupby(\"report_date\")['new_case','new_hospitalization','new_death'].sum().reset_index()", "_____no_output_____" ], [ "plt.plot(va_daily_count.report_date, va_daily_count.new_case, label='case')\nplt.plot(va_daily_count.report_date, va_daily_count.new_hospitalization, label='hospitalization')\nplt.plot(va_daily_count.report_date, va_daily_count.new_death, label='death')\nplt.legend();", "_____no_output_____" ], [ "plt.plot(va_daily_count.report_date, va_daily_count.new_hospitalization, label='hospitalization')\nplt.plot(va_daily_count.report_date, va_daily_count.new_death, label='death')\nplt.legend();", "_____no_output_____" ] ], [ [ "## 2. COVID-19 Vaccines", "_____no_output_____" ] ], [ [ "df = pd.read_sql('SELECT * FROM vaccine_data;',engine)\ndf.head()", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 259100 entries, 0 to 259099\nData columns (total 8 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 administration_date 259098 non-null object\n 1 fips 259100 non-null object\n 2 locality 259100 non-null object\n 3 health_district 259100 non-null object\n 4 facility_type 259100 non-null object\n 5 vaccine_manufacturer 259100 non-null object\n 6 dose_number 259100 non-null object\n 7 vaccine_doses_administered 259100 non-null object\ndtypes: object(8)\nmemory usage: 15.8+ MB\n" ], [ "df['administration_date'] = pd.to_datetime(df.administration_date)\ndf['vaccine_doses_administered'] = df.vaccine_doses_administered.astype(int)", "_____no_output_____" ], [ "print(pd.unique(df.fips.values))", "['51001' '51003' '51005' '51007' '51009' '51011' '51013' '51015' '51017'\n '51019' '51021' '51023' '51025' '51027' '51029' '51031' '51033' '51035'\n '51036' '51037' '51041' '51043' '51045' '51047' '51049' '51051' '51053'\n '51057' '51059' '51061' '51063' '51065' '51067' '51069' '51071' '51073'\n '51075' '51077' '51079' '51081' '51083' '51085' '51087' '51089' '51093'\n '51095' '51097' '51099' '51101' '51105' '51107' '51109' '51111' '51113'\n '51115' '51117' '51119' '51121' '51125' '51127' '51131' '51133' '51135'\n '51137' '51139' '51141' '51143' '51145' '51147' '51149' '51153' '51155'\n '51157' '51159' '51161' '51163' '51165' '51167' '51169' '51171' '51173'\n '51175' '51177' '51179' '51181' '51183' '51185' '51187' '51191' '51193'\n '51195' '51197' '51199' '51510' '51520' '51530' '51540' '51550' '51570'\n '51580' '51590' '51595' '51600' '51610' '51620' '51630' '51640' '51650'\n '51660' '51670' '51678' '51680' '51683' '51685' '51690' '51700' '51710'\n '51730' '51735' '51740' '51750' '51760' '51770' '51775' '51790' '51800'\n '51810' '51820' '51830' '51840' 'Not Reported' 'Out of State' '51091'\n '51103' '51720']\n" ], [ "print(pd.unique(df.health_district.values))", "['Eastern Shore' 'Blue Ridge' 'Alleghany' 'Piedmont' 'Central Virginia'\n 'Arlington' 'Central Shenandoah' 'Mount Rogers' 'Southside'\n 'Cumberland Plateau' 'Rappahannock' 'Chickahominy' 'Chesterfield'\n 'Lord Fairfax' 'Rappahannock Rapidan' 'Crater' 'Three Rivers' 'Fairfax'\n 'New River' 'West Piedmont' 'Henrico' 'Western Tidewater' 'Peninsula'\n 'Lenowisco' 'Loudoun' 'Pittsylvania-Danville' 'Prince William'\n 'Alexandria' 'Chesapeake' 'Hampton' 'Norfolk' 'Portsmouth' 
'Richmond'\n 'Roanoke' 'Virginia Beach' 'Not Reported' 'Out of State']\n" ], [ "print(pd.unique(df.facility_type.values))", "['Other Community Health Providers' 'Medical Practices' 'Pharmacies'\n 'Local Health Departments' 'Hospitals' 'Federal Doses']\n" ], [ "print(pd.unique(df.dose_number.values))", "['2' '1']\n" ], [ "print(pd.unique(df.vaccine_manufacturer.values))", "['Pfizer' 'Moderna' 'J&J' 'Non-Specified']\n" ], [ "print(\"total_doses: \", df.vaccine_doses_administered.sum())\nprint(\"at least one dose: \", df[df.dose_number =='1'].vaccine_doses_administered.sum())\nprint(\"two doses: \", df[df.dose_number =='2'].vaccine_doses_administered.sum())\nprint(\"fully vaccinated: \", df[df.dose_number =='2'].vaccine_doses_administered.sum()+df[(df.dose_number =='1') & (df.vaccine_manufacturer =='J&J')].vaccine_doses_administered.sum())", "total_doses: 8782457\nat least one dose: 4910407\ntwo doses: 3872050\nfully vaccinated: 4167519\n" ], [ "#problems:\n#1. Federal doses - district, manufacturer not specified\n#2. Not reported, out of state", "_____no_output_____" ], [ "total_dose = df.groupby(\"administration_date\")['vaccine_doses_administered'].sum().reset_index()\nfirst_dose = df[df.dose_number =='1'].groupby(\"administration_date\")['vaccine_doses_administered'].sum().reset_index()\ntwo_dose = df[df.dose_number =='2'].groupby(\"administration_date\")['vaccine_doses_administered'].sum().reset_index()", "_____no_output_____" ], [ "plt.plot(total_dose.administration_date, total_dose.vaccine_doses_administered, label='total')\nplt.plot(first_dose.administration_date, first_dose.vaccine_doses_administered, label='first')\nplt.plot(two_dose.administration_date, two_dose.vaccine_doses_administered, label='two')\nplt.legend();", "_____no_output_____" ], [ "import seaborn as sns\nsns.countplot(df.vaccine_manufacturer, hue=df.dose_number)", "_____no_output_____" ], [ "sns.countplot(df.facility_type, hue=df.vaccine_manufacturer)\nplt.xticks(rotation=80);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd423ee548522a82600aa0f2bac65f49d226728
434,981
ipynb
Jupyter Notebook
model_improvements/full_train/run_test_data.ipynb
joemcglinchy/IS_segmentation
fcf968d72c8370e9bc60659921544c9018988842
[ "MIT" ]
2
2020-01-17T03:47:19.000Z
2021-09-07T07:53:29.000Z
model_scenarios/full_train/run_test_data.ipynb
earthlab/UNet_Impervious_Surface_Classifier
b6b096e9a5bbf1d2f34ae8a43e56dfd1faecabcc
[ "MIT" ]
1
2021-10-12T22:05:14.000Z
2021-10-12T22:05:14.000Z
model_improvements/full_train/run_test_data.ipynb
joemcglinchy/IS_segmentation
fcf968d72c8370e9bc60659921544c9018988842
[ "MIT" ]
null
null
null
583.867114
372,800
0.940625
[ [ [ "from glob import glob\n\n%matplotlib inline\nfrom pylab import *\n# import cv2\nimport rasterio as rio\n\nrcParams['figure.figsize'] = 10, 10\n\nfont = {'family' : 'normal',\n 'weight' : 'medium',\n 'size' : 14}\n\nmatplotlib.rc('font', **font)\n\nimport os,sys\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy import misc\nimport fiona\nfrom shapely.geometry import shape\nimport shapely\nfrom rasterio.mask import mask\nfrom pyproj import Proj, transform\n\nimport rasterio\n\n# add the unet helpers\nsys.path.append('../../')\nfrom test_unet_helpers import *\nfrom unet_models import unet11_MS\n\n# torch stuff\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torch\nfrom torchvision import models, datasets\nimport torchvision\nfrom torchvision.transforms import ToTensor, Normalize, Compose\nfrom utils import variable", "/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/site-packages/matplotlib/__init__.py:1067: UserWarning: Duplicate key in file \"/home/ubuntu/.config/matplotlib/matplotlibrc\", line #2\n (fname, cnt))\n/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/site-packages/matplotlib/__init__.py:1067: UserWarning: Duplicate key in file \"/home/ubuntu/.config/matplotlib/matplotlibrc\", line #3\n (fname, cnt))\n/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88\n return f(*args, **kwds)\n" ], [ "class UNet(nn.Module):\n def __init__(self, in_channels=1, n_classes=2, depth=5, wf=6, padding=False,\n batch_norm=False, up_mode='upconv'):\n \"\"\"\n Implementation of\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n (Ronneberger et al., 2015)\n https://arxiv.org/abs/1505.04597\n Using the default arguments will yield the exact version used\n in the original paper\n Args:\n in_channels (int): number of input channels\n n_classes (int): number of output channels\n depth (int): depth of the network\n wf (int): number of filters in the first layer is 2**wf\n padding (bool): if True, apply padding such that the input shape\n is the same as the output.\n This may introduce artifacts\n batch_norm (bool): Use BatchNorm after layers with an\n activation function\n up_mode (str): one of 'upconv' or 'upsample'.\n 'upconv' will use transposed convolutions for\n learned upsampling.\n 'upsample' will use bilinear upsampling.\n \"\"\"\n super(UNet, self).__init__()\n assert up_mode in ('upconv', 'upsample')\n self.padding = padding\n self.depth = depth\n prev_channels = in_channels\n self.down_path = nn.ModuleList()\n for i in range(depth):\n self.down_path.append(UNetConvBlock(prev_channels, 2**(wf+i),\n padding, batch_norm))\n prev_channels = 2**(wf+i)\n\n self.up_path = nn.ModuleList()\n for i in reversed(range(depth - 1)):\n self.up_path.append(UNetUpBlock(prev_channels, 2**(wf+i), up_mode,\n padding, batch_norm))\n prev_channels = 2**(wf+i)\n\n self.last = nn.Conv2d(prev_channels, n_classes, kernel_size=1)\n\n def forward(self, x):\n blocks = []\n for i, down in enumerate(self.down_path):\n x = down(x)\n if i != len(self.down_path)-1:\n blocks.append(x)\n x = F.avg_pool2d(x, 2)\n\n for i, up in enumerate(self.up_path):\n x = up(x, blocks[-i-1])\n\n return self.last(x)\n\n\nclass UNetConvBlock(nn.Module):\n def __init__(self, in_size, out_size, padding, batch_norm):\n super(UNetConvBlock, self).__init__()\n block = []\n\n block.append(nn.Conv2d(in_size, out_size, kernel_size=3,\n padding=int(padding)))\n block.append(nn.ReLU())\n if batch_norm:\n block.append(nn.BatchNorm2d(out_size))\n\n block.append(nn.Conv2d(out_size, out_size, kernel_size=3,\n padding=int(padding)))\n block.append(nn.ReLU())\n if batch_norm:\n block.append(nn.BatchNorm2d(out_size))\n\n self.block = nn.Sequential(*block)\n\n def forward(self, x):\n out = self.block(x)\n return out\n\n\nclass UNetUpBlock(nn.Module):\n def __init__(self, in_size, out_size, up_mode, padding, batch_norm):\n super(UNetUpBlock, self).__init__()\n if up_mode == 'upconv':\n self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2,\n stride=2)\n elif up_mode == 'upsample':\n self.up = nn.Sequential(nn.Upsample(mode='bilinear', scale_factor=2),\n nn.Conv2d(in_size, out_size, kernel_size=1))\n\n self.conv_block = UNetConvBlock(in_size, out_size, padding, batch_norm)\n\n def center_crop(self, layer, target_size):\n _, _, layer_height, layer_width = layer.size()\n diff_y = (layer_height - target_size[0]) // 2\n diff_x = (layer_width - target_size[1]) // 2\n return layer[:, :, diff_y:(diff_y + target_size[0]), diff_x:(diff_x + target_size[1])]\n\n def forward(self, x, bridge):\n up = self.up(x)\n crop1 = self.center_crop(bridge, up.shape[2:])\n out = torch.cat([up, crop1], 1)\n out = self.conv_block(out)\n\n return out\n\nfrom glob import glob\nclass DG_GT_Dataset(Dataset):\n \"\"\"Dataset class for ignition types (Y var)\"\"\"\n \n def __init__(self, data_dir, channels='all', 
img_transform=None, gt_transform=None):\n \"\"\" \n Args:\n data_dir (string): the folder containing the image files\n channels (string): 'all', 'bgr', 'bgrn1', 'bgrn2'; band sets for DG imagery\n img_transform (callable, optional): Optional transform to be applied to source image data\n gt_transform (callable, optional): Optional transform to be applied to labeled image data\n x_var (iterable, optional): list of predictor variable names\n land_mask (string, optional): defines whether or not to return land mask\n \"\"\"\n \n # some sanity checks\n assert os.path.exists(data_dir)\n \n self.gt_files = sorted(glob(data_dir + '/gt*.tif'))\n self.img_files = sorted(glob(data_dir + '/dg*.tif'))\n \n \n print(self.img_files[0])\n print(self.gt_files[0])\n \n self.img_transform = img_transform\n self.gt_transform = gt_transform\n self.dg_bgr = [1,2,4]\n self.dg_bgrn1 = [1,2,4,6]\n self.dg_bgrn2 = [1,2,4,7]\n self.channels = channels\n \n assert len(self.img_files) == len(self.gt_files)\n \n \n def __getitem__(self, idx):\n \n \"\"\"\n Files are organized as <var_type>_<year>_<month>_t<tileNumber>.tif, e.g., Arson_1992_1_t1\n A single dataset needs to be constructed for a given ignition type, year, month, and tile number\n \"\"\"\n \n img_file = self.img_files[idx]\n gt_file = self.gt_files[idx]\n \n with rio.open(img_file) as src:\n img_arr = src.read()\n \n # check the channels\n if self.channels == 'bgr':\n img_arr = img_arr[self.dg_bgr, :, :]\n elif self.channels == 'bgrn1':\n img_arr = img_arr[self.dg_bgrn1, :, :]\n elif self.channels == 'bgrn2':\n img_arr = img_arr[self.dg_brgn2, :, :]\n else:\n pass\n \n with rio.open(gt_file) as src:\n gt_arr = src.read()\n\n if (self.img_transform is not None):\n return (self.img_transform(torch.from_numpy(img_arr)), \n self.gt_transform(torch.from_numpy(gt_arr))) \n else:\n return (torch.from_numpy(img_arr), torch.from_numpy(gt_arr)) # return X, Y, Mask (Mask uses LandMask in X-var folder)\n \n \n def __len__(self):\n return len(self.img_files)", "_____no_output_____" ] ], [ [ "# Now that data is ready, set up the model and run through it", "_____no_output_____" ], [ "## some code for setting up the model and performance eval", "_____no_output_____" ] ], [ [ "import utils as pyt_utils\nfrom torch.optim import Adam\nimport torch.backends.cudnn as cudnn\nfrom pathlib import Path\nfrom validation import validation_binary\nfrom loss import LossBinary\nimport json\n\nimport time\nfrom sklearn.metrics import roc_auc_score, log_loss, roc_auc_score, roc_curve, auc\n\n# need to change this to DICE loss!\n#loss = LossBinary(jaccard_weight=args.jaccard_weight)\n#criterion = nn.CrossEntropyLoss()\n\n\ncudnn.benchmark = True", "/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88\n return f(*args, **kwds)\n/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. 
Expected 96, got 88\n return f(*args, **kwds)\n" ] ], [ [ "# DO the same for pansharpened data", "_____no_output_____" ] ], [ [ "train_folder = r'D:\\projects\\RD\\debug_IS_segmentation\\tilesPS_d10000_512x512\\train'\ntest_folder = r'D:\\projects\\RD\\debug_IS_segmentation\\tilesPS_d10000_512x512\\test'\nval_folder = r'D:\\projects\\RD\\debug_IS_segmentation\\tilesPS_d10000_512x512\\val'\n\n# ubuntu paths\ntrain_folder = '/media/joemcglinchy/Data/projects/RD/debug_IS_segmentation/kmeans3_split/tilesPS_d10000_512x512/train'\ntest_folder = '/media/joemcglinchy/Data/projects/RD/debug_IS_segmentation/kmeans3_split/tilesPS_d10000_512x512/test'\nval_folder = '/media/joemcglinchy/Data/projects/RD/debug_IS_segmentation/kmeans3_split/tilesPS_d10000_512x512/val'\n\n# aws pathts\ntrain_folder = '../../../tiles/kmeans3_split/tilesPS_d10000_512x512/train'\ntest_folder = '../../../tiles/kmeans3_split/tilesPS_d10000_512x512/test'\nval_folder = '../../../tiles/kmeans3_split/tilesPS_d10000_512x512/val'\n\n# load as 4 band\nps_train_ds = DG_GT_Dataset(train_folder, channels='bgrn1')\nps_test_ds = DG_GT_Dataset(test_folder, channels='bgrn1')\nps_val_ds = DG_GT_Dataset(val_folder, channels='bgrn1')", "../../../tiles/kmeans3_split/tilesPS_d10000_512x512/train/dg_is_00000.tif\n../../../tiles/kmeans3_split/tilesPS_d10000_512x512/train/gt_is_00000.tif\n../../../tiles/kmeans3_split/tilesPS_d10000_512x512/test/dg_is_00731.tif\n../../../tiles/kmeans3_split/tilesPS_d10000_512x512/test/gt_is_00731.tif\n../../../tiles/kmeans3_split/tilesPS_d10000_512x512/val/dg_is_00500.tif\n../../../tiles/kmeans3_split/tilesPS_d10000_512x512/val/gt_is_00500.tif\n" ], [ "# RGB-NIR\nmodel_path = './files_PS_bgrn1/bgrn1_ps_ep75_step1575_b24.pt'\n\ntorch.cuda.empty_cache()\n\n#model = unet11(pretrained=False) # B-G-R\nps_model = unet11_MS(num_bands=4, pretrained=False) \n\n\n# load on CPU\nif os.path.exists(model_path):\n state_dict = torch.load(str(model_path), map_location='cpu')\n epoch = state_dict['epoch']\n step = state_dict['step']\n new_state_dict = OrderedDict()\n for k, v in state_dict['model'].items():\n name = k[7:] # remove 'module.' 
of dataparallel\n new_state_dict[name]=v\n\n ps_model.load_state_dict(new_state_dict)\n print('Restored model, epoch {}, step {:,}'.format(epoch, step))\n\nps_model.eval()", "num_bands is 4\nRestored model, epoch files_PS_bgrn1/bgrn1_ps_ep75_step1575_b24.pt, step 1,575\n" ], [ "all_tps = []\nall_fps= []\nmaxes = []\nmins = []\naucs = []\nfor ind in range(len(ps_val_ds)):\n test_im, test_target = ps_val_ds[ind]\n test_out = ps_model(variable(test_im.unsqueeze(0)))\n \n\n out = test_out.cpu().detach().numpy()[0][0]\n maxes.append(out.max())\n mins.append(out.min())\n \n false_positive_rate, true_positive_rate, thresholds = roc_curve(test_target.numpy().flatten(), out.flatten())\n aucs.append(roc_auc_score(test_target.numpy().flatten(), out.flatten()))\n all_tps.append(true_positive_rate)\n all_fps.append(false_positive_rate)", "_____no_output_____" ], [ "font = {'family' : 'normal',\n 'weight' : 'medium',\n 'size' : 20}\n\nmatplotlib.rc('font', **font)\n\n\nplt.figure(figsize=(15,15))\nfor i in range(len(all_tps)):\n \n plt.plot(all_fps[i], all_tps[i])\n \nplt.plot([0,1], [0,1], 'r--')\nplt.title('ROC Curves for Test Set: Blue-Green-Red-NIR1 bands')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.grid(True)\nplt.show()", "/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/site-packages/matplotlib/font_manager.py:1328: UserWarning: findfont: Font family ['normal'] not found. Falling back to DejaVu Sans\n (prop.get_family(), self.defaultFamily[fontext]))\n" ], [ "max(maxes), min(mins)", "_____no_output_____" ], [ "rasters = glob('./rastertiles2015/*.tif')\nrasmax2, rasmin2 = [], []\nfor f in rasters:\n with rio.open(f) as src:\n arr = src.read()\n rasmax2.append(arr.max())\n rasmin2.append(arr.min())\n \nrasters2 = glob('./rastertiles2/*.tif')\nrasmax, rasmin = [], []\nfor f in rasters2:\n with rio.open(f) as src:\n arr = src.read()\n rasmax.append(arr.max())\n rasmin.append(arr.min())", "_____no_output_____" ], [ "max(rasmax2), min(rasmin2)", "_____no_output_____" ], [ "plt.figure(figsize=(10,10))\nplt.hist(maxes, alpha=0.3, bins=100, label='model data')\nplt.hist(rasmax2, alpha=0.3, bins=100, label='2015 raster data')\nplt.hist(rasmax, alpha=0.3, bins=100, label='2016 raster data')\nplt.legend()\nplt.xlabel('Image Tile Maximum')\nplt.ylabel('Count')\nplt.show()", "/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/site-packages/matplotlib/font_manager.py:1328: UserWarning: findfont: Font family ['normal'] not found. Falling back to DejaVu Sans\n (prop.get_family(), self.defaultFamily[fontext]))\n" ], [ "plt.hist(aucs, bins=100);", "/home/ubuntu/anaconda3/envs/spatial_torch/lib/python3.5/site-packages/matplotlib/font_manager.py:1328: UserWarning: findfont: Font family ['normal'] not found. Falling back to DejaVu Sans\n (prop.get_family(), self.defaultFamily[fontext]))\n" ], [ "mean(aucs), min(aucs), max(aucs)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd427aec9838084620d14becdb498b7e92700bd
68,197
ipynb
Jupyter Notebook
calibration/ProjFunctionCalibration2.ipynb
ETC-UA/LeafAreaIndex.jl
739b1af9f496b6fd919ee09088e5210287cd9e56
[ "MIT" ]
10
2015-08-17T12:56:04.000Z
2020-10-02T08:24:02.000Z
calibration/ProjFunctionCalibration2.ipynb
ETC-UA/LeafAreaIndex.jl
739b1af9f496b6fd919ee09088e5210287cd9e56
[ "MIT" ]
4
2015-06-03T11:07:17.000Z
2019-02-19T20:36:32.000Z
calibration/ProjFunctionCalibration2.ipynb
ETC-UA/LeafAreaIndex.jl
739b1af9f496b6fd919ee09088e5210287cd9e56
[ "MIT" ]
4
2015-11-01T07:52:51.000Z
2021-10-01T18:02:25.000Z
353.352332
32,746
0.912723
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecd448bb0666b090c56caa2be43da2c670f88516
7,032
ipynb
Jupyter Notebook
Analise Custos/custos.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
Analise Custos/custos.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
Analise Custos/custos.ipynb
lsawakuchi/x1
564a135b4fdaa687a4ef6d470ddaa4730932d429
[ "MIT" ]
null
null
null
19.808451
138
0.446672
[ [ [ "import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nfrom sqlalchemy import create_engine", "_____no_output_____" ], [ "engine = create_engine(\"mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo\")\ncon = engine.connect()\ndf = pd.read_sql(\"select * from consultas_idwall_operacoes\", con)\ncon.close()", "_____no_output_____" ], [ "df[\"mes_consulta\"] = df.apply(lambda x : x[\"data_ref\"].date(), axis=1)", "_____no_output_____" ], [ "df = df[df['mes_consulta']>=datetime(2019, 1,1).date()]", "_____no_output_____" ], [ "# consultados no semestre\ndf[\"numero_consulta\"].unique().tolist().__len__()", "_____no_output_____" ], [ "5845*15", "_____no_output_____" ], [ "engine = create_engine(\"mysql+pymysql://capMaster:#jackpot123#@captalys.cmrbivuuu7sv.sa-east-1.rds.amazonaws.com:23306/varejo\")\ncon = engine.connect()\ndfp = pd.read_sql(\"select * from pre_analise\", con)\ncon.close()", "_____no_output_____" ], [ "dfp[\"produto\"].unique().tolist()", "_____no_output_____" ], [ "dt = dfp[dfp[\"produto\"]!=\"kred\"]", "_____no_output_____" ], [ "res = dt[dt[\"flag_faturamento\"]==1]", "_____no_output_____" ], [ "2075*15", "_____no_output_____" ], [ "res = res.groupby(\"produto\").count().reset_index()[[\"produto\", \"cnpj\"]]", "_____no_output_____" ], [ "res[\"custo\"] = res[\"cnpj\"]*15", "_____no_output_____" ], [ "res", "_____no_output_____" ], [ "(df[\"numero_consulta\"].unique().tolist().__len__()*15)/6", "_____no_output_____" ], [ "534*15", "_____no_output_____" ], [ "# demanda de consulta por plataforma", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd45f248eaf3c9b237d74525c948a9600e8515a
37,470
ipynb
Jupyter Notebook
notebooks/Report.ipynb
csipapicsa/fyp2021p04g10
45229412e0c3c0cee36ba77ce0aeaa3b50769026
[ "MIT" ]
null
null
null
notebooks/Report.ipynb
csipapicsa/fyp2021p04g10
45229412e0c3c0cee36ba77ce0aeaa3b50769026
[ "MIT" ]
null
null
null
notebooks/Report.ipynb
csipapicsa/fyp2021p04g10
45229412e0c3c0cee36ba77ce0aeaa3b50769026
[ "MIT" ]
null
null
null
32.025641
177
0.576701
[ [ [ "# <center> Sentiment and offensive language analysis - Group 10 - Project 4", "_____no_output_____" ], [ "Through this report we are aiming to detect offensiveness and sentimentality of different tweets", "_____no_output_____" ], [ "# Export packages", "_____no_output_____" ] ], [ [ "import nltk\nimport re\nimport difflib\nfrom nltk import agreement\nfrom nltk.tokenize import TweetTokenizer\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random #only used for generating 100 random tweets for manual labelling\nfrom collections import Counter\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, GridSearchCV\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.neighbors import LocalOutlierFactor, KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import SVR, SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import linear_model\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.naive_bayes import GaussianNB, MultinomialNB\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, classification_report, roc_auc_score, roc_curve, recall_score, precision_score, f1_score\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.naive_bayes import ComplementNB\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import cohen_kappa_score\n", "_____no_output_____" ] ], [ [ "## All functions for the notebook", "_____no_output_____" ], [ "During the tokenaziation punctuations, emojois, pointless strings and characters are removed", "_____no_output_____" ] ], [ [ "def tokenizer(tweets):\n \"\"\"\n Function that takes a list of strings and returns the tokenized version of each string\n \"\"\"\n #counter = 0\n #token_pat = re.compile(r'[\\w@’#]+')\n token_pat = re.compile(r'\\w+')\n skippable_pat = re.compile(r'[\\s\\d]+|@user')\n\n non_white_space = re.compile(r'[^@’#\\w\\s]') #Finds characters that are not white_space nor word characters (nor @’#)\n #print(\"these are the tweets\")\n #print(tweets)\n \n # Initialise lists\n tokens = []\n unmatchable = []\n\n# Compile patterns for speedup\n token_pat = re.compile(r'\\w+')\n\n tokenlist = []\n for i in tweets:\n #counter = counter + 1\n #print(counter)\n #tokens = []\n #unmatchable = []\n line = i.lower()\n #print(\"this is i: \",i)\n \n while line:\n #print(\"this is the line\")\n #print(line)\n skippable_match = re.search(skippable_pat, line)\n if skippable_match and skippable_match.start() == 0:\n line = line[skippable_match.end():]\n else:\n token_match = re.search(token_pat, line)\n #print(\"tokens_match\")\n #print(token_match)\n #print(token_match.start())\n if token_match and token_match.start() == 0:\n #print(\"\\nAPPEND IS RUNNING\\n\")\n #print(line[:token_match.end()])\n tokens.append(line[:token_match.end()])\n line = line[token_match.end():]\n else:\n unmatchable_end = len(line)\n if skippable_match:\n unmatchable_end = skippable_match.start()\n if token_match:\n unmatchable_end = min(unmatchable_end, token_match.start())\n unmatchable.append(line[:unmatchable_end])\n line = line[unmatchable_end:]\n tokenlist.append(tokens)\n tokens = []\n 
return(tokenlist)\n\n\ndef compare_tokenizers(bool):\n if bool==True:\n tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)\n j = 0\n for i in training_data: \n temp = i\n diff = difflib.context_diff(tknzr.tokenize(i),token_tweets[j])\n #print(\"\".join(diff), end = \"\")\n print(i,\"tknzr:\",tknzr.tokenize(i),\"\\ntokenlist:\",token_tweets[j],\"\\n\")\n j+=1\n\n \ndef import_(classification_task, file_name):\n with open(\"../data/raw/\"+classification_task+\"/\"+file_name, \"r\", encoding=\"utf-8\") as f:\n temp = [int(line.strip(\"\\n\")) for line in f]\n return(temp)\n\n\ndef import_and_tokenize(classification_task, file_name):\n with open(\"../data/raw/\"+classification_task+\"/\"+file_name, \"r\", encoding=\"utf-8\") as f:\n temp = [line for line in f]\n return(tokenizer(temp))\n\n\ndef report_clf_stats(predicted, test, classification_task):\n name_dict = {\"offensive\": [\"Not offensive\",\"Offensive\"], \"sentiment\": [\"Negative\", \"Neutral\", \"Positive\"]}\n print(metrics.accuracy_score(test, predicted))\n print(metrics.classification_report(predicted, test, target_names=name_dict[classification_task]),\"\\n\")\n print(metrics.confusion_matrix(test, predicted))", "_____no_output_____" ] ], [ [ "## Reading data\n### The Offensive Training Data", "_____no_output_____" ] ], [ [ "f = open(\"../data/raw/offensive/train_text.txt\", 'r', encoding = \"utf-8\")\ninputlist = [line for line in f]\nf.close()\n\ntraining_data, validation_data = inputlist[:len(inputlist)//2], inputlist[len(inputlist)//2:]", "_____no_output_____" ], [ "token_tweets = tokenizer(training_data)\nprint(token_tweets)\n#print(token_tweets[1])\n#[print(*i) for i in token_tweets]", "_____no_output_____" ] ], [ [ "### Comparing our own tokenizer with TweetTokenizer from nltk library\n<b>Set below value 'see_output' = True for comparison <i>(It'll run for a while)</i></b>\n", "_____no_output_____" ] ], [ [ "# Comparing our own tokenizer with TweetTokenizer from nltk library\n# Set below value 'see_output' = True for comparison\nsee_output = False\ncompare_tokenizers(see_output)", "_____no_output_____" ] ], [ [ "### Corpus size of Offensive and sentiment training sets respectively:", "_____no_output_____" ] ], [ [ "%%bash\nwc ../data/raw/offensive/train_text.txt\nwc ../data/raw/sentiment/train_text.txt", "_____no_output_____" ] ], [ [ "<b>Offensive:</b> 11916 lines/tweets, 262370 words <br>\n<b>Sentiment:</b> 45615 lines/tweets, 877516 words", "_____no_output_____" ], [ "### Running tokenizer function on offensive and sentiment training data to get token count right", "_____no_output_____" ] ], [ [ "with open(\"../data/raw/offensive/train_text.txt\", \"r\", encoding = \"utf-8\",) as f:\n offensive_raw = [line for line in f]\n\nwith open(\"../data/raw/sentiment/train_text.txt\", \"r\", encoding = \"utf-8\",) as f:\n sentiment_raw = [line for line in f]\n", "_____no_output_____" ] ], [ [ "<i>Below cell line takes some time to run", "_____no_output_____" ] ], [ [ "offensive_tokens = tokenizer(offensive_raw)\nsentiment_tokens = tokenizer(sentiment_raw)", "_____no_output_____" ] ], [ [ "## The top 10 most frequent words of each dataset", "_____no_output_____" ] ], [ [ "#from https://stackoverflow.com/questions/45019607/count-occurrence-of-a-list-in-a-list-of-lists\noff_uniq = pd.Series(offensive_tokens).explode().value_counts()\nsent_uniq = pd.Series(sentiment_tokens).explode().value_counts()\n\nprint(\"Offensive dataset, top 10 tokens:\",\"\\n\",off_uniq[:10],\"\\n\")\nprint(\"Sentiment dataset, top 10 
tokens:\",\"\\n\",sent_uniq[:10])\n\n#Turning above pd.series into dataframes, for ease of use later\n#Transformation found at:https://stackoverflow.com/questions/40224319/pandas-series-to-dataframe-using-series-indexes-as-columns\noff_uniq = off_uniq.to_frame().reset_index()\nsent_uniq = sent_uniq.to_frame().reset_index()\n\n#Renaming columns in dataframes\noff_uniq.columns = [\"token\",\"count\"]\nsent_uniq.columns = [\"token\",\"count\"]", "_____no_output_____" ] ], [ [ "### type/token ratio", "_____no_output_____" ] ], [ [ "#Types == Amount of different Tokens in dataset\noff_types = len(off_uniq[\"token\"])\nsent_types = len(sent_uniq[\"token\"])\nprint(\"Offensive Types: {}\\nSentiment types: {}\\n\".format(off_types,sent_types))\n\n#Tokens == Amount of all \"Words\" in dataset\noff_token_amount = off_uniq[\"count\"].sum()\nsent_token_amount = sent_uniq[\"count\"].sum()\nprint(\"Offensive tokens, amount: {}\\nSentiment tokens, amount: {}\\n\".format(off_token_amount, sent_token_amount))\n\n#Type/token ratio (=ttratio)\noff_ttratio = off_types/off_token_amount\nsent_ttratio = sent_types/sent_token_amount\nprint(\"Offensive type/token ratio: {:.4f}\\nSentiment type/token ratio: {:.4f}\".format(off_ttratio, sent_ttratio))", "_____no_output_____" ] ], [ [ "#### Types that only occur 1, 2 or 3 times\n<ul>\n <li>Things like Hashtags and misspelled nouns are prevalent, but they, more importantly, contain most of the Types in the vocabulary</li>\n <li>Tokens that occur only once make up ~ 50% of the types in both datasets!</li>\n</ul>", "_____no_output_____" ] ], [ [ "print(\"Offensive types w. freq 1, 2, or 3 divided by total types: {:.2f}%\".format(\n len(off_uniq.loc[(off_uniq[\"count\"]==1) | (off_uniq[\"count\"]==2) | (off_uniq[\"count\"]==3)])/off_types*100))\nprint(\"Sentiment types w. freq 1, 2, or 3 divided by total types: {:.2f}%\".format(\n len(sent_uniq.loc[(sent_uniq[\"count\"]==1) | (sent_uniq[\"count\"]==2) | (sent_uniq[\"count\"]==3)])/sent_types*100))\n\nprint()\n\nprint(\"Offensive types w. freq. just 1 divided by total types: {:.2f}%\".format(len(off_uniq.loc[off_uniq[\"count\"]==1])/off_types*100))\nprint(\"Sentiment types w. freq. 
just 1 divided by total types: {:.2f}%\".format(len(sent_uniq.loc[sent_uniq[\"count\"]==1])/sent_types*100))", "_____no_output_____" ] ], [ [ "Amount of types showing up x times in the offensive dataset (e.g 14000 tokens only showing up once, and so on)<br>\nThe 500 types with the highest count are skipped, to make x-axis on the plot visible", "_____no_output_____" ] ], [ [ "# Amount of types showing up x times in the offensive dataset (e.g 14000 tokens only showing up once, and so on)\n# Skipping top 500 types, for visibility in plot (They're not impactful on the plot otherwise, the most frequent of \n# these 500 entries is 13)\n#Plotting visual and double y-axes found at https://stackoverflow.com/questions/33179122/seaborn-countplot-with-frequencies\n\nfig, ax = plt.subplots(figsize=(16,9))\nsns.countplot(x=\"count\", data=off_uniq[500:]) #Sns counts the type frequency of each word, and plots it\nsns.set_style(\"darkgrid\")\nplt.title(\"Frequency of tokens showing up x times in the Offensive dataset\")\nplt.xlabel(\"token occuring x time(s)\")\nplt.ylabel(\"sum of types occurring x time(s)\")\nax.tick_params('x',rotation=45, labelsize = 10) #xlabels are rotated 45 degrees and made bigger\n\n# Twin axes, creating and visualising\nax2 = ax.twinx()\nax2.set_ylabel(\"Frequency (percent)\")\n\n# Moving the ticks and labels of y-axes to opposite sides for more visually pleasing plot\nax2.yaxis.tick_left()\nax.yaxis.tick_right()\nax.yaxis.set_label_position('right')\nax2.yaxis.set_label_position('left')\n\n# Setting appropriate limits for the y-axes, removing duplicate grid\nax.set_ylim(0,len(off_uniq))\nax2.set_ylim(0,100)\nax2.grid(None)", "_____no_output_____" ] ], [ [ "### Noticable difference in the two datasets", "_____no_output_____" ], [ "<ul>\n <li>Big difference in size, sentiment dataset over twice the amount of tokens (=library of sentiment twice the size of library of offensive language)</li>\n <li>otherwise quite similar, in both sets the percentage of the vocabulary made up of tokens w. frq. 
1 is ~ 50%</li>\n <ul><li>Both datasets also seem to follow Zipf's law (see below graphs)</li>\n </ul>\n</ul>\n ", "_____no_output_____" ], [ "### Corpus Statistics Consistent with Zipf's law?", "_____no_output_____" ], [ "Log-log plot of the rank of token frequency against against the frequency in the offensive dataset", "_____no_output_____" ] ], [ [ "off_uniq[\"log_frq\"] = np.log(off_uniq[\"count\"])\noff_uniq[\"log_rank\"] = np.log(off_uniq[\"count\"].rank(ascending=False))\nsns.relplot(x=\"log_rank\",y=\"log_frq\", data=off_uniq, color=\"red\", edgecolor=(0.2,0,0,0.01)).set(title=\n \"log-log plot of frequency against rank of frequency in Offensive dataset\")\nplt.show()", "_____no_output_____" ], [ "sent_uniq[\"log_frq\"] = np.log(sent_uniq[\"count\"])\nsent_uniq[\"log_rank\"] = np.log(sent_uniq[\"count\"].rank(ascending=False))\nsns.relplot(x=\"log_rank\",y=\"log_frq\", data=sent_uniq, color=\"r\", edgecolor=(0.2,0,0,0.01)).set(title=\n \"log-log plot of frequency against rank of frequency in Sentiment dataset\")\nplt.show()", "_____no_output_____" ] ], [ [ "<b>As seen in the above plots, both datasets seem consistent with Zipf's law</b>", "_____no_output_____" ], [ "## Task 3: Manual Annotation & Inter-user Agreement", "_____no_output_____" ], [ "### Generating 100 random tweets for manual annotation", "_____no_output_____" ] ], [ [ "random.seed(42) #Seeded for consistency\nrandom_tweets = random.sample(list(enumerate(sentiment_raw)),100)\nrtweet_index = [i[0] for i in random_tweets]\n\n# #File-generation is commented out, as the randomness is seeded, thus Making the same \"Random\" file every time\n# with open(\"../data/interim/random_tweets.txt\",\"w\", encoding=\"utf-8\") as f:\n# [f.write(str(i[1])+\"\\n\") for i in random_tweets]", "_____no_output_____" ], [ "sent_label = pd.read_csv('../data/raw/sentiment/train_labels.txt',header=None)\nsent_raw = pd.read_csv(\"../data/raw/sentiment/train_text.txt\",header=None, sep=\"\\n\",quoting=3)", "_____no_output_____" ] ], [ [ "### Putting the manually annotated labels into a single dataframe", "_____no_output_____" ] ], [ [ "man_labels = pd.read_csv(\"../data/interim/manual_annotation/all_combined.csv\", delimiter=\",\") #All manual labels\nman_labels = man_labels.iloc[:,:-1] #Not using the _A0_value column from the file\ndisplay(man_labels) #The manually annotated labels, put into a dataframe\n\nsame_label = man_labels.eq(man_labels.iloc[:,0], axis=0).all(1) #Finding where all annotators agree on a label\nprint(\"# of equal labels:\",np.sum(same_label))\nobs_agreement = np.sum(same_label)/len(man_labels.iloc[:,0])\nprint(\"observed agreement:\",obs_agreement)", "_____no_output_____" ] ], [ [ "### Calculating Chance-corrected agreement", "_____no_output_____" ] ], [ [ "#Formatting manual label answers to calculate Scott's pi, Fleiss' kappa with nltk.agreement\ntweets_len = len(man_labels.iloc[:,0])\nformatted_answers = [] #Formatting of only the manually annotated data\nfor column in range(len(man_labels.columns)):\n for tweet_num in range(tweets_len):\n formatted_answers.append([column+1,tweet_num,man_labels.iloc[tweet_num,column]])\n\n#adding the \"True\" labels to all_formatted:\nall_formatted = formatted_answers.copy() #Formatting of BOTH the manually annotated data AND the \"True\" Annotation of the data\ntrue_label_list = list(sent_label.iloc[rtweet_index][0])\nfor i in range(tweets_len):\n all_formatted.append([len(man_labels.columns)+1,i,true_label_list[i]])\n \nprint(\"lenght of 
formatted_answers:\",len(formatted_answers))\nprint(\"length of all_formatted:\",len(all_formatted)) #should be 100 characters longer than the above", "_____no_output_____" ] ], [ [ "#### Chance-corrected for just the manual labels", "_____no_output_____" ], [ "The inter-annotator agreement values are almost 0.4, which can be decided as fair or moderate", "_____no_output_____" ] ], [ [ "\"\"\"\nNote that in the nltk.agreement documentation: https://www.nltk.org/_modules/nltk/metrics/agreement.html\nthe returned value is the chance-corrected agreement, not just A_e.\n\"\"\"\nratingtask = agreement.AnnotationTask(data=formatted_answers)\nprint(\"Scott's pi: {:.4f}\\nCohen's kappa: {:.4f}\\nFleiss' kappa: {:.4f}\".format(ratingtask.pi(),ratingtask.kappa(),ratingtask.multi_kappa()))\n", "_____no_output_____" ] ], [ [ "#### Chance-corrected for both the manual labels AND the \"true\" labels", "_____no_output_____" ] ], [ [ "all_label_rating = agreement.AnnotationTask(data=all_formatted)\nprint(\"Scott's pi: {:.4f}\\nCohen's kappa: {:.4f}\\nFleiss' kappa: {:.4f}\".format(all_label_rating.pi(),all_label_rating.kappa(),all_label_rating.multi_kappa()))\n", "_____no_output_____" ] ], [ [ "### Showing the tweets with agreeing/disagreeing manual labels for later discussion, saved to file", "_____no_output_____" ] ], [ [ "same_label[same_label==False]\nmanual_tweets = sent_raw.loc[rtweet_index]\n\n#tweets labels disagree on\nannotation_disagree = manual_tweets.iloc[np.where(same_label==False)]\ndisplay(annotation_disagree[:10])#Showing the 10 first tweets with disagreeing manual annotation\n\n#tweets labels agree on\nannotation_agree = manual_tweets.iloc[np.where(same_label==True)]\n\n# File creation commented out\n# annotation_disagree.to_csv(\"../data/interim/man_anno_disagree.txt\", header=None, index=False)\n# annotation_agree.to_csv(\"../data/interim/man_anno_agree.txt\", header=None, index=False)\n\n#man_labels[same_label==True][\"anno_1\"][:10]\n", "_____no_output_____" ] ], [ [ "# Calculating the Observed Agreement result of the manual annotation", "_____no_output_____" ], [ "During the manual annotation we found in case:\n- Only 34% of the cases we totally agreed 34% \n- 43% of the cases one of us had different opinion\n- 23% of the cases (this means 0.5 Observed Agreement result) we couldn't decide obviously the sentiment of the sentence", "_____no_output_____" ] ], [ [ "# hide errors\npd.options.mode.chained_assignment = None\n\nman_labels.reset_index()\nman_labels[\"AO\"] = 1.1\nfor i in range(len(man_labels)):\n l = []\n rowData = man_labels.loc[ i , : ]\n l.append(rowData[0])\n l.append(rowData[1])\n l.append(rowData[2])\n l.append(rowData[3])\n occurence_count = Counter(l)\n same_counter = occurence_count.most_common(1)[0][1]\n res=occurence_count.most_common(1)[0][0]\n ao = same_counter/4\n #print(ao)\n man_labels[\"AO\"][i] = ao\n #print(\"Variance of sample set is % s\" %(statistics.variance(l)),\"AO number is : \", ao, \"winner is :\", res)\n None\n\nplt.hist(man_labels[\"AO\"], label='linear', bins=3)\nplt.title('Observed Agreement results of manual annotation')", "_____no_output_____" ] ], [ [ "# Cohen's kappa score heatmap", "_____no_output_____" ] ], [ [ "man_and_true = man_labels.copy()\nman_and_true = man_and_true.iloc[:,:4]\nman_and_true[\"true\"] = true_label_list\n\nannotator_np = np.empty((5,5))\n\nfor i in range(len(man_and_true.columns)):\n for j in range(i, len(man_and_true.columns)):\n l1 = list(man_and_true.iloc[:,i])\n l2 = list(man_and_true.iloc[:,j])\n score = 
cohen_kappa_score(l1,l2)\n annotator_np[i,j] = score\n annotator_np[j,i] = score\n# if j==4:\n# print(\"Anno_{}, True:\\n\".format(i+1),score,\"\\n\")\n# else:\n# print(\"Anno_{}, Anno_{}:\\n\".format(i+1,j+1),score,\"\\n\")\n\n#print(annotator_np)\n", "_____no_output_____" ], [ "fig, ax = plt.subplots()\n\nsns.heatmap(data=annotator_np, annot=True,\n xticklabels= [\"Annotator 1\",\"Annotator 2\",'Annotator 3',\"Annotator 4\",\"'True' labels\"],\n yticklabels = [\"Annotator 1\",\"Annotator 2\",'Annotator 3',\"Annotator 4\",\"'True' labels\"])\nax.tick_params('x',rotation=45)", "_____no_output_____" ], [ "#Running inter-annotator agreement without Annotator 3:\njust_three_annos = list(filter(lambda x: x[0] != 3, all_formatted))\njust_three_annos\nthree_anno_rating = agreement.AnnotationTask(data=just_three_annos)\nprint(\"Scott's pi: {:.4f}\\nCohen's kappa: {:.4f}\\nFleiss' kappa: {:.4f}\".format(three_anno_rating.pi(),three_anno_rating.kappa(),three_anno_rating.multi_kappa()))\n", "_____no_output_____" ] ], [ [ "### First model on Offensive Language", "_____no_output_____" ] ], [ [ "count_vec = CountVectorizer(tokenizer = lambda x: x, lowercase = False)", "_____no_output_____" ], [ "#Loading in offensive x-train, x-test, y-train, y-test\n\n# x-train\nox_train = import_and_tokenize(\"offensive\", \"train_text.txt\")\n\n# x-test\nox_test = import_(\"offensive\", \"train_labels.txt\")\n\n# y-train\noy_train = import_and_tokenize(\"offensive\", \"test_text.txt\")\n\n# y-test\noy_test = import_(\"offensive\", \"test_labels.txt\")\n\n", "_____no_output_____" ], [ "#Running Count_vectorizor (Pipeline for the coming commands)\ncount_ox_train = count_vec.fit_transform(ox_train)\n#count_ox_train\n#Running tf_idf on off_train to \"balance\" tweets\ntf_idf_transformer = TfidfTransformer(use_idf = False)\ntf_off_train = tf_idf_transformer.fit_transform(count_ox_train)\n#tf_off_train", "_____no_output_____" ], [ "classifier = SGDClassifier(loss=\"log\")", "_____no_output_____" ], [ "#Training model\noff_clf = classifier.fit(tf_off_train, ox_test)\n\n#Preparing validation data\noff_pred = count_vec.transform(oy_train)\ntf_off_pred = tf_idf_transformer.transform(off_pred)\n\n#Fitting validation data over model\noff_predicted = off_clf.predict(tf_off_pred)\n\n# % of answers gotten right\nsum((off_predicted == oy_test)) / len(oy_test)", "_____no_output_____" ], [ "print(metrics.classification_report(off_predicted, oy_test, target_names=[\"Not offensive\",\"Offensive\"]))", "_____no_output_____" ], [ "print(metrics.confusion_matrix(oy_test, off_predicted))", "_____no_output_____" ] ], [ [ "As seen above, The recall is very close to 1 for non-offensive tweets and very close to 0 for offensive tweets.<br>\nThis means that the model predicts that most of the tweets are not offensive, and the only reason for our relatively high accuracy is that the training data is unbalanced.", "_____no_output_____" ] ], [ [ "#Checking offensive validation data compared to offensive predicted data\nnp_list_off = np.array(off_predicted)\nnp_off_validation = np.array(oy_test)\nnp_off_train = np.array(ox_test)\n\nprint(\"Number of predicted non-offensive tweets: {}\\nNumber of predicted offensive tweets: {}\\n\".format(\nlen(np_list_off[np.where(np_list_off == 0)]),\nlen(np_list_off[np.where(np_list_off == 1)])))\n\nprint(\"Number of actual non-offensive tweets: {}\\nNumber of actual offensive tweets: {}\\n\".format(\nlen(np_off_validation[np.where(np_off_validation == 0)]),\nlen(np_off_validation[np.where(np_off_validation == 
1)])))\n\nprint(\"Number of training non-offensive tweets: {}\\nNumber of training offensive tweets: {}\".format(\nlen(np_off_train[np.where(np_off_train == 0)]),\nlen(np_off_train[np.where(np_off_train == 1)])))\n", "_____no_output_____" ] ], [ [ "## Other Models for the offensive dataset", "_____no_output_____" ] ], [ [ "#Pipeline for sgdclassifier\nsgd_clf = Pipeline([\n ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False, ngram_range=(1,3),\n max_df = 0.7, min_df = 4, max_features = 1000)),\n ('tfidf', TfidfTransformer(use_idf=False)),\n ('clf', SGDClassifier(loss=\"log\")),\n])\n\nsgd_clf.fit(ox_train, ox_test)\nsgd_predicted2 = sgd_clf.predict(oy_train)\n#sgd_predicted2\n\nreport_clf_stats(sgd_predicted2, oy_test, \"offensive\")", "_____no_output_____" ] ], [ [ "<i> Highest Achieved accuracy score for SGDClassifier: 78.6% </i>", "_____no_output_____" ] ], [ [ "# MultinomialNB\nmultinb_clf = Pipeline([\n ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),\n ('tfidf', TfidfTransformer(use_idf=False)),\n ('clf', MultinomialNB()),\n])\n\nmultinb_clf.fit(ox_train, ox_test)\nmultinb_predict = multinb_clf.predict(oy_train)\n\nreport_clf_stats(multinb_predict, oy_test, \"offensive\")", "_____no_output_____" ], [ "#ComplementNB\ncomplement_clf = Pipeline([\n ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),\n ('tfidf', TfidfTransformer(use_idf=False)),\n ('clf', ComplementNB()),\n])\n\ncomplement_clf.fit(ox_train, ox_test)\ncomplement_predict = complement_clf.predict(oy_train)\n\nreport_clf_stats(complement_predict, oy_test, \"offensive\")", "_____no_output_____" ] ], [ [ "It takes time to run SVC classifier", "_____no_output_____" ] ], [ [ "# SVC\nSVC_clf = Pipeline([\n ('vec', CountVectorizer(tokenizer = lambda x: x, lowercase = False)),\n ('tfidf', TfidfTransformer(use_idf=False)),\n ('clf', SVC(kernel='poly', degree = 3)),\n])\n\nSVC_clf.fit(ox_train, ox_test)\nSVC_predict = SVC_clf.predict(oy_train)\n\nreport_clf_stats(SVC_predict, oy_test, \"offensive\")\n", "_____no_output_____" ] ], [ [ "## Classifiers for the sentiment (multiclass) task", "_____no_output_____" ] ], [ [ "#Loading in Sentiment x-train, x-test, y-train, y-test\n\n# x-train\nsx_train = import_and_tokenize(\"sentiment\", \"train_text.txt\")\n\n# x-test\nsx_test = import_(\"sentiment\", \"train_labels.txt\")\n\n# y-train\nsy_train = import_and_tokenize(\"sentiment\", \"test_text.txt\")\n\n# y-test\nsy_test = import_(\"sentiment\", \"test_labels.txt\")\n", "_____no_output_____" ], [ "# Bayes on Sentiment analysis\n\nmultinb_clf.fit(sx_train, sx_test)\nsent_multinb = multinb_clf.predict(sy_train)\n\nreport_clf_stats(sent_multinb, sy_test, \"sentiment\")", "_____no_output_____" ], [ "# Complement on Sentiment\n\ncomplement_clf.fit(sx_train, sx_test)\nsent_complement = complement_clf.predict(sy_train)\n\nreport_clf_stats(sent_complement, sy_test, \"sentiment\")", "_____no_output_____" ], [ "# SGD on Sentiment\n\nsgd_clf.fit(sx_train, sx_test)\nsent_sgd = sgd_clf.predict(sy_train)\n\nreport_clf_stats(sent_sgd, sy_test, \"sentiment\")", "_____no_output_____" ] ], [ [ "### Importing validation x- and y- from both datasets", "_____no_output_____" ] ], [ [ "# Offensive validation:\n\n# y-train\noval_train = import_and_tokenize(\"offensive\",\"val_text.txt\")\n\n# y-test\noval_true = import_(\"offensive\", \"val_labels.txt\")\n\n# Sentiment validation\n\n# y-train\nsval_train = import_and_tokenize(\"sentiment\",\"val_text.txt\")\n\n# y-test\nsval_true = 
import_(\"sentiment\",\"val_labels.txt\")\n", "_____no_output_____" ] ], [ [ "### Offensive validation prediction\n<i>Using SGDClassifier, as that gave the highest accuracy and F1 score </i>\n", "_____no_output_____" ] ], [ [ "oval_pred = count_vec.transform(oval_train)\ntf_oval_pred = tf_idf_transformer.transform(oval_pred) #transforming data, as we didn't have a pipeline for this exact model\n\noval_sgd = off_clf.predict(tf_oval_pred) # model to use\n\nreport_clf_stats(oval_sgd, oval_true,\"offensive\")", "_____no_output_____" ] ], [ [ "### Sentiment validation prediction\n<i>Using Complement, as that gave the highest accuracy and F1 score </i>\n", "_____no_output_____" ] ], [ [ "\nsval_complement = complement_clf.predict(sval_train)\n\nreport_clf_stats(sval_complement, sval_true, \"sentiment\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd460c7a27f3c61983c22b3ea7929588013dc5f
20,913
ipynb
Jupyter Notebook
Week 1/SLU02 - Data Structures/Extra notebook.ipynb
duartevfreire/ds-prep-course
0722d2d4f58cffe5e77f6f4ad3a90b7e84d6d53a
[ "MIT" ]
26
2020-03-23T19:34:02.000Z
2021-03-03T23:02:38.000Z
Week 1/SLU02 - Data Structures/Extra notebook.ipynb
duartevfreire/ds-prep-course
0722d2d4f58cffe5e77f6f4ad3a90b7e84d6d53a
[ "MIT" ]
30
2020-03-15T20:37:50.000Z
2022-03-12T00:27:09.000Z
Week 1/SLU02 - Data Structures/Extra notebook.ipynb
duartevfreire/ds-prep-course
0722d2d4f58cffe5e77f6f4ad3a90b7e84d6d53a
[ "MIT" ]
36
2020-03-21T12:44:08.000Z
2021-04-02T21:56:32.000Z
20.01244
340
0.487018
[ [ [ "# SLU02 - Data Structures", "_____no_output_____" ], [ "This notebook covers methods of Data Structures (Lists and Dictionaries) that were not covered on learning notebook. ", "_____no_output_____" ], [ "### 1 Other Methods For Lists <a name=\"1\"></a>", "_____no_output_____" ], [ "#### 1.1 `clear()`", "_____no_output_____" ], [ "Removes all the elements from a given list.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"gorgonzola\", \"calzone\"]\npizza", "_____no_output_____" ], [ "pizza.clear()\npizza", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 1.2 `copy()`", "_____no_output_____" ], [ "Let's consider we have a __`pizza`__ list and we want to create a new list called __`pasta`__, starting with the same values as `pizza`. Let's see what happens if we do `pasta = pizza`.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"gorgonzola\", \"calzone\"]\npizza", "_____no_output_____" ], [ "pasta = pizza\npasta", "_____no_output_____" ] ], [ [ "Now, if we remove `\"calzone\"` from the `pasta` list, let's see what happens to both lists.", "_____no_output_____" ] ], [ [ "del pasta[-1]\npasta", "_____no_output_____" ] ], [ [ "From the cell above we can see that we removed `\"calzone\"` from the list `pasta`. But what happen with `pizza` list?", "_____no_output_____" ] ], [ [ "pizza", "_____no_output_____" ] ], [ [ "It was also removed from the list `pizza`. Let's check if the IDs of both objects are the same with function `id()`.", "_____no_output_____" ] ], [ [ "id(pizza) == id(pasta)", "_____no_output_____" ] ], [ [ "The IDs of `pizza` and `pasta` are the same. What happens here is when we do `pizza = pasta` we are just creating an alias to the same object pizza. __We are not creating a new list__. It is like a mirror, every thing that we do with `pizza`, also happens with `pasta`, and vice-versa.\n\nAs it is explained on this [link](https://docs.python.org/3/library/copy.html): 'Assignment statements in Python do not copy objects, they create bindings between a target and an object. For collections that are mutable or contain mutable items, a copy is sometimes needed so one can change one copy without changing the other.' \n\nAlso, a method that belongs to an object act on it. As an example, when you use __`append()`__, we can do `this_list.append(new_element)`, without needing to assign the value `this_list = this_list.append(new_element)`.", "_____no_output_____" ], [ "We don't want `pizza` and `pasta` to be the same thing. We need them distinct. 
In order to do that, we can make use of __`copy()`__ method.", "_____no_output_____" ], [ "Let's create the list `pizza` again.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"gorgonzola\", \"calzone\"]\npizza", "_____no_output_____" ], [ "pasta = pizza.copy()\npasta", "_____no_output_____" ], [ "del pasta[-1]\npasta", "_____no_output_____" ], [ "pizza", "_____no_output_____" ] ], [ [ "We can see, from the cell above, that when we changed __`pasta`__, __pizza__ remained the same.", "_____no_output_____" ] ], [ [ "id(pizza) == id(pasta)", "_____no_output_____" ] ], [ [ "And as expected, `pizza` and `pasta` objects have different IDs.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 1.3 `extend()`", "_____no_output_____" ], [ "This method can be used when we want to append more than one element to the end of the list.", "_____no_output_____" ] ], [ [ "pizza = [\"margherita\", \"napoletana\", \"carbonara\", \"romana\", \"gorgonzola\", \"calzone\"]\npizza", "_____no_output_____" ] ], [ [ "We have our `pizza` list, but we want to add to it 3 more pizzas.", "_____no_output_____" ] ], [ [ "other_pizzas = [\"quattro stagioni\", \"Frutti di Mare\", \"quattro formaggi\"]", "_____no_output_____" ] ], [ [ "In order to add pizzas in __other_pizzas__ to __pizza__ list, we can use __extend__.", "_____no_output_____" ] ], [ [ "pizza.extend(other_pizzas)", "_____no_output_____" ], [ "pizza", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 1.4 `insert()`", "_____no_output_____" ], [ "This method is useful when we want to add an element in a specific index in a list.", "_____no_output_____" ] ], [ [ "pizza", "_____no_output_____" ] ], [ [ "Let's add the element `\"Frutti di Mare\"` to the second position of the array. 
This means positive index 1.", "_____no_output_____" ] ], [ [ "pizza.insert(1, 'Frutti di Mare')\npizza", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 1.5 `pop()`", "_____no_output_____" ], [ "Now, if we want to do the opposite and remove an element in a specific position, we can use the method `pop()`.", "_____no_output_____" ] ], [ [ "pizza", "_____no_output_____" ] ], [ [ "Let's remove the element we just added, `\"Frutti di Mare\"` on index 1.", "_____no_output_____" ] ], [ [ "pizza.pop(1)\npizza", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 1.6 `reverse()`", "_____no_output_____" ], [ "If we need to get the list elements backwards, we can use `reverse()`.", "_____no_output_____" ] ], [ [ "pizza", "_____no_output_____" ], [ "pizza.reverse()\npizza", "_____no_output_____" ] ], [ [ "### 2 Other Methods For Dictionaries <a name=\"2.5\"></a>", "_____no_output_____" ], [ "#### 2.1 `copy()`", "_____no_output_____" ], [ "As we did on <a href=\"#1\">section 1</a> for lists, we can also use `copy()` in order to copy the values of a dictionary to a new dictionary.", "_____no_output_____" ] ], [ [ "toilet_paper = {'type': 'others', 'price_per_unit': 50, 'quantity_purchased': 1000}\ntoilet_paper", "_____no_output_____" ], [ "other_toilet_paper = toilet_paper.copy()\nother_toilet_paper", "_____no_output_____" ], [ "id(toilet_paper) != id(other_toilet_paper)", "_____no_output_____" ] ], [ [ "As we can see, `toilet_paper` and `other_toilet_paper` have the same elements but have different IDs, so they are different objects.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "#### 2.2 `clear()`", "_____no_output_____" ], [ "This method deletes all the elements in a dictionary.", "_____no_output_____" ] ], [ [ "toilet_paper", "_____no_output_____" ], [ "other_toilet_paper.clear()\nlen(other_toilet_paper) #size of the dictionary", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.3 `popitem()`", "_____no_output_____" ], [ "This method removes a random key-value pair from the dictionary and returns it as a tuple.", "_____no_output_____" ] ], [ [ "toilet_paper.popitem()", "_____no_output_____" ], [ "toilet_paper", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ], [ "#### 2.4 `setdefault()`", "_____no_output_____" ], [ "This method receives a key as input. If this key exists in the dictionary, then it returns its value. If the key does not exist, the method adds the key-value pair to the dictionary with `value = default`. If the `default=` is not defined the new value is `None`.", "_____no_output_____" ] ], [ [ "toilet_paper.setdefault(\"price_per_unit\")", "_____no_output_____" ], [ "toilet_paper", "_____no_output_____" ], [ "toilet_paper.setdefault(\"size\")", "_____no_output_____" ], [ "toilet_paper", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
ecd4778cd7e7430d69154942f661e68dc8cc3ebc
522,123
ipynb
Jupyter Notebook
homeworks/D015/Day_015_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
6
2019-05-19T05:53:07.000Z
2020-04-18T05:02:13.000Z
homeworks/D015/Day_015_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
null
null
null
homeworks/D015/Day_015_HW.ipynb
peteryuX/100Day-ML-Marathon
cd61add6fa91ef117429eb1300cbdd96682d2d43
[ "MIT" ]
1
2019-11-20T14:33:12.000Z
2019-11-20T14:33:12.000Z
2,522.333333
281,744
0.962264
[ [ [ "## 作業\n1. 請用 numpy 建立一個 10 x 10, 數值分布自 -1.0 ~ 1.0 的矩陣並繪製 Heatmap\n2. 請用 numpy 建立一個 1000 x 3, 數值分布為 -1.0 ~ 1.0 的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)\n3. 請用 numpy 建立一個 1000 x 3, 數值分布為常態分佈的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)", "_____no_output_____" ], [ "# [作業目標]\n- 試著設定隨機資料, 並依照範例練習基礎與進階的 Heatmap", "_____no_output_____" ], [ "# [作業重點]\n- 如題1.條件隨機矩陣, 並仿造基礎 Heatmap 範例作圖 \n(In[2], OUT[2]) (Hint : numpy.random.random - 均勻分布, 隨機小數)\n- 如題2.3.條件隨機數值列, 並仿造進階 Heatmap 範例作圖 \n(In[3], OUT[3], In[4], OUT[4]) (Hint : numpy.random.randn - 常態分布)", "_____no_output_____" ] ], [ [ "# 載入需要的套件\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns # 另一個繪圖-樣式套件\nplt.style.use('ggplot')\n\n# 忽略警告訊息\n%matplotlib inline\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "\"\"\"\nYour Code Here\n\"\"\"\nmatrix = np.random.rand(10, 10) * 2 - 1\n\nplt.figure(figsize=(10,10))\n\"\"\"\nYour Code Here\n\"\"\"\nheatmap = sns.heatmap(matrix, cmap = plt.cm.RdYlBu_r, vmin = -1.0, annot = True, vmax = 1.0)\n\nplt.show()", "_____no_output_____" ], [ "nrow = 1000\nncol = 3\n\"\"\"\nYour Code Here\n\"\"\"\nmatrix = np.random.rand(nrow, ncol) * 2 - 1\n\n# 隨機給予 0, 1, 2 三種標籤\nindice = np.random.choice([0,1,2], size=nrow)\nplot_data = pd.DataFrame(matrix, indice)\n\n# 繪製 seborn 進階 Heatmap\ngrid = sns.PairGrid(data = plot_data, size = 3, diag_sharey=False)\n\"\"\"\nYour Code Here\nPlease replace \"...\" to correct plot function\n\"\"\"\ngrid.map_upper(plt.scatter, alpha = 0.2)\ngrid.map_diag(sns.kdeplot)\ngrid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r)\n\nplt.show()", "_____no_output_____" ], [ "nrow = 1000\nncol = 3\n\"\"\"\nYour Code Here\n\"\"\"\nmatrix = np.random.randn(nrow, ncol) * 2 - 1\n# 隨機給予 0, 1, 2 三種標籤\nindice = np.random.choice([0,1,2], size=nrow)\nplot_data = pd.DataFrame(matrix, indice)\n\n# 繪製 seborn 進階 Heatmap\ngrid = sns.PairGrid(data = plot_data, size = 3, diag_sharey=False)\n\"\"\"\nYour Code Here\nPlease replace \"...\" to correct plot function\n\"\"\"\ngrid.map_upper(plt.scatter, alpha = 0.2)\ngrid.map_diag(sns.kdeplot)\ngrid.map_lower(sns.kdeplot, cmap = plt.cm.OrRd_r)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
ecd499ec78679709a842a679b4c12585f7db7a3f
15,304
ipynb
Jupyter Notebook
notebooks/4.0-ry-building-machine-learning-api.ipynb
yrahul3910/titanic
b3a4fc0409b85c5aa254997cbe8b5fceac11818b
[ "MIT" ]
null
null
null
notebooks/4.0-ry-building-machine-learning-api.ipynb
yrahul3910/titanic
b3a4fc0409b85c5aa254997cbe8b5fceac11818b
[ "MIT" ]
null
null
null
notebooks/4.0-ry-building-machine-learning-api.ipynb
yrahul3910/titanic
b3a4fc0409b85c5aa254997cbe8b5fceac11818b
[ "MIT" ]
null
null
null
26.943662
94
0.360625
[ [ [ "## Hello World using Flask", "_____no_output_____" ] ], [ [ "import os\nscript_file = os.path.join(os.path.pardir, \"src\", \"models\", \"hello_world_api.py\")", "_____no_output_____" ], [ "%%writefile $script_file\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\[email protected](\"/api\", methods=[\"POST\"])\ndef say_hello():\n data = request.get_json(force=True)\n name = data[\"name\"]\n return \"Hello, {0}\".format(name)\n\nif __name__ == \"__main__\":\n app.run(port=9001, debug=True)", "Writing ..\\src\\models\\hello_world_api.py\n" ], [ "import json\nimport requests", "_____no_output_____" ], [ "url = \"http://localhost:9001/api\"\ndata = json.dumps({\"name\": \"Rahul\"})\nr = requests.post(url, data)", "_____no_output_____" ], [ "print(r.text)", "Hello, Rahul\n" ] ], [ [ "## ML API", "_____no_output_____" ] ], [ [ "ml_api_file = os.path.join(os.path.pardir, \"src\", \"models\", \"ml_api.py\")", "_____no_output_____" ], [ "%%writefile $ml_api_file\nfrom flask import Flask, request\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport json\nimport os\n\napp = Flask(__name__)\n\n# Load model and scaler\nmodel_path = os.path.join(os.path.pardir, os.path.pardir, \"models\")\nmodel_file = os.path.join(model_path, \"lr_model.pkl\")\nscaler_file = os.path.join(model_path, \"lr_scaler.pkl\")\n\nscaler = pickle.load(open(scaler_file, \"rb\"))\nmodel = pickle.load(open(model_file, \"rb\"))\n\ncolumns = [\"Age\", \"Fare\", \"FamilySize\",\n \"IsMother\", \"IsMale\", \"Deck_A\", \"Deck_B\", \"Deck_C\", \"Deck_D\", \n \"Deck_E\", \"Deck_F\", \"Deck_G\", \"Deck_Z\", \"Pclass_1\", \"Pclass_2\", \n \"Pclass_3\", \"Title_Lady\", \"Title_Master\", \"Title_Miss\", \"Title_Mr\", \n \"Title_Mrs\", \"Title_Officer\", \"Title_Sir\", \"Fare_Bin_Very_Low\", \n \"Fare_Bin_Low\", \"Fare_Bin_High\", \"Fare_Bin_Very_High\", \"Embarked_C\", \n \"Embarked_Q\", \"Embarked_S\", \"AgeState_Adult\", \"AgeState_Child\"]\n\[email protected](\"/api\", methods=[\"POST\"])\ndef make_prediction():\n # Read JSON from the request, convert to JSON string\n data = json.dumps(request.get_json(force=True))\n \n # Create Pandas DataFrame\n df = pd.read_json(data)\n \n # Extract PassengerId\n pids = df[\"PassengerId\"].ravel()\n \n X = df[columns].as_matrix().astype(\"float\")\n X_scaled = scaler.transform(X)\n \n predictions = model.predict(X_scaled)\n df_response = pd.DataFrame({\"PassengerId\": pids, \"Predicted\": predictions})\n \n return df_response.to_json()\n\nif __name__ == \"__main__\":\n app.run(port=9001, debug=True)", "Overwriting ..\\src\\models\\ml_api.py\n" ] ], [ [ "### Invoking ML API", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd", "_____no_output_____" ], [ "processed_data_path = os.path.join(os.path.pardir, \"data\", \"processed\")\ntrain_file_path = os.path.join(processed_data_path, \"train.csv\")\ntrain_df = pd.read_csv(train_file_path)", "_____no_output_____" ], [ "survived_passengers = train_df[train_df.Survived == 1][:5]", "_____no_output_____" ], [ "survived_passengers", "_____no_output_____" ], [ "import requests\n\ndef make_api_req(data):\n url = \"http://localhost:9001/api\"\n r = requests.post(url, data)\n return r.json()", "_____no_output_____" ], [ "make_api_req(survived_passengers.to_json())", "_____no_output_____" ], [ "result = make_api_req(train_df.to_json())\ndf_result = pd.read_json(json.dumps(result))\ndf_result.head()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
ecd4bef5cd3dc02547cb7cd6aacdf4389e9b6799
883
ipynb
Jupyter Notebook
dockerfiles/pyforest_sandbox/test.ipynb
triper1022/pyforest
6023591e39b2054d8d872aa966f6378d97b4d215
[ "MIT" ]
1,002
2019-08-13T15:00:39.000Z
2022-03-29T19:14:41.000Z
dockerfiles/pyforest_sandbox/test.ipynb
triper1022/pyforest
6023591e39b2054d8d872aa966f6378d97b4d215
[ "MIT" ]
40
2019-08-13T19:17:49.000Z
2022-02-14T08:46:09.000Z
dockerfiles/pyforest_sandbox/test.ipynb
triper1022/pyforest
6023591e39b2054d8d872aa966f6378d97b4d215
[ "MIT" ]
202
2019-08-13T19:37:25.000Z
2022-03-21T20:05:27.000Z
19.195652
69
0.553794
[ [ [ "import pyforest", "_____no_output_____" ], [ "df = pd.DataFrame(dict(a=np.arange(10)))\nsns.distplot(df.a)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
ecd4c66c5191a163061cfc2cfc31cec9fab33e4a
30,957
ipynb
Jupyter Notebook
notebooks/timeouts.ipynb
masroorhasan/seldon-core
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
[ "Apache-2.0" ]
null
null
null
notebooks/timeouts.ipynb
masroorhasan/seldon-core
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
[ "Apache-2.0" ]
null
null
null
notebooks/timeouts.ipynb
masroorhasan/seldon-core
674e00cd4b740ee21ac3de00ab145ebd6ebf8b9e
[ "Apache-2.0" ]
null
null
null
34.550223
187
0.502891
[ [ [ "# Testing Custom Timeouts\n", "_____no_output_____" ], [ "## Prerequistes\nYou will need\n - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core)\n - [Helm](https://github.com/kubernetes/helm)\n - [Minikube](https://github.com/kubernetes/minikube) version v0.24.0 or greater\n - [seldon-core Python package](https://pypi.org/project/seldon-core/) (```pip install seldon-core```)\n", "_____no_output_____" ], [ "# Create Cluster\n\nStart minikube and ensure custom resource validation is activated and there is 5G of memory. \n\nYour start command with the kvm driver would then look like:\n```\nminikube start --vm-driver kvm2 --memory 4096 --feature-gates=CustomResourceValidation=true --extra-config=apiserver.Authorization.Mode=RBAC\n```", "_____no_output_____" ], [ "# Setup", "_____no_output_____" ] ], [ [ "!kubectl create namespace seldon", "namespace/seldon created\r\n" ], [ "!kubectl config set-context $(kubectl config current-context) --namespace=seldon", "Context \"minikube\" modified.\r\n" ], [ "!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default", "clusterrolebinding.rbac.authorization.k8s.io/kube-system-cluster-admin created\r\n" ] ], [ [ "# Install Helm", "_____no_output_____" ] ], [ [ "!kubectl -n kube-system create sa tiller\n!kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller\n!helm init --service-account tiller", "serviceaccount/tiller created\nclusterrolebinding.rbac.authorization.k8s.io/tiller created\n$HELM_HOME has been configured at /home/janis/.helm.\n\nTiller (the Helm server-side component) has been installed into your Kubernetes Cluster.\n\nPlease note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.\nTo prevent this, run `helm init` with the --tiller-tls-verify flag.\nFor more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation\nHappy Helming!\n" ], [ "!kubectl rollout status deploy/tiller-deploy -n kube-system", "Waiting for deployment \"tiller-deploy\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"tiller-deploy\" successfully rolled out\n" ] ], [ [ "## Start seldon-core", "_____no_output_____" ], [ "Install the custom resource definition", "_____no_output_____" ] ], [ [ "!helm install ../helm-charts/seldon-core-crd --name seldon-core-crd --set usage_metrics.enabled=true", "NAME: seldon-core-crd\nLAST DEPLOYED: Tue Dec 18 11:10:48 2018\nNAMESPACE: seldon\nSTATUS: DEPLOYED\n\nRESOURCES:\n==> v1beta1/Deployment\nNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE\nseldon-spartakus-volunteer 1 0 0 0 0s\n\n==> v1/ServiceAccount\nNAME SECRETS AGE\nseldon-spartakus-volunteer 1 0s\n\n==> v1beta1/ClusterRole\nNAME AGE\nseldon-spartakus-volunteer 0s\n\n==> v1beta1/ClusterRoleBinding\nNAME AGE\nseldon-spartakus-volunteer 0s\n\n==> v1/ConfigMap\nNAME DATA AGE\nseldon-spartakus-config 3 0s\n\n==> v1beta1/CustomResourceDefinition\nNAME AGE\nseldondeployments.machinelearning.seldon.io 0s\n\n\nNOTES:\nNOTES: TODO\n\n\n" ], [ "!helm install ../helm-charts/seldon-core --name seldon-core --namespace seldon \\\n --set ambassador.enabled=true", "NAME: seldon-core\nLAST DEPLOYED: Tue Dec 18 11:10:51 2018\nNAMESPACE: seldon\nSTATUS: DEPLOYED\n\nRESOURCES:\n==> v1/Service\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE\nseldon-core-ambassador-admin NodePort 10.108.119.46 <none> 8877:30383/TCP 1s\nseldon-core-ambassador NodePort 
10.110.98.62 <none> 80:31892/TCP,443:32336/TCP 1s\nseldon-core-seldon-apiserver NodePort 10.105.205.239 <none> 8080:30495/TCP,5000:30228/TCP 0s\nseldon-core-redis ClusterIP 10.107.101.105 <none> 6379/TCP 0s\n\n==> v1beta1/Deployment\nNAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE\nseldon-core-ambassador 1 1 1 0 0s\nseldon-core-seldon-apiserver 1 1 1 0 0s\nseldon-core-seldon-cluster-manager 1 1 1 0 0s\nseldon-core-redis 1 1 1 0 0s\n\n==> v1/Pod(related)\nNAME READY STATUS RESTARTS AGE\nseldon-core-ambassador-56cb8fc595-nrxnw 0/1 ContainerCreating 0 0s\nseldon-core-seldon-apiserver-59978fbf45-cd9sr 0/1 ContainerCreating 0 0s\nseldon-core-seldon-cluster-manager-5449b8dd5f-689h9 0/1 ContainerCreating 0 0s\nseldon-core-redis-567db9ddcf-dwvbp 0/1 ContainerCreating 0 0s\n\n==> v1/ServiceAccount\nNAME SECRETS AGE\nseldon 1 1s\n\n==> v1beta1/Role\nNAME AGE\nseldon-local 1s\nambassador 1s\n\n==> v1beta1/RoleBinding\nNAME AGE\nambassador 1s\n\n==> v1/RoleBinding\nNAME AGE\nseldon 1s\n\n\nNOTES:\nThank you for installing Seldon Core.\n\nDocumentation can be found at https://github.com/SeldonIO/seldon-core\n\n\n\n\n" ] ], [ [ "Check all services are running before proceeding.", "_____no_output_____" ] ], [ [ "!kubectl rollout status deploy/seldon-core-seldon-cluster-manager\n!kubectl rollout status deploy/seldon-core-seldon-apiserver\n!kubectl rollout status deploy/seldon-core-ambassador ", "Waiting for deployment \"seldon-core-seldon-cluster-manager\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"seldon-core-seldon-cluster-manager\" successfully rolled out\nWaiting for deployment \"seldon-core-seldon-apiserver\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"seldon-core-seldon-apiserver\" successfully rolled out\nWaiting for deployment \"seldon-core-ambassador\" rollout to finish: 0 of 1 updated replicas are available...\ndeployment \"seldon-core-ambassador\" successfully rolled out\n" ] ], [ [ "## Set up REST and gRPC methods\n\n**Ensure you port forward to API Gateway**\n\nREST:\n\n```\nkubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080\n```\n\nGRPC:\n\n```\nkubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8004:5000\n```\n\n**Ensure you port forward ambassador**:\n\n```\nkubectl port-forward $(kubectl get pods -n seldon -l service=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8005:8080\n```", "_____no_output_____" ], [ "Illustration of both REST and gRPC requests. 
", "_____no_output_____" ] ], [ [ "import requests\nfrom requests.auth import HTTPBasicAuth\nfrom seldon_core.proto import prediction_pb2\nfrom seldon_core.proto import prediction_pb2_grpc\nimport grpc\nimport numpy as np\n\nAMBASSADOR_API=\"localhost:8005\"\nGATEWAY_REST=\"localhost:8003\"\nGATEWAY_GRPC=\"localhost:8004\"\n\ndef get_token(oauth_key,oauth_secret):\n payload = {'grant_type': 'client_credentials'}\n response = requests.post(\n \"http://\"+GATEWAY_REST+\"/oauth/token\",\n auth=HTTPBasicAuth(oauth_key, oauth_secret),\n data=payload)\n print(response.text)\n token = response.json()[\"access_token\"]\n return token\n\n \ndef rest_request_api_gateway(oauth_key,oauth_secret):\n token = get_token(oauth_key,oauth_secret)\n headers = {'Authorization': 'Bearer '+token}\n payload = {\"data\":{\"names\":[\"a\",\"b\"],\"tensor\":{\"shape\":[2,2],\"values\":[0,0,1,1]}}}\n response = requests.post(\n \"http://\"+GATEWAY_REST+\"/api/v0.1/predictions\",\n headers=headers,\n json=payload)\n print(response.text)\n\ndef grpc_request_api_gateway(oauth_key,oauth_secret,data_size):\n token = get_token(oauth_key,oauth_secret)\n shape = [1,data_size]\n arr = np.random.rand(data_size)\n datadef = prediction_pb2.DefaultData(\n names = [\"a\",\"b\"],\n tensor = prediction_pb2.Tensor(\n shape = shape,\n values = arr\n )\n )\n request = prediction_pb2.SeldonMessage(data = datadef)\n channel = grpc.insecure_channel(GATEWAY_GRPC)\n stub = prediction_pb2_grpc.SeldonStub(channel)\n metadata = [('oauth_token', token)]\n response = stub.Predict(request=request,metadata=metadata)\n print(response)\n \ndef grpc_request_ambassador(deploymentName,data_size):\n shape = [1,data_size]\n arr = np.random.rand(data_size)\n datadef = prediction_pb2.DefaultData(\n names = [\"a\",\"b\"],\n tensor = prediction_pb2.Tensor(\n shape = shape,\n values = arr\n )\n )\n request = prediction_pb2.SeldonMessage(data = datadef)\n channel = grpc.insecure_channel(AMBASSADOR_API)\n stub = prediction_pb2_grpc.SeldonStub(channel)\n metadata = [('seldon',deploymentName)]\n response = stub.Predict(request=request,metadata=metadata)\n print(response)", "_____no_output_____" ], [ "!pygmentize resources/model_short_timeouts.json", "{\r\n \u001b[34;01m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n \u001b[34;01m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n \u001b[34;01m\"metadata\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"labels\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"app\"\u001b[39;49;00m: \u001b[33m\"seldon\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"seldon-deployment-example\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"annotations\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"project_name\"\u001b[39;49;00m: \u001b[33m\"FX Market Prediction\"\u001b[39;49;00m,\r\n \u001b[34;01m\"deployment_version\"\u001b[39;49;00m: \u001b[33m\"v1\"\u001b[39;49;00m,\r\n\t \u001b[34;01m\"seldon.io/rest-read-timeout\"\u001b[39;49;00m:\u001b[33m\"1\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"test-deployment\"\u001b[39;49;00m,\r\n \u001b[34;01m\"oauth_key\"\u001b[39;49;00m: \u001b[33m\"oauth-key\"\u001b[39;49;00m,\r\n \u001b[34;01m\"oauth_secret\"\u001b[39;49;00m: \u001b[33m\"oauth-secret\"\u001b[39;49;00m,\r\n \u001b[34;01m\"predictors\"\u001b[39;49;00m: [\r\n {\r\n \u001b[34;01m\"componentSpecs\"\u001b[39;49;00m: [{\r\n \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n 
\u001b[34;01m\"containers\"\u001b[39;49;00m: [\r\n {\r\n \u001b[34;01m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n \u001b[34;01m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n \u001b[34;01m\"resources\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"requests\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n }\r\n }\r\n }\r\n ],\r\n \u001b[34;01m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n }\r\n }],\r\n \u001b[34;01m\"graph\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"children\"\u001b[39;49;00m: [],\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n \u001b[34;01m\"endpoint\"\u001b[39;49;00m: {\r\n\t\t\t\u001b[34;01m\"type\"\u001b[39;49;00m : \u001b[33m\"REST\"\u001b[39;49;00m\r\n\t\t },\r\n \u001b[34;01m\"type\"\u001b[39;49;00m: \u001b[33m\"MODEL\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"fx-market-predictor\"\u001b[39;49;00m,\r\n \u001b[34;01m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n\t\t\u001b[34;01m\"annotations\"\u001b[39;49;00m: {\r\n\t\t \u001b[34;01m\"predictor_version\"\u001b[39;49;00m : \u001b[33m\"v1\"\u001b[39;49;00m\r\n\t\t}\r\n }\r\n ]\r\n }\r\n}\r\n" ] ], [ [ "## Create Seldon Deployment", "_____no_output_____" ], [ "Deploy the runtime graph to kubernetes.", "_____no_output_____" ] ], [ [ "!kubectl apply -f resources/model_short_timeouts.json -n seldon", "seldondeployment.machinelearning.seldon.io/seldon-deployment-example created\r\n" ], [ "!kubectl get seldondeployments -n seldon", "NAME CREATED AT\r\nseldon-deployment-example 2s\r\n" ], [ "!kubectl describe seldondeployments seldon-deployment-example -n seldon", "Name: seldon-deployment-example\r\nNamespace: seldon\r\nLabels: app=seldon\r\nAnnotations: kubectl.kubernetes.io/last-applied-configuration:\r\n {\"apiVersion\":\"machinelearning.seldon.io/v1alpha2\",\"kind\":\"SeldonDeployment\",\"metadata\":{\"name\":\"seldon-deployment-example\",\"namespace\":\"s...\r\nAPI Version: machinelearning.seldon.io/v1alpha2\r\nKind: SeldonDeployment\r\nMetadata:\r\n Cluster Name: \r\n Creation Timestamp: 2018-12-18T11:12:01Z\r\n Generation: 1\r\n Resource Version: 746\r\n Self Link: /apis/machinelearning.seldon.io/v1alpha2/namespaces/seldon/seldondeployments/seldon-deployment-example\r\n UID: be7a2bce-02b5-11e9-aed1-18941d3b439b\r\nSpec:\r\n Annotations:\r\n Deployment Version: v1\r\n Project Name: FX Market Prediction\r\n Seldon . Io / Rest - Read - Timeout: 1\r\n Name: test-deployment\r\n Oauth Key: oauth-key\r\n Oauth Secret: oauth-secret\r\n Predictors:\r\n Annotations:\r\n Predictor Version: v1\r\n Component Specs:\r\n Spec:\r\n Containers:\r\n Image: seldonio/mock_classifier:1.0\r\n Image Pull Policy: IfNotPresent\r\n Name: classifier\r\n Resources:\r\n Requests:\r\n Memory: 1Mi\r\n Termination Grace Period Seconds: 20\r\n Graph:\r\n Endpoint:\r\n Type: REST\r\n Name: classifier\r\n Type: MODEL\r\n Name: fx-market-predictor\r\n Replicas: 1\r\nStatus:\r\n State: Creating\r\nEvents: <none>\r\n" ] ], [ [ "Get the status of the SeldonDeployment. 
**When ready the replicasAvailable should be 1**.", "_____no_output_____" ] ], [ [ "!kubectl get seldondeployments seldon-deployment-example -o jsonpath='{.status}' -n seldon", "map[predictorStatus:[map[replicasAvailable:1 name:test-deployment-fx-market-predictor-7cd068f replicas:1]] state:Available]" ] ], [ [ "## Get predictions with short and long timeouts", "_____no_output_____" ], [ "This next request should fail as the timeout is too short", "_____no_output_____" ] ], [ [ "rest_request_api_gateway('oauth-key','oauth-secret')", "{\"access_token\":\"7bdc8db7-e6a9-4a4a-80c9-c208992bff1b\",\"token_type\":\"bearer\",\"expires_in\":43199,\"scope\":\"read write\"}\n{\n \"code\": 103,\n \"info\": \"Status code: 500 Reason: \",\n \"reason\": \"Microservice error\",\n \"status\": \"FAILURE\"\n}\n" ] ], [ [ "Delete this graph and recreate one with a longer timeout", "_____no_output_____" ] ], [ [ "!kubectl delete -f resources/model_short_timeouts.json", "seldondeployment.machinelearning.seldon.io \"seldon-deployment-example\" deleted\r\n" ], [ "!pygmentize resources/model_long_timeouts.json", "{\r\n \u001b[34;01m\"apiVersion\"\u001b[39;49;00m: \u001b[33m\"machinelearning.seldon.io/v1alpha2\"\u001b[39;49;00m,\r\n \u001b[34;01m\"kind\"\u001b[39;49;00m: \u001b[33m\"SeldonDeployment\"\u001b[39;49;00m,\r\n \u001b[34;01m\"metadata\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"labels\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"app\"\u001b[39;49;00m: \u001b[33m\"seldon\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"seldon-deployment-example\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"annotations\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"project_name\"\u001b[39;49;00m: \u001b[33m\"FX Market Prediction\"\u001b[39;49;00m,\r\n \u001b[34;01m\"deployment_version\"\u001b[39;49;00m: \u001b[33m\"v1\"\u001b[39;49;00m,\r\n\t \u001b[34;01m\"seldon.io/rest-read-timeout\"\u001b[39;49;00m:\u001b[33m\"10000\"\u001b[39;49;00m,\r\n\t \u001b[34;01m\"seldon.io/rest-connection-timeout\"\u001b[39;49;00m:\u001b[33m\"10000\"\u001b[39;49;00m,\t \r\n\t \u001b[34;01m\"seldon.io/grpc-read-timeout\"\u001b[39;49;00m:\u001b[33m\"10000\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"test-deployment\"\u001b[39;49;00m,\r\n \u001b[34;01m\"oauth_key\"\u001b[39;49;00m: \u001b[33m\"oauth-key\"\u001b[39;49;00m,\r\n \u001b[34;01m\"oauth_secret\"\u001b[39;49;00m: \u001b[33m\"oauth-secret\"\u001b[39;49;00m,\r\n \u001b[34;01m\"predictors\"\u001b[39;49;00m: [\r\n {\r\n \u001b[34;01m\"componentSpecs\"\u001b[39;49;00m: [{\r\n \u001b[34;01m\"spec\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"containers\"\u001b[39;49;00m: [\r\n {\r\n \u001b[34;01m\"image\"\u001b[39;49;00m: \u001b[33m\"seldonio/mock_classifier:1.0\"\u001b[39;49;00m,\r\n \u001b[34;01m\"imagePullPolicy\"\u001b[39;49;00m: \u001b[33m\"IfNotPresent\"\u001b[39;49;00m,\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n \u001b[34;01m\"resources\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"requests\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"memory\"\u001b[39;49;00m: \u001b[33m\"1Mi\"\u001b[39;49;00m\r\n }\r\n }\r\n }\r\n ],\r\n \u001b[34;01m\"terminationGracePeriodSeconds\"\u001b[39;49;00m: \u001b[34m20\u001b[39;49;00m\r\n }\r\n }],\r\n \u001b[34;01m\"graph\"\u001b[39;49;00m: {\r\n \u001b[34;01m\"children\"\u001b[39;49;00m: [],\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"classifier\"\u001b[39;49;00m,\r\n \u001b[34;01m\"endpoint\"\u001b[39;49;00m: 
{\r\n\t\t\t\u001b[34;01m\"type\"\u001b[39;49;00m : \u001b[33m\"REST\"\u001b[39;49;00m\r\n\t\t },\r\n \u001b[34;01m\"type\"\u001b[39;49;00m: \u001b[33m\"MODEL\"\u001b[39;49;00m\r\n },\r\n \u001b[34;01m\"name\"\u001b[39;49;00m: \u001b[33m\"fx-market-predictor\"\u001b[39;49;00m,\r\n \u001b[34;01m\"replicas\"\u001b[39;49;00m: \u001b[34m1\u001b[39;49;00m,\r\n\t\t\u001b[34;01m\"annotations\"\u001b[39;49;00m: {\r\n\t\t \u001b[34;01m\"predictor_version\"\u001b[39;49;00m : \u001b[33m\"v1\"\u001b[39;49;00m\r\n\t\t}\r\n }\r\n ]\r\n }\r\n}\r\n" ], [ "!kubectl apply -f resources/model_long_timeouts.json -n seldon", "seldondeployment.machinelearning.seldon.io/seldon-deployment-example created\r\n" ], [ "!kubectl get seldondeployments seldon-deployment-example -o jsonpath='{.status}' -n seldon", "map[state:Available predictorStatus:[map[name:test-deployment-fx-market-predictor-7cd068f replicas:1 replicasAvailable:1]]]" ] ], [ [ "This next request should work as the timeout is much longer", "_____no_output_____" ] ], [ [ "rest_request_api_gateway('oauth-key','oauth-secret')", "{\"access_token\":\"7bdc8db7-e6a9-4a4a-80c9-c208992bff1b\",\"token_type\":\"bearer\",\"expires_in\":43126,\"scope\":\"read write\"}\n{\n \"meta\": {\n \"puid\": \"8jq85h9pbdquf9uii6nh6majr7\",\n \"tags\": {\n },\n \"routing\": {\n },\n \"requestPath\": {\n \"classifier\": \"seldonio/mock_classifier:1.0\"\n },\n \"metrics\": []\n },\n \"data\": {\n \"names\": [\"proba\"],\n \"tensor\": {\n \"shape\": [2, 1],\n \"values\": [0.05133579311531625, 0.12823373759251927]\n }\n }\n}\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
ecd4c9635967bab052dbe9aa3a2e839cdbefb09f
12,467
ipynb
Jupyter Notebook
processing_components/notebooks/imaging-coalesce.ipynb
mfarrera/algorithm-reference-library
7331812aa7cc3501a15d3392cecf6ea65b43f91e
[ "Apache-2.0" ]
null
null
null
processing_components/notebooks/imaging-coalesce.ipynb
mfarrera/algorithm-reference-library
7331812aa7cc3501a15d3392cecf6ea65b43f91e
[ "Apache-2.0" ]
null
null
null
processing_components/notebooks/imaging-coalesce.ipynb
mfarrera/algorithm-reference-library
7331812aa7cc3501a15d3392cecf6ea65b43f91e
[ "Apache-2.0" ]
null
null
null
27.766147
247
0.570787
[ [ [ "# Investigation of coalesence for LOW data", "_____no_output_____" ], [ "Baseline dependent averaging is a form of data coalescence. In this script, we create a critically sampled snapshot of a LOW data set, and then coalesce and decoalesce it to see what errors result. We look at the time required for all steps.", "_____no_output_____" ] ], [ [ "% matplotlib inline\n\nimport os\nimport sys\n\nsys.path.append(os.path.join('..', '..'))\n\nfrom matplotlib import pylab\n\npylab.rcParams['agg.path.chunksize'] = 10000\n\nfrom data_models.parameters import arl_path\nresults_dir = arl_path('test_results')\n\n\n\nimport numpy\n\nfrom astropy.convolution import Gaussian2DKernel\n\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\nfrom astropy.wcs.utils import pixel_to_skycoord\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.pyplot import cm\n\nfrom data_models.polarisation import PolarisationFrame\nfrom processing_components.visibility.base import create_blockvisibility, copy_visibility\nfrom processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent, apply_beam_to_skycomponent\nfrom processing_components.image.operations import show_image, smooth_image\nfrom processing_components.imaging.base import create_image_from_visibility\nfrom processing_components.visibility.coalesce import coalesce_visibility\nfrom processing_components.visibility.iterators import vis_timeslice_iter\nfrom processing_components.simulation.testing_support import create_named_configuration, create_low_test_beam, \\\n create_low_test_skycomponents_from_gleam\nfrom processing_components.imaging.base import predict_skycomponent_visibility\n\nimport logging\n\ndef init_logging():\n logging.basicConfig(filename='%s/imaging-coalesce.log' % results_dir,\n filemode='a',\n format='%(thread)s %(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.INFO)\nlog = logging.getLogger()\nlogging.info(\"Starting imaging-coalesce\")\n", "_____no_output_____" ], [ "pylab.rcParams['figure.figsize'] = (12.0, 12.0)\npylab.rcParams['image.cmap'] = 'Greys'", "_____no_output_____" ] ], [ [ "Construct the configuration and fill in the appropriate sampling values", "_____no_output_____" ] ], [ [ "config = 'core'\nif config == 'full':\n b = 8e4\n low = create_named_configuration('LOWBD2', rmax=b)\n cellsize = 0.00001\n npixel=8192\n nsnapshots = 30\n\nelse:\n b = 1e3\n low = create_named_configuration('LOWBD2', rmax=b)\n cellsize = 0.0003\n npixel=1024\n nsnapshots = 30\n\n \noversampling = 2\n\nsampling_time = 35.0 / (oversampling * b)\nlog.info(\"Critical sampling time = %.5f (radians) %.2f (seconds)\" % \n (sampling_time, sampling_time * 43200.0 / numpy.pi))\nsampling_frequency = 1e8 * 35.0 / (oversampling * b) \nlog.info(\"Critical sampling frequency = %.5f (Hz) \" % (sampling_frequency))\ntimes = numpy.arange(0.0, + nsnapshots * sampling_time, sampling_time)\nfrequency = numpy.linspace(1e8 - sampling_frequency, 1e8 + sampling_frequency, 3)\nchannel_bandwidth = numpy.full_like(frequency, sampling_frequency)\n\nlog.info(\"Observing frequencies %s Hz\" % (frequency))\n\nlog.info(\"Cellsize = %.6f radians\" % (cellsize))", "_____no_output_____" ] ], [ [ "We create the visibility holding the vis, uvw, time, antenna1, antenna2, weight columns in a table. 
The actual visibility values are zero.", "_____no_output_____" ] ], [ [ "phasecentre = SkyCoord(ra=+355.0 * u.deg, dec=-10.0 * u.deg, frame='icrs', equinox='J2000')\nvt = create_blockvisibility(low, times, frequency, channel_bandwidth=channel_bandwidth,\n weight=1.0, phasecentre=phasecentre, polarisation_frame=PolarisationFrame('stokesI'))", "_____no_output_____" ] ], [ [ "Create components from GLEAM ", "_____no_output_____" ] ], [ [ "comps = create_low_test_skycomponents_from_gleam(flux_limit=1.0, polarisation_frame=PolarisationFrame(\"stokesI\"),\n frequency=frequency, phasecentre=phasecentre, \n radius=0.2)", "_____no_output_____" ], [ "model = create_image_from_visibility(vt, npixel=npixel, cellsize=cellsize, frequency=frequency,\n polarisation_frame=PolarisationFrame('stokesI'),\n phasecentre=phasecentre, nchan=len(frequency))\nbeam=create_low_test_beam(model)\ncomps = apply_beam_to_skycomponent(comps, beam)\nmodel = insert_skycomponent(model, comps)", "_____no_output_____" ], [ "show_image(beam, cm='Greys', components=comps, vmax=1.0, vmin=0.0)\nplt.title(\"Beam\")\nplt.show()", "_____no_output_____" ], [ "cmodel = smooth_image(model)\nshow_image(cmodel, vmax=1, vmin=0.0, cm='Greys')\nplt.title(\"Smoothed model image\")\nplt.show()", "_____no_output_____" ] ], [ [ "Since we are using a BlockVisibility, we can only predict the visibility from sky components. ", "_____no_output_____" ] ], [ [ "vt = predict_skycomponent_visibility(vt, comps)", "_____no_output_____" ] ], [ [ "Now we coalesce the data", "_____no_output_____" ] ], [ [ "time_coal=1.0\nmax_time_coal=100\nfrequency_coal=0.0\nmax_frequency_coal=1\n\ncvt = coalesce_visibility(vt, time_coal=time_coal, frequency_coal=frequency_coal)\nplt.clf()\nplt.plot(+cvt.uvw[:,0],+cvt.uvw[:,1], '.', color='b')\nplt.plot(-cvt.uvw[:,0],-cvt.uvw[:,1], '.', color='b')\nplt.title('Coalesced uv coverage')\nplt.xlabel('U (lambda)')\nplt.ylabel('V (lambda)')\nplt.show()", "_____no_output_____" ] ], [ [ "Check that we did not coalesce in frequency and that we did in time", "_____no_output_____" ] ], [ [ "numpy.unique(cvt.frequency)==vt.frequency", "_____no_output_____" ], [ "numpy.unique(cvt.time).size > vt.time.size", "_____no_output_____" ] ], [ [ "Make the dirty image from coalesced data", "_____no_output_____" ] ], [ [ "dirtyimage, sumwt = invert_2d(cvt, model)", "_____no_output_____" ], [ "show_image(dirtyimage, vmax=1.0, vmin=-0.1, cm='Greys')\nplt.title('Coalesced data image')\nplt.show()", "_____no_output_____" ], [ "cvtpred = copy_visibility(cvt, zero=True)\ncvtpred = predict_skycomponent_visibility(cvtpred, comps)", "_____no_output_____" ], [ "plt.clf()\nplt.plot(vt.vis.real.flatten(), vt.vis.imag.flatten(), '.', color='g', label='BlockVisibility')\nplt.xlabel('Real')\nplt.ylabel('Imaginary')\nplt.title('Original visibility')\nplt.legend()\nplt.show()\n\nplt.clf()\nplt.plot(cvt.vis.real.flatten()-cvtpred.vis.real.flatten(), \n cvt.vis.imag.flatten()-cvtpred.vis.imag.flatten(), '.', color='r', label='Error')\nplt.title('Error between coalesce/predict and predict/coalesce')\nplt.xlabel('Real')\nplt.ylabel('Imaginary')\nplt.legend()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
ecd4d505c9d4eb131bbf0b20d53fc8b33cf7e38b
25,373
ipynb
Jupyter Notebook
QSTP_ROS_week1.ipynb
albegade-pavan/Robotics_Automation_QSTP_2021
2f8bb8e95e28ff807fd3f7697b98e099ff61f311
[ "MIT" ]
null
null
null
QSTP_ROS_week1.ipynb
albegade-pavan/Robotics_Automation_QSTP_2021
2f8bb8e95e28ff807fd3f7697b98e099ff61f311
[ "MIT" ]
null
null
null
QSTP_ROS_week1.ipynb
albegade-pavan/Robotics_Automation_QSTP_2021
2f8bb8e95e28ff807fd3f7697b98e099ff61f311
[ "MIT" ]
null
null
null
160.588608
19,798
0.844204
[ [ [ "#! /usr/bin/python3\n\"\"\"Week I Assignment\nSimulate the trajectory of a robot approximated using a unicycle model given the\nfollowing start states, dt, velocity commands and timesteps\nState = (x, y, theta);\nVelocity = (v, w)\n1. Start=(0, 0, 0); dt=0.1; vel=(1, 0.5); timesteps: 25\n2. Start=(0, 0, 1.57); dt=0.2; vel=(0.5, 1); timesteps: 10\n3. Start(0, 0, 0.77); dt=0.05; vel=(5, 4); timestep: 50\nUpload the completed python file and the figures of the three sub parts in classroom\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n \nclass Unicycle:\n def __init__(self, x: float, y: float, theta: float, dt: float):\n self.x = x\n self.y = y\n self.theta = theta\n self.dt = dt\n \n # Store the points of the trajectory to plot\n self.x_points = [self.x]\n self.y_points = [self.y]\n \n # print(self.x_points,self.y_points)\n \n def step(self, v: float, w: float, n: int):\n self.v = v\n self.w = w\n self.n = n\n \n self.dt=0.1\n # time = np.arange(0, self.dt)\n x = self.x\n y = self.y\n theta = self.theta\n time = self.dt\n for i in range(n):\n theta = theta+time * w \n x = x+v * np.cos(theta) * time \n y = y+v * np.sin(theta) * time \n \n self.x_points.append(x)\n self.y_points.append(y)\n \n \n \"\"\"\n Write the Kinematics model here\n Expectation:\n 1. Given v, w and the current state self.x, self.y, self.theta\n and control commands (v, w) for n timesteps, i.e. n * dt seconds,\n return the final pose (x, y, theta) after this time.\n 2. Append all intermediate points into the self.x_points, self.y_points list\n for plotting the trajectory.\n Args:\n v (float): linear velocity\n w (float): angular velocity\n n (int) : timesteps\n Return:\n x, y, theta (float): final pose\n \"\"\"\n \n #print(self.x_points,self.y_points)\n return self.x_points,self.y_points\n # return x, y, theta\n \n def plot(self, v: float, w: float):\n \"\"\"Function that plots the intermeditate trajectory of the Robot\"\"\"\n plt.title(f\"Unicycle Model: {v}, {w}\")\n plt.xlabel(\"X-Coordinates\")\n plt.ylabel(\"Y-Coordinates\")\n plt.plot(self.x_points, self.y_points, color=\"red\", alpha=0.75)\n plt.grid()\n \n # If you want to view the plot uncomment plt.show() and comment out plt.savefig()\n #plt.show()\n # If you want to save the file, uncomment plt.savefig() and comment out plt.show()\n plt.savefig(f\"Unicycle_{v}_{w}.png\")\n \n \n \n \nif __name__ == \"__main__\":\n robot1 = Unicycle(0, 0, 0, 0.1)\n robot1.step(1, 0.5, 25)\n robot1.plot(1,0.5)\n\n robot2 = Unicycle(0, 0, 1.57, 0.2)\n robot2.step(0.5, 1, 10)\n robot2.plot(0.5,1)\n\n robot3 = Unicycle(0, 0, 0.77, 0.05)\n robot3.step(5, 4, 50)\n robot3.plot(5,4)\n\n\n \n print(\"Unicycle Model Assignment\")\n \n # make an object of the robot and plot various trajectories", "Unicycle Model Assignment\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
ecd4d9bc99197c73a07471532dd228b76bea8d85
680,709
ipynb
Jupyter Notebook
Models/BMO_StockPricesPrediction.ipynb
MarkLee7925/Bank-Stock-Prices-Prediction
c8d50e4f82a9cf16763949bd4ba885985917b882
[ "MIT" ]
1
2021-11-01T20:27:40.000Z
2021-11-01T20:27:40.000Z
Models/BMO_StockPricesPrediction.ipynb
MarkLee7925/Bank-Stock-Prices-Prediction
c8d50e4f82a9cf16763949bd4ba885985917b882
[ "MIT" ]
null
null
null
Models/BMO_StockPricesPrediction.ipynb
MarkLee7925/Bank-Stock-Prices-Prediction
c8d50e4f82a9cf16763949bd4ba885985917b882
[ "MIT" ]
null
null
null
468.807851
392,158
0.921148
[ [ [ "# BMO - Stock Prices Prediction", "_____no_output_____" ], [ "## 1. Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport pandas_datareader as webreader\nimport pandas as pd\nimport tensorflow as tf\nimport seaborn as sns\nimport datetime as dt\nimport pandas_datareader as webreader\nfrom datetime import date, timedelta, datetime", "_____no_output_____" ] ], [ [ "## 2. Load Data", "_____no_output_____" ] ], [ [ "# Import dataset\ndf = pd.read_csv('BMO_StockPrices_2016-2021.csv')\n\n# today = date.today()\n# date_today = today.strftime(\"%Y-%m-%d\")\n# date_start = '2016-01-01'\n\n# # Read stock quotes from Yahoo Finance\n# df = webreader.DataReader('BMO.TO', data_source='yahoo', start=date_start, end=date_today)\n\n# Input data of np array - Closing price\ndataset = df.iloc[:, 4:5].values\n\nprint(dataset.shape)", "(1254, 1)\n" ], [ "df", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1254 entries, 0 to 1253\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 1254 non-null object \n 1 Open 1254 non-null float64\n 2 High 1254 non-null float64\n 3 Low 1254 non-null float64\n 4 Close 1254 non-null float64\n 5 Adj Close 1254 non-null float64\n 6 Volume 1254 non-null int64 \ndtypes: float64(5), int64(1), object(1)\nmemory usage: 68.7+ KB\n" ], [ "# Check for missing values\ndf.isnull().sum()", "_____no_output_____" ], [ "# from sklearn.impute import SimpleImputer\n\n# imputer = SimpleImputer(missing_values = np.nan, strategy = 'mean')\n# imputer.fit_transform(dataset)\n# dataset = imputer.transform(dataset)\n# df.dropna(inplace=True)", "_____no_output_____" ] ], [ [ "## 3. Visualize Data", "_____no_output_____" ] ], [ [ "# Plot each data column\ndf.plot(subplots=True, figsize=(12,20), sharex=False, cmap='rainbow')", "_____no_output_____" ], [ "# Pariwise relationship between features\nsns.pairplot(df)", "_____no_output_____" ], [ "# Correlation Map\nplt.figure(figsize=(12,12))\nsns.heatmap(df.corr(method='pearson'), annot=True, cmap='hot', fmt='.2f')\nplt.title('Correlation Map')\nplt.xticks(rotation=60)", "_____no_output_____" ] ], [ [ "## 4. 
Data Preprocessing and Feature Engineering", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler", "_____no_output_____" ], [ "# Normalize data\nsc = MinMaxScaler(feature_range=(0, 1))\ndataset_scaled = sc.fit_transform(dataset)", "_____no_output_____" ], [ "dataset_scaled", "_____no_output_____" ], [ "# Split dataset into training set and test set (80/20 split)\ntraining_size = int(len(df)*0.8)\ndataset_train = dataset_scaled[:training_size]\ndataset_test = dataset_scaled[training_size:]\n\nprint('Training set shape: ' + str(dataset_train.shape))\nprint('Test set shape: ' + str(dataset_test.shape))", "Training set shape: (1003, 1)\nTest set shape: (251, 1)\n" ] ], [ [ "#### Create a data structure with 60 timesteps and 1 output", "_____no_output_____" ] ], [ [ "# Predict stock price results using 60 timesteps (60 business days)\ntimesteps = 60\n\ndef define_training_data(timesteps, dataset):\n ''' \n Define number of timesteps and features needed for training and \n validating RNN model \n '''\n\n # X - number of timesteps, y - indicators (features) used for training\n X, y = [], []\n for i in range(timesteps, dataset.shape[0]):\n X.append(dataset[i-timesteps:i, 0])\n y.append(dataset[i, 0])\n X, y = np.array(X), np.array(y)\n return X, y\n\nX_train, y_train = define_training_data(timesteps, dataset_train)\nprint(X_train.shape, y_train.shape)", "(943, 60) (943,)\n" ] ], [ [ "#### Reshaping", "_____no_output_____" ] ], [ [ "# Reshape(number of rows, number of timesteps (columns), number of predictors)\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))", "_____no_output_____" ], [ "X_train", "_____no_output_____" ], [ "X_train.shape", "_____no_output_____" ] ], [ [ "## 5. Define LSTM model", "_____no_output_____" ] ], [ [ "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score", "_____no_output_____" ], [ "# Define model\nreg = Sequential()\n\n# LSTM layers\nreg.add(LSTM(units=70, return_sequences=True, \n input_shape=(X_train.shape[1], X_train.shape[2])))\nreg.add(Dropout(0.2))\nreg.add(LSTM(units=70, return_sequences=True))\nreg.add(Dropout(0.2))\nreg.add(LSTM(units=70, return_sequences=True))\nreg.add(Dropout(0.1))\nreg.add(LSTM(units=70, return_sequences=False))\nreg.add(Dropout(0.1))\n\n# Output layer\nreg.add(Dense(units=X_train.shape[2]))\n\n# Compile model\n# The Adam optimizer is always a safe choice for any DNN model\n# Alternatively, use RMSprop\nreg.compile(optimizer='adam', loss='mean_squared_error')\n\n# Train (fit) model\nhistory = reg.fit(X_train, y_train, batch_size=32, epochs=100)\n\n# Plot loss during training\nplt.plot(history.history['loss'], 'g', label='loss')\nplt.title('Training Curve')\nplt.xlabel('Epochs')\nplt.ylabel('MSE')\nplt.legend()\nplt.grid()\nplt.show()", "Epoch 1/100\n30/30 [==============================] - 10s 141ms/step - loss: 0.0363\nEpoch 2/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0075\nEpoch 3/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0056\nEpoch 4/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0046\nEpoch 5/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0043\nEpoch 6/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0041\nEpoch 7/100\n30/30 [==============================] - 4s 140ms/step - loss: 
0.0040\nEpoch 8/100\n30/30 [==============================] - 4s 138ms/step - loss: 0.0034\nEpoch 9/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0032\nEpoch 10/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0039\nEpoch 11/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0030\nEpoch 12/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0029\nEpoch 13/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0026\nEpoch 14/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0027\nEpoch 15/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0028\nEpoch 16/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0025\nEpoch 17/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0023\nEpoch 18/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0023\nEpoch 19/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0024\nEpoch 20/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0022\nEpoch 21/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0027\nEpoch 22/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0022\nEpoch 23/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0020\nEpoch 24/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0019\nEpoch 25/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0019\nEpoch 26/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0020\nEpoch 27/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0019\nEpoch 28/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0021\nEpoch 29/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0022\nEpoch 30/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0018\nEpoch 31/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0017\nEpoch 32/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0018\nEpoch 33/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0017\nEpoch 34/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0018\nEpoch 35/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0016\nEpoch 36/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0017\nEpoch 37/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0016\nEpoch 38/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0015\nEpoch 39/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0016\nEpoch 40/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0015\nEpoch 41/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0014\nEpoch 42/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0016\nEpoch 43/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0014\nEpoch 44/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0014\nEpoch 45/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0014\nEpoch 46/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0016\nEpoch 47/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0013\nEpoch 48/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0015\nEpoch 49/100\n30/30 [==============================] - 4s 
140ms/step - loss: 0.0014\nEpoch 50/100\n30/30 [==============================] - 4s 139ms/step - loss: 0.0014\nEpoch 51/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0013\nEpoch 52/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0016\nEpoch 53/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0017\nEpoch 54/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0012\nEpoch 55/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0013\nEpoch 56/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0014\nEpoch 57/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0011\nEpoch 58/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0013\nEpoch 59/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0012\nEpoch 60/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0013\nEpoch 61/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0012\nEpoch 62/100\n30/30 [==============================] - 4s 143ms/step - loss: 9.8989e-04\nEpoch 63/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0011\nEpoch 64/100\n30/30 [==============================] - 4s 144ms/step - loss: 0.0011\nEpoch 65/100\n30/30 [==============================] - 4s 142ms/step - loss: 0.0011\nEpoch 66/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0010\nEpoch 67/100\n30/30 [==============================] - 4s 145ms/step - loss: 0.0012\nEpoch 68/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0010\nEpoch 69/100\n30/30 [==============================] - 4s 144ms/step - loss: 0.0010\nEpoch 70/100\n30/30 [==============================] - 4s 144ms/step - loss: 0.0011\nEpoch 71/100\n30/30 [==============================] - 4s 144ms/step - loss: 0.0011\nEpoch 72/100\n30/30 [==============================] - 7s 239ms/step - loss: 0.0011\nEpoch 73/100\n30/30 [==============================] - 7s 225ms/step - loss: 0.0011\nEpoch 74/100\n30/30 [==============================] - 4s 144ms/step - loss: 9.7771e-04\nEpoch 75/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.8531e-04\nEpoch 76/100\n30/30 [==============================] - 4s 141ms/step - loss: 0.0011\nEpoch 77/100\n30/30 [==============================] - 4s 141ms/step - loss: 8.9877e-04\nEpoch 78/100\n30/30 [==============================] - 4s 141ms/step - loss: 9.3377e-04\nEpoch 79/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.1539e-04\nEpoch 80/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.8173e-04\nEpoch 81/100\n30/30 [==============================] - 4s 143ms/step - loss: 0.0011\nEpoch 82/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0010\nEpoch 83/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.9145e-04\nEpoch 84/100\n30/30 [==============================] - 4s 140ms/step - loss: 0.0010\nEpoch 85/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.4438e-04\nEpoch 86/100\n30/30 [==============================] - 4s 142ms/step - loss: 8.2457e-04\nEpoch 87/100\n30/30 [==============================] - 4s 141ms/step - loss: 9.7291e-04\nEpoch 88/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.0336e-04\nEpoch 89/100\n30/30 [==============================] - 4s 144ms/step - loss: 9.8652e-04\nEpoch 90/100\n30/30 [==============================] - 4s 143ms/step - 
loss: 8.8163e-04\nEpoch 91/100\n30/30 [==============================] - 4s 141ms/step - loss: 9.4427e-04\nEpoch 92/100\n30/30 [==============================] - 4s 140ms/step - loss: 8.9696e-04\nEpoch 93/100\n30/30 [==============================] - 4s 141ms/step - loss: 8.6774e-04\nEpoch 94/100\n30/30 [==============================] - 4s 143ms/step - loss: 9.4099e-04\nEpoch 95/100\n30/30 [==============================] - 4s 141ms/step - loss: 9.8791e-04\nEpoch 96/100\n30/30 [==============================] - 5s 173ms/step - loss: 8.9753e-04\nEpoch 97/100\n30/30 [==============================] - 4s 145ms/step - loss: 0.0011\nEpoch 98/100\n30/30 [==============================] - 4s 142ms/step - loss: 8.9849e-04\nEpoch 99/100\n30/30 [==============================] - 4s 142ms/step - loss: 8.9806e-04\nEpoch 100/100\n30/30 [==============================] - 4s 142ms/step - loss: 9.2094e-04\n" ] ], [ [ "## 6. Evalutate model", "_____no_output_____" ], [ "#### Make predictions", "_____no_output_____" ] ], [ [ "# Get 60 previous stock prices for predicting\ninputs = dataset[len(dataset)-len(dataset_test) - timesteps:]\n# Reshape input values\ninputs = inputs.reshape(-1, 1)\n# Scale input values, directly apply transformations (already fitted)\ninputs = sc.transform(inputs)\n\n# Define test set structure\nX_test = []\n# For the entire test set\nfor i in range(timesteps, timesteps + len(dataset_test)):\n # Range of values from i-60 to i, first column\n X_test.append(inputs[i-timesteps:i, 0])\nX_test = np.array(X_test)\n\n# Obtain 3D structure of the test set\nX_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\n\n# Predict the test set results\npredicted_stock_price = reg.predict(X_test)\n# Apply inverse transformations\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\nreal_stock_price = sc.inverse_transform(dataset_test)\n\n# Print predicted and real stock prices side-by-side\nprint(np.concatenate((predicted_stock_price.reshape(len(predicted_stock_price), 1), \n real_stock_price.reshape(len(real_stock_price), 1)), 1 ))\n\n# Print Scores\nprint('')\nprint('--- Prediction Scores ---')\nprint('')\nmae = mean_absolute_error(real_stock_price, predicted_stock_price)\nprint('MAE: ' + str(mae))\nmse = mean_squared_error(real_stock_price, predicted_stock_price)\nprint('MSE: ' + str(mse))\nrmse = np.sqrt(mean_squared_error(real_stock_price, predicted_stock_price))\nprint('RMSE: '+ str(rmse))\nr2 = r2_score(real_stock_price, predicted_stock_price)\nprint('R^2 Score: '+ str(r2))", "[[ 81.16570282 81.25 ]\n [ 81.59667206 80.639999 ]\n [ 81.83959198 80.790001 ]\n [ 81.89061737 80.230003 ]\n [ 81.71968079 79.169998 ]\n [ 81.22531891 78.040001 ]\n [ 80.38375854 77.650002 ]\n [ 79.44176483 77.989998 ]\n [ 78.79369354 77.980003 ]\n [ 78.54658508 79.589996 ]\n [ 78.94908905 78.169998 ]\n [ 79.38227844 77.839996 ]\n [ 79.52805328 77.080002 ]\n [ 79.28474426 77.400002 ]\n [ 78.93808746 78.449997 ]\n [ 78.90766907 80.370003 ]\n [ 79.5536499 81.720001 ]\n [ 80.78092194 82.209999 ]\n [ 82.13579559 81.779999 ]\n [ 83.10533142 81.389999 ]\n [ 83.48757935 80.940002 ]\n [ 83.33524323 81.010002 ]\n [ 82.93436432 80.989998 ]\n [ 82.527565 79.779999 ]\n [ 81.97651672 80.860001 ]\n [ 81.64546967 81.949997 ]\n [ 81.83596039 82.730003 ]\n [ 82.49204254 84.089996 ]\n [ 83.5210495 83.650002 ]\n [ 84.414711 82.080002 ]\n [ 84.59980011 80.080002 ]\n [ 83.79505157 81.080002 ]\n [ 82.76397705 79.330002 ]\n [ 81.61392212 79.5 ]\n [ 80.67777252 81.529999 ]\n [ 80.61301422 82.68 ]\n [ 81.43695068 82.089996 
]\n [ 82.40242767 81.379997 ]\n [ 82.92207336 84.889999 ]\n [ 83.74958038 87.57 ]\n [ 85.32111359 87.349998 ]\n [ 86.97394562 86.769997 ]\n [ 88.05635071 87.540001 ]\n [ 88.64974976 89.660004 ]\n [ 89.34590912 90.82 ]\n [ 90.32504272 91.510002 ]\n [ 91.41201782 92.269997 ]\n [ 92.44322968 93.349998 ]\n [ 93.43056488 94.839996 ]\n [ 94.51882935 97.279999 ]\n [ 95.99775696 97.040001 ]\n [ 97.3235321 97.160004 ]\n [ 98.19116974 96.769997 ]\n [ 98.47937012 93.330002 ]\n [ 97.5018158 96.519997 ]\n [ 96.79161072 97.330002 ]\n [ 96.88565063 97.760002 ]\n [ 97.50865936 98.050003 ]\n [ 98.22318268 97.389999 ]\n [ 98.53993225 97.650002 ]\n [ 98.56848145 98.330002 ]\n [ 98.64430237 97.510002 ]\n [ 98.53586578 97.139999 ]\n [ 98.20626068 96.120003 ]\n [ 97.55040741 97.459999 ]\n [ 97.25898743 97.43 ]\n [ 97.31651306 97.580002 ]\n [ 97.54695892 96.440002 ]\n [ 97.44639587 96.779999 ]\n [ 97.23032379 96.660004 ]\n [ 97.0220108 97.330002 ]\n [ 97.07707977 97.25 ]\n [ 97.25865173 97.139999 ]\n [ 97.39836121 96.610001 ]\n [ 97.31278229 96.779999 ]\n [ 97.1662674 95.989998 ]\n [ 96.83924866 96.459999 ]\n [ 96.62397003 98.199997 ]\n [ 97.06057739 99.239998 ]\n [ 98.05732727 99.860001 ]\n [ 99.19946289 99.870003 ]\n [100.06394958 99.620003 ]\n [100.46508026 99.290001 ]\n [100.4397049 99.5 ]\n [100.28418732 100.629997 ]\n [100.46650696 101.230003 ]\n [100.99351501 100.5 ]\n [101.3338623 99.550003 ]\n [101.15151215 98.199997 ]\n [100.33190155 97.599998 ]\n [ 99.21789551 98.139999 ]\n [ 98.46242523 98.050003 ]\n [ 98.14215851 97.050003 ]\n [ 97.80648804 98.129997 ]\n [ 97.79142761 95.120003 ]\n [ 97.09966278 95.75 ]\n [ 96.28544617 96.440002 ]\n [ 95.93358612 96.209999 ]\n [ 95.91248322 97.93 ]\n [ 96.51428223 97.839996 ]\n [ 97.26872253 97.769997 ]\n [ 97.81149292 97.620003 ]\n [ 98.03778839 97.309998 ]\n [ 97.97188568 96.839996 ]\n [ 97.66751862 97.599998 ]\n [ 97.55539703 98.239998 ]\n [ 97.82067871 99.370003 ]\n [ 98.53838348 99.669998 ]\n [ 99.37622833 101.25 ]\n [100.43491364 101.839996 ]\n [101.50401306 104.900002 ]\n [103.0834198 107.800003 ]\n [105.29968262 105.889999 ]\n [106.77586365 103.980003 ]\n [106.89807892 106.099998 ]\n [106.84851074 107.400002 ]\n [107.2635498 107.32 ]\n [107.83488464 106.550003 ]\n [108.0556488 108.110001 ]\n [108.42941284 108.889999 ]\n [109.04012299 108.389999 ]\n [109.43218994 110.150002 ]\n [110.04814911 109.730003 ]\n [110.49991608 110.209999 ]\n [110.82948303 109.720001 ]\n [110.86019135 109.599998 ]\n [110.67976379 110.660004 ]\n [110.76222992 111.830002 ]\n [111.30924988 111.580002 ]\n [111.81987762 110.760002 ]\n [111.86196136 109.650002 ]\n [111.30380249 110.559998 ]\n [110.88856506 112.230003 ]\n [111.2245636 112.959999 ]\n [112.02935028 112.519997 ]\n [112.61242676 113.169998 ]\n [113.04849243 112.019997 ]\n [112.96791077 113. 
]\n [112.9413681 113.620003 ]\n [113.18159485 113.089996 ]\n [113.32250214 113.830002 ]\n [113.55915833 114.349998 ]\n [113.9339447 114.889999 ]\n [114.41381073 115.720001 ]\n [115.03022766 113.860001 ]\n [114.98442078 114.279999 ]\n [114.72612 114.559998 ]\n [114.57692719 114.82 ]\n [114.62017059 114.07 ]\n [114.50078583 112.669998 ]\n [113.87757111 113.790001 ]\n [113.52466583 113.93 ]\n [113.49001312 116.010002 ]\n [114.20800018 116.709999 ]\n [115.22415924 116.800003 ]\n [116.02949524 117.099998 ]\n [116.5623703 117.25 ]\n [116.88064575 116.010002 ]\n [116.68231964 115.809998 ]\n [116.25366211 115.510002 ]\n [115.79724884 116.239998 ]\n [115.70832825 117.300003 ]\n [116.14342499 117.839996 ]\n [116.81219482 117.809998 ]\n [117.32592773 117.139999 ]\n [117.39405823 116.489998 ]\n [117.04431152 119.419998 ]\n [117.4965744 120.650002 ]\n [118.57184601 121.519997 ]\n [119.78557587 122.360001 ]\n [120.89422607 122. ]\n [121.54418945 123.139999 ]\n [122.14385223 123.919998 ]\n [122.81719971 123.599998 ]\n [123.26808167 125.410004 ]\n [124.00395203 126.18 ]\n [124.88312531 126.830002 ]\n [125.73488617 126.190002 ]\n [126.1495285 127.980003 ]\n [126.75306702 127.419998 ]\n [127.12880707 128.570007 ]\n [127.60502625 128. ]\n [127.82706451 128.550003 ]\n [128.01623535 128.169998 ]\n [128.02560425 127.150002 ]\n [127.65454865 126.949997 ]\n [127.15986633 126.919998 ]\n [126.74171448 126.5 ]\n [126.32860565 127.510002 ]\n [126.28632355 129.309998 ]\n [126.86422729 127.830002 ]\n [127.02803802 126.93 ]\n [126.65820312 128.050003 ]\n [126.48175049 128.119995 ]\n [126.46841431 127.900002 ]\n [126.44034576 127.760002 ]\n [126.35031128 128.649994 ]\n [126.4982605 127.080002 ]\n [126.25450134 126.599998 ]\n [125.76306152 127.059998 ]\n [125.45582581 127.089996 ]\n [125.33335876 127.32 ]\n [125.36690521 126.68 ]\n [125.24655151 126.839996 ]\n [125.12116241 124.980003 ]\n [124.49695587 126.120003 ]\n [124.16663361 127. 
]\n [124.33432007 126.769997 ]\n [124.56687164 126.419998 ]\n [124.62067413 126.339996 ]\n [124.55318451 125.110001 ]\n [124.11907959 122.370003 ]\n [122.86542511 123.139999 ]\n [121.86338806 124.120003 ]\n [121.63420105 123.589996 ]\n [121.6262207 124.470001 ]\n [121.90057373 125.330002 ]\n [122.42158508 125.400002 ]\n [122.88369751 125.510002 ]\n [123.20581818 126.139999 ]\n [123.56855011 123.529999 ]\n [123.15175629 124.260002 ]\n [122.74111176 123.989998 ]\n [122.45301819 125.470001 ]\n [122.76028442 126.449997 ]\n [123.50737762 126.540001 ]\n [124.20670319 127.919998 ]\n [125.03870392 129.050003 ]\n [125.99938965 129.130005 ]\n [126.77290344 128.729996 ]\n [127.16746521 127.989998 ]\n [127.13485718 127.5 ]\n [126.84696198 128.149994 ]\n [126.76663208 126.790001 ]\n [126.44020844 127.910004 ]\n [126.41667938 128.720001 ]\n [126.75012207 130.869995 ]\n [127.67801666 130.720001 ]\n [128.46696472 127.669998 ]\n [128.11645508 126.940002 ]\n [127.18177795 125.910004 ]\n [126.0234375 125.559998 ]\n [125.04297638 127.010002 ]\n [124.86908722 127.809998 ]\n [125.26459503 127.730003 ]\n [125.64865112 128.5 ]\n [126.05476379 128.440002 ]\n [126.30767059 128.270004 ]\n [126.37052917 127.860001 ]\n [126.23548126 128.789993 ]\n [126.33563232 127.480003 ]]\n\n--- Prediction Scores ---\n\nMAE: 1.3743458854678936\nMSE: 2.9073873130520353\nRMSE: 1.705106246851508\nR^2 Score: 0.9896667426705954\n" ] ], [ [ "#### Visualizing results", "_____no_output_____" ] ], [ [ "# Plot predicted and real closing stock prices\nplt.figure(figsize=(8,6))\nplt.plot(real_stock_price, color = 'blue', label = 'Real Stock Price')\nplt.plot(predicted_stock_price, color = 'red', label = 'Predicted Stock Price')\nplt.xlabel('Days')\nplt.ylabel('Stock Price ($)')\nplt.title('BMO Stock Price Prediction')\nplt.legend()\nplt.grid()\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
ecd501e970b2358fa3c327e1973075b7c5e64e3e
391,765
ipynb
Jupyter Notebook
doc/nb/quick_random_redshift_error.ipynb
HiramHerrera/desisim
3ae76e4c921f72b71ff7522462740e904136f428
[ "BSD-3-Clause" ]
15
2015-12-16T22:01:53.000Z
2022-01-14T07:31:55.000Z
doc/nb/quick_random_redshift_error.ipynb
HiramHerrera/desisim
3ae76e4c921f72b71ff7522462740e904136f428
[ "BSD-3-Clause" ]
455
2015-04-06T03:11:27.000Z
2022-02-28T18:11:16.000Z
doc/nb/quick_random_redshift_error.ipynb
HiramHerrera/desisim
3ae76e4c921f72b71ff7522462740e904136f428
[ "BSD-3-Clause" ]
21
2015-01-26T17:45:04.000Z
2022-02-22T19:46:20.000Z
1,017.571429
129,176
0.955917
[ [ [ "# The porpuse of this small notebook is to test the addition of a random error to the quasar redshift simultaed by quickquasar. \n## As well, it can be used to run a redrock over individual pixels. If want to run over several files is best t use a slurm script. \n### The idea is to keep one single paramater --zbest but not it is a float used to define the standard deviation of a gaussian distribution for the error. So far the gaussian is the same independtly of the quasar redshift or magnitude. ", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.table import Table\nimport desispec.io\nfrom scipy import constants\n%matplotlib inline\nc = constants.speed_of_light/1000. #- km/s\n\n", "_____no_output_____" ] ], [ [ "## Lets define a plotting function to be used later in order to compare the redshift files, for a single pixel. ", "_____no_output_____" ] ], [ [ "def plot_zinfo(zbest,ztrue,label=None):\n dv = c * (zbest['Z'] - ztrue['Z']) / (1.0 + ztrue['Z'])\n err=(zbest['Z']/ztrue['Z']-1.)*100\n plt.figure(figsize=(20,6),dpi=100)\n plt.subplot(1,3,1)\n plt.scatter(ztrue['Z'],dv)\n plt.xlabel('z_true')\n plt.ylabel('dv')\n plt.title(label)\n plt.subplot(1,3,2)\n plt.scatter(ztrue['Z'],zbest['Z'],marker='.')\n plt.xlabel('z_true')\n plt.ylabel('z_best')\n plt.title(label)\n plt.subplot(1,3,3)\n plt.scatter(ztrue['Z'],err)\n plt.ylim(-0.5,0.5)\n plt.xlim(1,4)\n plt.xlabel('z_true')\n plt.ylabel(r'$\\epsilon(\\%)$')\n plt.title(label)", "_____no_output_____" ], [ "#Only if you are using your local version of desisim. If using master you can comment this cell\nos.environ['PYTHONPATH']='/global/homes/a/alxogm/desi/code/desisim/py:'+os.environ['PYTHONPATH']\nos.environ['PATH']='/global/homes/a/alxogm/desi/code/desisim/bin/:'+os.environ['PATH']", "_____no_output_____" ] ], [ [ "#Only if you are using your local version of redrock. If using master you can comment this cell\nos.environ['PYTHONPATH']='/global/homes/a/alxogm/desi/code/redrock/py'+os.environ['PYTHONPATH']\nos.environ['PATH']='/global/homes/a/alxogm/desi/code/redrock/bin/:'+os.environ['PATH']\n", "_____no_output_____" ], [ "## Next we set the simulation parameters and the file to analyze. In this case is the transmission-16-0.fits file.\n\n### This first simulations saves the zbest file with the truth values, since zbest is set to 0.", "_____no_output_____" ] ], [ [ "exptime=4000 #Exposure time\n#ifile='/project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits' #Transmission file to read, v3 cointains metals, most of the notebook works also with v2.0, except the part corresponding to the metal addition from transrmision file. \nifile='/project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits' #Choose one pixel to simulate the quasars. \noutdir='/project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/' #Where do you want to save the files, although in this particular notebook we will be overwritring the files. \nzmin=1.8\nseed=123 #Seed to ensure reproducibility. 
\nspecfile=outdir+'spectra-16-0.fits' ", "_____no_output_____" ], [ "cmd ='quickquasars --exptime {exptime} -i {ifile} -o {outfile} --zmin {zmin} --zbest 0 --mags --desi-footprint --overwrite \\\n --seed {seed} --downsampling 0.4'.format(exptime=exptime,ifile=ifile,outfile=specfile,zmin=zmin,seed=seed) \n!time $cmd\nztrue=Table.read('/project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/zbest-16-0.fits')", "INFO:quickquasars.py:544:main: Load SIMQSO model\nINFO:quickquasars.py:549:main: Load DeCAM and WISE filters for target selection sim.\nifilename /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits\nINFO:quickquasars.py:120:get_healpix_info: healpix=HPXPIXEL=0\nINFO:quickquasars.py:125:get_healpix_info: nside=HPXNSIDE=16\nINFO:quickquasars.py:133:get_healpix_info: hpxnest from HPXNEST = True\nfound 0 16 True\nINFO:quickquasars.py:207:simulate_one_healpix: Read skewers in /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits, random seed = 2866\nINFO:quickquasars.py:211:simulate_one_healpix: Read transmission file /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits\nINFO:quickquasars.py:220:simulate_one_healpix: Select QSOs in DESI footprint 2384 -> 2384\nINFO:quickquasars.py:325:simulate_one_healpix: Increase wavelength range from 3548:5819 to 3329:5819 to compute magnitudes\nINFO:quickquasars.py:335:simulate_one_healpix: Increase wavelength range from 3329:5819 to 3329:55501 to compute magnitudes\nINFO:quickquasars.py:354:simulate_one_healpix: Simulate 943 QSOs with SIMQSO templates\nINFO:quickquasars.py:360:simulate_one_healpix: Resample to transmission wavelength grid\nINFO:quickquasars.py:384:simulate_one_healpix: Apply transmitted flux fraction\nINFO:quickquasars.py:410:simulate_one_healpix: Compute QSO magnitudes\nINFO:quickquasars.py:433:simulate_one_healpix: Resample to a linear wavelength grid (needed by DESI sim.)\nINFO:quickquasars.py:440:simulate_one_healpix: Simulate DESI observation and write output file\nINFO:quickspectra.py:61:sim_spectra: Starting simulation of 943 spectra\nDEBUG:simexp.py:416:simulate_spectra: loading specsim desi config desi\nDEBUG:simexp.py:420:simulate_spectra: creating specsim desi simulator\nINFO:simexp.py:442:simulate_spectra: MJD not in obsconditions, using DATE-OBS 2009-06-18T12:00:00.000\nDEBUG:simexp.py:446:simulate_spectra: obsconditions SEEING = 1.1\nDEBUG:simexp.py:446:simulate_spectra: obsconditions EXPTIME = 4000.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions AIRMASS = 1.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONFRAC = 0.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONALT = -60\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONSEP = 180\nDEBUG:simexp.py:683:get_source_types: qso 943 targets\nDEBUG:simexp.py:486:simulate_spectra: running simulation with fastsim fiber loss method\nDEBUG:simexp.py:492:simulate_spectra: source types: 943 qso\nINFO:quickspectra.py:230:sim_spectra: Wrote /project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/spectra-16-0.fits\nINFO:quickquasars.py:466:simulate_one_healpix: Read fibermap\nINFO:quickquasars.py:468:simulate_one_healpix: Writing a zbest file /project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/zbest-16-0.fits\n\nreal\t1m23.561s\nuser\t1m10.202s\nsys\t0m12.776s\n" ] ], [ [ "### Now we run a second simulation with zbest set to 200km/s. This defines the standard deviation of a gaussian distribution. 
", "_____no_output_____" ] ], [ [ "cmd ='quickquasars --exptime {exptime} -i {ifile} -o {outfile} --zmin {zmin} --zbest 200 --mags --desi-footprint --overwrite \\\n --seed {seed} --downsampling 0.4'.format(exptime=exptime,ifile=ifile,outfile=specfile,zmin=zmin,seed=seed) \n!time $cmd\n\nzbest=Table.read('/project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/zbest-16-0.fits')", "INFO:quickquasars.py:544:main: Load SIMQSO model\nINFO:quickquasars.py:549:main: Load DeCAM and WISE filters for target selection sim.\nifilename /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits\nINFO:quickquasars.py:120:get_healpix_info: healpix=HPXPIXEL=0\nINFO:quickquasars.py:125:get_healpix_info: nside=HPXNSIDE=16\nINFO:quickquasars.py:133:get_healpix_info: hpxnest from HPXNEST = True\nfound 0 16 True\nINFO:quickquasars.py:207:simulate_one_healpix: Read skewers in /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits, random seed = 2866\nINFO:quickquasars.py:211:simulate_one_healpix: Read transmission file /project/projectdirs/desi/mocks/lya_forest/london/v2.0/0/0/transmission-16-0.fits\nINFO:quickquasars.py:220:simulate_one_healpix: Select QSOs in DESI footprint 2384 -> 2384\nINFO:quickquasars.py:325:simulate_one_healpix: Increase wavelength range from 3548:5819 to 3329:5819 to compute magnitudes\nINFO:quickquasars.py:335:simulate_one_healpix: Increase wavelength range from 3329:5819 to 3329:55501 to compute magnitudes\nINFO:quickquasars.py:354:simulate_one_healpix: Simulate 943 QSOs with SIMQSO templates\nINFO:quickquasars.py:360:simulate_one_healpix: Resample to transmission wavelength grid\nINFO:quickquasars.py:384:simulate_one_healpix: Apply transmitted flux fraction\nINFO:quickquasars.py:410:simulate_one_healpix: Compute QSO magnitudes\nINFO:quickquasars.py:433:simulate_one_healpix: Resample to a linear wavelength grid (needed by DESI sim.)\nINFO:quickquasars.py:440:simulate_one_healpix: Simulate DESI observation and write output file\nINFO:quickspectra.py:61:sim_spectra: Starting simulation of 943 spectra\nDEBUG:simexp.py:416:simulate_spectra: loading specsim desi config desi\nDEBUG:simexp.py:420:simulate_spectra: creating specsim desi simulator\nINFO:simexp.py:442:simulate_spectra: MJD not in obsconditions, using DATE-OBS 2009-06-18T12:00:00.000\nDEBUG:simexp.py:446:simulate_spectra: obsconditions SEEING = 1.1\nDEBUG:simexp.py:446:simulate_spectra: obsconditions EXPTIME = 4000.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions AIRMASS = 1.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONFRAC = 0.0\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONALT = -60\nDEBUG:simexp.py:446:simulate_spectra: obsconditions MOONSEP = 180\nDEBUG:simexp.py:683:get_source_types: qso 943 targets\nDEBUG:simexp.py:486:simulate_spectra: running simulation with fastsim fiber loss method\nDEBUG:simexp.py:492:simulate_spectra: source types: 943 qso\nINFO:quickspectra.py:230:sim_spectra: Wrote /project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/spectra-16-0.fits\nINFO:quickquasars.py:466:simulate_one_healpix: Read fibermap\nINFO:quickquasars.py:468:simulate_one_healpix: Writing a zbest file /project/projectdirs/desi/users/alxogm/desi/lya_forest/random_errors_lya/zbest-16-0.fits\n\nreal\t1m16.580s\nuser\t1m5.876s\nsys\t0m10.306s\n" ], [ "plot_zinfo(zbest,ztrue,'random_zbest_vs_ztrue')", "_____no_output_____" ] ], [ [ "### Let us now compare our simple redshift error addition, to the actual distribution obtained 
with redrock. If this notebook has been run several times, you probably already have this computed; if that is the case, comment out the cell that runs redrock and only read the file. ", "_____no_output_____" ] ], [ [ "rrzbestfile=outdir+'zbest_rr-16-0.fits'", "_____no_output_____" ] ], [ [ "#Uncomment this cell if you need to run redrock for the first time. If the zbest file computed with redrock already exists, you just need to read the files to make the plots. \ncmd = 'rrdesi_mpi {} --zbest {}'.format(specfile,rrzbestfile)\nif 'NERSC_HOST' in os.environ:\n    print('Running on a batch node:')\n    print(cmd) \n    print() \n    #srun = 'srun -N 12 -n 384 -c 1 -t 00:01:30 -C haswell --qos interactive'\n    srun = 'srun -N 4 -n 128 -c 1 -t 00:05:00 -C haswell --qos interactive'\n    cmd = '{srun} {cmd}'.format(srun=srun, cmd=cmd)\n!$cmd\n\n", "_____no_output_____" ] ], [ [ "rrzbest=Table.read(rrzbestfile)", "WARNING: hdu= was not specified but multiple tables are present, reading in first available table (hdu=1) [astropy.io.fits.connect]\n" ], [ "plot_zinfo(zbest,ztrue,'random_zbest_vs_ztrue')\nplot_zinfo(rrzbest,ztrue,'rrzbest_vs_ztrue')\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd50f6ddd1149d2db1272ad86e3a594420ecd29
6,636
ipynb
Jupyter Notebook
Section 12/TR2(Lecture 102).ipynb
PacktPublishing/Comprehensive-Guide-to-Artificial-Intelligence-AI-for-All
e5fdfd1c7d516bcc22b70e75f78ed53cbb38ed64
[ "MIT" ]
1
2021-10-01T23:23:54.000Z
2021-10-01T23:23:54.000Z
Section 12/TR2(Lecture 102).ipynb
PacktPublishing/Comprehensive-Guide-to-Artificial-Intelligence-AI-for-All
e5fdfd1c7d516bcc22b70e75f78ed53cbb38ed64
[ "MIT" ]
null
null
null
Section 12/TR2(Lecture 102).ipynb
PacktPublishing/Comprehensive-Guide-to-Artificial-Intelligence-AI-for-All
e5fdfd1c7d516bcc22b70e75f78ed53cbb38ed64
[ "MIT" ]
2
2021-08-09T08:00:44.000Z
2022-02-26T12:23:04.000Z
23.7
98
0.561031
[ [ [ "# Transfer Learning re training the last layers\n\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import GlobalAveragePooling2D\nfrom keras import backend as K\nfrom keras.preprocessing import image\nimport numpy as np\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\n\n# variable initiallization\n\nbatch_size = 128\nnum_classes = 10\nepochs = 12\n\n\n# the data, split between train and test sets\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\n# convert class vectors to binary class matrices\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\n# reduce the data\nx_train_red = x_train[0:100]\nx_test_red = x_test[0:16]\ny_train_red = y_train[0:100]\ny_test_red = y_test[0:16]", "_____no_output_____" ], [ "x_train_red.shape", "_____no_output_____" ], [ "# resize the data set\n\nimport numpy as np\nxtrain1 = [np.resize(x_train_red[i], (200, 200, 3)) for i in range(0, len(x_train_red))] \nx_train_new = np.array(xtrain1)", "_____no_output_____" ], [ "x_train_new.shape", "_____no_output_____" ], [ "# resize the data set\n\nxtest1 = [np.resize(x_test_red[i], (200, 200, 3)) for i in range(0, len(x_test_red))] \nx_test_new = np.array(xtest1)", "_____no_output_____" ], [ "x_test_new.shape", "_____no_output_____" ], [ "# data rescaling \n\nx_train_new = x_train_new.astype('float32')\nx_test_new = x_test_new.astype('float32')\nx_train_new /= 255\nx_test_new /= 255\nprint('x_train shape:', x_train_new.shape)\nprint(x_train_new.shape[0], 'original train samples')\nprint(x_test_new.shape[0], 'original test samples')\n\n", "_____no_output_____" ], [ "x_train_new.shape", "_____no_output_____" ], [ "# Creating bottle neck features\n\nfrom keras.models import Model\n\n# create the base pre-trained model\nmodel = ResNet50(weights='imagenet', include_top=False, input_shape=(200,200,3))\n#Preprocessing the data, so that it can be fed to the pre-trained ResNet50 model. \nresnet_train_input = preprocess_input(x_train_new)\n\n#Creating bottleneck features for the training data\ntrain_features = model.predict(resnet_train_input)\n\nprint(\"Building Features\")", "_____no_output_____" ], [ "resnet_train_input.shape", "_____no_output_____" ], [ "train_features.shape", "_____no_output_____" ], [ "# Compile the model\n\nmodel = Sequential()\nmodel.add(GlobalAveragePooling2D(input_shape=train_features.shape[1:]))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(10, activation='softmax'))\nmodel.summary()\n\n# first: train only the top layers (which were randomly initialized)\n# i.e. 
freeze all convolutional ResNet50 layers\nfor layer in model.layers:\n layer.trainable = False\n\n# compile the model (should be done *after* setting layers to non-trainable)\nmodel.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\nprint(\"compiled successfully\")", "_____no_output_____" ], [ "resnet_train_input.shape", "_____no_output_____" ], [ "#Train the model\n\nmodel.fit(train_features, y_train_red,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_split=0.2)\n", "_____no_output_____" ], [ "# score the model\nscore = model.evaluate(train_features, y_train_red)", "_____no_output_____" ], [ "# check the loss and accuracy\nscore", "_____no_output_____" ], [ "# Predict \nmodel.predict(train_features[1:2])", "_____no_output_____" ], [ "# what was it ?\ny_train_red[1:2]", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd51413174c2b0296065de1cb1988007f7438fa
100,929
ipynb
Jupyter Notebook
1b. Daily Trend.ipynb
lfdversluis/wta-analysis
28384989dc1d27478dca20cddc55f1f2d7a9d8f2
[ "Apache-2.0" ]
1
2020-03-26T16:02:02.000Z
2020-03-26T16:02:02.000Z
1b. Daily Trend.ipynb
lfdversluis/wta-analysis
28384989dc1d27478dca20cddc55f1f2d7a9d8f2
[ "Apache-2.0" ]
1
2020-03-31T02:26:05.000Z
2020-03-31T02:26:05.000Z
1b. Daily Trend.ipynb
lfdversluis/wta-analysis
28384989dc1d27478dca20cddc55f1f2d7a9d8f2
[ "Apache-2.0" ]
null
null
null
433.171674
93,872
0.934598
[ [ [ "from pyspark.sql import SparkSession\nimport os\nfrom plotnine import *\nimport numpy as np\n\nimport pyspark.sql.functions as F", "_____no_output_____" ], [ "traces_location = \"<location to dir with trace directories>\"\nlist_of_workloads = list(map(lambda x: os.path.join(traces_location, x), os.listdir(traces_location)))", "_____no_output_____" ], [ "name_to_name_map = {\n \"askalon_ee\": \"Askalon Old 1\",\n \"askalon_ee2\": \"Askalon Old 2\",\n \"Two_Sigma_pit\": \"Two Sigma 1\",\n \"Two_Sigma_dft\": \"Two Sigma 2\",\n \"LANL_Trinity\": \"LANL 1\",\n \"Google\": \"Google\",\n \"alibaba2018\": \"Alibaba\"\n}", "_____no_output_____" ], [ "spark = (SparkSession.builder\n .master(\"local[4]\")\n .appName(\"Longitudinal Analysis\")\n .config(\"spark.executor.memory\", \"8G\")\n .config(\"spark.driver.memory\", \"2G\")\n .getOrCreate())", "_____no_output_____" ], [ "def do_tasks_exist(workload_location):\n return \"tasks\" in os.listdir(workload_location)\n\ndef read_trace(workload_location):\n return spark.read.parquet(os.path.join(workload_location, \"tasks\", \"schema-1.0\"))", "_____no_output_____" ], [ "# one_day_in_milliseconds = 1000*60*60*24\none_week_in_milliseconds = 1000*60*60*24*7\n\ndef is_trace_long_enough(tasks_df):\n # Exclude traces shorter than a week.\n max_timestamp = tasks_df.agg(F.max(\"ts_submit\")).toPandas().loc[0, \"max(ts_submit)\"]\n# print(tasks_df.count())\n# print(max_timestamp)\n return max_timestamp >= one_week_in_milliseconds", "_____no_output_____" ], [ "one_hour_in_milliseconds = 1000*60*60\nhours_in_a_day = 24\n\ndef get_tasks_per_hour_of_day(tasks_df):\n tasks_per_hour_df = tasks_df.groupby(F.floor(F.col(\"ts_submit\") / F.lit(one_hour_in_milliseconds)).alias(\"hour\")).count()\n tasks_per_hour_of_week = tasks_per_hour_df.groupby((F.col(\"hour\") % hours_in_a_day).alias(\"hour_of_day\")).agg(F.avg(\"count\"))\n return tasks_per_hour_of_week", "_____no_output_____" ], [ "hist_df = None\n\nfor workload in list_of_workloads:\n if str(workload).endswith(\"py\"): continue\n if not do_tasks_exist(workload):\n continue\n \n trace = read_trace(workload)\n \n if not is_trace_long_enough(trace):\n continue\n \n tasks_per_hour_of_week = get_tasks_per_hour_of_day(trace)\n \n workload_name = workload.split(\"/\")[-1]\n \n if workload_name.startswith(\"askalon-new_ee68_\"):\n continue\n \n for start, full_name in name_to_name_map.items():\n if workload_name.startswith(start):\n workload_name = full_name\n break\n \n workload_df = tasks_per_hour_of_week.toPandas()\n workload_df[\"workload\"] = workload_name\n \n if workload_name.startswith(\"Two\"):\n workload_df = workload_df.sort_values(\"hour_of_day\")\n workload_df[\"hour_of_day\"] = np.roll(workload_df[\"hour_of_day\"], -12)\n \n if hist_df is None:\n hist_df = workload_df\n else:\n hist_df = hist_df.append(workload_df)", "_____no_output_____" ], [ "def formatYaxisLabels(xl):\n def format_e(n):\n a = '%E' % n\n return '$10^{' + a.split('E')[1][2:] + '}$'\n return list(map(lambda x: format_e(x), xl))\n\nplt = ggplot(hist_df) +\\\n theme_light(base_size=16) +\\\n theme(legend_title=element_text(size=0, alpha=0),\n legend_box_spacing=0.1,\n legend_box_margin=0,\n legend_margin=0,\n legend_position=(0.51, 0.7),\n legend_direction=\"horizontal\",\n legend_key=element_blank(),\n legend_background=element_rect(fill=(0,0,0,0))) +\\\n guides(color=guide_legend(ncol=3)) +\\\n geom_line(aes(x=\"hour_of_day\", y=\"avg(count)\", group=\"workload\", color=\"workload\")) +\\\n geom_point(aes(x=\"hour_of_day\", 
y=\"avg(count)\", shape=\"workload\", color=\"workload\"), size=3) +\\\n scale_y_log10(labels=formatYaxisLabels) +\\\n xlab(\"Hour of day\") +\\\n ylab(\"Avg. num. tasks per hour\")\n\nplt.save(\"./plots/daily_trend.pdf\")\nplt", "/var/scratch/stalluri/miniconda3/envs/WTA/lib/python3.7/site-packages/plotnine/ggplot.py:706: UserWarning: Saving 6.4 x 4.8 in image.\n from_inches(height, units), units))\n/var/scratch/stalluri/miniconda3/envs/WTA/lib/python3.7/site-packages/plotnine/ggplot.py:707: UserWarning: Filename: ./plots/daily_trend.pdf\n warn('Filename: {}'.format(filename))\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd51d2460477d4c7e002cac79b57655db606e01
7,593
ipynb
Jupyter Notebook
docs/build/.doctrees/nbsphinx/Tutorials/Tutorial-8-Extending-built-in-classes.ipynb
Packer-Lab/packerlabimaging
95b4eb6285b045b0821f9835881000321a5972b7
[ "MIT" ]
null
null
null
docs/build/.doctrees/nbsphinx/Tutorials/Tutorial-8-Extending-built-in-classes.ipynb
Packer-Lab/packerlabimaging
95b4eb6285b045b0821f9835881000321a5972b7
[ "MIT" ]
1
2022-03-09T18:50:08.000Z
2022-03-09T21:42:48.000Z
docs/build/Tutorials/Tutorial-8-Extending-built-in-classes.ipynb
Packer-Lab/packerlabimaging
95b4eb6285b045b0821f9835881000321a5972b7
[ "MIT" ]
null
null
null
38.155779
332
0.554458
[ [ [ "# Tutorial 8 - Extending built in classes", "_____no_output_____" ], [ "This tutorial covers how to extend built-in classes to further customize your use of the package to suit your own unique data processing/anaysis needs.\n\nLet's say there is a particular analysis use-case that is not provided as a built-in in the current package release, by following the object-oriented structure of the package you can quickly create and add methods to extend the functionality of all existing class structures.", "_____no_output_____" ], [ "#### Creating a custom class for Interneuron Gcamp imaging during seizures", "_____no_output_____" ], [ "In the following example, we create a custom `Experiment` class by using the inheritance principles of Python. We create a new `Experiment` class called `InterneuronExp` that extends the built-in `Experiment` class.\n\n\nIn this experiment, Gcamp imaging was performed in Nkx2.1-cre-mCherry mice which label a sub-type of interneurons with mCherry. There were also two experimental phases of imaging: pre-4ap and post-4ap. We require a number of functionalities in this class that are not available in the built-in `Experiment` class structure:\n1) a list of trials in the pre4ap injection phase\n2) a list of trials in the post4ap injection phase\n3) a list of Suite2p ROIs that are also interneurons\n3a) add this list as a `obs` entry into the `anndata` storage of all trials\n\nWe also need to further create a custom `Trial` class to store trial-level data that is not allowed using built-in methods/attributes. This is demonstrated further below.", "_____no_output_____" ] ], [ [ "import packerlabimaging as pli\n\n\nclass InterneuronExp(pli.Experiment):\n def __init__(self, initalization_dict):\n\n super().__init__(**initalization_dict) # call to initialize the super-class (i.e. `pli.Experiment`)\n\n # set pre4ap and post4ap experiments:\n self.pre4ap_trials = []\n for trial in self.trialIDs:\n self.pre4ap_trials.append(trial) if 'pre 4ap' in self.TrialsInformation[trial]['expGroup'] else None\n\n self.post4ap_trials = []\n for trial in self.trialIDs:\n self.post4ap_trials.append(trial) if 'post 4ap' in self.TrialsInformation[trial]['expGroup'] else None", "_____no_output_____" ], [ "# create the initialization dictionary containing information about trials within this experiment\n\nprep = 'PS12'\ndate = '2021-01-25'\n\nExperimentMetainfo = {\n 'dataPath': f'/home/pshah/mnt/qnap/Data/{date}', # todo this seems very vauge, maybe add very specific documentation about what this is supposed to be, or just say tiff path?\n 'analysisSavePath': f'/home/pshah/mnt/qnap/Analysis/{date}/{prep}/',\n \"expID\": prep,\n 'date': date,\n 'comments': f'{prep} - interneuron gcamp imaging + LFP pre- and post-4ap'\n}\n\n# create Experiment using custom class\nexpobj = InterneuronExp(initalization_dict=ExperimentMetainfo)\n\n\n", "_____no_output_____" ], [ "# create initialization dictionary to initialize each trial and add trials to the previously created Experiment\ntrials_list_pre4ap = ['t-001', 't-002', 't-003']\n# todo - add functionality to add longer detailed comments for each trial (e.g. t-001: 30 mins spont, t-002: 30 mins spont + LFP, etc.) 
(other than expGroup)\n\nfor idx, trial in enumerate(trials_list_pre4ap):\n data_path_base = f'/home/pshah/mnt/qnap/Data/{date}'\n TwoPhotonImagingMetainfo = {'date': date,\n 'trial_id': trial,\n 'exp_id': prep,\n 'microscope': 'Bruker 2pPlus',\n 'tiff_path': f'{data_path_base}/{date}_{trial}/{date}_{trial}_Cycle00001_Ch3.tif',\n 'save_dir': expobj.analysisSavePath,\n 'expGroup': \"pre 4ap 2p imaging\",\n 'PaqInfoTrial': {'paq_path': f'{data_path_base}/{date}_{prep}_{trial[2:]}.paq', # path to the .paq files for the selected trials\n 'frame_channel': 'frame_clock'}\n }\n\n trialobj = pli.TwoPhotonImagingTrial(**TwoPhotonImagingMetainfo)\n\n # add each Trial to the overall Experiment using the trialobj\n expobj.add_trial(trialobj=trialobj)\n\n\ntrials_list_post4ap = ['t-006', 't-007', 't-008', 't-009']\nfor idx, trial in enumerate(trials_list_post4ap):\n data_path_base = f'/home/pshah/mnt/qnap/Data/{date}'\n TwoPhotonImagingMetainfo = {'date': date,\n 'trial_id': trial,\n 'exp_id': prep,\n 'microscope': 'Bruker 2pPlus',\n 'tiff_path': f'{data_path_base}/{date}_{trial}/{date}_{trial}_Cycle00001_Ch3.tif',\n 'save_dir': expobj.analysisSavePath,\n 'expGroup': \"post 4ap 2p imaging\",\n 'PaqInfoTrial': {'paq_path': f'{data_path_base}/{date}_{prep}_{trial[2:]}.paq', # path to the .paq files for the selected trials\n 'frame_channel': 'frame_clock'}\n }\n\n trialobj = pli.TwoPhotonImagingTrial(**TwoPhotonImagingMetainfo)\n\n # add each Trial to the overall Experiment using the trialobj\n expobj.add_trial(trialobj=trialobj)\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ] ]
ecd526b0b6067ab1bdb3d6064ba716e63cf50e70
65,123
ipynb
Jupyter Notebook
1. Neural Networks and Deep Learning/Week 2/Logistic Regression as a Neural Network/.ipynb_checkpoints/Logistic Regression with a Neural Network mindset v5-checkpoint.ipynb
adityajn105/Coursera-Deep-Learning-Specialization
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
[ "Unlicense" ]
2
2020-08-21T03:59:01.000Z
2020-09-05T13:13:19.000Z
1. Neural Networks and Deep Learning/Week 2/Logistic Regression as a Neural Network/.ipynb_checkpoints/Logistic Regression with a Neural Network mindset v5-checkpoint.ipynb
adityajn105/Coursera-Deep-Learning-Specialization
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
[ "Unlicense" ]
null
null
null
1. Neural Networks and Deep Learning/Week 2/Logistic Regression as a Neural Network/.ipynb_checkpoints/Logistic Regression with a Neural Network mindset v5-checkpoint.ipynb
adityajn105/Coursera-Deep-Learning-Specialization
26cf7da29b2f1cb32799e045cc9cdfab99ad0757
[ "Unlicense" ]
null
null
null
53.118271
22,274
0.680251
[ [ [ "# Logistic Regression with a Neural Network mindset\n\nWelcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.\n\n**Instructions:**\n- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.\n\n**You will learn to:**\n- Build the general architecture of a learning algorithm, including:\n - Initializing parameters\n - Calculating the cost function and its gradient\n - Using an optimization algorithm (gradient descent) \n- Gather all three functions above into a main model function, in the right order.", "_____no_output_____" ], [ "## 1 - Packages ##\n\nFirst, let's run the cell below to import all the packages that you will need during this assignment. \n- [numpy](https://www.numpy.org/) is the fundamental package for scientific computing with Python.\n- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.\n- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.\n- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport scipy\nfrom PIL import Image\nfrom scipy import ndimage\nfrom lr_utils import load_dataset\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## 2 - Overview of the Problem set ##\n\n**Problem Statement**: You are given a dataset (\"data.h5\") containing:\n - a training set of m_train images labeled as cat (y=1) or non-cat (y=0)\n - a test set of m_test images labeled as cat or non-cat\n - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).\n\nYou will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.\n\nLet's get more familiar with the dataset. Load the data by running the following code.", "_____no_output_____" ] ], [ [ "# Loading the data (cat/non-cat)\ntrain_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()", "_____no_output_____" ] ], [ [ "We added \"_orig\" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).\n\nEach line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. ", "_____no_output_____" ] ], [ [ "# Example of a picture\nindex = 25\nplt.imshow(train_set_x_orig[index])\nprint (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") + \"' picture.\")", "y = [1], it's a 'cat' picture.\n" ] ], [ [ "Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. 
\n\n**Exercise:** Find the values for:\n - m_train (number of training examples)\n - m_test (number of test examples)\n - num_px (= height = width of a training image)\nRemember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.", "_____no_output_____" ] ], [ [ "### START CODE HERE ### (≈ 3 lines of code)\nm_train = len(train_set_x_orig)\nm_test = len(test_set_x_orig)\nnum_px = test_set_x_orig.shape[2]\n### END CODE HERE ###\n\nprint (\"Number of training examples: m_train = \" + str(m_train))\nprint (\"Number of testing examples: m_test = \" + str(m_test))\nprint (\"Height/Width of each image: num_px = \" + str(num_px))\nprint (\"Each image is of size: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\nprint (\"train_set_x shape: \" + str(train_set_x_orig.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x shape: \" + str(test_set_x_orig.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))", "Number of training examples: m_train = 209\nNumber of testing examples: m_test = 50\nHeight/Width of each image: num_px = 64\nEach image is of size: (64, 64, 3)\ntrain_set_x shape: (209, 64, 64, 3)\ntrain_set_y shape: (1, 209)\ntest_set_x shape: (50, 64, 64, 3)\ntest_set_y shape: (1, 50)\n" ] ], [ [ "**Expected Output for m_train, m_test and num_px**: \n<table style=\"width:15%\">\n <tr>\n <td>**m_train**</td>\n <td> 209 </td> \n </tr>\n \n <tr>\n <td>**m_test**</td>\n <td> 50 </td> \n </tr>\n \n <tr>\n <td>**num_px**</td>\n <td> 64 </td> \n </tr>\n \n</table>\n", "_____no_output_____" ], [ "For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. 
There should be m_train (respectively m_test) columns.\n\n**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\\_px $*$ num\\_px $*$ 3, 1).\n\nA trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: \n```python\nX_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X\n```", "_____no_output_____" ] ], [ [ "# Reshape the training and test examples\n\n### START CODE HERE ### (≈ 2 lines of code)\ntrain_set_x_flatten = train_set_x_orig.reshape((train_set_x_orig.shape[0],-1)).T\ntest_set_x_flatten = test_set_x_orig.reshape((test_set_x_orig.shape[0],-1)).T\n### END CODE HERE ###\n\nprint (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\nprint (\"train_set_y shape: \" + str(train_set_y.shape))\nprint (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\nprint (\"test_set_y shape: \" + str(test_set_y.shape))\nprint (\"sanity check after reshaping: \" + str(train_set_x_flatten[0:5,0]))", "train_set_x_flatten shape: (12288, 209)\ntrain_set_y shape: (1, 209)\ntest_set_x_flatten shape: (12288, 50)\ntest_set_y shape: (1, 50)\nsanity check after reshaping: [17 31 56 22 33]\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:35%\">\n <tr>\n <td>**train_set_x_flatten shape**</td>\n <td> (12288, 209)</td> \n </tr>\n <tr>\n <td>**train_set_y shape**</td>\n <td>(1, 209)</td> \n </tr>\n <tr>\n <td>**test_set_x_flatten shape**</td>\n <td>(12288, 50)</td> \n </tr>\n <tr>\n <td>**test_set_y shape**</td>\n <td>(1, 50)</td> \n </tr>\n <tr>\n <td>**sanity check after reshaping**</td>\n <td>[17 31 56 22 33]</td> \n </tr>\n</table>", "_____no_output_____" ], [ "To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.\n\nOne common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).\n\n<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> \n\nLet's standardize our dataset.", "_____no_output_____" ] ], [ [ "train_set_x = train_set_x_flatten/255.\ntest_set_x = test_set_x_flatten/255.", "_____no_output_____" ] ], [ [ "<font color='blue'>\n**What you need to remember:**\n\nCommon steps for pre-processing a new dataset are:\n- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)\n- Reshape the datasets such that each example is now a vector of size (num_px \\* num_px \\* 3, 1)\n- \"Standardize\" the data", "_____no_output_____" ], [ "## 3 - General Architecture of the learning algorithm ##\n\nIt's time to design a simple algorithm to distinguish cat images from non-cat images.\n\nYou will build a Logistic Regression, using a Neural Network mindset. 
The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**\n\n<img src=\"images/LogReg_kiank.png\" style=\"width:650px;height:400px;\">\n\n**Mathematical expression of the algorithm**:\n\nFor one example $x^{(i)}$:\n$$z^{(i)} = w^T x^{(i)} + b \\tag{1}$$\n$$\\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\\tag{2}$$ \n$$ \\mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \\log(a^{(i)}) - (1-y^{(i)} ) \\log(1-a^{(i)})\\tag{3}$$\n\nThe cost is then computed by summing over all training examples:\n$$ J = \\frac{1}{m} \\sum_{i=1}^m \\mathcal{L}(a^{(i)}, y^{(i)})\\tag{6}$$\n\n**Key steps**:\nIn this exercise, you will carry out the following steps: \n - Initialize the parameters of the model\n - Learn the parameters for the model by minimizing the cost \n - Use the learned parameters to make predictions (on the test set)\n - Analyse the results and conclude", "_____no_output_____" ], [ "## 4 - Building the parts of our algorithm ## \n\nThe main steps for building a Neural Network are:\n1. Define the model structure (such as number of input features) \n2. Initialize the model's parameters\n3. Loop:\n - Calculate current loss (forward propagation)\n - Calculate current gradient (backward propagation)\n - Update parameters (gradient descent)\n\nYou often build 1-3 separately and integrate them into one function we call `model()`.\n\n### 4.1 - Helper functions\n\n**Exercise**: Using your code from \"Python Basics\", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \\frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: sigmoid\n\ndef sigmoid(z):\n \"\"\"\n Compute the sigmoid of z\n\n Arguments:\n z -- A scalar or numpy array of any size.\n\n Return:\n s -- sigmoid(z)\n \"\"\"\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1/(1+np.exp(-z))\n ### END CODE HERE ###\n \n return s", "_____no_output_____" ], [ "print (\"sigmoid([0, 2]) = \" + str(sigmoid(np.array([0,2]))))", "sigmoid([0, 2]) = [ 0.5 0.88079708]\n" ] ], [ [ "**Expected Output**: \n\n<table>\n <tr>\n <td>**sigmoid([0, 2])**</td>\n <td> [ 0.5 0.88079708]</td> \n </tr>\n</table>", "_____no_output_____" ], [ "### 4.2 - Initializing parameters\n\n**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. 
If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_with_zeros\n\ndef initialize_with_zeros(dim):\n \"\"\"\n This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.\n \n Argument:\n dim -- size of the w vector we want (or number of parameters in this case)\n \n Returns:\n w -- initialized vector of shape (dim, 1)\n b -- initialized scalar (corresponds to the bias)\n \"\"\"\n \n ### START CODE HERE ### (≈ 1 line of code)\n w = np.zeros((dim,1))\n b = 0\n ### END CODE HERE ###\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int))\n \n return w, b", "_____no_output_____" ], [ "dim = 2\nw, b = initialize_with_zeros(dim)\nprint (\"w = \" + str(w))\nprint (\"b = \" + str(b))", "w = [[ 0.]\n [ 0.]]\nb = 0\n" ] ], [ [ "**Expected Output**: \n\n\n<table style=\"width:15%\">\n <tr>\n <td> ** w ** </td>\n <td> [[ 0.]\n [ 0.]] </td>\n </tr>\n <tr>\n <td> ** b ** </td>\n <td> 0 </td>\n </tr>\n</table>\n\nFor image inputs, w will be of shape (num_px $\\times$ num_px $\\times$ 3, 1).", "_____no_output_____" ], [ "### 4.3 - Forward and Backward propagation\n\nNow that your parameters are initialized, you can do the \"forward\" and \"backward\" propagation steps for learning the parameters.\n\n**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.\n\n**Hints**:\n\nForward Propagation:\n- You get X\n- You compute $A = \\sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$\n- You calculate the cost function: $J = -\\frac{1}{m}\\sum_{i=1}^{m}y^{(i)}\\log(a^{(i)})+(1-y^{(i)})\\log(1-a^{(i)})$\n\nHere are the two formulas you will be using: \n\n$$ \\frac{\\partial J}{\\partial w} = \\frac{1}{m}X(A-Y)^T\\tag{7}$$\n$$ \\frac{\\partial J}{\\partial b} = \\frac{1}{m} \\sum_{i=1}^m (a^{(i)}-y^{(i)})\\tag{8}$$", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: propagate\n\ndef propagate(w, b, X, Y):\n \"\"\"\n Implement the cost function and its gradient for the propagation explained above\n\n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)\n\n Return:\n cost -- negative log-likelihood cost for logistic regression\n dw -- gradient of the loss with respect to w, thus same shape as w\n db -- gradient of the loss with respect to b, thus same shape as b\n \n Tips:\n - Write your code step by step for the propagation. 
np.log(), np.dot()\n \"\"\"\n \n m = X.shape[1]\n \n # FORWARD PROPAGATION (FROM X TO COST)\n ### START CODE HERE ### (≈ 2 lines of code)\n A = sigmoid(np.dot(w.T,X)+b) # compute activation\n cost = -1*np.sum((Y*np.log(A) + (1-Y)*np.log(1-A)))/m # compute cost\n ### END CODE HERE ###\n \n # BACKWARD PROPAGATION (TO FIND GRAD)\n ### START CODE HERE ### (≈ 2 lines of code)\n dw = (np.dot(X,(A-Y).T))/m\n db = np.sum(A-Y)/m\n ### END CODE HERE ###\n\n assert(dw.shape == w.shape)\n assert(db.dtype == float)\n cost = np.squeeze(cost)\n assert(cost.shape == ())\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return grads, cost", "_____no_output_____" ], [ "w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])\ngrads, cost = propagate(w, b, X, Y)\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))\nprint (\"cost = \" + str(cost))", "dw = [[ 0.99845601]\n [ 2.39507239]]\ndb = 0.00145557813678\ncost = 5.80154531939\n" ] ], [ [ "**Expected Output**:\n\n<table style=\"width:50%\">\n <tr>\n <td> ** dw ** </td>\n <td> [[ 0.99845601]\n [ 2.39507239]]</td>\n </tr>\n <tr>\n <td> ** db ** </td>\n <td> 0.00145557813678 </td>\n </tr>\n <tr>\n <td> ** cost ** </td>\n <td> 5.801545319394553 </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "### 4.4 - Optimization\n- You have initialized your parameters.\n- You are also able to compute a cost function and its gradient.\n- Now, you want to update the parameters using gradient descent.\n\n**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\\theta$, the update rule is $ \\theta = \\theta - \\alpha \\text{ } d\\theta$, where $\\alpha$ is the learning rate.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: optimize\n\ndef optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \"\"\"\n This function optimizes w and b by running a gradient descent algorithm\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of shape (num_px * num_px * 3, number of examples)\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)\n num_iterations -- number of iterations of the optimization loop\n learning_rate -- learning rate of the gradient descent update rule\n print_cost -- True to print the loss every 100 steps\n \n Returns:\n params -- dictionary containing the weights w and bias b\n grads -- dictionary containing the gradients of the weights and bias with respect to the cost function\n costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.\n \n Tips:\n You basically need to write down two steps and iterate through them:\n 1) Calculate the cost and the gradient for the current parameters. 
Use propagate().\n 2) Update the parameters using gradient descent rule for w and b.\n \"\"\"\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ### \n grads, cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n \n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n \n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs", "_____no_output_____" ], [ "params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)\n\nprint (\"w = \" + str(params[\"w\"]))\nprint (\"b = \" + str(params[\"b\"]))\nprint (\"dw = \" + str(grads[\"dw\"]))\nprint (\"db = \" + str(grads[\"db\"]))", "w = [[ 0.19033591]\n [ 0.12259159]]\nb = 1.92535983008\ndw = [[ 0.67752042]\n [ 1.41625495]]\ndb = 0.219194504541\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:40%\">\n <tr>\n <td> **w** </td>\n <td>[[ 0.19033591]\n [ 0.12259159]] </td>\n </tr>\n \n <tr>\n <td> **b** </td>\n <td> 1.92535983008 </td>\n </tr>\n <tr>\n <td> **dw** </td>\n <td> [[ 0.67752042]\n [ 1.41625495]] </td>\n </tr>\n <tr>\n <td> **db** </td>\n <td> 0.219194504541 </td>\n </tr>\n\n</table>", "_____no_output_____" ], [ "**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:\n\n1. Calculate $\\hat{Y} = A = \\sigma(w^T X + b)$\n\n2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this). ", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: predict\n\ndef predict(w, b, X):\n '''\n Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)\n \n Arguments:\n w -- weights, a numpy array of size (num_px * num_px * 3, 1)\n b -- bias, a scalar\n X -- data of size (num_px * num_px * 3, number of examples)\n \n Returns:\n Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X\n '''\n \n m = X.shape[1]\n Y_prediction = np.zeros((1,m))\n w = w.reshape(X.shape[0], 1)\n \n # Compute vector \"A\" predicting the probabilities of a cat being present in the picture\n ### START CODE HERE ### (≈ 1 line of code)\n A = sigmoid( np.dot(w.T,X)+b)\n ### END CODE HERE ###\n \n for i in range(A.shape[1]):\n \n # Convert probabilities A[0,i] to actual predictions p[0,i]\n ### START CODE HERE ### (≈ 4 lines of code)\n Y_prediction[0,i] = (A[0,i]>0.5)*1\n ### END CODE HERE ###\n \n assert(Y_prediction.shape == (1, m))\n \n return Y_prediction", "_____no_output_____" ], [ "w = np.array([[0.1124579],[0.23106775]])\nb = -0.3\nX = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])\nprint (\"predictions = \" + str(predict(w, b, X)))", "predictions = [[ 1. 1. 0.]]\n" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:30%\">\n <tr>\n <td>\n **predictions**\n </td>\n <td>\n [[ 1. 1. 
0.]]\n </td> \n </tr>\n\n</table>\n", "_____no_output_____" ], [ "<font color='blue'>\n**What to remember:**\nYou've implemented several functions that:\n- Initialize (w,b)\n- Optimize the loss iteratively to learn parameters (w,b):\n - computing the cost and its gradient \n - updating the parameters using gradient descent\n- Use the learned (w,b) to predict the labels for a given set of examples", "_____no_output_____" ], [ "## 5 - Merge all functions into a model ##\n\nYou will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.\n\n**Exercise:** Implement the model function. Use the following notation:\n - Y_prediction_test for your predictions on the test set\n - Y_prediction_train for your predictions on the train set\n - w, costs, grads for the outputs of optimize()", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: model\n\ndef model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):\n \"\"\"\n Builds the logistic regression model by calling the function you've implemented previously\n \n Arguments:\n X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)\n Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)\n X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)\n Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)\n num_iterations -- hyperparameter representing the number of iterations to optimize the parameters\n learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()\n print_cost -- Set to true to print the cost every 100 iterations\n \n Returns:\n d -- dictionary containing information about the model.\n \"\"\"\n \n ### START CODE HERE ###\n \n # initialize parameters with zeros (≈ 1 line of code)\n w, b = initialize_with_zeros(X_train.shape[0])\n\n # Gradient descent (≈ 1 line of code)\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples (≈ 2 lines of code)\n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n ### END CODE HERE ###\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w, \n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d", "_____no_output_____" ] ], [ [ "Run the following cell to train your model.", "_____no_output_____" ] ], [ [ "d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)", "_____no_output_____" ] ], [ [ "**Expected Output**: \n\n<table style=\"width:40%\"> \n\n <tr>\n <td> **Cost after iteration 0 ** </td> \n <td> 0.693147 </td>\n </tr>\n <tr>\n <td> <center> $\\vdots$ </center> </td> \n <td> <center> $\\vdots$ </center> </td> \n </tr> \n <tr>\n <td> **Train Accuracy** </td> \n <td> 99.04306220095694 % </td>\n </tr>\n\n <tr>\n <td>**Test Accuracy** </td> \n <td> 70.0 % </td>\n </tr>\n</table> \n\n\n", "_____no_output_____" ], [ 
"**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test error is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!\n\nAlso, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.", "_____no_output_____" ] ], [ [ "# Example of a picture that was wrongly classified.\nindex = 1\nplt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))\nprint (\"y = \" + str(test_set_y[0,index]) + \", you predicted that it is a \\\"\" + classes[d[\"Y_prediction_test\"][0,index]].decode(\"utf-8\") + \"\\\" picture.\")", "_____no_output_____" ] ], [ [ "Let's also plot the cost function and the gradients.", "_____no_output_____" ] ], [ [ "# Plot learning curve (with costs)\ncosts = np.squeeze(d['costs'])\nplt.plot(costs)\nplt.ylabel('cost')\nplt.xlabel('iterations (per hundreds)')\nplt.title(\"Learning rate =\" + str(d[\"learning_rate\"]))\nplt.show()", "_____no_output_____" ] ], [ [ "**Interpretation**:\nYou can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. ", "_____no_output_____" ], [ "## 6 - Further analysis (optional/ungraded exercise) ##\n\nCongratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\\alpha$. ", "_____no_output_____" ], [ "#### Choice of learning rate ####\n\n**Reminder**:\nIn order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may \"overshoot\" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.\n\nLet's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. 
", "_____no_output_____" ] ], [ [ "learning_rates = [0.01, 0.001, 0.0001]\nmodels = {}\nfor i in learning_rates:\n print (\"learning rate is: \" + str(i))\n models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)\n print ('\\n' + \"-------------------------------------------------------\" + '\\n')\n\nfor i in learning_rates:\n plt.plot(np.squeeze(models[str(i)][\"costs\"]), label= str(models[str(i)][\"learning_rate\"]))\n\nplt.ylabel('cost')\nplt.xlabel('iterations (hundreds)')\n\nlegend = plt.legend(loc='upper center', shadow=True)\nframe = legend.get_frame()\nframe.set_facecolor('0.90')\nplt.show()", "_____no_output_____" ] ], [ [ "**Interpretation**: \n- Different learning rates give different costs and thus different predictions results.\n- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). \n- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.\n- In deep learning, we usually recommend that you: \n - Choose the learning rate that better minimizes the cost function.\n - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) \n", "_____no_output_____" ], [ "## 7 - Test with your own image (optional/ungraded exercise) ##\n\nCongratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:\n 1. Click on \"File\" in the upper bar of this notebook, then click \"Open\" to go on your Coursera Hub.\n 2. Add your image to this Jupyter Notebook's directory, in the \"images\" folder\n 3. Change your image's name in the following code\n 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!", "_____no_output_____" ] ], [ [ "## START CODE HERE ## (PUT YOUR IMAGE NAME) \nmy_image = \"my_image.jpg\" # change this to the name of your image file \n## END CODE HERE ##\n\n# We preprocess the image to fit your algorithm.\nfname = \"images/\" + my_image\nimage = np.array(ndimage.imread(fname, flatten=False))\nimage = image/255.\nmy_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T\nmy_predicted_image = predict(d[\"w\"], d[\"b\"], my_image)\n\nplt.imshow(image)\nprint(\"y = \" + str(np.squeeze(my_predicted_image)) + \", your algorithm predicts a \\\"\" + classes[int(np.squeeze(my_predicted_image)),].decode(\"utf-8\") + \"\\\" picture.\")", "_____no_output_____" ] ], [ [ "<font color='blue'>\n**What to remember from this assignment:**\n1. Preprocessing the dataset is important.\n2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().\n3. Tuning the learning rate (which is an example of a \"hyperparameter\") can make a big difference to the algorithm. You will see more examples of this later in this course!", "_____no_output_____" ], [ "Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. 
Once you submit, things you can play with include:\n - Play with the learning rate and the number of iterations\n - Try different initialization methods and compare the results\n - Test other preprocessings (center the data, or divide each row by its standard deviation)", "_____no_output_____" ], [ "Bibliography:\n- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/\n- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
ecd54ee6474e0e5f58a006feacd00e573df56d80
434,357
ipynb
Jupyter Notebook
assignment2/TensorFlow.ipynb
miguelfrde/cs231n
c0dc0a505d7a8a6af3439fad33068dfe1428d2e4
[ "MIT" ]
null
null
null
assignment2/TensorFlow.ipynb
miguelfrde/cs231n
c0dc0a505d7a8a6af3439fad33068dfe1428d2e4
[ "MIT" ]
27
2019-12-16T20:19:11.000Z
2022-03-11T23:11:53.000Z
assignment2/TensorFlow.ipynb
miguelfrde/stanford-cs231n
c0dc0a505d7a8a6af3439fad33068dfe1428d2e4
[ "MIT" ]
null
null
null
353.999185
41,478
0.915908
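The CS231n TensorFlow notebook recorded next lists learning rate decay among the things to try for the open-ended CIFAR-10 model but leaves it uncoded. A minimal TF 1.x-style sketch of exponential decay wired into an optimizer; the stand-in loss and the base rate, decay interval, and decay factor are arbitrary example values, not taken from the notebook:

```python
import tensorflow as tf

# Stand-in loss for illustration only; in the notebook this would be mean_loss.
w = tf.get_variable("w_demo", shape=[1])
loss = tf.reduce_mean(tf.square(w))

global_step = tf.Variable(0, trainable=False, name="global_step")

# Multiply the base rate of 1e-3 by 0.95 every 500 steps (staircase schedule).
learning_rate = tf.train.exponential_decay(
    learning_rate=1e-3,
    global_step=global_step,
    decay_steps=500,
    decay_rate=0.95,
    staircase=True)

optimizer = tf.train.AdamOptimizer(learning_rate)
# Passing global_step makes minimize() increment it, which drives the decay.
train_step = optimizer.minimize(loss, global_step=global_step)
```

Because the learning-rate tensor depends on `global_step`, every call to `train_step` shrinks the effective rate on schedule, which is one simple way to apply the annealing heuristic the notebook links to.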
[ [ [ "## What's this TensorFlow business?\n\nYou've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.\n\nFor the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, TensorFlow (or PyTorch, if you switch over to that notebook)\n\n#### What is it?\nTensorFlow is a system for executing computational graphs over Tensor objects, with native support for performing backpropogation for its Variables. In it, we work with Tensors which are n-dimensional arrays analogous to the numpy ndarray.\n\n#### Why?\n\n* Our code will now run on GPUs! Much faster training. Writing your own modules to run on GPUs is beyond the scope of this class, unfortunately.\n* We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand. \n* We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :) \n* We want you to be exposed to the sort of deep learning code you might run into in academia or industry. ", "_____no_output_____" ], [ "## How will I learn TensorFlow?\n\nTensorFlow has many excellent tutorials available, including those from [Google themselves](https://www.tensorflow.org/get_started/get_started).\n\nOtherwise, this notebook will walk you through much of what you need to do to train models in TensorFlow. See the end of the notebook for some links to helpful tutorials if you want to learn more or need further clarification on topics that aren't fully explained here.", "_____no_output_____" ], [ "## Load Datasets\n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np\nimport math\nimport timeit\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "from cs231n.data_utils import load_CIFAR10\n\ndef get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for the two-layer neural net classifier. These are the same steps as\n we used for the SVM, but condensed to a single function. 
\n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n return X_train, y_train, X_val, y_val, X_test, y_test\n\n\n# Invoke the above function to get our data.\nX_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()\nprint('Train data shape: ', X_train.shape)\nprint('Train labels shape: ', y_train.shape)\nprint('Validation data shape: ', X_val.shape)\nprint('Validation labels shape: ', y_val.shape)\nprint('Test data shape: ', X_test.shape)\nprint('Test labels shape: ', y_test.shape)", "Train data shape: (49000, 32, 32, 3)\nTrain labels shape: (49000,)\nValidation data shape: (1000, 32, 32, 3)\nValidation labels shape: (1000,)\nTest data shape: (10000, 32, 32, 3)\nTest labels shape: (10000,)\n" ] ], [ [ "## Example Model\n\n### Some useful utilities\n\n. Remember that our image data is initially N x H x W x C, where:\n* N is the number of datapoints\n* H is the height of each image in pixels\n* W is the height of each image in pixels\n* C is the number of channels (usually 3: R, G, B)\n\nThis is the right way to represent the data when we are doing something like a 2D convolution, which needs spatial understanding of where the pixels are relative to each other. When we input image data into fully connected affine layers, however, we want each data example to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data.", "_____no_output_____" ], [ "### The example model itself\n\nThe first step to training your own model is defining its architecture.\n\nHere's an example of a convolutional neural network defined in TensorFlow -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. \n\nIn that example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Hinge loss function, and the Adam optimizer being used. \n\nMake sure you understand why the parameters of the Linear layer are 5408 and 10.\n\n### TensorFlow Details\nIn TensorFlow, much like in our previous notebooks, we'll first specifically initialize our variables, and then our network model.", "_____no_output_____" ] ], [ [ "# clear old variables\ntf.reset_default_graph()\n\n# setup input (e.g. the data that changes every batch)\n# The first dim is None, and gets sets automatically based on batch size fed in\nX = tf.placeholder(tf.float32, [None, 32, 32, 3])\ny = tf.placeholder(tf.int64, [None])\nis_training = tf.placeholder(tf.bool)\n\ndef simple_model(X,y):\n # define our weights (e.g. init_two_layer_convnet)\n \n # setup variables\n Wconv1 = tf.get_variable(\"Wconv1\", shape=[7, 7, 3, 32])\n bconv1 = tf.get_variable(\"bconv1\", shape=[32])\n W1 = tf.get_variable(\"W1\", shape=[5408, 10])\n b1 = tf.get_variable(\"b1\", shape=[10])\n\n # define our graph (e.g. 
two_layer_convnet)\n a1 = tf.nn.conv2d(X, Wconv1, strides=[1,2,2,1], padding='VALID') + bconv1\n h1 = tf.nn.relu(a1)\n h1_flat = tf.reshape(h1,[-1,5408])\n y_out = tf.matmul(h1_flat,W1) + b1\n return y_out\n\ny_out = simple_model(X,y)\n\n# define our loss\ntotal_loss = tf.losses.hinge_loss(tf.one_hot(y,10),logits=y_out)\nmean_loss = tf.reduce_mean(total_loss)\n\n# define our optimizer\noptimizer = tf.train.AdamOptimizer(5e-4) # select optimizer and set learning rate\ntrain_step = optimizer.minimize(mean_loss)", "_____no_output_____" ] ], [ [ "TensorFlow supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful). \n\n* Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn\n* Optimizers: https://www.tensorflow.org/api_guides/python/train#Optimizers\n* BatchNorm: https://www.tensorflow.org/api_docs/python/tf/contrib/layers/batch_norm", "_____no_output_____" ], [ "### Training the model on one epoch\nWhile we have defined a graph of operations above, in order to execute TensorFlow Graphs, by feeding them input data and computing the results, we first need to create a `tf.Session` object. A session encapsulates the control and state of the TensorFlow runtime. For more information, see the TensorFlow [Getting started](https://www.tensorflow.org/get_started/get_started) guide.\n\nOptionally we can also specify a device context such as `/cpu:0` or `/gpu:0`. For documentation on this behavior see [this TensorFlow guide](https://www.tensorflow.org/tutorials/using_gpu)\n\nYou should see a validation loss of around 0.4 to 0.6 and an accuracy of 0.30 to 0.35 below", "_____no_output_____" ] ], [ [ "def run_model(session, predict, loss_val, Xd, yd,\n epochs=1, batch_size=64, print_every=100,\n training=None, plot_losses=False):\n # have tensorflow compute accuracy\n correct_prediction = tf.equal(tf.argmax(predict,1), y)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n # shuffle indicies\n train_indicies = np.arange(Xd.shape[0])\n np.random.shuffle(train_indicies)\n\n training_now = training is not None\n \n # setting up variables we want to compute (and optimizing)\n # if we have a training function, add that to things we compute\n variables = [mean_loss,correct_prediction,accuracy]\n if training_now:\n variables[-1] = training\n \n # counter \n iter_cnt = 0\n for e in range(epochs):\n # keep track of losses and accuracy\n correct = 0\n losses = []\n # make sure we iterate over the dataset once\n for i in range(int(math.ceil(Xd.shape[0]/batch_size))):\n # generate indicies for the batch\n start_idx = (i*batch_size)%X_train.shape[0]\n idx = train_indicies[start_idx:start_idx+batch_size]\n \n # create a feed dictionary for this batch\n feed_dict = {X: Xd[idx,:],\n y: yd[idx],\n is_training: training_now }\n # get batch size\n actual_batch_size = yd[i:i+batch_size].shape[0]\n \n # have tensorflow compute loss and correct predictions\n # and (if given) perform a training step\n loss, corr, _ = session.run(variables,feed_dict=feed_dict)\n \n # aggregate performance stats\n losses.append(loss*actual_batch_size)\n correct += np.sum(corr)\n \n # print every now and then\n if training_now and (iter_cnt % print_every) == 0:\n print(\"Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}\"\\\n .format(iter_cnt,loss,np.sum(corr)/actual_batch_size))\n iter_cnt 
+= 1\n total_correct = correct/Xd.shape[0]\n total_loss = np.sum(losses)/Xd.shape[0]\n print(\"Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}\"\\\n .format(total_loss,total_correct,e+1))\n if plot_losses:\n plt.plot(losses)\n plt.grid(True)\n plt.title('Epoch {} Loss'.format(e+1))\n plt.xlabel('minibatch number')\n plt.ylabel('minibatch loss')\n plt.show()\n return total_loss,total_correct\n\nwith tf.Session() as sess:\n with tf.device(\"/cpu:0\"): #\"/cpu:0\" or \"/gpu:0\" \n sess.run(tf.global_variables_initializer())\n print('Training')\n run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step,True)\n print('Validation')\n run_model(sess,y_out,mean_loss,X_val,y_val,1,64)", "Training\nIteration 0: with minibatch training loss = 11.8 and accuracy of 0.11\nIteration 100: with minibatch training loss = 1.04 and accuracy of 0.2\nIteration 200: with minibatch training loss = 0.838 and accuracy of 0.31\nIteration 300: with minibatch training loss = 0.766 and accuracy of 0.27\nIteration 400: with minibatch training loss = 0.678 and accuracy of 0.31\nIteration 500: with minibatch training loss = 0.586 and accuracy of 0.3\nIteration 600: with minibatch training loss = 0.573 and accuracy of 0.28\nIteration 700: with minibatch training loss = 0.535 and accuracy of 0.31\nEpoch 1, Overall loss = 0.77 and accuracy of 0.309\n" ] ], [ [ "## Training a specific model\n\nIn this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the TensorFlow documentation and configuring your own model. \n\nUsing the code provided above as guidance, and using the following TensorFlow documentation, specify a model with the following architecture:\n\n* 7x7 Convolutional Layer with 32 filters and stride of 1\n* ReLU Activation Layer\n* Spatial Batch Normalization Layer (trainable parameters, with scale and centering)\n* 2x2 Max Pooling layer with a stride of 2\n* Affine layer with 1024 output units\n* ReLU Activation Layer\n* Affine layer from 1024 input units to 10 outputs\n\n", "_____no_output_____" ] ], [ [ "# clear old variables\ntf.reset_default_graph()\n\n# define our input (e.g. 
the data that changes every batch)\n# The first dim is None, and gets sets automatically based on batch size fed in\nX = tf.placeholder(tf.float32, [None, 32, 32, 3])\ny = tf.placeholder(tf.int64, [None])\nis_training = tf.placeholder(tf.bool)\n\n# define model\ndef complex_model(X,y,is_training):\n Wconv = tf.get_variable('Wconv1', shape=[7, 7, 3, 32])\n bconv = tf.get_variable('bconv1', shape=[32])\n W1 = tf.get_variable('W1', shape=[5408, 1024])\n b1 = tf.get_variable('b1', shape=[1024])\n W2 = tf.get_variable('W2', shape=[1024, 10])\n b2 = tf.get_variable('b2', shape=[10])\n\n # 7x7 Convolutional Layer with 32 filters and stride of 1\n conv = tf.nn.conv2d(X, Wconv, strides=[1, 1, 1, 1], padding='VALID') + bconv\n \n # ReLU Activation Layer\n conv_relu = tf.nn.relu(conv)\n \n # Spatial Batch Normalization Layer (trainable parameters, with scale and centering)\n conv_batch = tf.contrib.layers.batch_norm(\n conv_relu, center=True, trainable=True, scale=True, epsilon=1e-7,\n is_training=is_training)\n \n # 2x2 Max Pooling layer with a stride of 2\n conv_pool = tf.nn.max_pool(\n conv_batch, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n \n pool_flat = tf.reshape(conv_pool, [-1, 5408])\n \n # Affine layer with 1024 output units\n fc_hidden = tf.matmul(pool_flat, W1) + b1\n \n # ReLU Activation Layer\n fc_hidden_relu = tf.nn.relu(fc_hidden)\n \n # Affine layer from 1024 input units to 10 outputs\n y_out = tf.matmul(fc_hidden_relu, W2) + b2\n return y_out\n\ny_out = complex_model(X,y,is_training)", "_____no_output_____" ] ], [ [ "To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes):", "_____no_output_____" ] ], [ [ "# Now we're going to feed a random batch into the model \n# and make sure the output is the right size\nx = np.random.randn(64, 32, 32,3)\nwith tf.Session() as sess:\n with tf.device(\"/cpu:0\"): #\"/cpu:0\" or \"/gpu:0\"\n tf.global_variables_initializer().run()\n\n ans = sess.run(y_out,feed_dict={X:x,is_training:True})\n %timeit sess.run(y_out,feed_dict={X:x,is_training:True})\n print(ans.shape)\n print(np.array_equal(ans.shape, np.array([64, 10])))", "100 loops, best of 3: 3.52 ms per loop\n(64, 10)\nTrue\n" ] ], [ [ "You should see the following from the run above \n\n`(64, 10)`\n\n`True`", "_____no_output_____" ], [ "### GPU!\n\nNow, we're going to try and start the model under the GPU device, the rest of the code stays unchanged and all our variables and operations will be computed using accelerated code paths. However, if there is no GPU, we get a Python exception and have to rebuild our graph. 
On a dual-core CPU, you might see around 50-80ms/batch running the above, while the Google Cloud GPUs (run below) should be around 2-5ms/batch.", "_____no_output_____" ] ], [ [ "try:\n with tf.Session() as sess:\n with tf.device(\"/gpu:0\") as dev: #\"/cpu:0\" or \"/gpu:0\"\n tf.global_variables_initializer().run()\n\n ans = sess.run(y_out,feed_dict={X:x,is_training:True})\n %timeit sess.run(y_out,feed_dict={X:x,is_training:True})\nexcept tf.errors.InvalidArgumentError:\n print(\"no gpu found, please use Google Cloud if you want GPU acceleration\") \n # rebuild the graph\n # trying to start a GPU throws an exception \n # and also trashes the original graph\n tf.reset_default_graph()\n X = tf.placeholder(tf.float32, [None, 32, 32, 3])\n y = tf.placeholder(tf.int64, [None])\n is_training = tf.placeholder(tf.bool)\n y_out = complex_model(X,y,is_training)", "100 loops, best of 3: 3.51 ms per loop\n" ] ], [ [ "You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use GPU devices. However, with TensorFlow, the default device is a GPU if one is available, and a CPU otherwise, so we can skip the device specification from now on.", "_____no_output_____" ], [ "### Train the model.\n\nNow that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the complex_model you created provided above).\n\nMake sure you understand how each TensorFlow function used below corresponds to what you implemented in your custom neural network implementation.\n\nFirst, set up an **RMSprop optimizer** (using a 1e-3 learning rate) and a **cross-entropy loss** function. See the TensorFlow documentation for more information\n* Layers, Activations, Loss functions : https://www.tensorflow.org/api_guides/python/nn\n* Optimizers: https://www.tensorflow.org/api_guides/python/train#Optimizers", "_____no_output_____" ] ], [ [ "# Inputs\n# y_out: is what your model computes\n# y: is your TensorFlow variable with label information\n# Outputs\n# mean_loss: a TensorFlow variable (scalar) with numerical loss\n# optimizer: a TensorFlow optimizer\n# This should be ~3 lines of code!\nmean_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.one_hot(y, 10), logits=y_out))\noptimizer = tf.train.RMSPropOptimizer(learning_rate=1e-3)\n", "_____no_output_____" ], [ "# batch normalization in tensorflow requires this extra dependency\nextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(extra_update_ops):\n train_step = optimizer.minimize(mean_loss)", "_____no_output_____" ] ], [ [ "### Train the model\nBelow we'll create a session and train the model over one epoch. You should see a loss of 1.4 to 1.8 and an accuracy of 0.4 to 0.5. 
There will be some variation due to random seeds and differences in initialization", "_____no_output_____" ] ], [ [ "sess = tf.Session()\n\nsess.run(tf.global_variables_initializer())\nprint('Training')\nrun_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step)", "Training\nIteration 0: with minibatch training loss = 0.956 and accuracy of 0.094\nIteration 100: with minibatch training loss = 0.298 and accuracy of 0.31\nIteration 200: with minibatch training loss = 0.316 and accuracy of 0.28\nIteration 300: with minibatch training loss = 0.23 and accuracy of 0.5\nIteration 400: with minibatch training loss = 0.251 and accuracy of 0.39\nIteration 500: with minibatch training loss = 0.201 and accuracy of 0.55\nIteration 600: with minibatch training loss = 0.227 and accuracy of 0.52\nIteration 700: with minibatch training loss = 0.192 and accuracy of 0.56\nEpoch 1, Overall loss = 0.269 and accuracy of 0.443\n" ] ], [ [ "### Check the accuracy of the model.\n\nLet's see the train and test code in action -- feel free to use these methods when evaluating the models you develop below. You should see a loss of 1.3 to 1.5 with an accuracy of 0.45 to 0.55.", "_____no_output_____" ] ], [ [ "print('Validation')\nrun_model(sess,y_out,mean_loss,X_val,y_val,1,64)", "Validation\nEpoch 1, Overall loss = 0.246 and accuracy of 0.519\n" ] ], [ [ "## Train a _great_ model on CIFAR-10!\n\nNow it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves ** >= 70% accuracy on the validation set** of CIFAR-10. You can use the `run_model` function from above.", "_____no_output_____" ], [ "### Things you should try:\n- **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient\n- **Number of filters**: Above we used 32 filters. Do more or fewer do better?\n- **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?\n- **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?\n- **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include:\n - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]\n - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]\n - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]\n- **Use TensorFlow Scope**: Use TensorFlow scope and/or [tf.layers](https://www.tensorflow.org/api_docs/python/tf/layers) to make it easier to write deeper networks. See [this tutorial](https://www.tensorflow.org/tutorials/layers) for making how to use `tf.layers`. \n- **Use Learning Rate Decay**: [As the notes point out](http://cs231n.github.io/neural-networks-3/#anneal), decaying the learning rate might help the model converge. Feel free to decay every epoch, when loss doesn't change over an entire epoch, or any other heuristic you find appropriate. See the [Tensorflow documentation](https://www.tensorflow.org/versions/master/api_guides/python/train#Decaying_the_learning_rate) for learning rate decay.\n- **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. 
This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).\n- **Regularization**: Add l2 weight regularization, or perhaps use [Dropout as in the TensorFlow MNIST tutorial](https://www.tensorflow.org/get_started/mnist/pros)\n\n### Tips for training\nFor each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind:\n\n- If the parameters are working well, you should see improvement within a few hundred iterations\n- Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.\n- Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.\n- You should use the validation set for hyperparameter search, and we'll save the test set for evaluating your architecture on the best parameters as selected by the validation set.\n\n### Going above and beyond\nIf you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try for extra credit.\n\n- Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta.\n- Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.\n- Model ensembles\n- Data augmentation\n- New Architectures\n - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.\n - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.\n - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)\n\nIf you do decide to implement something extra, clearly describe it in the \"Extra Credit Description\" cell below.\n\n### What we expect\nAt the very least, you should be able to train a ConvNet that gets at **>= 70% accuracy on the validation set**. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches.\n\nYou should use the space below to experiment and train your network. 
The final cell in this notebook should contain the training and validation set accuracies for your final trained network.\n\nHave fun and happy training!", "_____no_output_____" ] ], [ [ "# Feel free to play with this cell\n\nCONVOLUTION_LAYERS = [\n {\n 'conv_kernel_size_1': [3, 3],\n 'conv_filters_1': 32,\n 'conv_kernel_size_2': [3, 3],\n 'conv_filters_2': 32,\n 'pool_size': [2, 2],\n 'pool_strides': [2, 2]\n },\n {\n 'conv_kernel_size_1': [3, 3],\n 'conv_filters_1': 64,\n 'conv_kernel_size_2': [3, 3],\n 'conv_filters_2': 64,\n 'pool_size': [2, 2],\n 'pool_strides': [2, 2]\n }\n]\n\n\ndef conv_layer(x, id,\n conv1_kernel_size,\n conv1_filters, conv2_kernel_size, conv2_filters, pool_size,\n pool_strides, is_training=False):\n with tf.variable_scope('conv_' + str(id)):\n conv1 = tf.layers.conv2d(\n inputs=x,\n filters=conv1_filters,\n kernel_size=conv1_kernel_size,\n padding='same',\n activation=tf.nn.relu)\n conv_batch = tf.contrib.layers.batch_norm(\n conv1, center=True, trainable=True, scale=True, epsilon=1e-7,\n is_training=is_training)\n conv2 = tf.layers.conv2d(\n inputs=conv_batch,\n filters=conv2_filters,\n kernel_size=conv2_kernel_size,\n padding='same',\n activation=tf.nn.relu)\n conv2_batch = tf.contrib.layers.batch_norm(\n conv2, center=True, trainable=True, scale=True, epsilon=1e-7,\n is_training=is_training)\n pool = tf.layers.max_pooling2d(\n inputs=conv2_batch, pool_size=pool_size, strides=pool_strides)\n return pool\n \n\ndef my_model(X, y, is_training=False, fc_size=1024, convolution_layers=CONVOLUTION_LAYERS):\n current_layer = X\n for i, layer_desc in enumerate(CONVOLUTION_LAYERS):\n current_layer = conv_layer(\n current_layer, i, layer_desc['conv_kernel_size_1'], layer_desc['conv_filters_1'],\n layer_desc['conv_kernel_size_2'], layer_desc['conv_filters_2'],\n layer_desc['pool_size'], layer_desc['pool_strides'], is_training=is_training)\n \n flatten_layer = tf.contrib.layers.flatten(inputs=current_layer)\n with tf.variable_scope('fc_1'):\n fc_layer = tf.layers.dense(inputs=flatten_layer, units=fc_size, activation=tf.nn.relu)\n dropout_layer = tf.layers.dropout(\n inputs=fc_layer, rate=0.5, training=is_training)\n\n logits = tf.layers.dense(inputs=dropout_layer, units=10)\n return logits\n\n\ntf.reset_default_graph()\n\nX = tf.placeholder(tf.float32, [None, 32, 32, 3])\ny = tf.placeholder(tf.int64, [None])\nis_training = tf.placeholder(tf.bool)\n\ny_out = my_model(X,y,is_training=is_training)\nmean_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(y, 10), logits=y_out))\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\n\n# batch normalization in tensorflow requires this extra dependency\nextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(extra_update_ops):\n train_step = optimizer.minimize(mean_loss)", "_____no_output_____" ], [ "# Feel free to play with this cell\n# This default code creates a session\n# and trains your model for 10 epochs\n# then prints the validation set accuracy\nsess = tf.Session()\n\nsess.run(tf.global_variables_initializer())\nprint('Training')\nrun_model(sess,y_out,mean_loss,X_train,y_train,10,64,100,train_step,True)\nprint('Validation')\nrun_model(sess,y_out,mean_loss,X_val,y_val,1,64)", "Training\nIteration 0: with minibatch training loss = 5.1 and accuracy of 0.12\nIteration 100: with minibatch training loss = 1.34 and accuracy of 0.55\nIteration 200: with minibatch training loss = 1.53 and accuracy of 0.45\nIteration 300: with minibatch training loss = 1.27 and 
accuracy of 0.53\nIteration 400: with minibatch training loss = 1.59 and accuracy of 0.41\nIteration 500: with minibatch training loss = 1.41 and accuracy of 0.45\nIteration 600: with minibatch training loss = 1.08 and accuracy of 0.59\nIteration 700: with minibatch training loss = 1.14 and accuracy of 0.59\nEpoch 1, Overall loss = 1.59 and accuracy of 0.48\n" ], [ "# Test your model here, and make sure \n# the output of this cell is the accuracy\n# of your best model on the training and val sets\n# We're looking for >= 70% accuracy on Validation\nprint('Training')\nrun_model(sess,y_out,mean_loss,X_train,y_train,1,64)\nprint('Validation')\nrun_model(sess,y_out,mean_loss,X_val,y_val,1,64)", "Training\nEpoch 1, Overall loss = 0.12 and accuracy of 0.962\nValidation\nEpoch 1, Overall loss = 0.757 and accuracy of 0.814\n" ] ], [ [ "### Describe what you did here\nIn this cell you should also write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network", "_____no_output_____" ], [ "Simple model with the following architecture:\n\n[conv-relu-batch-conv-relu-batch-pool]x2 -> [affine]x2 -> [softmax]", "_____no_output_____" ], [ "### Test Set - Do this only once\nNow that we've gotten a result that we're happy with, we test our final model on the test set. This would be the score we would achieve on a competition. Think about how this compares to your validation set accuracy.", "_____no_output_____" ] ], [ [ "print('Test')\nrun_model(sess,y_out,mean_loss,X_test,y_test,1,64)", "Test\nEpoch 1, Overall loss = 0.255 and accuracy of 0.495\n" ] ], [ [ "## Going further with TensorFlow\n\nThe next assignment will make heavy use of TensorFlow. You might also find it useful for your projects. \n", "_____no_output_____" ], [ "# Extra Credit Description\nIf you implement any additional features for extra credit, clearly describe them here with pointers to any code in this or other files if applicable.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ecd54f47d1f29a84cd50adb472f5acac170fcd06
682,874
ipynb
Jupyter Notebook
notebooks/5.1-duc-eval-unet-on-COVID-19.ipynb
minhduc0711/lung-segmentation
9cceea4a2120d731ec1b51ffbf8a965502f3088a
[ "FTL" ]
null
null
null
notebooks/5.1-duc-eval-unet-on-COVID-19.ipynb
minhduc0711/lung-segmentation
9cceea4a2120d731ec1b51ffbf8a965502f3088a
[ "FTL" ]
null
null
null
notebooks/5.1-duc-eval-unet-on-COVID-19.ipynb
minhduc0711/lung-segmentation
9cceea4a2120d731ec1b51ffbf8a965502f3088a
[ "FTL" ]
null
null
null
1,996.707602
359,776
0.960101
[ [ [ "%load_ext autoreload\n%autoreload 2\n%cd ..", "/storage/ducpm/lung-segmentation\n" ], [ "import time\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, Subset, random_split\nimport pytorch_lightning as pl\nfrom torchsummary import summary\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nfrom torchvision import transforms\n\nfrom src.data import PlethoraDataModule, Covid19DataModule, Covid19Dataset\nfrom src.data.preprocess import *\nfrom src.models.unet import UNet\nfrom src.visualization import plot_batch, plot_true_vs_pred\nfrom src.metrics import dice_coeff_vectorized", "_____no_output_____" ], [ "data_module_args = { \n \"batch_size\": 16,\n \"img_size\": 512,\n \"clip_low\": -1000,\n \"clip_high\": 1000,\n \"pin_memory\": True,\n \"num_workers\": 4\n} dm = Covid19DataModule(**data_module_args) \ndm.setup()\nprint(\"No. test samples:\", len(dm.test_ds))\ntest_loader = dm.test_dataloader()", "No. test samples: 3520\n" ], [ "# ignore normalized ct scans\nct_ids = os.listdir(\"data/raw/COVID-19-CT-Seg_20cases/ct_scans/\")\nct_ids = sorted([os.path.basename(ct_id).split(\".\")[0] for ct_id in ct_ids])\nct_ids = [ct_id for ct_id in ct_ids if ct_id.find(\"radio\") == -1]\n\nds = Covid19Dataset(ct_dir=\"data/raw/COVID-19-CT-Seg_20cases/ct_scans/\",\n mask_dir=\"data/raw/COVID-19-CT-Seg_20cases/lung_masks/\",\n ct_ids=ct_ids,\n transform=dm.transform)\ntest_loader = DataLoader(ds, batch_size=16, pin_memory=True, shuffle=False)\nlen(ds)", "_____no_output_____" ], [ "ds.metadata[\"ct_id\"].unique()", "_____no_output_____" ], [ "sample = ds[36]\nimg, mask = sample['img'], sample['mask']\nplt.imshow(img.squeeze(), cmap='gray')", "_____no_output_____" ], [ "device = \"cuda:0\"\nfrom src.models.unet import UNet\nnet = UNet.load_from_checkpoint(\"logs/unet-plethora-512/version_0/ckpts/epoch=8-dice_coeff_val=0.942.ckpt\")\nnet.to(device).eval();", "_____no_output_____" ], [ "# sanity check model predictions\nit = iter(test_loader)\n#for _ in range(8):\n# batch = next(it)\n# batch['img'] = batch['img'][:4]\n# batch['mask'] = batch['mask'][:4]\nwith torch.no_grad():\n logits = net(batch['img'].to(device))\n pred_masks = torch.argmax(logits, dim=1)", "_____no_output_____" ], [ "plot_true_vs_pred(batch['img'], \n batch['mask'], \n pred_masks.cpu(), mask_alpha=0.3)", "_____no_output_____" ], [ "dice_scores = []\n#pbar = tqdm(dm.test_dataloader())\npbar = tqdm(test_loader)\n# evaluate on test set\nfor batch in pbar:\n X, y = batch[\"img\"].to(device), batch[\"mask\"].to(device)\n with torch.no_grad():\n logits = net(X.to(device))\n pred_masks = torch.argmax(logits, dim=1)\n batch_dsc = dice_coeff_vectorized(pred_masks, y, reduce_fn=None)\n dice_scores.append(batch_dsc)\n pbar.set_description(f\"dsc={batch_dsc.mean().item():.3f}\")\ndice_scores = torch.cat(dice_scores)", "dsc=0.000: 100%|██████████| 162/162 [07:29<00:00, 2.78s/it]\n" ], [ "print(\"dsc mean:\", dice_scores.mean().item())\n\nworst_idxs = torch.argsort(dice_scores, descending=False).cpu()\nworst_idxs = worst_idxs[:500].numpy()\nworst_idxs = np.random.choice(worst_idxs, size=10, replace=False)\ndice_scores[worst_idxs]", "dsc mean: 0.89993816614151\n" ], [ "plethora_dm = PlethoraDataModule(**data_module_args)\nplethora_dm.setup()\nplethora_ds = plethora_dm.test_dataloader()", "_____no_output_____" ], [ "it = iter(plethora_ds)\nfor i in range(9):\n batch = next(it)\nplot_batch(batch['img'][:4], batch['mask'][:4], \n nrows=2, 
ncols=2)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd552711a36329ac0b05ac3f1bcfcff37d3951b
137,704
ipynb
Jupyter Notebook
Assignment4- QuickSort.ipynb
bblank70/MSDS432
f4cc8f42d1dfef8e5c42e92b9e356b43c2584052
[ "MIT" ]
1
2021-04-28T04:35:21.000Z
2021-04-28T04:35:21.000Z
Assignment4- QuickSort.ipynb
bblank70/MSDS432
f4cc8f42d1dfef8e5c42e92b9e356b43c2584052
[ "MIT" ]
null
null
null
Assignment4- QuickSort.ipynb
bblank70/MSDS432
f4cc8f42d1dfef8e5c42e92b9e356b43c2584052
[ "MIT" ]
null
null
null
278.189899
47,505
0.698404
[ [ [ "# Assignment 3: Implementation a Recursive Algorightm\n\n\nIn this Mini Programming Assignment, we will explore the differences between iteration and recursion. We will begin with the base code for recursion as described in Chapter 3 of Grokking Algorithms (Bhargava 2016).\n", "_____no_output_____" ], [ "## Deliverables:\n\nWe will again generate random data for this assignment. \n\n 1) Set up five lists of randomly generated strings of characters (i.e. a-z or A-Z e.g. abcdefkjklkjlkjlkjkljlkjlkjlkjkljasdfgtredghjkiuyt). \n Each string should be 50 characters long. The first list should have 200 strings (each string should be 50 characters long), \n second should have 400 strings (again each string is 50 characters), third 600 (same length strings i.e. 50 characters), \n fourth 800 (string length continues to be 50 characters), and \n last/fifth 1000 strings (yes 50 characters in each string). \n 2) You may use the code we used in previous homework assignments. Make sure the list is unsorted and does not contain any duplicates.\n 3) Use the textbook implementation of quicksort to sort the data, being sure to capture the amount of time it takes for each list. \n 4) Now use at least two other sorting algorithms on the same data set (e.g. merge sort, bubble sort, insertion sort, etc). \n Make sure that you code the sorting algorithm and you do not use a builtin function. By this I mean you may not use np.sort() or sorted(), etc.\n You must code the algorithm in order to compare the complexity of each. Here is a good resource https://www.geeksforgeeks.org/sorting-algorithms/\n \n Capture the computation time for each list using each sorting algorithm that you have used. \n\n 5) Create a table containing each algorithm and the timings for each list. Provide a graph showing how each algorithm scales with size of list (also compare the algorithms themselves).\n Discuss your findings with explanations for what you observe. \n\n### Prepare an executive summary of your results, referring to the table and figures you have generated. Explain how your results relate to big O notation. Describe your results in language that management can understand. This summary should be included as text paragraphs in the Jupyter notebook. Explain how the algorithm works and why it is a useful to data engineers.", "_____no_output_____" ], [ "# A. 
Setup: Library imports, Function construction and Array generation", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nimport seaborn as sns\nimport time\n\nimport random\nimport string\n\n\nRANDOM_SEED = 8 #sets random seed", "_____no_output_____" ], [ "\n\ndef random_string(str_length, num_strings):\n str_list = [] #instantiates an empty list to hold the strings\n for i in range(0,num_strings): #loop to generate the specified number of strings\n str_list.append(''.join(random.choice(string.ascii_lowercase) for m in range(str_length))) #generates a string of the defined character length \n return str_list #returns the string list\n\n\ndef QuickSort(arr):\n if len(arr) <2:\n return arr\n else:\n pivot = arr[0] #assigns the pivot as the first item of the array\n less = [i for i in arr[1:] if i <= pivot] #collects items less than the pivot value in less\n greater = [i for i in arr[1:] if i >pivot] #collects items greater than pivot value in greater\n\n return QuickSort(less) + [pivot] + QuickSort(greater) #recursively calls quicksort on items less than pivot and greater than pivot.\n\n\ndef MergeSort(arr):\n if len(arr) > 1:\n \n mid = len(arr)//2 # gets middle\n Left = arr[:mid] #splits elements left of middle\n Right = arr[mid:] #splits elements right of middle\n\n MergeSort(Left) #recursive call on left \n MergeSort(Right) #recursive call on right\n #set all indicies to 0 \n i=0\n k=0\n j=0\n #below checks the values for if elements are sorted, if unsorted: swap. Merge to the original list \n while i < len(Left) and j < len(Right):\n if Left[i] < Right[j]:\n arr[k] = Left[i] #makes k index of arr left[i] if it's less than Right[j]\n i += 1 #increments i (the left index)\n \n else:\n arr[k] = Right[j] #if right value is lss than left, makes arr[k] the value of right and increments the right index\n j += 1 #increments j\n k += 1 #increments the arr index\n \n while i < len(Left): #checks to see if reamaining elements in left (less than mid), if so adds to arr at k index and increments i and k\n arr[k] = Left[i]\n i += 1 #increments i\n k += 1 #increments k\n \n while j < len(Right): #checks to see if remaining elements in right (greater than mid), if so adds to arr at k index and increments j and k.\n arr[k] = Right[j]\n j += 1 #increments j\n k += 1 #increments k\n \n return arr\n\ndef BubbleSort(arr):\n [arr.append(arr.pop(0) if i == len(arr) - 1 or arr[0] < arr[1] else arr.pop(1)) for j in range(0, len(arr)) for i in range(0, len(arr))]\n #this will bubblesort in place. 
The comprehension adds the lower of the first two indicies to the end of the list iteratively over the length of the original array until a sorted list is achieved\n return arr\n\n\n\ndef Container(arr, fun):\n objects = [] #instantiates an empty list to collect the returns\n times = [] #instantiates an empty list to collect times for each computation\n\n start= time.perf_counter() #collects the start time\n obj = fun(arr) # applies the function to the arr object\n end = time.perf_counter() # collects end time\n duration = (end-start)* 1E3 #converts to milliseconds\n objects.append(obj)# adds the returns of the functions to the objects list\n times.append(duration) # adds the duration for computation to list\n return objects, duration\n", "_____no_output_____" ], [ " \nstr200 = random_string(str_length=50, num_strings=200)\nstr200_copy = str200[:]\nstr400 = random_string(str_length=50, num_strings=400)\nstr400_copy = str400[:]\nstr600 = random_string(str_length=50, num_strings=600)\nstr600_copy = str600[:]\nstr800 = random_string(str_length=50, num_strings=800)\nstr800_copy = str800[:]\nstr1000 = random_string(str_length=50, num_strings=1000)\nstr1000_copy = str1000[:]", "_____no_output_____" ] ], [ [ "# B. Sorting\n", "_____no_output_____" ] ], [ [ "str200_bubble = Container(str200, BubbleSort)\nstr200 = str200_copy[:]\nstr200_quick = Container(str200, QuickSort)\nstr200 = str200_copy[:]\nstr200_merge = Container(str200, MergeSort)\n\n\nstr200_bubble[1], str200_quick[1], str200_merge[1]\n\n", "_____no_output_____" ], [ "str400_bubble = Container(str400, BubbleSort)\nstr400 = str400_copy[:]\nstr400_quick = Container(str400, QuickSort)\nstr400 = str400_copy[:]\nstr400_merge = Container(str400, MergeSort)\n\n\nstr400_bubble[1], str400_quick[1], str400_merge[1]\n", "_____no_output_____" ], [ "str600_bubble = Container(str600, BubbleSort)\nstr600 = str600_copy[:]\nstr600_quick = Container(str600, QuickSort)\nstr600 = str600_copy[:]\nstr600_merge = Container(str600, MergeSort)\n\n\nstr600_bubble[1], str600_quick[1], str600_merge[1]", "_____no_output_____" ], [ "str800_bubble = Container(str800, BubbleSort)\nstr800 = str800_copy[:]\nstr800_quick = Container(str800, QuickSort)\nstr800 = str800_copy[:]\nstr800_merge = Container(str800, MergeSort)\n\n\nstr800_bubble[1], str800_quick[1], str800_merge[1]", "_____no_output_____" ], [ "str1000_bubble = Container(str1000, BubbleSort)\nstr1000 = str1000_copy[:]\nstr1000_quick = Container(str1000, QuickSort)\nstr1000 = str1000_copy[:]\nstr1000_merge = Container(str1000, MergeSort)\n\n\nstr1000_bubble[1], str1000_quick[1], str1000_merge[1]", "_____no_output_____" ] ], [ [ "# C. 
Summary", "_____no_output_____" ] ], [ [ "Summary = {\n 'NumberOfStrings': [200, 400, 600, 800, 1000],\n 'BubbleSort': [str200_bubble[1], str400_bubble[1], str600_bubble[1], str800_bubble[1], str1000_bubble[1]],\n 'MergeSort': [str200_merge[1], str400_merge[1] , str600_merge[1], str800_merge[1], str1000_merge[1]],\n 'QuickSort': [str200_quick[1], str400_quick[1], str600_quick[1], str800_quick[1], str1000_quick[1]]\n }\n\ndf = pd.DataFrame.from_dict(Summary)\n", "_____no_output_____" ] ], [ [ "## Table 1: Times for each algorithm given the length of the starting list", "_____no_output_____" ] ], [ [ "df", "_____no_output_____" ], [ "long_df = df.melt(id_vars=['NumberOfStrings'],\n value_vars=['BubbleSort', 'MergeSort', 'QuickSort'],var_name='Algo', value_name='Time(ms)') ", "_____no_output_____" ] ], [ [ "## Figure 1: Sorth Algorithm Time Complexity", "_____no_output_____" ] ], [ [ "sns.scatterplot(data = long_df, x='NumberOfStrings', hue='Algo', y='Time(ms)', s=100)\n", "_____no_output_____" ] ], [ [ "## Figure 2: Merge and Quick Sort time complexity", "_____no_output_____" ] ], [ [ "MergeQuick_df = long_df[long_df.Algo != 'BubbleSort']\nsns.scatterplot(data = MergeQuick_df, x='NumberOfStrings', hue='Algo', y='Time(ms)', s=100)", "_____no_output_____" ] ], [ [ "# Discussion\n\nThree sorting algorithms were tested for their time complexity in sorting lists of varying sizes of string elements. Each string element in the list was randomly populated with 50 alphabetic lower case characters. The number of elements within the list was varied. Five lists containing 200, 400, 600, 800, and 1000 strings were sorted via BubbleSort, MergeSort, and QuickSort. The times (given in milliseconds) required to perform the sort are collected and displayed in Table 1. By far, the most inefficient sorting algorithm demonstrated here is the bubble sort whose complexity is shown graphically (figure 1) to grow at n\\*n or O(n^2) rate. This makes sense for bubble sort as it compares n elements amongst n elements. \n\nAlternatively, the other two methodologies utilize a divide and conquer strategy. The list of strings when using QuickSort are divided into two arrays (greater and less) which contain values which are greater or less than a pivot value. In MergeSort a similar strategy is achieved by dividing the list into two arrays (left and right) which are left and right respectivly from the center element of the list. In both of these arrays recursion is used as the QuickSort and MergeSort functions are called on the subarrays. The result of this divide and conquer strategy is a complexity of n*logn or O(n*logn) in big O notation. A direct comparision of the times required for sorting the lists with these two methodologies are shown in Figure 2. \n\nIn rare instances QuickSort may also dramatically underperform as the pivot element is always selected as the first item of the array (or subarray). If an array contained a list which was sorted largest to smallest already, this method could also have very high complexity as you would not divide the list recursively for an array of n size (this would also be n\\*n complexity O(n^2)). It is interesting the QuickSort seems to perform slightly better than MergeSort, but both are quite efficient. Because of the splitting methodology employed by the MergeSort, there lacks a risk of any deviation from the O(n*logn) complexity. The begining array and subarrays are always split in half size-wise. 
It's therefore recommended that the MergeSort method be used, as its time complexity remains O(n*logn) regardless of how the input is ordered. ", "_____no_output_____" ], [ "# ------------------------ END ------------------------\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
ecd57ed73b81e14bf86250ef05345263769b001c
1,186
ipynb
Jupyter Notebook
examples/linalg.ipynb
AtsushiSakai/SciPy.jl
073706533f68989ccd761d813cd35593ea7c2a50
[ "MIT" ]
12
2020-06-06T05:11:38.000Z
2022-03-29T02:17:44.000Z
examples/linalg.ipynb
AtsushiSakai/SciPy.jl
073706533f68989ccd761d813cd35593ea7c2a50
[ "MIT" ]
26
2020-05-30T13:45:25.000Z
2021-05-01T05:15:36.000Z
examples/linalg.ipynb
AtsushiSakai/SciPy.jl
073706533f68989ccd761d813cd35593ea7c2a50
[ "MIT" ]
6
2020-06-18T09:37:38.000Z
2022-01-03T02:47:23.000Z
17.969697
91
0.495784
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
ecd584b2e82aae653809172554c2fb4305db18b6
268,809
ipynb
Jupyter Notebook
4_Graph_Pipeline.ipynb
aditya-mengani/network_analysis_cbase_p1_ml
fd06d54053f5cd2ca26f813d63cfe3eafd984e53
[ "MIT" ]
null
null
null
4_Graph_Pipeline.ipynb
aditya-mengani/network_analysis_cbase_p1_ml
fd06d54053f5cd2ca26f813d63cfe3eafd984e53
[ "MIT" ]
null
null
null
4_Graph_Pipeline.ipynb
aditya-mengani/network_analysis_cbase_p1_ml
fd06d54053f5cd2ca26f813d63cfe3eafd984e53
[ "MIT" ]
null
null
null
35.225921
327
0.512066
[ [ [ "# **This notebook's process**\n\n1. Load in Crunchbase dataframes(4 merged CSVs created in `1_SS_EDA.ipynb`)\n - Organizations: `files/output/organizations_merged.csv`\n - Jobs: `files/output/p1_jobs.csv`\n - Investments: `files/output/p1_investments.csv`\n - Partner investments: `files/output/p1_investments_partner.csv`\n2. Select date and filter the dataframes by date\n3. Save filtered dataframes as separate CSVs, and then load in as SFrames.\n - Crunchbase network: `files/output/graph_temp/cb/{}_df.csv`\n - Pledge 1% network: `files/output/graph_temp/p1/{}_df.csv`\n - Model network: `files/output/graph_temp/model/{}_df.csv`\n - Not Pledge 1% network: `files/output/graph_temp/np1/{}_df.csv`\n4. Load SFrames into graph and remove duplicate edges. Produce 8 graphs based on # of edges allowed & direction.\n5. Reduce size of dataset by limiting degrees of freedom from Pledge 1% companies, and save the vertices list for a few different network sizes\n6. Produce 100 samples of the Crunchbase graphs and save to CSV.\n - 5 Degrees from Pledge 1% Companies: `Model_DF_D5`\n - Baseline: `files/output/Model_DF_D5/B/{}.csv`\n - Baseline Reduced: `files/output/Model_DF_D5/BR/{}.csv`\n - Graph & Baseline: `files/output/Model_DF_D5/GB/{}.csv`\n - Graph & Baseline Reduced: `files/output/Model_DF_D5/GBR/{}.csv`\n - Graph: `files/output/Model_DF_D5/G/{}.csv`\n - 4 Degrees from Pledge 1% Companies: `Model_DF_D4`\n - Baseline: `files/output/Model_DF_D4/B/{}.csv`\n - Baseline Reduced: `files/output/Model_DF_D4/BR/{}.csv`\n - Graph & Baseline: `files/output/Model_DF_D4/GB/{}.csv`\n - Graph & Baseline Reduced: `files/output/Model_DF_D4/GBR/{}.csv`\n - Graph: `files/output/Model_DF_D4/G/{}.csv`\n\n## **Model**\n`p1_tag` ~ `rank` + `total_funding_usd` + `age_yr` + `employee_count` (ordinal) + `country` (nominal, 112 indicator columns) + `category_groups` (nominal, 46 indicator columns) + ((GRAPH FEATURES))", "_____no_output_____" ] ], [ [ "'''Importing basic data analysis packages'''\nimport numpy as np\nimport pandas as pd\nimport csv\nimport warnings\nimport os\nimport time\nimport math\nfrom functools import reduce\nfrom datetime import datetime\nwarnings.filterwarnings('ignore')\n\n'''Graph'''\nimport networkx as nx\nfrom pyvis.network import Network\nimport turicreate\nfrom turicreate import pagerank, kcore, degree_counting, shortest_path, connected_components, triangle_counting\nfrom turicreate import SFrame, SGraph, SArray, load_sgraph, aggregate \n\n'''Plotting packages'''\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style='white', font_scale=1.3)\n\ndef reduce_mem_usage(df, verbose=True): \n numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']\n start_mem = df.memory_usage().sum() / 1024**2 \n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == 'int':\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64) \n else:\n if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:\n df[col] = df[col].astype(np.float16)\n elif c_min > np.finfo(np.float32).min and c_max < 
np.finfo(np.float32).max:\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64) \n end_mem = df.memory_usage().sum() / 1024**2\n if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))\n return df\n\ndef network_by_date(date, df_input, jobs_input, invest_input, invest_prtnr_input, model_uuids=[], skip_not_p1=True):\n '''\n This function filters down Crunchbase dataframes by date \n to ensure that the companies/people/investments being used in modeling exist at a given time.\n\n INPUT:\n - `date`: string w/ format 'YEAR-MO-DY' (e.g. '2020-09-08')\n - `df`: pandas dataframe of Crunchbase organizationss with necessary column fields:\n * `p1_date`, `founded_on`, `closed_on`\n - `jobs`: pandas dataframe of Crunchbase jobss with necessary column fields:\n * `p1_date`, `started_on`, `ended_on`\n - `invest`: pandas dataframe of Crunchbase investmentss with necessary column fields:\n * `p1_date`, `announced_on`\n - `invest_prtnr`: pandas dataframe of Crunchbase investments with necessary column fields:\n * `p1_date`, `announced_on`\n - `model_uuids`: list that contains the uuids of organizations that are used to construct the model graph\n - `skip_no_p1`: Boolean that defaults to excluding the opposite of the Pledge 1% neighborhood. Likely will delete option altogether later.\n \n OUTPUT:\n - List of dataframe lists, 2 lists of length 12: \n * [Crunchbase neighborhood dataframes], [Pledge 1% neighborhood dataframes]\n OR\n [Crunchbase neighborhood dataframes], [Model neighborhood dataframes]\n - Each dataframe list contains 12 dataframes that will be saved & loaded as SFrames in the next processing step.\n 0. Companies\n 1. Investors\n 2. Investments\n 3. Partner investments\n 4. Current Jobs\n 5. Former jobs\n 6. Former affiliated's new jobs\n 7. Partner investor's affiliation (if not in jobs dataframes)\n 8. Partner investor's coworkers at the investing firm\n 9. Partner investor's coworkers' partner investments\n 10. Current affiliated's old jobs\n 11. 
Organization nodes from edges in 2,3,6,7,9,10 if not already in 0 or 1\n '''\n # Soft copy of dataframes\n df = df_input.copy()\n jobs = jobs_input.copy()\n invest = invest_input.copy()\n invest_prtnr = invest_prtnr_input.copy()\n \n #*******************************************************************************************************\n # DATE PROCESSING\n \n # Convert date columns to datetime\n df['p1_date'] = pd.to_datetime(df['p1_date'], errors='coerce')\n df['founded_on'] = pd.to_datetime(df['founded_on'], errors='coerce')\n df['closed_on'] = pd.to_datetime(df['closed_on'], errors='coerce')\n jobs['p1_date'] = pd.to_datetime(jobs['p1_date'], errors='coerce')\n jobs['started_on'] = pd.to_datetime(jobs['started_on'], errors='coerce')\n jobs['ended_on'] = pd.to_datetime(jobs['ended_on'], errors='coerce')\n invest['p1_date'] = pd.to_datetime(invest['p1_date'], errors='coerce')\n invest['announced_on'] = pd.to_datetime(invest['announced_on'], errors='coerce')\n invest_prtnr['p1_date'] = pd.to_datetime(invest_prtnr['p1_date'], errors='coerce')\n invest_prtnr['announced_on'] = pd.to_datetime(invest_prtnr['announced_on'], errors='coerce')\n \n # Convert input date to datetime object\n date = pd.Timestamp(date)\n print('\\nAS OF {}:\\n'.format(date.strftime('%B %d, %Y').upper()))\n \n #*******************************************************************************************************\n # Create new row for tagging model companies\n df['add_to_model'] = 0\n df['add_to_model'][df['uuid'].isin(model_uuids)] = 1\n jobs['add_to_model'] = 0\n jobs['add_to_model'][jobs['org_uuid'].isin(model_uuids)] = 1\n invest['add_to_model'] = 0\n invest['add_to_model'][invest['org_uuid'].isin(model_uuids)] = 1\n invest_prtnr['add_to_model'] = 0\n invest_prtnr['add_to_model'][invest_prtnr['org_uuid'].isin(model_uuids)] = 1\n \n #*******************************************************************************************************\n # COMPANY FILTER\n # Crunchbase company must be founded after DATE and closed before DATE (or DATE == NaT)\n CB_companies = df[(df['founded_on']<=date) & \n ((df['closed_on']>date) | (pd.isnull(df['closed_on']))) & \n (df['primary_role']=='company')].reset_index(drop=True)\n \n #*******************************************************************************************************\n # INVESTOR FILTER:\n # Crunchbase investor must be founded AFTER date and closed BEFORE date (or date == NaT)\n CB_investors = df[(df['founded_on']<=date) & \n ((df['closed_on']>date) | (pd.isnull(df['closed_on']))) & \n (df['primary_role']=='investor')].reset_index(drop=True)\n \n #*******************************************************************************************************\n # INVESTMENT FILTER\n # Crunchbase investment must have taken place BEFORE date\n CB_investments = invest[(invest['announced_on']<=date) & \n (invest['investor_type']=='organization')].reset_index(drop=True)\n \n #*******************************************************************************************************\n # PARTNER INVESTMENT FILTER\n # Crunchbase partner investment must have taken place BEFORE date\n CB_investment_partners = invest_prtnr[invest_prtnr['announced_on']<=date].reset_index(drop=True)\n \n #*******************************************************************************************************\n # CURRENT JOB FILTER\n # Crunchbase job must have started BEFORE date and ended AFTER date (or date == NaT)\n CB_jobs = 
jobs[(jobs['job_type'].isin(['executive','board_member','advisor','board_observer'])) & \n (jobs['started_on']<=date) & \n ((jobs['ended_on']>date) | (pd.isnull(jobs['ended_on'])))].reset_index(drop=True)\n \n #*******************************************************************************************************\n # FORMER JOB FILTER\n # Crunchbase job must have ended BEFORE date or started AFTER date\n CB_jobs_former = jobs[(jobs['job_type'].isin(['executive','board_member','advisor','board_observer'])) & \n ((jobs['ended_on']<=date) | (jobs['started_on']>date))].reset_index(drop=True)\n \n #*******************************************************************************************************\n # COMBINE THESE 6 (or 7) INTO LIST OF FRAMES\n lst_of_frames = []\n # Crunchbase frames\n CB_frames = [CB_companies,CB_investors,CB_investments,CB_investment_partners,CB_jobs,CB_jobs_former]\n # Add to list of frames\n lst_of_frames.append(CB_frames)\n # If model_uuids are not supplied, calculate Pledge 1% neighborhood\n if model_uuids == []:\n P1_frames = []\n for frame in CB_frames:\n # Pledge 1% frames must have Crunchbase assumptions in addition to an earlier pledge date\n new_frame = frame[frame['p1_date']<=date].reset_index(drop=True).drop('add_to_model',axis=1)\n P1_frames.append(new_frame)\n # Add to list of frames\n lst_of_frames.append(P1_frames)\n # If model_uuids are supplied, calculate model neighborhood\n if model_uuids != []:\n model_frames = []\n for frame in CB_frames:\n # Include model dataframe if condition satisfied: either are a Pledge 1% company or tagged by model_uuids\n new_frame=frame[(frame['p1_date']<=date) | (frame['add_to_model']==1)].reset_index(drop=True).drop('add_to_model',axis=1)\n model_frames.append(new_frame)\n # Add to list of frames\n lst_of_frames.append(model_frames)\n # If this boolean value is False, calculate ~Pledge 1% neighborhood\n if skip_not_p1 is False:\n not_P1_frames = []\n for frame in CB_frames:\n # Non-Pledge 1% frames must have Crunchbase assumptions in addition to NaT pledge date or later pledge date\n new_frame = frame[(pd.isnull(frame['p1_date']) | (frame['p1_date']>date))].reset_index(drop=True).drop('add_to_model',axis=1)\n not_P1_frames.append(new_frame)\n # Add to list of frames\n lst_of_frames.append(not_P1_frames) \n # Remove extra column 'add_to_model'\n for idx,frame in enumerate(CB_frames):\n CB_frames[idx] = frame.drop('add_to_model',axis=1)\n\n #*******************************************************************************************************\n # FORMER NEW JOB FILTER\n print('CaLcUlAtInG... FORMER NEW JOB FILTER')\n \n for frame in lst_of_frames:\n # Where do the former affiliated work now?\n # Pull their uuids\n former_people = frame[5].person_uuid.unique()\n # Pull their current jobs from Crunchbase\n jobs_former_new = CB_frames[4][CB_frames[4].person_uuid.isin(former_people)] \n # Check they're not already in the current jobs dataframe\n # Combine into one temp data frame\n combined_jobs = pd.concat([frame[4], jobs_former_new]).reset_index(drop=True) \n df_gpby = combined_jobs.groupby(list(combined_jobs.columns))\n # Only count non-duplicated columns\n idx = [x[0] for x in df_gpby.groups.values() if len(x) == 1]\n # Reindex dataframe\n jobs_former_new = combined_jobs.reindex(idx)\n # Add to list of frames\n frame.append(jobs_former_new)\n \n #*******************************************************************************************************\n # PARTNER INVESTMENT JOB FILTER\n print('CaLcUlAtInG... 
PARTNER INVESTMENT JOB FILTER')\n \n for frame in lst_of_frames:\n # Are the partner investment jobs already in one of the jobs dataframes? If not, we should add them.\n # Create temporary dataframe and column to make checking the intersection between dataframes easier \n # frame[4]: current jobs | frame[5]: former jobs | frame[6]: former new jobs\n jobs_combined = pd.concat([frame[4],frame[5],frame[6]])\n jobs_combined['person,company'] = jobs_combined['person_uuid'] + ',' + jobs_combined['org_uuid']\n # frame[3]: partner investments\n frame[3]['person,company'] = frame[3]['partner_uuid']+ ',' + frame[3]['investor_uuid']\n # Number of unique partner investments\n unique_PI = frame[3]['person,company'].unique()\n # Overlap between PI and combined J frames, create temporary jobs view\n # These PI are already found in J frames, so we do not need to include them\n jobs_already_in_J = jobs_combined[jobs_combined['person,company'].isin(unique_PI)] \n # This will return non intersecting value of PI with temp J\n # These PI are not found in J, so we would like to include them\n PI_not_in_J = np.setdiff1d(unique_PI,jobs_already_in_J['person,company'].unique())\n # Need to create separate jobs dataframe for non intersecting PI/J person/company pairs\n grouped = frame[3][frame[3]['person,company'].isin(PI_not_in_J)].groupby(['partner_uuid','partner_name','investor_uuid','investor_name']).count()\n grouped_df = grouped.reset_index()[['partner_uuid','partner_name','investor_uuid','investor_name']]\n grouped_df['job_type'] = 'executive'\n # Add to list of frames\n frame.append(grouped_df)\n \n #*******************************************************************************************************\n # OTHER FIRM PARNTERS\n print('CaLcUlAtInG... OTHER FIRM PARTNER JOBS & INVESTMENTS FILTER')\n \n for frame in lst_of_frames:\n # OTHER FIRM PARNTERS - JOBS\n # Who are the other partners that work at the investment firms present in the neighborhood?\n # Get the unique investor uuids associated with the dataframes\n # frame[2]: from investments dataframe\n unique_investor_firm_A = list(frame[2]['investor_uuid'].unique())\n # frame[3]: from partner investments dataframe\n unique_investor_firm_B = list(frame[3]['investor_uuid'].unique())\n partners = list(frame[3]['partner_uuid'].unique())\n # Combine to get list of unique uuids of VC firms\n unique_firms = list(set(unique_investor_firm_A+unique_investor_firm_B))\n # Grab current jobs from Crunchbase for these investing firms\n # Exclude duplicate partner job (already represented by partners list calculated above)\n partner_jobs = CB_frames[4][(CB_frames[4]['org_uuid'].isin(unique_firms)) & \n ~(CB_frames[4]['person_uuid'].isin(partners))].reset_index(drop=True)\n # Check they're not already in the current/former jobs dataframe\n # Combine into one temp data frame\n combined_jobs = pd.concat([frame[4], partner_jobs]).reset_index(drop=True) \n df_gpby = combined_jobs.groupby(list(combined_jobs.columns))\n # Only count non-duplicated rows\n idx = [x[0] for x in df_gpby.groups.values() if len(x) == 1]\n # Reindex dataframe\n partner_jobs = combined_jobs.reindex(idx)\n # Add to list of frames\n frame.append(partner_jobs)\n # OTHER FIRM PARNTERS - PARTNER INVESTMENTS\n # For these new partners, what companies are they invested in?\n # Get the unique parnter uuids associated with the dataframes\n other_partners = partner_jobs['person_uuid'].unique()\n other_partner_investments = CB_frames[3][CB_frames[3]['partner_uuid'].isin(other_partners)]\n # Check they're not 
already in the partner investments dataframe\n # Combine into one temp data frame\n combined_jobs = pd.concat([frame[3], other_partner_investments]).reset_index(drop=True) \n df_gpby = combined_jobs.groupby(list(combined_jobs.columns))\n # Only count non-duplicated rows\n idx = [x[0] for x in df_gpby.groups.values() if len(x) == 1]\n # Reindex dataframe\n other_partner_investments = combined_jobs.reindex(idx)\n # Add to list of frames\n frame.append(other_partner_investments)\n \n #*******************************************************************************************************\n # CURRENT OLD JOB FILTER\n print('CaLcUlAtInG... CURRENT OLD JOB FILTER')\n \n for frame in lst_of_frames:\n # Where did the current affiliated work previously?\n current_people = frame[4].person_uuid.unique() # Pull their IDs\n jobs_current_old = CB_frames[5][CB_frames[5].person_uuid.isin(current_people)] # Pull their current jobs from Crunchbase\n # Check they're not already in the current jobs dataframe\n # Combine into one temp data frame\n combined_jobs = pd.concat([frame[5], jobs_current_old]).reset_index(drop=True) \n df_gpby = combined_jobs.groupby(list(combined_jobs.columns))\n # Only count non-duplicated columns\n idx = [x[0] for x in df_gpby.groups.values() if len(x) == 1]\n # Reindex dataframe\n jobs_current_old = combined_jobs.reindex(idx)\n # Add to list of frames\n frame.append(jobs_current_old)\n \n #*******************************************************************************************************\n # GET EXTRA ORG UUID ATTRIBUTES FROM INVESTMENTS & JOBS\n print('CaLcUlAtInG... EXTRA ORGANIZATION NODES')\n \n CB_orgs = pd.concat([CB_companies, CB_investors])\n for frame in lst_of_frames:\n unique_orgs = []\n # Investments\n unique_orgs.extend(list(frame[2]['investor_uuid'].unique()))\n # Partner investments\n unique_orgs.extend(list(frame[3]['investor_uuid'].unique()))\n # Former new jobs organizations\n unique_orgs.extend(list(frame[6]['org_uuid'].unique()))\n # Parter jobs organizations\n unique_orgs.extend(list(frame[7]['investor_uuid'].unique()))\n # Other parter investments organizations\n unique_orgs.extend(list(frame[9]['org_uuid'].unique()))\n # Current old jobs organizations\n unique_orgs.extend(list(frame[10]['org_uuid'].unique()))\n # Pull their organization information from Crunchbase\n new_org_nodes = CB_orgs[CB_orgs['uuid'].isin(list(set(unique_orgs)))]\n # Add to list of frames\n frame.append(new_org_nodes)\n \n #*******************************************************************************************************\n \n # Output print statements\n print('\\nCrunchbase Neighborhood')\n print('NODES | OUTPUT FRAME 0/CB_companies {}'.format(CB_frames[0].shape))\n print('NODES | OUTPUT FRAME 1/CB_investors {}'.format(CB_frames[1].shape))\n print('NODES&EDGES | OUTPUT FRAME 2/CB_investments {}'.format(CB_frames[2].shape))\n print('NODES&EDGES | OUTPUT FRAME 3/CB_investment_partners {}'.format(CB_frames[3].shape))\n print('NODES&EDGES | OUTPUT FRAME 4/CB_jobs {}'.format(CB_frames[4].shape))\n print('NODES&EDGES | OUTPUT FRAME 5/CB_jobs_former {}'.format(CB_frames[5].shape))\n print('NODES&EDGES | OUTPUT FRAME 6/CB_jobs_former_new {}'.format(CB_frames[6].shape))\n print('NODES&EDGES | OUTPUT FRAME 7/CB_jobs_partner {}'.format(CB_frames[7].shape))\n print('NODES&EDGES | OUTPUT FRAME 8/CB_jobs_other_partners {}'.format(CB_frames[8].shape))\n print('NODES&EDGES | OUTPUT FRAME 9/CB_invest_other_partners {}'.format(CB_frames[9].shape))\n print('NODES&EDGES | OUTPUT FRAME 
10/CB_jobs_current_old {}'.format(CB_frames[10].shape))\n print('NODES | OUTPUT FRAME 11/CB_extra_org_nodes {}'.format(CB_frames[11].shape))\n if model_uuids != []:\n print('\\nModel Neighborhood')\n print('NODES | OUTPUT FRAME 0/model_companies {}'.format(model_frames[0].shape))\n print('NODES | OUTPUT FRAME 1/model_investors {}'.format(model_frames[1].shape))\n print('NODES&EDGES | OUTPUT FRAME 2/model_investments {}'.format(model_frames[2].shape))\n print('NODES&EDGES | OUTPUT FRAME 3/model_investment_partners {}'.format(model_frames[3].shape))\n print('NODES&EDGES | OUTPUT FRAME 4/model_jobs {}'.format(model_frames[4].shape))\n print('NODES&EDGES | OUTPUT FRAME 5/model_jobs_former {}'.format(model_frames[5].shape))\n print('NODES&EDGES | OUTPUT FRAME 6/model_jobs_former_new {}'.format(model_frames[6].shape))\n print('NODES&EDGES | OUTPUT FRAME 7/model_jobs_partner {}'.format(model_frames[7].shape))\n print('NODES&EDGES | OUTPUT FRAME 8/model_jobs_other_partners {}'.format(model_frames[8].shape))\n print('NODES&EDGES | OUTPUT FRAME 9/model_invest_other_partners {}'.format(model_frames[9].shape))\n print('NODES&EDGES | OUTPUT FRAME 10/model_jobs_current_old {}'.format(model_frames[10].shape))\n print('NODES | OUTPUT FRAME 11/model_extra_org_nodes {}'.format(model_frames[11].shape))\n return lst_of_frames\n print('\\nPledge 1% Neighborhood')\n print('NODES | OUTPUT FRAME 0/P1_companies {}'.format(P1_frames[0].shape))\n print('NODES | OUTPUT FRAME 1/P1_investors {}'.format(P1_frames[1].shape))\n print('NODES&EDGES | OUTPUT FRAME 2/P1_investments {}'.format(P1_frames[2].shape))\n print('NODES&EDGES | OUTPUT FRAME 3/P1_investment_partners {}'.format(P1_frames[3].shape))\n print('NODES&EDGES | OUTPUT FRAME 4/P1_jobs {}'.format(P1_frames[4].shape))\n print('NODES&EDGES | OUTPUT FRAME 5/P1_jobs_former {}'.format(P1_frames[5].shape))\n print('NODES&EDGES | OUTPUT FRAME 6/P1_jobs_former_new {}'.format(P1_frames[6].shape))\n print('NODES&EDGES | OUTPUT FRAME 7/P1_jobs_partner {}'.format(P1_frames[7].shape))\n print('NODES&EDGES | OUTPUT FRAME 8/P1_jobs_other_partners {}'.format(P1_frames[8].shape))\n print('NODES&EDGES | OUTPUT FRAME 9/P1_invest_other_partners {}'.format(P1_frames[9].shape))\n print('NODES&EDGES | OUTPUT FRAME 10/P1_jobs_current_old {}'.format(P1_frames[10].shape))\n print('NODES | OUTPUT FRAME 11/P1_extra_org_nodes {}'.format(P1_frames[11].shape))\n # Skip Not P1 Calculations\n if skip_not_p1 is False:\n print('\\n~Pledge 1% Neighborhood')\n print('NODES | OUTPUT FRAME 0/not_P1_companies {}'.format(not_P1_frames[0].shape))\n print('NODES | OUTPUT FRAME 1/not_P1_investors {}'.format(not_P1_frames[1].shape))\n print('NODES&EDGES | OUTPUT FRAME 2/not_P1_investments {}'.format(not_P1_frames[2].shape))\n print('NODES&EDGES | OUTPUT FRAME 3/not_P1_investment_partners {}'.format(not_P1_frames[3].shape))\n print('NODES&EDGES | OUTPUT FRAME 4/not_P1_jobs {}'.format(not_P1_frames[4].shape))\n print('NODES&EDGES | OUTPUT FRAME 5/not_P1_jobs_former {}'.format(not_P1_frames[5].shape))\n print('NODES&EDGES | OUTPUT FRAME 6/not_P1_jobs_former_new {}'.format(not_P1_frames[6].shape))\n print('NODES&EDGES | OUTPUT FRAME 7/not_P1_jobs_partner {}'.format(not_P1_frames[7].shape))\n print('NODES&EDGES | OUTPUT FRAME 8/not_P1_jobs_other_partners {}'.format(not_P1_frames[8].shape))\n print('NODES&EDGES | OUTPUT FRAME 9/not_P1_invest_other_partners {}'.format(not_P1_frames[9].shape))\n print('NODES&EDGES | OUTPUT FRAME 10/not_P1_jobs_current_old {}'.format(not_P1_frames[10].shape))\n print('NODES | 
OUTPUT FRAME 11/not_P1_extra_org_nodes {}'.format(not_P1_frames[11].shape))\n return lst_of_frames\n\ndef load_vertices(sframes, g):\n # For jobs dataframes\n for idx in [4,5,6,8,10]:\n # Keep relevant node attributes\n frame_temp = sframes[idx][['person_uuid', 'person_name']].rename({'person_uuid':'__id', 'person_name':'name'})\n frame_temp['__node_type'] = 'person'\n # Add p1_tag to the vertex\n frame_temp['p1_tag'] = 0\n g = g.add_vertices(vertices=frame_temp, vid_field='__id')\n # For jobs and partner investments dataframes\n for idx in [2,3,4,5,6,8,9,10]:\n # Keep relevant node attributes\n frame_temp = sframes[idx][['org_uuid', 'org_name', 'p1_tag']].rename({'org_uuid':'__id', 'org_name':'name'})\n frame_temp['__node_type'] = 'company'\n # Add p1_tag to the vertex\n frame_temp['p1_tag'] = frame_temp['p1_tag'].apply(lambda x: 0 if (x==\"\" or x==0) else 1)\n frame_temp['p1_tag'] = frame_temp['p1_tag'].astype(int)\n g = g.add_vertices(vertices=frame_temp, vid_field='__id')\n # For investments dataframes\n for idx in [2,3,7,9]:\n # Keep relevant node attributes\n frame_temp = sframes[idx][['investor_uuid', 'investor_name']].rename({'investor_uuid':'__id', 'investor_name':'name'})\n frame_temp['__node_type'] = 'investor'\n # Add p1_tag to the vertex\n frame_temp['p1_tag'] = 0\n g = g.add_vertices(vertices=frame_temp, vid_field='__id')\n # For partner investments dataframes\n for idx in [3,7,9]:\n # Keep relevant node attributes\n frame_temp = sframes[idx][['partner_uuid', 'partner_name']].rename({'partner_uuid':'__id', 'partner_name':'name'})\n frame_temp['__node_type'] = 'person'\n # Add p1_tag to the vertex\n frame_temp['p1_tag'] = 0\n g = g.add_vertices(vertices=frame_temp, vid_field='__id')\n # Organizations\n for idx in [0,1,11]:\n # Keep relevant node attributes\n frame_temp = sframes[idx][['uuid', 'name', 'primary_role', 'p1_tag']].rename({'uuid':'__id', 'primary_role':'__node_type'})\n # Add p1_tag to the vertex\n frame_temp['p1_tag'] = frame_temp['p1_tag'].apply(lambda x: 0 if (x==\"\" or x==0) else 1)\n frame_temp['p1_tag'] = frame_temp['p1_tag'].astype(int)\n # Load into graph\n g = g.add_vertices(vertices=frame_temp, vid_field='__id')\n # Return SGraph\n return g\n\ndef find_p1_affiliations(p1_sframes):\n frames = p1_sframes.copy()\n # Combine company and investor Pledge 1% dataframes, keeping only the uuid column\n p1_affiliations = frames[0][['uuid']].append(frames[1][['uuid']])\n # Add edge connecting to Pledge 1% uuid\n p1_affiliations['p1_uuid'] = 'fd9e2d10-a882-c6f4-737e-fd388d4ffd7c'\n # Create id, source, destination fields in SFrame\n p1_affiliations = p1_affiliations.rename({'uuid':'src','p1_uuid':'dst'})\n p1_affiliations['p1_tag'] = 1\n # Return SFrame\n return p1_affiliations\n\ndef load_edges(sframes, g, p1_affiliations=[], include_edges=[2,3], reverse=False, add_weights=False):\n w = {'status':{'primary':3,'secondary':2,'tertiary':1}, '__edge_type':{'job':1, 'investment':2}}\n # Since it is a directed graph, need to include option for reverse direction\n # Forward\n source = 'src'\n destination = 'dst'\n # Reverse\n if reverse:\n source = 'dst'\n destination = 'src'\n if type(p1_affiliations) == SFrame:\n # P1 Companies: Company/Investor --> Pledge 1%\n g = g.add_edges(edges=p1_affiliations, src_field=source, dst_field=destination)\n if add_weights:\n frame_temp['weight'] = 6\n # Investments: Investor --> Company\n # Create id, source, destination fields in SFrame\n frame_temp = 
sframes[2][['investment_uuid','investor_uuid','org_uuid','investment_type','raised_amount_usd','investor_count','is_lead_investor','lead_investor_count']].rename({'investment_uuid':'__id','investor_uuid':'src','org_uuid':'dst'})\n frame_temp['__edge_type'] = 'investment'\n frame_temp['status'] = 'primary'\n if add_weights:\n frame_temp['weight'] = w['__edge_type']['investment'] * w['status']['primary']\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n # Partner Investments, Investments: Person --> Company\n # Create id, source, destination fields in SFrame\n frame_temp = sframes[3][['investment_uuid','partner_uuid','org_uuid','investment_type','raised_amount_usd','investor_count']].rename({'investment_uuid':'__id','partner_uuid':'src','org_uuid':'dst'})\n frame_temp['__edge_type'] = 'investment'\n frame_temp['status'] = 'primary'\n if add_weights:\n frame_temp['weight'] = w['__edge_type']['investment'] * w['status']['primary']\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n # Partner Investments, Investments: Investor --> Company\n # Create id, source, destination fields in SFrame\n frame_temp = sframes[3][['investor_uuid','org_uuid','investment_type','investor_count']].rename({'investor_uuid':'src','org_uuid':'dst'})\n frame_temp['__edge_type'] = 'investment'\n frame_temp['status'] = 'secondary'\n if add_weights:\n frame_temp['weight'] = w['__edge_type']['investment'] * w['status']['secondary']\n # Secondary relationships, skip if not specified at input\n if 2 in include_edges:\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n # Partner Investments, Jobs: Person --> Company\n # Create id, source, destination fields in SFrame\n frame_temp = sframes[7][['partner_uuid','investor_uuid']].rename({'partner_uuid':'src','investor_uuid':'dst'})\n frame_temp['__edge_type'] = 'job'\n frame_temp['status'] = 'secondary'\n if add_weights:\n frame_temp['weight'] = w['__edge_type']['job'] * w['status']['secondary']\n # Secondary relationships, skip if not specified at input\n if 2 in include_edges:\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination) \n # Other Partner Investments, Investments: Person --> Company\n # Create id, source, destination fields in SFrame\n frame_temp = sframes[9][['investment_uuid','partner_uuid','org_uuid','investment_type','raised_amount_usd','investor_count']].rename({'investment_uuid':'__id','partner_uuid':'src','org_uuid':'dst'})\n frame_temp['__edge_type'] = 'investment'\n frame_temp['status'] = 'tertiary'\n if add_weights:\n frame_temp['weight'] = w['status']['tertiary'] * w['__edge_type']['investment']\n # Tertiary relationships, skip if not specified at input\n if 3 in include_edges:\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n # Jobs: Person --> Company\n for idx in [4,5,6,8,10]:\n # Create id, source, destination fields in SFrame\n frame_temp = sframes[idx][['job_uuid','person_uuid','org_uuid','job_type','title']].rename({'job_uuid':'__id','person_uuid':'src','org_uuid':'dst'})\n frame_temp['__edge_type'] = 'job'\n # Current jobs\n if idx == 4:\n frame_temp['status'] = 'primary'\n if add_weights:\n frame_temp['weight'] = w['status']['primary'] * w['__edge_type']['job']\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n continue\n # Secondary relationships, skip if not specified at input\n if 2 in include_edges:\n # Former jobs | Former new jobs | Current old jobs \n if idx in [5,6,10]:\n 
frame_temp['status'] = 'secondary'\n if add_weights:\n frame_temp['weight'] = w['status']['secondary'] * w['__edge_type']['job']\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n continue \n # Tertiary relationships, skip if not specified at input\n if 3 in include_edges:\n # Other partners at firm\n if idx == 8:\n frame_temp['status'] = 'tertiary'\n if add_weights:\n frame_temp['weight'] = w['status']['tertiary'] * w['__edge_type']['job']\n g = g.add_edges(edges=frame_temp, src_field=source, dst_field=destination)\n continue\n # Return SGraph\n return g\n\ndef update_cb_weights(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n edge['weight'] = 0\n edge['weight_status'] = 0\n edge['weight_type'] = 0\n if edge['status'] == 'primary':\n edge['weight_status'] = 3\n if edge['status'] == 'secondary':\n edge['weight_status'] = 2\n if edge['status'] == 'tertiary':\n edge['weight_status'] = 1\n if edge['__edge_type'] == 'job':\n edge['weight_type'] = 1\n if edge['__edge_type'] == 'investment':\n edge['weight_type'] = 2\n edge['weight'] = edge['weight_status'] * edge['weight_type']\n return (src, edge, dst)\n\ndef update_pagerank_weight(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n dst['pagerank'] += src['prev_pagerank'] * edge['weight']\n return (src, edge, dst)\n\ndef update_pagerank_reset_prob(src, edge, dst):\n global reset\n if src['__id'] != dst['__id']: # ignore self-links\n dst['pagerank'] *= (1 - reset)\n dst['pagerank'] += reset\n return (src, edge, dst)\n\ndef update_pagerank_prev_to_current(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n src['prev_pagerank'] = src['pagerank']\n return (src, edge, dst)\n\ndef sum_weight(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n src['total_weight'] += edge['weight']\n return src, edge, dst\n\ndef make_pagerank_zero(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n dst['pagerank'] = 0\n return src, edge, dst\n\ndef update_l1_delta(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n dst['l1_delta'] = abs(dst['pagerank'] - dst['prev_pagerank'])\n src['l1_delta'] = abs(src['pagerank'] - src['prev_pagerank'])\n return src, edge, dst\n\ndef normalize_weight(src, edge, dst):\n if src['__id'] != dst['__id']: # ignore self-links\n edge['weight'] /= src['total_weight']\n return src, edge, dst\n\ndef pagerank_weighted(input_graph, reset_prob=0.15, threshold=0.01, max_iterations=3):\n g = SGraph(input_graph.vertices, input_graph.edges)\n global reset\n reset = reset_prob\n # compute normalized edge weight\n g.vertices['total_weight'] = 0.0\n g = g.triple_apply(sum_weight, ['total_weight'])\n g = g.triple_apply(normalize_weight, ['weight'])\n del g.vertices['total_weight']\n # initialize vertex field\n g.vertices['prev_pagerank'] = 1.0\n it = 0\n total_l1_delta = len(g.vertices)\n start = time.time()\n while(total_l1_delta > threshold and it < max_iterations):\n if 'pagerank' not in g.get_vertex_fields():\n g.vertices['pagerank'] = 0.0\n else:\n g = g.triple_apply(make_pagerank_zero, ['pagerank'])\n g = g.triple_apply(update_pagerank_weight, ['pagerank'])\n g = g.triple_apply(update_pagerank_reset_prob, ['pagerank'])\n if 'l1_delta' not in g.get_vertex_fields():\n g.vertices['l1_delta'] = (g.vertices['pagerank'] - g.vertices['prev_pagerank']).apply(lambda x: abs(x))\n else:\n g = g.triple_apply(update_l1_delta, ['l1_delta'])\n total_l1_delta = g.vertices['l1_delta'].sum()\n g = 
g.triple_apply(update_pagerank_prev_to_current, ['prev_pagerank'])\n print (\"Iteration %d: total pagerank changed in L1 = %f\" % (it, total_l1_delta))\n it = it + 1\n print (\"Weighted pagerank finished in: %f secs\" % (time.time() - start))\n del g.vertices['prev_pagerank']\n return g.vertices", "_____no_output_____" ] ], [ [ "# Visualization of Relationships\nCompanies are blue\n\nPeople are red\n\n#### Primary relationships\n\n- VC Firm investing in a company\n- Partner at VC Firm investing in a company\n- Person has current job at a company", "_____no_output_____" ] ], [ [ "# g = Network(notebook=True, directed=True, heading='Primary')\n# g.force_atlas_2based()\n\n# # Primary nodes\n# g.add_node(0, label='0', color='#add8e6', size=9)\n# g.add_node(1, label='1', color='blue', size=9)\n# g.add_node(2, label='2', color='red', size=5)\n# g.add_node(3, label='3', color='red', size=5)\n\n# # Primary edges\n# g.add_edge(1,0, label=\"investment\", color=\"grey\")\n# g.add_edge(2,0, label=\"investment\", color=\"grey\")\n# g.add_edge(3,0, label='job', color='grey')\n\n# g.show('Primary.html')", "_____no_output_____" ] ], [ [ "#### Secondary relationships\n\nWhat's added in green:\n- Former jobs of current employees of companies\n- Former employees of companies and their new jobs\n- Partner investors' affiliation with their VC firm and the indirect link to the same invested company", "_____no_output_____" ] ], [ [ "# g = Network(notebook=True, directed=True, heading='Secondary')\n# g.force_atlas_2based()\n\n# # Primary nodes\n# g.add_node(0, label='0', color='#add8e6', size=9)\n# g.add_node(1, label='1', color='blue', size=9)\n# g.add_node(2, label='2', color='red', size=5)\n# g.add_node(3, label='3', color='red', size=5)\n# g.add_node(4, label='4', color='blue', size=9)\n\n# # Primary edges\n# g.add_edge(1,0, label=\"investment\", color=\"grey\")\n# g.add_edge(2,0, label=\"investment\", color=\"grey\")\n# g.add_edge(3,0, label='job', color='grey')\n\n# # Secondary nodes\n# g.add_node(5, label='5', color='red', size=5)\n# g.add_node(8, label='8', color='blue', size=9)\n# g.add_node(11, label='11', color='blue', size=9)\n\n# # Secondary edges\n# g.add_edge(5,0, label='job(former)', color='green')\n# g.add_edge(2,4, label='job', color='green')\n# g.add_edge(4,0, label='investment(indirect)', color='green')\n# g.add_edge(3,11, label='job(former)', color='green')\n# g.add_edge(5,8, label='job', color='green')\n\n# g.show('Secondary.html')", "_____no_output_____" ] ], [ [ "#### Teritiary relationships\n\nWhat's added in orange:\n- Coworkers of partner investors and their current investments", "_____no_output_____" ] ], [ [ "# g = Network(notebook=True, directed=True, heading='Teritiary')\n# g.force_atlas_2based()\n\n# # Primary nodes\n# g.add_node(0, label='0', color='#add8e6', size=9)\n# g.add_node(1, label='1', color='blue', size=9)\n# g.add_node(2, label='2', color='red', size=5)\n# g.add_node(3, label='3', color='red', size=5)\n# g.add_node(4, label='4', color='blue', size=9)\n\n# # Primary edges\n# g.add_edge(1,0, label=\"investment\", color=\"grey\")\n# g.add_edge(2,0, label=\"investment\", color=\"grey\")\n# g.add_edge(3,0, label='job', color='grey')\n\n# # Secondary nodes\n# g.add_node(5, label='5', color='red', size=5)\n# g.add_node(8, label='8', color='blue', size=9)\n# g.add_node(11, label='11', color='blue', size=9)\n\n# # Secondary edges\n# g.add_edge(5,0, label='job(former)', color='green')\n# g.add_edge(2,4, label='job', color='green')\n# g.add_edge(4,0, label='investment(indirect)', 
color='green')\n# g.add_edge(3,11, label='job(former)', color='green')\n# g.add_edge(5,8, label='job', color='green')\n\n# # Tertiary nodes\n# g.add_node(6, label='6', color='red', size=5)\n# g.add_node(7, label='7', color='red', size=5)\n# g.add_node(9, label='9', color='blue', size=9)\n# g.add_node(10, label='10', color='blue', size=9)\n\n# # Tertiary edges\n# g.add_edge(6,1, label='job', color='orange')\n# g.add_edge(7,4, label='job', color='orange')\n# g.add_edge(6,9, label='investment', color='orange')\n# g.add_edge(7,10, label='investment', color='orange')\n\n# g.show('Teritiary.html')", "_____no_output_____" ] ], [ [ "## ", "_____no_output_____" ], [ "# 1. Load in Crunchbase dataframes. Comment out once you've completed Step 3.", "_____no_output_____" ] ], [ [ "# # Import CSVs as Pandas DataFrames\n# path = 'files/output/organizations_merged.csv'\n# df = pd.read_csv(path).drop(['Unnamed: 0'],axis=1)\n# print('INPUT df=p1+org FROM CSV: {}'.format(path))\n# print('ORGANIZATION/df cols: {}\\nSHAPE: {}'.format(df.columns.to_list(), df.shape))\n# df = reduce_mem_usage(df, verbose=True)\n\n# path = 'files/output/p1_jobs.csv'\n# jobs = pd.read_csv(path)\n# print('\\nINPUT jobs FROM CSV: {}'.format(path))\n# print('JOBS/jobs cols: {}\\nSHAPE: {}'.format(jobs.columns.to_list(), jobs.shape))\n# jobs = reduce_mem_usage(jobs, verbose=True)\n\n# path = 'files/output/p1_investments.csv'\n# invest = pd.read_csv(path)\n# print('\\nINPUT invest FROM CSV: {}'.format(path))\n# print('INVESTMENTS/invest cols: {}\\nSHAPE: {}'.format(invest.columns.to_list(), invest.shape))\n# invest = reduce_mem_usage(invest, verbose=True)\n\n# path = 'files/output/p1_investments_partner.csv'\n# invest_prtnr = pd.read_csv(path)\n# print('\\nINPUT invest_prtnr FROM CSV: {}'.format(path))\n# print('PARTNER INVESTMENTS/invest_prtnr cols: {}\\nSHAPE: {}'.format(invest_prtnr.columns.to_list(), invest_prtnr.shape))\n# invest_prtnr = reduce_mem_usage(invest_prtnr, verbose=True)\n\n# print('\\n\\nPledge 1% UUID: {}'.format(df[df['name']=='Pledge 1%'].uuid.values[0]))", "_____no_output_____" ] ], [ [ "# 2. Create mutliple merged pandaframes based on relationships using `network_by_date` function, which filters the dataframes by date to ensure the job/investment/company existed at that time. Comment out once you've saved these as CSVs in Step 3.", "_____no_output_____" ] ], [ [ "# date = '2020-09-08'\n# cb_frames,p1_frames = network_by_date(date, df, jobs, invest, invest_prtnr)", "_____no_output_____" ] ], [ [ "# 3. Save filtered dataframes as separate CSVs, and then load in as SFrames", "_____no_output_____" ], [ "### Save filtered dataframes as separate CSVs. Load in nodes and edges as SFrames. 
Comment out once you've saved these.", "_____no_output_____" ] ], [ [ "# for idx, frame in enumerate(cb_frames):\n# path = 'files/output/graph_temp/cb/{}_df.csv'.format(idx)\n# print('SAVED TO CSV', path)\n# frame.to_csv(path, index=False)\n# for idx, frame in enumerate(p1_frames):\n# path = 'files/output/graph_temp/p1/{}_df.csv'.format(idx)\n# print('SAVED TO CSV', path)\n# frame.to_csv(path, index=False)\n \n# lst_of_frames = []\n# for val in ['cb','p1']:\n# lst = []\n# for idx in range(12):\n# path = 'files/output/graph_temp/{}/{}_df.csv'.format(val, idx)\n# lst.append(SFrame(data=path))\n# lst_of_frames.append(lst)\n# cb_sframes,p1_sframes = lst_of_frames", "_____no_output_____" ] ], [ [ "### ((((START FROM HERE)))) IF USING THE SAME DATE AS PREVIOUS RUNS: Load in nodes and edges as SFrames.\n", "_____no_output_____" ] ], [ [ "# lst_of_frames = []\n# for val in ['cb','p1']:\n# lst = []\n# for idx in range(12):\n# path = 'files/output/graph_temp/{}/{}_df.csv'.format(val, idx)\n# lst.append(SFrame(data=path))\n# lst_of_frames.append(lst)\n# cb_sframes,p1_sframes = lst_of_frames\n\n# # List of Pledge 1% uuids\n# global p1_companies_uuid\n# p1_companies_uuid = []\n# p1_companies_uuid.extend(list(p1_sframes[0]['uuid'].unique()))\n# p1_companies_uuid.extend(list(p1_sframes[1]['uuid'].unique()))\n# p1_companies_uuid = list(set(p1_companies_uuid))", "_____no_output_____" ] ], [ [ "# 4. Load SFrames into graph and remove duplicate edges. Comment out once you've created the 8 graphs below. ", "_____no_output_____" ], [ "### Use functions to format SFrames to load into SGraph, `load_vertices`, `p1_affiliations`, and `load_edges`. Remove duplicate edges.\n\n#### Vertices: Person, Company, or Investor\n\nNode attributes: `__id`, `__node_type`, `name`, `p1_tag`\n\n#### Edges: Investment, Job\n\nEdge attributes: `__src_id`, `__dst_id`, `__edge_type`, `status`, {`__id`}, {`investment_type`,`raised_amount_usd`, `investor_count`, `is_lead_investor`, `lead_investor_count`}, {`job_type`, `title`}\n\nReference: <a href='https://github.com/turi-code/how-to/blob/master/remove_duplicate_edges.py'>Remove duplicate edges from SGraph</a>", "_____no_output_____" ] ], [ [ "# def make_graph(cb_sframes, weights=False, reverse_edges=False, remove_parallel_edges=False):\n \n# print('\\nBuIlDiNg GrApH...')\n# # Load in crunchbase with relationships\n \n# # If adding weights...\n# if weights:\n# print('- ADDING WEIGHTS IN THE FORWARD DIRECTION')\n# cb = load_edges(cb_sframes, load_vertices(cb_sframes, SGraph()), p1_affiliations=[], include_edges=[2,3], reverse=False, add_weights=True)\n# elif not weights:\n# cb = load_edges(cb_sframes, load_vertices(cb_sframes, SGraph()), p1_affiliations=[], include_edges=[2,3], reverse=False, add_weights=False)\n \n# # If adding reversed edges...\n# if reverse_edges:\n# print('- ADDING EDGES IN THE REVERSE DIRECTION')\n# # If adding weights...\n# if weights:\n# print(' - ADDING WEIGHTS IN THE REVERSE DIRECTION')\n# cb = load_edges(cb_sframes, cb, p1_affiliations=[], include_edges=[2,3], reverse=True, add_weights=True)\n# elif not weights:\n# cb = load_edges(cb_sframes, cb, p1_affiliations=[], include_edges=[2,3], reverse=True, add_weights=False)\n\n# # # Before comparison\n# # before = cb.summary()\n# # before_pri = cb.get_edges(fields={'status':'primary'}).shape[0]\n# # before_sec = cb.get_edges(fields={'status':'secondary'}).shape[0]\n# # before_ter = cb.get_edges(fields={'status':'tertiary'}).shape[0]\n\n# # Get list of edge fields\n# graph_edge_fields = cb.get_edge_fields()\n \n# # 
If removing parallel edges...\n# if remove_parallel_edges:\n# print('- REMOVING PARALLEL EDGES')\n# # Create temporary edge attribute that you'll use in aggregate function\n# cb.edges['relationship'] = cb.edges['status']\n# if weights:\n# cb = SGraph(cb.vertices, cb.edges.groupby(['__src_id','__dst_id','__edge_type','weight'], {'status': aggregate.SELECT_ONE('relationship')}))\n# elif not weights:\n# cb = SGraph(cb.vertices, cb.edges.groupby(['__src_id','__dst_id','__edge_type'], {'status': aggregate.SELECT_ONE('relationship')}))\n# elif not remove_parallel_edges:\n# # Create temporary edge attribute that you'll use in aggregate function\n# cb.edges['combined'] = cb.edges['__id']+','+cb.edges['status']+','+cb.edges['__src_id']+','+cb.edges['__dst_id']\n# cb = SGraph(cb.vertices, cb.edges.groupby(graph_edge_fields, {'combined': aggregate.SELECT_ONE('combined')}))\n# del cb.edges['combined']\n\n# # After comparison\n# after = cb.summary()\n# after_pri = cb.get_edges(fields={'status':'primary'}).shape[0]\n# after_sec = cb.get_edges(fields={'status':'secondary'}).shape[0]\n# after_ter = cb.get_edges(fields={'status':'tertiary'}).shape[0]\n\n# # # Output\n# # print('\\nRemove duplicates from Crunchbase graph')\n# # print('\\nNode change: {:,} --> {:,}'.format(before['num_vertices'], after['num_vertices']))\n# # print('Edge change: {:,} --> {:,}'.format(before['num_edges'], after['num_edges']))\n# # print('\\nPRIMARY Edge change: {:,} --> {:,}'.format(before_pri,after_pri))\n# # print('SECONDARY Edge change: {:,} --> {:,}'.format(before_sec,after_sec))\n# # print('TERTIARY Edge change: {:,} --> {:,}'.format(before_ter,after_ter))\n \n# # Save and load graphs\n# # UPDATE PATH \n# if not reverse_edges and not remove_parallel_edges: #(~A,~B)\n# name = 'Cruncbase_1Way_MultiEdge'\n# elif reverse_edges and not remove_parallel_edges: #(A,~B)\n# name = 'Crunchbase_2Ways_MultiEdge'\n# elif not reverse_edges and remove_parallel_edges: #(~A,B)\n# name = 'Cruncbase_1Way_SingleEdge'\n# elif reverse_edges and remove_parallel_edges: #(A,B)\n# name = 'Crunchbase_2Ways_SingleEdge'\n# if weights:\n# name += '_Weighted'\n# print('\\nSAVING {}: ({},{})'.format(name, after['num_vertices'],after['num_edges']))\n# print('*'*50)\n# path = 'CrunchbaseGraphs/{}'.format(name)\n# cb.save(path)\n# cb = load_sgraph(path)\n# return cb\n\n# # Construct all 8\n# for weights_bool in [False, True]:\n# for reverse_bool in [False, True]:\n# for parallel_bool in [False, True]:\n# cb = make_graph(cb_sframes, weights=weights_bool, reverse_edges=reverse_bool, remove_parallel_edges=parallel_bool)", "_____no_output_____" ] ], [ [ "### Load Graph(s) In (Checkpoint!)\n- `Cruncbase_1Way_MultiEdge`: Directed SGraph, one way, parallel edges ***MAIN GRAPH\n - $G(V,E) = (1290346, 2085199)$\n\n- `Cruncbase_1Way_SingleEdge`: Directed SGraph, one way, **no parallel edges**\n - $G(V,E) = (1290346, 981877)$\n\n- `Crunchbase_2Ways_MultiEdge`: Directed SGraph, **two ways**, parallel edges **WHEN NEEDED FOR FEATURES\n - $G(V,E) = (1290346, 4170144)$ \n \n- `Crunchbase_2Ways_SingleEdge`: Directed SGraph, two ways, **no parallel edges**\n - $G(V,E) = (1290346, 1963489)$\n \nAnd there are another 4 w/ weights added! 
See list in code cell.", "_____no_output_____" ] ], [ [ "# Load\n#cb = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge')\n#cb = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge')\n#cb = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge')\n#cb = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge')\n\n# With Weights\n#cb = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge_Weighted')\n#cb = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge_Weighted')\n#cb = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge_Weighted')\n#cb = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge_Weighted')", "_____no_output_____" ] ], [ [ "# 5. Reduce size of dataset by limiting degrees of freedom from Pledge 1% companies. Comment out once you've saved the vertices list.\n\nThis creates the `ALL_CB_Pick_Sample_Companies_From_Here.csv`,`DEGREE_4_Pick_Sample_Companies_From_Here.csv`, `DEGREE_2_Pick_Sample_Companies_From_Here.csv` files.", "_____no_output_____" ] ], [ [ "# # Get subgraph vertices to sample from\n# cb_vertices = cb.get_vertices()\n\n# # Append investors + companies together into new SFrame\n# sample_vertices = cb_vertices[cb_vertices['__node_type']=='investor']\n# sample_vertices = sample_vertices.append(cb_vertices[cb_vertices['__node_type']=='company'])\n\n# # Save to CSV so you don't have to re-do this !\n# pd.DataFrame(sample_vertices).to_csv('ALL_CB_Pick_Sample_Companies_From_Here.csv', index=False)", "_____no_output_____" ] ], [ [ "### Reduce the CB dataset\n\n- Retrieve the graph neighborhood around a set of vertices, ignoring edge directions.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.SGraph.get_neighborhood.html'>turicreate.SGraph.get_neighborhood</a>", "_____no_output_____" ] ], [ [ "# # Define radius for calculating degrees of separation away from Pledge 1% companies\n# rad = 5\n\n# # Create subgraph\n# cb_smol = cb.get_neighborhood(ids=p1_companies_uuid, radius=rad, full_subgraph=True)\n\n# # Save dictionaries which store info about graph\n# before = cb.summary() # Full graph\n# after = cb_smol.summary() # Subgraph\n\n# # Output\n# print('Radius of the neighborhood: {} degrees of separation from Pledge 1% companies uuids'.format(rad))\n# print('Reduction in nodes: {:.2f}%'.format((1-(after['num_vertices']/before['num_vertices']))*100))\n# print('Reduction in edges: {:.2f}%'.format((1-(after['num_edges']/before['num_edges']))*100))\n# print('\\nNode change: {:,} --> {:,}'.format(before['num_vertices'], after['num_vertices']))\n# print('Edge change: {:,} --> {:,}'.format(before['num_edges'], after['num_edges']))\n\n# # Get subgraph vertices to sample from\n# cb_smol_vertices = cb_smol.get_vertices()\n\n# # Append investors + companies together into new SFrame\n# sample_vertices = cb_smol_vertices[cb_smol_vertices['__node_type']=='investor']\n# sample_vertices = sample_vertices.append(cb_smol_vertices[cb_smol_vertices['__node_type']=='company'])\n\n# # Save to CSV so you don't have to re-do this !\n# pd.DataFrame(sample_vertices).to_csv('DEGREE_5_Pick_Sample_Companies_From_Here.csv', index=False)", "Radius of the neighborhood: 5 degrees of separation from Pledge 1% companies uuids\nReduction in nodes: 64.26%\nReduction in edges: 21.36%\n\nNode change: 1,290,346 --> 461,229\nEdge change: 4,170,144 --> 3,279,543\n" ] ], [ [ "# 6. Produce 100 samples of the Crunchbase graphs. 10 for each scenario below. Save to CSV.\n\n**Graph Network Size: 4 Degrees (`Model_DF_D4`)**\n1. Features: Baseline reduced only\n2. 
Features: Baseline only\n3. Features: Graph only\n4. Features: Graph + Baseline reduced\n5. Features: Graph + Baseline\n**Graph Network Size: 5 Degrees (`Model_DF_D5`)**\n- Same as above (6-10)\n", "_____no_output_____" ] ], [ [ "# lst_of_frames = []\n# for val in ['cb','p1']:\n# lst = []\n# for idx in range(12):\n# path = 'files/output/graph_temp/{}/{}_df.csv'.format(val, idx)\n# lst.append(SFrame(data=path))\n# lst_of_frames.append(lst)\n# cb_sframes,p1_sframes = lst_of_frames\n\n# # List of Pledge 1% uuids\n# global p1_companies_uuid\n# p1_companies_uuid = []\n# p1_companies_uuid.extend(list(p1_sframes[0]['uuid'].unique()))\n# p1_companies_uuid.extend(list(p1_sframes[1]['uuid'].unique()))\n# p1_companies_uuid = list(set(p1_companies_uuid))", "_____no_output_____" ], [ "# # Load CB Graphs\n# cb0 = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge')\n# cb1 = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge')\n# cb2 = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge')\n# cb3 = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge')\n\n# # Load CB Graphs With Weights\n# cb0w = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge_Weighted')\n# cb1w = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge_Weighted')\n# cb2w = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge_Weighted')\n# cb3w = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge_Weighted')\n\n# # P1 Companie uuids\n# positive_labels = pd.read_csv('Pledge1_09_08_2020.csv')['src'].to_list()\n\n# # Vertices from different Crunchbase graphs\n# ALL_vertices = pd.read_csv('ALL_CB_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_5_vertices = pd.read_csv('DEGREE_5_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_4_vertices = pd.read_csv('DEGREE_4_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_2_vertices = pd.read_csv('DEGREE_2_Pick_Sample_Companies_From_Here.csv')", "_____no_output_____" ] ], [ [ "## (DONE) Output 20 graphs for each graph network.\n\n### The code below is for Baseline Reduced (`BR`) & Baseline (`B`) scenarios, which require no graph feature calculations.\n- Included `Model_DF_D4`,`Model_DF_D5` (plus `Model_DF_D2`, `Model_DF_ALL`)", "_____no_output_____" ] ], [ [ "# # P1 Companie uuids\n# positive_labels = pd.read_csv('Pledge1_09_08_2020.csv')['src'].to_list()\n\n# # Grab relevant neighborhood\n# ALL_vertices = pd.read_csv('ALL_CB_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_5_vertices = pd.read_csv('DEGREE_5_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_4_vertices = pd.read_csv('DEGREE_4_Pick_Sample_Companies_From_Here.csv')\n# DEGREE_2_vertices = pd.read_csv('DEGREE_2_Pick_Sample_Companies_From_Here.csv')\n\n# # Setting up loop\n# neighborhoods_name = ['Model_DF_D2', 'Model_DF_D4', 'Model_DF_D5', 'Model_DF_ALL']\n# neighborhoods = [DEGREE_2_vertices, DEGREE_4_vertices, DEGREE_5_vertices, ALL_vertices]\n# neighborhoods_dict = dict(zip(neighborhoods_name,neighborhoods))\n\n# for neighborhood in neighborhoods_name:\n# for scenario in ['B', 'BR']:\n# for idx in range(10):\n \n# DF = neighborhoods_dict[neighborhood]\n# # Sample equal size of non-P1 companies from vertices dataframe\n# negatives_labels = DF.sample(int(len(positive_labels)), replace=False)['__id'].to_list()\n \n# # Combine, avoid duplicates\n# model_labels = list(np.unique(positive_labels + negatives_labels))\n \n# # Reduce to sample CSV\n# smol_DF = DF[['__id']][DF['__id'].isin(model_labels)].reset_index(drop=True).rename({'__id':'uuid'},axis=1)\n \n# # Output to CSV\n# path = 
'files/output/{}/{}/{}.csv'.format(neighborhood,scenario,idx)\n# smol_DF.to_csv(path, index=False)\n# print('SAVING to {}\\n'.format(path))", "_____no_output_____" ], [ "# # Fields needed for this function\n# lst_of_graphs = [cb0,cb1,cb2,cb3,cb0w,cb1w,cb2w,cb3w]\n# sgraph_idx_assign = {0:'cb0',1:'cb1',2:'cb2',3:'cb3',0:'cb0',1:'cb1',2:'cb2',3:'cb3'}\n# vertex_type_list = ['cb_smol_ALL', 'cb_smol_D4', 'cb_smol_D2']\n# model_uuids_dict = {v:[] for v in vertex_type_list}\n\n# def make_smol_sgraphs(positive_labels, vertex_df, string, SGraph_list, radius=3):\n \n# # Sample equal size of non-P1 companies from vertices dataframe\n# negatives_labels = vertex_df.sample(int(len(positive_labels)), replace=False)['__id'].to_list()\n \n# # Combine, avoid duplicates\n# model_labels = list(np.unique(positive_labels + negatives_labels))\n\n# for idx,graph in enumerate(lst_of_graphs):\n \n# # Create subgraph\n# print('Creating graph {}'.format(sgraph_idx_assign[idx].upper()))\n# smol = graph.get_neighborhood(ids=model_labels, radius=radius, full_subgraph=True) \n \n# # Save subgraph\n# path = 'ModelGraphs/test/{}_{}'.format(string,sgraph_idx_assign[idx])\n# smol.save(path)\n# print('SAVING to {}\\n'.format(path))\n \n# # Output model labels for this set of graphs\n# return model_labels\n\n# model_labels = make_smol_sgraphs(positive_labels, ALL_vertices, 'cb_smol_ALL', lst_of_graphs, radius=3)\n# model_uuids_dict['cb_smol_ALL'] = model_labels\n\n# model_labels = make_smol_sgraphs(positive_labels, DEGREE_4_vertices, 'cb_smol_D4',lst_of_graphs, radius=3)\n# model_uuids_dict['cb_smol_D4'] = model_labels\n\n# model_labels = make_smol_sgraphs(positive_labels, DEGREE_2_vertices, 'cb_smol_D2', lst_of_graphs, radius=3)\n# model_uuids_dict['cb_smol_D2'] = model_labels", "_____no_output_____" ] ], [ [ "### `feature_creation` methods for computing Graph features\n\n#### Pagerank\n- The pagerank.create() method computes the pagerank for each vertex and returns a PagerankModel. The pagerank value indicates the centrality of each node in the graph.\n- Compute the PageRank for each vertex in the graph. Return a model object with total PageRank as well as the PageRank value for each vertex in the graph.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.pagerank.create.html#turicreate.pagerank.create'>turicreate.pagerank.create</a>\n\n#### Shortest path\n- Compute the single source shortest path distance from the source vertex to all vertices in the graph. Note that because SGraph is directed, shortest paths are also directed. To find undirected shortest paths add edges to the SGraph in both directions. Return a model object with distance each of vertex in the graph.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.shortest_path.create.html#turicreate.shortest_path.create'>turicreate.shortest_path.create</a>\n\n#### K-core decomposition\n- Compute the K-core decomposition of the graph. 
Return a model object with total number of cores as well as the core id for each vertex in the graph.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.kcore.create.html'>turicreate.kcore.create</a>\n\n#### Degree counting\n- Compute the in degree, out degree and total degree of each vertex.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.degree_counting.create.html#turicreate.degree_counting.create'>turicreate.degree_counting.create</a>\n\n#### Triangle Counting\n- Compute the number of triangles each vertex belongs to, ignoring edge directions. A triangle is a complete subgraph with only three vertices. Return a model object with total number of triangles as well as the triangle counts for each vertex in the graph.\n- <a href='https://apple.github.io/turicreate/docs/api/generated/turicreate.triangle_counting.create.html#turicreate.triangle_counting.create'>turicreate.triangle_counting.create</a>\n\n", "_____no_output_____" ] ], [ [ "def feature_creation(model_labels, list_of_graphs, p1_companies_uuid, radius=3):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n list_of_frames = []\n for idx,graph in enumerate(list_of_graphs):\n \n # CREATE SUBGRAPH\n print('Creating graph {}'.format(sgraph_idx[idx].upper()))\n smol_graph = graph.get_neighborhood(ids=model_labels, radius=radius, full_subgraph=True) \n \n # FUNCTION FOR PAGERANK\n print('HERE_PR')\n DF_PG = add_pagerank(smol_graph, model_labels, idx)\n print(DF_PG.columns.to_list())\n if idx==1:\n DF_PG_1 = DF_PG\n if DF_PG.shape[0] != 0:\n list_of_frames.append(DF_PG)\n \n # FUNCTION FOR WEIGHTED PAGERANK\n print('HERE_PR_W')\n DF_PG_W = add_weighted_pagerank(smol_graph, model_labels, idx)\n print(DF_PG_W.columns.to_list())\n if idx==1:\n DF_PG_W_1 = DF_PG_W\n if DF_PG_W.shape[0] != 0:\n list_of_frames.append(DF_PG_W)\n \n # FUNCTION FOR SHORTEST PATH TOP 5\n print('HERE_SP')\n if idx==3:\n DF = add_shortest_path(smol_graph, model_labels, idx, DF_PG_1, p1_companies_uuid)\n else:\n DF = add_shortest_path(smol_graph, model_labels, idx, DF_PG, p1_companies_uuid)\n print(DF.columns.to_list())\n if DF.shape[0] != 0:\n list_of_frames.append(DF)\n \n # FUNCTION FOR SHORTEST PATH TOP 5 WEIGHTED\n print('HERE_SP_W')\n if idx==3:\n DF = add_weighted_shortest_path(smol_graph, model_labels, idx, DF_PG_W_1, p1_companies_uuid)\n else:\n DF = add_weighted_shortest_path(smol_graph, model_labels, idx, DF_PG_W, p1_companies_uuid)\n print(DF.columns.to_list())\n if DF.shape[0] != 0:\n list_of_frames.append(DF)\n \n # FUNCTION FOR K-CORE DECOPOSITION\n print('HERE_KC')\n DF = add_kcore(smol_graph, model_labels, idx)\n print(DF.columns.to_list())\n if DF.shape[0] != 0:\n list_of_frames.append(DF)\n \n # FUNCTION FOR DEGREES\n print('HERE_D')\n DF = add_degree(smol_graph, model_labels, idx)\n print(DF.columns.to_list())\n if DF.shape[0] != 0:\n list_of_frames.append(DF)\n \n # FUNCTION FOR TRIANGLE\n print('HERE_T')\n DF = add_triangle(smol_graph, model_labels, idx)\n print(DF.columns.to_list())\n if DF.shape[0] != 0:\n list_of_frames.append(DF)\n \n # Merge all feature dataframes together\n DF_ALL = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), list_of_frames)\n print('DATAFRAME SHAPE: {}'.format(DF_ALL.shape))\n \n # Output final DF\n return DF_ALL\n\ndef add_pagerank(graph, model_labels, index):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise 
return empty dataframe\n if sgraph_idx[index] in feat_graph_map['pagerank']:\n # Create pagerank SFrame\n pr = pagerank.create(graph, verbose=False)\n pr_sframe = pr['pagerank']\n # Modifying output SFrame\n pr_df = pd.DataFrame(pr_sframe)\n pr_df = pr_df.drop('delta', axis=1)\n pr_df = pr_df[pr_df['__id'].isin(model_labels)].reset_index(drop=True)\n pr_df = pr_df.rename({'pagerank':'pr_{}'.format(index)}, axis=1)\n # Return modified dataframe\n return pr_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])\n\ndef add_weighted_pagerank(graph, model_labels, index):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['pagerank_weight']:\n pr_w = pagerank_weighted(graph)\n pr_w_sframe = pr_w['__id', 'pagerank']\n # Modifying output SFrame\n pr_w_df = pd.DataFrame(pr_w_sframe)\n pr_w_df = pr_w_df[pr_w_df['__id'].isin(model_labels)].reset_index(drop=True)\n pr_w_df = pr_w_df.rename({'pagerank':'w_pr_{}'.format(index)}, axis=1)\n # Return modified dataframe\n return pr_w_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id']) \n \ndef add_shortest_path(graph, model_labels, index, pagerank_dataframe, p1_companies_uuid):\n #mapping_for_pr = {1:1, 2:1}\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['shortest']:\n # Grab pagerank dataframe\n pr = pagerank_dataframe[['__id', 'pr_1']].sort_values(by='pr_1',ascending=False)\n pr = pr['__id'].to_list()\n # Find top 5 p1 companies \n count = 0\n top_p1 = []\n while len(top_p1) < 5:\n if pr[count] in p1_companies_uuid:\n top_p1.append(pr[count])\n count += 1\n print(count)\n # Loop over top 5 companies to find shortest path to each\n list_of_frames = []\n for jdx,uuid in enumerate(top_p1):\n # Create shortest path SFrame\n sp = shortest_path.create(graph, source_vid=uuid, verbose=False)\n sp_sframe = sp['distance']\n # Modifying output SFrame\n sp_df = pd.DataFrame(sp_sframe)\n sp_df = sp_df[sp_df['__id'].isin(model_labels)].reset_index(drop=True)\n sp_df = sp_df.rename({'distance': 'spath_top_{}_{}'.format(index,jdx)}, axis=1)\n list_of_frames.append(sp_df)\n # Combine 5 shortest path columns\n sp_df = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), list_of_frames)\n # Add minimum path (to top 5) column\n sp_df['spath_top_min_{}'.format(index)] = sp_df.min(axis=1)\n # Return modified dataframe\n return sp_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])\n\ndef add_weighted_shortest_path(graph, model_labels, index, pagerank_dataframe_weighted, p1_companies_uuid):\n #mapping_for_pr = {1:1, 2:1}\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['shortest_weight']:\n # Grab weighted pagerank dataframe\n pr = pagerank_dataframe_weighted[['__id', 'w_pr_1']].sort_values(by='w_pr_1',ascending=False)\n pr = pr['__id'].to_list()\n # Find top 5 p1 companies \n count = 0\n top_p1 = []\n while len(top_p1) < 5:\n if pr[count] in p1_companies_uuid:\n top_p1.append(pr[count])\n count += 1\n print(count)\n # Loop over top 5 companies to find shortest 
path to each\n list_of_frames = []\n for jdx,uuid in enumerate(top_p1):\n # Create shortest path SFrame\n sp = shortest_path.create(graph, source_vid=uuid, weight_field='weight', verbose=False)\n sp_sframe = sp['distance']\n # Modifying output SFrame\n sp_df = pd.DataFrame(sp_sframe)\n sp_df = sp_df[sp_df['__id'].isin(model_labels)].reset_index(drop=True)\n sp_df = sp_df.rename({'distance': 'w_spath_top_{}_{}'.format(index,jdx)}, axis=1)\n list_of_frames.append(sp_df)\n # Combine 5 shortest path columns\n sp_df = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), list_of_frames)\n # Add minimum path (to top 5) column\n sp_df['w_spath_top_min_{}'.format(index)] = sp_df.min(axis=1)\n # Return modified dataframe\n return sp_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])\n\ndef add_kcore(graph, model_labels, index):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['kcore']:\n # Create kcore SFrame\n kc = kcore.create(graph, kmin=0, kmax=5, verbose=False)\n kc_sframe = kc['core_id'] \n # Modifying output SFrame\n kc_df = pd.DataFrame(kc_sframe)\n kc_df = kc_df[kc_df['__id'].isin(model_labels)].reset_index(drop=True)\n kc_df = kc_df.rename({'core_id':'kc_{}'.format(index)}, axis=1)\n # Return modified dataframe\n return kc_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])\n\ndef add_degree(graph, model_labels, index):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['degree']:\n # Create degree SGraph\n deg = degree_counting.create(graph)\n deg_sgraph = deg['graph'] \n # Modifying output SFrame\n deg_df = pd.DataFrame(deg_sgraph.vertices[['__id', 'in_degree', 'out_degree']])\n deg_df = deg_df[deg_df['__id'].isin(model_labels)].reset_index(drop=True)\n deg_df = deg_df.rename({'in_degree':'in_deg_{}'.format(index),'out_degree':'out_deg_{}'.format(index)}, axis=1)\n # Return modified dataframe\n return deg_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])\n \ndef add_triangle(graph, model_labels, index):\n turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', 96)\n # If this particular graph is in the list of approved graphs, then continue, otherwise return empty dataframe\n if sgraph_idx[index] in feat_graph_map['triangle']:\n # Create triangle counting SFrame\n tc = triangle_counting.create(graph, verbose=False)\n tc_sframes = tc['triangle_count']\n # Modifying output SFrame\n tri_df = pd.DataFrame(tc_sframes)\n tri_df = tri_df[tri_df['__id'].isin(model_labels)].reset_index(drop=True)\n tri_df = tri_df.rename({'triangle_count':'tri_{}'.format(index)},axis=1)\n # Return modified dataframe\n return tri_df\n else:\n # Return empty dataframe\n return pd.DataFrame(columns=['__id'])", "_____no_output_____" ] ], [ [ "### (IN PROGRESS) Output 20 graphs for each graph network. 
Produce samples for Graph only (`G`), Graph & Baseline (`GB`), Graph & Baseline Reduced (`GBR`) scenarios in 2 graph networks.", "_____no_output_____" ] ], [ [ "#turicreate.config.set_runtime_config('TURI_DEFAULT_NUM_GRAPH_LAMBDA_WORKERS', 96)\nturicreate.config.set_num_gpus(1)\n# # Load CB Graphs\n# cb0 = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge')\n# cb1 = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge')\n# cb2 = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge')\n# cb3 = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge')\n\n# Load CB Graphs With Weights\ncb0w = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_MultiEdge_Weighted')\ncb1w = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_MultiEdge_Weighted')\ncb2w = load_sgraph('CrunchbaseGraphs/Cruncbase_1Way_SingleEdge_Weighted') # Wasn't needed\ncb3w = load_sgraph('CrunchbaseGraphs/Crunchbase_2Ways_SingleEdge_Weighted')\n\n# P1 Companie uuids\npositive_labels = pd.read_csv('Pledge1_09_08_2020.csv')['src'].to_list()\n\n# Grab relevant neighborhood\n#ALL_vertices = pd.read_csv('ALL_CB_Pick_Sample_Companies_From_Here.csv')\nDEGREE_5_vertices = pd.read_csv('DEGREE_5_Pick_Sample_Companies_From_Here.csv')\nDEGREE_4_vertices = pd.read_csv('DEGREE_4_Pick_Sample_Companies_From_Here.csv')\n#DEGREE_2_vertices = pd.read_csv('DEGREE_2_Pick_Sample_Companies_From_Here.csv')\n\n# Setting up loop\nneighborhoods_name = ['Model_DF_D4', 'Model_DF_D5']\nneighborhoods = [DEGREE_4_vertices, DEGREE_5_vertices]\n#neighborhoods_name = ['Model_DF_D2', 'Model_DF_D4', 'Model_DF_D5', 'Model_DF_ALL']\n#neighborhoods = [DEGREE_2_vertices, DEGREE_4_vertices, DEGREE_5_vertices, ALL_vertices]\nneighborhoods_dict = dict(zip(neighborhoods_name,neighborhoods))\n\n# Fields needed\n#sgraph_idx = {0:'cb0',1:'cb1',2:'cb2',3:'cb3',4:'cb0w',5:'cb1w',6:'cb2w',7:'cb3w'}\nsgraph_idx = {0:'cb0w',1:'cb1w',2:'cb2w', 3:'cb3w'}\n\nsgraph_idx_inv = {v:k for (k,v) in sgraph_idx.items()} # For saving the right column name\n#list_of_graphs = [cb0,cb1,cb2,cb3,cb0w,cb1w,cb2w,cb3w]\nlist_of_graphs = [cb0w,cb1w,cb2w,cb3w]\n#list_of_graphs = [cb0w,cb1w,cb3w]\n\n# Coordinating -- for loading in graphs\nfeat_graph_map = {'pagerank':['cb0w', 'cb1w', 'cb2w'],\n 'pagerank_weight':['cb0w', 'cb1w', 'cb2w'],\n 'kcore':['cb2w', 'cb3w'], # Number of edges does not matter, single edge\n 'degree':['cb0w', 'cb1w', 'cb2w', 'cb3w'], # Doesn't require a lot of computational power\n 'triangle':['cb0w', 'cb2w'], # Ignores edge directions, 1-way\n 'shortest':['cb3w'], # Requires bi-directional edges\n 'shortest_weight':['cb3w']} # Requires bi-directional edges\n\nfor neighborhood in neighborhoods_name: # 2 times\n for scenario in ['G','GB','GBR']: # 3 times\n for idx in range(1,10): # 10 times\n print('{} | {} | {}'.format(neighborhood,scenario,idx))\n print('*'*50)\n # Grab neighborhood DF to start with\n DF = neighborhoods_dict[neighborhood]\n # Sample equal size of non-P1 companies from vertices dataframe\n negatives_labels = DF.sample(int(len(positive_labels)), replace=False)['__id'].to_list()\n # Combine, avoid duplicates\n model_labels = list(np.unique(positive_labels + negatives_labels))\n # SEND TO GRAPH FEATURE METHOD WHICH: CREATES GRAPH FOR FEATURE & APPENDS FEATURE TO MODEL DATAFRAME\n smol_DF = feature_creation(model_labels, list_of_graphs, p1_companies_uuid)\n # Output to CSV\n path = 'files/output/{}/{}/{}.csv'.format(neighborhood,scenario,idx)\n smol_DF.to_csv(path, index=False)\n print('SAVING to {}\\n'.format(path))", "Model_DF_D4 | G | 
1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308819.196625\nIteration 1: total pagerank changed in L1 = 74723.674463\nIteration 2: total pagerank changed in L1 = 74077.515237\nWeighted pagerank finished in: 59.649648 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206601.596298\nIteration 1: total pagerank changed in L1 = 8781.752519\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 95.212619 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310731.924903\nIteration 1: total pagerank changed in L1 = 76851.090886\nIteration 2: total pagerank changed in L1 = 76156.697704\nWeighted pagerank finished in: 49.480700 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10471, 31)\nSAVING to files/output/Model_DF_D4/G/1.csv\n\nModel_DF_D4 | G | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309278.237340\nIteration 1: total pagerank changed in L1 = 74593.313624\nIteration 2: total pagerank changed in L1 = 73948.154866\nWeighted pagerank finished in: 59.562743 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206830.935619\nIteration 1: total pagerank changed in L1 = 8776.130745\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 95.243176 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311202.310058\nIteration 1: total pagerank changed in L1 = 76727.298693\nIteration 2: total pagerank changed in L1 = 76033.397499\nWeighted pagerank finished in: 49.755565 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 
'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10495, 31)\nSAVING to files/output/Model_DF_D4/G/2.csv\n\nModel_DF_D4 | G | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308323.101588\nIteration 1: total pagerank changed in L1 = 74142.004025\nIteration 2: total pagerank changed in L1 = 73496.357553\nWeighted pagerank finished in: 60.291381 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206214.979520\nIteration 1: total pagerank changed in L1 = 8739.737722\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 95.550964 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310227.097256\nIteration 1: total pagerank changed in L1 = 76262.322977\nIteration 2: total pagerank changed in L1 = 75567.730020\nWeighted pagerank finished in: 50.098686 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10482, 31)\nSAVING to files/output/Model_DF_D4/G/3.csv\n\nModel_DF_D4 | G | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 307801.406658\nIteration 1: total pagerank changed in L1 = 74147.127778\nIteration 2: total pagerank changed in L1 = 73496.947215\nWeighted pagerank finished in: 59.921792 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 205826.402146\nIteration 1: total pagerank changed in L1 = 8710.343872\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.204026 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309706.432257\nIteration 1: total pagerank changed in L1 = 76268.609512\nIteration 2: total pagerank changed in L1 = 75570.670257\nWeighted pagerank finished in: 50.472859 secs\n['__id', 
'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10495, 31)\nSAVING to files/output/Model_DF_D4/G/4.csv\n\nModel_DF_D4 | G | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309612.392797\nIteration 1: total pagerank changed in L1 = 74372.159213\nIteration 2: total pagerank changed in L1 = 73718.582903\nWeighted pagerank finished in: 60.010062 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207056.529755\nIteration 1: total pagerank changed in L1 = 8766.820117\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.497548 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311518.101116\nIteration 1: total pagerank changed in L1 = 76490.616144\nIteration 2: total pagerank changed in L1 = 75790.619264\nWeighted pagerank finished in: 50.621150 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10492, 31)\nSAVING to files/output/Model_DF_D4/G/5.csv\n\nModel_DF_D4 | G | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309080.411516\nIteration 1: total pagerank changed in L1 = 74567.703603\nIteration 2: total pagerank changed in L1 = 73901.965235\nWeighted pagerank finished in: 60.790987 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206778.938531\nIteration 1: total pagerank changed in L1 = 8829.009885\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.297816 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 
'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311008.516263\nIteration 1: total pagerank changed in L1 = 76697.214159\nIteration 2: total pagerank changed in L1 = 75983.117319\nWeighted pagerank finished in: 50.879601 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n9\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10486, 31)\nSAVING to files/output/Model_DF_D4/G/6.csv\n\nModel_DF_D4 | G | 7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308917.688689\nIteration 1: total pagerank changed in L1 = 74575.358613\nIteration 2: total pagerank changed in L1 = 73913.065238\nWeighted pagerank finished in: 60.947754 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206644.388254\nIteration 1: total pagerank changed in L1 = 8800.353784\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.188277 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310835.106195\nIteration 1: total pagerank changed in L1 = 76706.428190\nIteration 2: total pagerank changed in L1 = 75995.574371\nWeighted pagerank finished in: 50.654153 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10482, 31)\nSAVING to files/output/Model_DF_D4/G/7.csv\n\nModel_DF_D4 | G | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308970.110003\nIteration 1: total pagerank changed in L1 = 74378.109981\nIteration 2: total pagerank changed in L1 = 73716.654733\nWeighted pagerank finished in: 60.838280 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 
'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206687.994804\nIteration 1: total pagerank changed in L1 = 8822.123919\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.410011 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310893.428796\nIteration 1: total pagerank changed in L1 = 76511.224028\nIteration 2: total pagerank changed in L1 = 75803.420414\nWeighted pagerank finished in: 50.761263 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10494, 31)\nSAVING to files/output/Model_DF_D4/G/8.csv\n\nModel_DF_D4 | G | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308808.208763\nIteration 1: total pagerank changed in L1 = 74404.271996\nIteration 2: total pagerank changed in L1 = 73749.471984\nWeighted pagerank finished in: 61.202795 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206612.521201\nIteration 1: total pagerank changed in L1 = 8754.745789\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.844569 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310727.383852\nIteration 1: total pagerank changed in L1 = 76529.421653\nIteration 2: total pagerank changed in L1 = 75828.439114\nWeighted pagerank finished in: 51.392866 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10489, 31)\nSAVING to files/output/Model_DF_D4/G/9.csv\n\nModel_DF_D4 | GB | 1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309410.968443\nIteration 1: total pagerank changed in L1 = 
75206.098876\nIteration 2: total pagerank changed in L1 = 74549.215792\nWeighted pagerank finished in: 61.348769 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206958.113593\nIteration 1: total pagerank changed in L1 = 8850.440916\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 96.902639 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311319.100135\nIteration 1: total pagerank changed in L1 = 77337.468279\nIteration 2: total pagerank changed in L1 = 76633.544043\nWeighted pagerank finished in: 51.651452 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n9\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10489, 31)\nSAVING to files/output/Model_DF_D4/GB/1.csv\n\nModel_DF_D4 | GB | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308140.949486\nIteration 1: total pagerank changed in L1 = 74040.812752\nIteration 2: total pagerank changed in L1 = 73390.455138\nWeighted pagerank finished in: 61.629350 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206113.643585\nIteration 1: total pagerank changed in L1 = 8778.567312\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.286128 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310050.466459\nIteration 1: total pagerank changed in L1 = 76163.868899\nIteration 2: total pagerank changed in L1 = 75467.632551\nWeighted pagerank finished in: 51.474840 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 
'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10491, 31)\nSAVING to files/output/Model_DF_D4/GB/2.csv\n\nModel_DF_D4 | GB | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308757.151129\nIteration 1: total pagerank changed in L1 = 73872.653725\nIteration 2: total pagerank changed in L1 = 73227.905528\nWeighted pagerank finished in: 61.415309 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206496.894583\nIteration 1: total pagerank changed in L1 = 8703.148300\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.623037 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310665.792506\nIteration 1: total pagerank changed in L1 = 75994.672718\nIteration 2: total pagerank changed in L1 = 75298.347142\nWeighted pagerank finished in: 51.899793 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10487, 31)\nSAVING to files/output/Model_DF_D4/GB/3.csv\n\nModel_DF_D4 | GB | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309107.434353\nIteration 1: total pagerank changed in L1 = 74777.505151\nIteration 2: total pagerank changed in L1 = 74113.400863\nWeighted pagerank finished in: 61.829585 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206811.759032\nIteration 1: total pagerank changed in L1 = 8874.217250\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.958242 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311026.295777\nIteration 1: total pagerank changed in L1 = 76906.723398\nIteration 2: total pagerank changed in L1 = 76194.794438\nWeighted pagerank finished in: 51.955735 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 
'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10487, 31)\nSAVING to files/output/Model_DF_D4/GB/4.csv\n\nModel_DF_D4 | GB | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 307424.443038\nIteration 1: total pagerank changed in L1 = 73837.463518\nIteration 2: total pagerank changed in L1 = 73192.218498\nWeighted pagerank finished in: 61.854275 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 205564.313740\nIteration 1: total pagerank changed in L1 = 8674.059169\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.971571 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309337.962050\nIteration 1: total pagerank changed in L1 = 75960.638963\nIteration 2: total pagerank changed in L1 = 75268.008561\nWeighted pagerank finished in: 51.686146 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10483, 31)\nSAVING to files/output/Model_DF_D4/GB/5.csv\n\nModel_DF_D4 | GB | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308147.583904\nIteration 1: total pagerank changed in L1 = 73950.894328\nIteration 2: total pagerank changed in L1 = 73317.621951\nWeighted pagerank finished in: 61.602432 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206147.058021\nIteration 1: total pagerank changed in L1 = 8744.847697\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.221320 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310069.116132\nIteration 1: total pagerank changed in L1 = 76071.774243\nIteration 2: total pagerank changed in L1 = 75393.506544\nWeighted pagerank 
finished in: 52.264076 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10471, 31)\nSAVING to files/output/Model_DF_D4/GB/6.csv\n\nModel_DF_D4 | GB | 7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309251.231527\nIteration 1: total pagerank changed in L1 = 74525.501576\nIteration 2: total pagerank changed in L1 = 73871.975827\nWeighted pagerank finished in: 61.965787 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206871.827708\nIteration 1: total pagerank changed in L1 = 8835.254238\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 98.026476 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311171.779406\nIteration 1: total pagerank changed in L1 = 76658.468342\nIteration 2: total pagerank changed in L1 = 75952.758864\nWeighted pagerank finished in: 51.825561 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10485, 31)\nSAVING to files/output/Model_DF_D4/GB/7.csv\n\nModel_DF_D4 | GB | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308634.226500\nIteration 1: total pagerank changed in L1 = 74258.884607\nIteration 2: total pagerank changed in L1 = 73615.825212\nWeighted pagerank finished in: 62.225771 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206424.224949\nIteration 1: total pagerank changed in L1 = 8799.464227\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 97.805889 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 
'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310553.326959\nIteration 1: total pagerank changed in L1 = 76386.145560\nIteration 2: total pagerank changed in L1 = 75696.567057\nWeighted pagerank finished in: 51.980979 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10508, 31)\nSAVING to files/output/Model_DF_D4/GB/8.csv\n\nModel_DF_D4 | GB | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308957.237023\nIteration 1: total pagerank changed in L1 = 74654.376370\nIteration 2: total pagerank changed in L1 = 73985.136048\nWeighted pagerank finished in: 62.300282 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206712.444380\nIteration 1: total pagerank changed in L1 = 8790.118996\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 98.082741 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310878.471601\nIteration 1: total pagerank changed in L1 = 76780.131775\nIteration 2: total pagerank changed in L1 = 76064.193460\nWeighted pagerank finished in: 52.430972 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10490, 31)\nSAVING to files/output/Model_DF_D4/GB/9.csv\n\nModel_DF_D4 | GBR | 1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309656.993860\nIteration 1: total pagerank changed in L1 = 74816.041238\nIteration 2: total pagerank changed in L1 = 74158.519055\nWeighted pagerank finished in: 62.366497 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: 
total pagerank changed in L1 = 207153.009824\nIteration 1: total pagerank changed in L1 = 8919.650059\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 98.142206 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311572.011829\nIteration 1: total pagerank changed in L1 = 76942.370050\nIteration 2: total pagerank changed in L1 = 76236.813524\nWeighted pagerank finished in: 53.087506 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10507, 31)\nSAVING to files/output/Model_DF_D4/GBR/1.csv\n\nModel_DF_D4 | GBR | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308226.416244\nIteration 1: total pagerank changed in L1 = 74299.128786\nIteration 2: total pagerank changed in L1 = 73666.402670\nWeighted pagerank finished in: 63.068143 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206202.313764\nIteration 1: total pagerank changed in L1 = 8737.980571\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 98.755625 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310141.786959\nIteration 1: total pagerank changed in L1 = 76426.126656\nIteration 2: total pagerank changed in L1 = 75746.045057\nWeighted pagerank finished in: 52.708963 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10485, 31)\nSAVING to files/output/Model_DF_D4/GBR/2.csv\n\nModel_DF_D4 | GBR | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308888.620013\nIteration 1: total pagerank changed in L1 = 
74212.853963\nIteration 2: total pagerank changed in L1 = 73575.931803\nWeighted pagerank finished in: 63.325423 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206526.190418\nIteration 1: total pagerank changed in L1 = 8801.218534\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 98.640171 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310797.422584\nIteration 1: total pagerank changed in L1 = 76339.240922\nIteration 2: total pagerank changed in L1 = 75656.427558\nWeighted pagerank finished in: 52.650603 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10476, 31)\nSAVING to files/output/Model_DF_D4/GBR/3.csv\n\nModel_DF_D4 | GBR | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308682.748283\nIteration 1: total pagerank changed in L1 = 74117.402771\nIteration 2: total pagerank changed in L1 = 73473.373679\nWeighted pagerank finished in: 62.962267 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206534.620873\nIteration 1: total pagerank changed in L1 = 8767.166427\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 99.005942 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310594.766823\nIteration 1: total pagerank changed in L1 = 76243.562597\nIteration 2: total pagerank changed in L1 = 75552.279418\nWeighted pagerank finished in: 53.137675 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 
'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10472, 31)\nSAVING to files/output/Model_DF_D4/GBR/4.csv\n\nModel_DF_D4 | GBR | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308339.933733\nIteration 1: total pagerank changed in L1 = 74267.786857\nIteration 2: total pagerank changed in L1 = 73597.954423\nWeighted pagerank finished in: 63.102150 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206181.958029\nIteration 1: total pagerank changed in L1 = 8783.890804\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 99.644235 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310270.832001\nIteration 1: total pagerank changed in L1 = 76399.390482\nIteration 2: total pagerank changed in L1 = 75682.622600\nWeighted pagerank finished in: 53.146741 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10500, 31)\nSAVING to files/output/Model_DF_D4/GBR/5.csv\n\nModel_DF_D4 | GBR | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309068.839630\nIteration 1: total pagerank changed in L1 = 75102.317602\nIteration 2: total pagerank changed in L1 = 74448.174822\nWeighted pagerank finished in: 62.990773 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206821.491848\nIteration 1: total pagerank changed in L1 = 8843.967045\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 99.821089 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310990.762296\nIteration 1: total pagerank changed in L1 = 77237.542141\nIteration 2: total pagerank changed in L1 = 76536.707870\nWeighted pagerank finished in: 52.721782 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 
'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10463, 31)\nSAVING to files/output/Model_DF_D4/GBR/6.csv\n\nModel_DF_D4 | GBR | 7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 307757.336913\nIteration 1: total pagerank changed in L1 = 74026.722806\nIteration 2: total pagerank changed in L1 = 73385.890843\nWeighted pagerank finished in: 63.452911 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 205822.496116\nIteration 1: total pagerank changed in L1 = 8694.364983\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 99.438910 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309677.175442\nIteration 1: total pagerank changed in L1 = 76155.252982\nIteration 2: total pagerank changed in L1 = 75467.190242\nWeighted pagerank finished in: 52.517357 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10493, 31)\nSAVING to files/output/Model_DF_D4/GBR/7.csv\n\nModel_DF_D4 | GBR | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308661.698199\nIteration 1: total pagerank changed in L1 = 74643.616061\nIteration 2: total pagerank changed in L1 = 73978.784224\nWeighted pagerank finished in: 63.524068 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206530.368159\nIteration 1: total pagerank changed in L1 = 8851.389422\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.450946 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310585.839264\nIteration 1: total pagerank changed in L1 = 76773.266857\nIteration 2: total pagerank changed in L1 = 76061.607433\nWeighted pagerank 
finished in: 53.487429 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10490, 31)\nSAVING to files/output/Model_DF_D4/GBR/8.csv\n\nModel_DF_D4 | GBR | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309451.846642\nIteration 1: total pagerank changed in L1 = 74856.468473\nIteration 2: total pagerank changed in L1 = 74190.928395\nWeighted pagerank finished in: 63.423074 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206960.908694\nIteration 1: total pagerank changed in L1 = 8908.478008\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 99.643557 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311370.427861\nIteration 1: total pagerank changed in L1 = 76984.332495\nIteration 2: total pagerank changed in L1 = 76272.201841\nWeighted pagerank finished in: 53.266641 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n9\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10479, 31)\nSAVING to files/output/Model_DF_D4/GBR/9.csv\n\nModel_DF_D5 | G | 1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309941.575787\nIteration 1: total pagerank changed in L1 = 74555.912774\nIteration 2: total pagerank changed in L1 = 73906.443756\nWeighted pagerank finished in: 64.094456 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207308.769710\nIteration 1: total pagerank changed in L1 = 8887.135623\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.177410 secs\n['__id', 
'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311856.793603\nIteration 1: total pagerank changed in L1 = 76680.326355\nIteration 2: total pagerank changed in L1 = 75986.679627\nWeighted pagerank finished in: 53.869183 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10467, 31)\nSAVING to files/output/Model_DF_D5/G/1.csv\n\nModel_DF_D5 | G | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308180.245756\nIteration 1: total pagerank changed in L1 = 74117.080124\nIteration 2: total pagerank changed in L1 = 73480.268214\nWeighted pagerank finished in: 64.104767 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206139.188000\nIteration 1: total pagerank changed in L1 = 8744.730651\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.012065 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310106.411737\nIteration 1: total pagerank changed in L1 = 76245.539103\nIteration 2: total pagerank changed in L1 = 75560.129278\nWeighted pagerank finished in: 53.062662 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10496, 31)\nSAVING to files/output/Model_DF_D5/G/2.csv\n\nModel_DF_D5 | G | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310753.540038\nIteration 1: total pagerank changed in L1 = 75591.686916\nIteration 2: total pagerank changed in L1 = 74934.620631\nWeighted pagerank finished in: 64.072235 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 
'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 208081.464913\nIteration 1: total pagerank changed in L1 = 8913.207742\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.985955 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 312679.971236\nIteration 1: total pagerank changed in L1 = 77728.129232\nIteration 2: total pagerank changed in L1 = 77028.901202\nWeighted pagerank finished in: 54.058814 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10499, 31)\nSAVING to files/output/Model_DF_D5/G/3.csv\n\nModel_DF_D5 | G | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308445.725251\nIteration 1: total pagerank changed in L1 = 74562.665529\nIteration 2: total pagerank changed in L1 = 73916.242488\nWeighted pagerank finished in: 64.150107 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206473.271781\nIteration 1: total pagerank changed in L1 = 8783.906221\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.570537 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310365.351946\nIteration 1: total pagerank changed in L1 = 76693.059586\nIteration 2: total pagerank changed in L1 = 75998.855569\nWeighted pagerank finished in: 53.594058 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10494, 31)\nSAVING to files/output/Model_DF_D5/G/4.csv\n\nModel_DF_D5 | G | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 
309689.890629\nIteration 1: total pagerank changed in L1 = 74697.866920\nIteration 2: total pagerank changed in L1 = 74058.361615\nWeighted pagerank finished in: 64.355428 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207146.730886\nIteration 1: total pagerank changed in L1 = 8851.614322\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 101.116549 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311610.581394\nIteration 1: total pagerank changed in L1 = 76826.863601\nIteration 2: total pagerank changed in L1 = 76138.437258\nWeighted pagerank finished in: 53.768107 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10484, 31)\nSAVING to files/output/Model_DF_D5/G/5.csv\n\nModel_DF_D5 | G | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309490.571828\nIteration 1: total pagerank changed in L1 = 74844.490881\nIteration 2: total pagerank changed in L1 = 74186.603312\nWeighted pagerank finished in: 64.915354 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207113.826066\nIteration 1: total pagerank changed in L1 = 8870.631810\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 101.099721 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311410.246493\nIteration 1: total pagerank changed in L1 = 76974.298702\nIteration 2: total pagerank changed in L1 = 76271.622549\nWeighted pagerank finished in: 53.723389 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n29\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 
'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10493, 31)\nSAVING to files/output/Model_DF_D5/G/6.csv\n\nModel_DF_D5 | G | 7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310288.114456\nIteration 1: total pagerank changed in L1 = 75087.242308\nIteration 2: total pagerank changed in L1 = 74433.424111\nWeighted pagerank finished in: 64.856072 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207617.188635\nIteration 1: total pagerank changed in L1 = 8898.528193\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.410438 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 312208.486102\nIteration 1: total pagerank changed in L1 = 77225.025487\nIteration 2: total pagerank changed in L1 = 76522.033082\nWeighted pagerank finished in: 53.863494 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10504, 31)\nSAVING to files/output/Model_DF_D5/G/7.csv\n\nModel_DF_D5 | G | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309508.122891\nIteration 1: total pagerank changed in L1 = 74363.166547\nIteration 2: total pagerank changed in L1 = 73706.399069\nWeighted pagerank finished in: 64.250860 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207105.284593\nIteration 1: total pagerank changed in L1 = 8786.812598\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.958817 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311418.676808\nIteration 1: total pagerank changed in L1 = 76487.791249\nIteration 2: total pagerank changed in L1 = 75780.556096\nWeighted pagerank finished in: 54.117466 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph 
CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10486, 31)\nSAVING to files/output/Model_DF_D5/G/8.csv\n\nModel_DF_D5 | G | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309330.018256\nIteration 1: total pagerank changed in L1 = 74727.300786\nIteration 2: total pagerank changed in L1 = 74069.143808\nWeighted pagerank finished in: 69.261519 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207041.570719\nIteration 1: total pagerank changed in L1 = 8814.658332\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 105.236730 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311242.006060\nIteration 1: total pagerank changed in L1 = 76852.834293\nIteration 2: total pagerank changed in L1 = 76145.648024\nWeighted pagerank finished in: 57.325589 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10497, 31)\nSAVING to files/output/Model_DF_D5/G/9.csv\n\nModel_DF_D5 | GB | 1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309997.591592\nIteration 1: total pagerank changed in L1 = 75170.113392\nIteration 2: total pagerank changed in L1 = 74516.180308\nWeighted pagerank finished in: 70.222094 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207428.296081\nIteration 1: total pagerank changed in L1 = 8943.347075\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 104.496952 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311926.893835\nIteration 1: total 
pagerank changed in L1 = 77307.619696\nIteration 2: total pagerank changed in L1 = 76606.074062\nWeighted pagerank finished in: 57.840186 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10502, 31)\nSAVING to files/output/Model_DF_D5/GB/1.csv\n\nModel_DF_D5 | GB | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309811.617626\nIteration 1: total pagerank changed in L1 = 74888.699582\nIteration 2: total pagerank changed in L1 = 74214.954842\nWeighted pagerank finished in: 67.956559 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207298.143575\nIteration 1: total pagerank changed in L1 = 8875.131777\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 105.182196 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311740.470034\nIteration 1: total pagerank changed in L1 = 77022.132138\nIteration 2: total pagerank changed in L1 = 76300.720010\nWeighted pagerank finished in: 57.105257 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10490, 31)\nSAVING to files/output/Model_DF_D5/GB/2.csv\n\nModel_DF_D5 | GB | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308157.120429\nIteration 1: total pagerank changed in L1 = 74210.589278\nIteration 2: total pagerank changed in L1 = 73562.603706\nWeighted pagerank finished in: 66.110016 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206250.369657\nIteration 1: total pagerank changed in L1 = 8731.776920\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank 
finished in: 102.848105 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310076.913093\nIteration 1: total pagerank changed in L1 = 76332.788107\nIteration 2: total pagerank changed in L1 = 75637.173484\nWeighted pagerank finished in: 61.046690 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10490, 31)\nSAVING to files/output/Model_DF_D5/GB/3.csv\n\nModel_DF_D5 | GB | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309540.019922\nIteration 1: total pagerank changed in L1 = 75082.108247\nIteration 2: total pagerank changed in L1 = 74440.911594\nWeighted pagerank finished in: 67.990449 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207163.811203\nIteration 1: total pagerank changed in L1 = 8830.815113\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 105.305885 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311461.272097\nIteration 1: total pagerank changed in L1 = 77202.093349\nIteration 2: total pagerank changed in L1 = 76516.544830\nWeighted pagerank finished in: 58.079599 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10487, 31)\nSAVING to files/output/Model_DF_D5/GB/4.csv\n\nModel_DF_D5 | GB | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309805.073491\nIteration 1: total pagerank changed in L1 = 75065.827588\nIteration 2: total pagerank changed in L1 = 74402.728321\nWeighted pagerank finished in: 68.732853 secs\n['__id', 
'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207305.469905\nIteration 1: total pagerank changed in L1 = 8871.618459\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 104.762974 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311730.373163\nIteration 1: total pagerank changed in L1 = 77195.306356\nIteration 2: total pagerank changed in L1 = 76485.060928\nWeighted pagerank finished in: 58.713132 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n27\n28\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10491, 31)\nSAVING to files/output/Model_DF_D5/GB/5.csv\n\nModel_DF_D5 | GB | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309691.155838\nIteration 1: total pagerank changed in L1 = 75253.041210\nIteration 2: total pagerank changed in L1 = 74604.416903\nWeighted pagerank finished in: 68.562439 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207342.489702\nIteration 1: total pagerank changed in L1 = 8874.518780\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 106.083153 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311625.673734\nIteration 1: total pagerank changed in L1 = 77391.332915\nIteration 2: total pagerank changed in L1 = 76696.191216\nWeighted pagerank finished in: 57.990825 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10487, 31)\nSAVING to files/output/Model_DF_D5/GB/6.csv\n\nModel_DF_D5 | GB | 
7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310080.779943\nIteration 1: total pagerank changed in L1 = 75030.056268\nIteration 2: total pagerank changed in L1 = 74380.369693\nWeighted pagerank finished in: 68.316117 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207408.606973\nIteration 1: total pagerank changed in L1 = 8846.147398\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 105.496130 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 312017.835436\nIteration 1: total pagerank changed in L1 = 77169.662372\nIteration 2: total pagerank changed in L1 = 76473.337264\nWeighted pagerank finished in: 57.306376 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n25\n26\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10497, 31)\nSAVING to files/output/Model_DF_D5/GB/7.csv\n\nModel_DF_D5 | GB | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309842.799460\nIteration 1: total pagerank changed in L1 = 74740.412544\nIteration 2: total pagerank changed in L1 = 74103.226911\nWeighted pagerank finished in: 70.247587 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207326.783800\nIteration 1: total pagerank changed in L1 = 8759.413902\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 108.353822 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311773.463083\nIteration 1: total pagerank changed in L1 = 76883.882825\nIteration 2: total pagerank changed in L1 = 76201.000655\nWeighted pagerank finished in: 58.692572 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 
'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10477, 31)\nSAVING to files/output/Model_DF_D5/GB/8.csv\n\nModel_DF_D5 | GB | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308346.109632\nIteration 1: total pagerank changed in L1 = 74500.152996\nIteration 2: total pagerank changed in L1 = 73855.556234\nWeighted pagerank finished in: 69.749530 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206278.384076\nIteration 1: total pagerank changed in L1 = 8794.753512\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 108.244922 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310272.338986\nIteration 1: total pagerank changed in L1 = 76627.549929\nIteration 2: total pagerank changed in L1 = 75938.248745\nWeighted pagerank finished in: 57.826027 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10471, 31)\nSAVING to files/output/Model_DF_D5/GB/9.csv\n\nModel_DF_D5 | GBR | 1\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309112.153279\nIteration 1: total pagerank changed in L1 = 74545.395048\nIteration 2: total pagerank changed in L1 = 73896.979243\nWeighted pagerank finished in: 69.241977 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206792.811049\nIteration 1: total pagerank changed in L1 = 8853.003168\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 107.220875 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311017.629229\nIteration 1: total pagerank changed in L1 = 76664.032829\nIteration 2: total pagerank changed in L1 = 75973.463997\nWeighted pagerank finished in: 57.828423 secs\n['__id', 
'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10490, 31)\nSAVING to files/output/Model_DF_D5/GBR/1.csv\n\nModel_DF_D5 | GBR | 2\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310061.571471\nIteration 1: total pagerank changed in L1 = 75334.261917\nIteration 2: total pagerank changed in L1 = 74687.684617\nWeighted pagerank finished in: 68.096467 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207559.214565\nIteration 1: total pagerank changed in L1 = 8971.887885\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 104.628464 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311983.600517\nIteration 1: total pagerank changed in L1 = 77470.634213\nIteration 2: total pagerank changed in L1 = 76776.042487\nWeighted pagerank finished in: 57.366746 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10480, 31)\nSAVING to files/output/Model_DF_D5/GBR/2.csv\n\nModel_DF_D5 | GBR | 3\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309529.872400\nIteration 1: total pagerank changed in L1 = 74566.876180\nIteration 2: total pagerank changed in L1 = 73925.831081\nWeighted pagerank finished in: 68.973220 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207055.289034\nIteration 1: total pagerank changed in L1 = 8819.727424\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 105.820770 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 
'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311451.967423\nIteration 1: total pagerank changed in L1 = 76692.262571\nIteration 2: total pagerank changed in L1 = 76004.530816\nWeighted pagerank finished in: 57.048176 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10488, 31)\nSAVING to files/output/Model_DF_D5/GBR/3.csv\n\nModel_DF_D5 | GBR | 4\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309627.272353\nIteration 1: total pagerank changed in L1 = 74431.877005\nIteration 2: total pagerank changed in L1 = 73803.575351\nWeighted pagerank finished in: 67.845630 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207181.784545\nIteration 1: total pagerank changed in L1 = 8822.931661\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 104.393288 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311544.158669\nIteration 1: total pagerank changed in L1 = 76561.751308\nIteration 2: total pagerank changed in L1 = 75882.856672\nWeighted pagerank finished in: 57.178374 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10501, 31)\nSAVING to files/output/Model_DF_D5/GBR/4.csv\n\nModel_DF_D5 | GBR | 5\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 308885.841074\nIteration 1: total pagerank changed in L1 = 74750.682438\nIteration 2: total pagerank changed in L1 = 74097.705065\nWeighted pagerank finished in: 67.762434 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 
'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206645.872469\nIteration 1: total pagerank changed in L1 = 8932.179882\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 100.941221 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 310814.222641\nIteration 1: total pagerank changed in L1 = 76884.014178\nIteration 2: total pagerank changed in L1 = 76185.589768\nWeighted pagerank finished in: 55.198013 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n7\n8\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10477, 31)\nSAVING to files/output/Model_DF_D5/GBR/5.csv\n\nModel_DF_D5 | GBR | 6\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309247.786876\nIteration 1: total pagerank changed in L1 = 74720.046537\nIteration 2: total pagerank changed in L1 = 74070.181056\nWeighted pagerank finished in: 64.986822 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 206837.100036\nIteration 1: total pagerank changed in L1 = 8827.368376\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 101.009409 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311162.773739\nIteration 1: total pagerank changed in L1 = 76845.151130\nIteration 2: total pagerank changed in L1 = 76150.436011\nWeighted pagerank finished in: 54.487596 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10491, 31)\nSAVING to files/output/Model_DF_D5/GBR/6.csv\n\nModel_DF_D5 | GBR | 7\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309772.286104\nIteration 1: total pagerank changed in L1 = 74914.351284\nIteration 2: total 
pagerank changed in L1 = 74262.466372\nWeighted pagerank finished in: 65.531677 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207334.165783\nIteration 1: total pagerank changed in L1 = 8830.337366\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 101.910600 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311695.851232\nIteration 1: total pagerank changed in L1 = 77044.165540\nIteration 2: total pagerank changed in L1 = 76348.472539\nWeighted pagerank finished in: 54.815183 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10508, 31)\nSAVING to files/output/Model_DF_D5/GBR/7.csv\n\nModel_DF_D5 | GBR | 8\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309508.786258\nIteration 1: total pagerank changed in L1 = 74854.336260\nIteration 2: total pagerank changed in L1 = 74206.919473\nWeighted pagerank finished in: 65.788757 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 207092.104305\nIteration 1: total pagerank changed in L1 = 8872.106273\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 103.663398 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 311423.794258\nIteration 1: total pagerank changed in L1 = 76985.742505\nIteration 2: total pagerank changed in L1 = 76289.601519\nWeighted pagerank finished in: 54.702837 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n6\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10468, 31)\nSAVING to 
files/output/Model_DF_D5/GBR/8.csv\n\nModel_DF_D5 | GBR | 9\n**************************************************\nCreating graph CB0W\nHERE_PR\n['__id', 'pr_0']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 307617.987070\nIteration 1: total pagerank changed in L1 = 73786.785604\nIteration 2: total pagerank changed in L1 = 73141.558980\nWeighted pagerank finished in: 65.303255 secs\n['__id', 'w_pr_0']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_0', 'out_deg_0']\nHERE_T\n['__id', 'tri_0']\nCreating graph CB1W\nHERE_PR\n['__id', 'pr_1']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 205820.365583\nIteration 1: total pagerank changed in L1 = 8718.878504\nIteration 2: total pagerank changed in L1 = 0.000000\nWeighted pagerank finished in: 102.526897 secs\n['__id', 'w_pr_1']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id']\nHERE_D\n['__id', 'in_deg_1', 'out_deg_1']\nHERE_T\n['__id']\nCreating graph CB2W\nHERE_PR\n['__id', 'pr_2']\nHERE_PR_W\nIteration 0: total pagerank changed in L1 = 309529.064855\nIteration 1: total pagerank changed in L1 = 75904.099804\nIteration 2: total pagerank changed in L1 = 75213.846855\nWeighted pagerank finished in: 54.644327 secs\n['__id', 'w_pr_2']\nHERE_SP\n['__id']\nHERE_SP_W\n['__id']\nHERE_KC\n['__id', 'kc_2']\nHERE_D\n['__id', 'in_deg_2', 'out_deg_2']\nHERE_T\n['__id', 'tri_2']\nCreating graph CB3W\nHERE_PR\n['__id']\nHERE_PR_W\n['__id']\nHERE_SP\n1\n2\n3\n4\n5\n['__id', 'spath_top_3_0', 'spath_top_3_1', 'spath_top_3_2', 'spath_top_3_3', 'spath_top_3_4', 'spath_top_min_3']\nHERE_SP_W\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14\n15\n16\n17\n18\n19\n20\n21\n22\n23\n24\n['__id', 'w_spath_top_3_0', 'w_spath_top_3_1', 'w_spath_top_3_2', 'w_spath_top_3_3', 'w_spath_top_3_4', 'w_spath_top_min_3']\nHERE_KC\n['__id', 'kc_3']\nHERE_D\n['__id', 'in_deg_3', 'out_deg_3']\nHERE_T\n['__id']\nDATAFRAME SHAPE: (10486, 31)\nSAVING to files/output/Model_DF_D5/GBR/9.csv\n\n" ] ], [ [ "### Old code from previous graph feature testing...", "_____no_output_____" ] ], [ [ "# # Coordinating -- for loading in graphs\n# vertex_type_list = ['cb_smol_ALL', 'cb_smol_D4','cb_smol_D2']\n# feat_graph_map = {'pagerank':['cb0','cb1','cb2','cb3'], \n# 'kcore':['cb0','cb1','cb2','cb3'],\n# 'degree':['cb0','cb1'], \n# 'triangle':['cb0','cb1'],\n# 'shortest':['cb1', 'cb3'], \n# 'shortest_weight':['cb1w', 'cb3w']}\n# vertex_df_map = {v:pd.DataFrame(columns=['__id']) for v in vertex_type_list}", "_____no_output_____" ], [ "# from turicreate import pagerank\n# from functools import reduce\n\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb0',1:'cb1',2:'cb2',3:'cb3'}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['pagerank']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n\n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n# for idx,smol in enumerate(feat_graph_map['pagerank']):\n# print('CaLcUlAtInG pAgeRaNk for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type,smol)\n# graph = load_sgraph(path)\n# pr = pagerank.create(graph, verbose=False)\n# pr_sframe = pr['pagerank']\n\n# # Modifying output SFrame\n# pr_df = pd.DataFrame(pr_sframe)\n# pr_df = pr_df.drop('delta', axis=1)\n# pr_df = pr_df[pr_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# pr_df = pr_df.rename({'pagerank':'pr_{}'.format(idx)}, axis=1)\n \n# # Save to temp lst_of_frames\n# 
lst_of_frames.append(pr_df)\n \n# PR_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], PR_DF, on='__id', how='outer')\n \n#################################################################################\n# from turicreate import kcore\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb0',1:'cb1',2:'cb2',3:'cb3'}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['kcore']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n\n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n# for idx,smol in enumerate(feat_graph_map['kcore']):\n# print('CaLcUlAtInG kCoRe for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type, smol)\n# graph = load_sgraph(path)\n# kc = kcore.create(graph, kmin=0, kmax=10, verbose=False)\n# kc_sframe = kc['core_id'] \n \n# # Modifying output SFrame\n# kc_df = pd.DataFrame(kc_sframe)\n# kc_df = kc_df[kc_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# kc_df = kc_df.rename({'core_id':'kc_{}'.format(idx)}, axis=1)\n \n# # Save to temp lst_of_frames\n# lst_of_frames.append(kc_df)\n \n# KC_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], KC_DF, on='__id', how='outer')\n\n#################################################################################\n# from turicreate import degree_counting\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb0',1:'cb1'}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['degree']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n\n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n# for idx,smol in enumerate(feat_graph_map['degree']):\n# print('CaLcUlAtInG dEgReEs for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type, smol)\n# graph = load_sgraph(path)\n# deg = degree_counting.create(graph)\n# deg_sgraph = deg['graph'] \n# deg_df = pd.DataFrame(deg_sgraph.vertices[['__id', 'in_degree', 'out_degree']])\n# deg_df = deg_df[deg_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# deg_df = deg_df.rename({'in_degree':'in_deg_{}'.format(idx),\n# 'out_degree':'out_deg_{}'.format(idx)}, axis=1)\n# # Save to temp lst_of_frames\n# lst_of_frames.append(deg_df)\n# DEG_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], DEG_DF, on='__id', how='outer')\n \n#################################################################################\n# from turicreate import triangle_counting\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb0', 1:'cb1'}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['triangle']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n \n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n# for idx,smol in enumerate(feat_graph_map['triangle']):\n# print('CaLcUlAtInG TrIaNgLeS for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type, smol)\n# graph = load_sgraph(path)\n# tc = triangle_counting.create(graph, verbose=False)\n# tri_df = pd.DataFrame(tc['triangle_count'])\n# tri_df = 
tri_df[tri_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# tri_df = tri_df.rename({'triangle_count':'tri_{}'.format(idx)},axis=1)\n# # Save to temp lst_of_frames\n# lst_of_frames.append(tri_df)\n# TRI_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], TRI_DF, on='__id', how='outer')\n \n#################################################################################\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb1',1:'cb3'}\n# sgraph_idx_jdx_assign = {0:1, 1:3}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['shortest']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n \n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n\n# for idx,smol in enumerate(feat_graph_map['shortest']):\n# print('CaLcUlAtInG sHoRtEsT PaTh tOP P1 for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type, smol)\n# graph = load_sgraph(path)\n# pr = vertex_df_map[vertex_type][['__id', 'pr_{}'.format(sgraph_idx_jdx_assign[idx])]].sort_values(by='pr_{}'.format(sgraph_idx_jdx_assign[idx]),ascending=False)\n# pr = pr['__id'].to_list()[:200]\n# count = 0\n# top_p1 = []\n# while len(top_p1) < 5:\n# if pr[count] in p1_companies_uuid:\n# top_p1.append(pr[count])\n# count += 1\n# lst_of_lst_of_frames = []\n# for jdx,uuid in enumerate(top_p1):\n# sp = shortest_path.create(graph, source_vid=uuid, verbose=False)\n# sp_df = pd.DataFrame(sp['distance'])\n# sp_df = sp_df[sp_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# sp_df = sp_df.rename({'distance': 'spath_top_{}_{}'.format(sgraph_idx_jdx_assign[idx],jdx)}, axis=1)\n# lst_of_lst_of_frames.append(sp_df)\n# sp_df = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_lst_of_frames)\n# sp_df['spath_top_min_{}'.format(sgraph_idx_jdx_assign[idx])] = sp_df.min(axis=1) \n# lst_of_frames.append(sp_df)\n\n# DIST_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], DIST_DF, on='__id', how='outer')\n \n#################################################################################\n# from turicreate import shortest_path\n\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb1w',1:'cb3w'}\n# sgraph_idx_jdx_assign = {0:1, 1:3}\n\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['shortest']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n \n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n\n# for idx,smol in enumerate(feat_graph_map['shortest_weight']):\n# print('CaLcUlAtInG sHoRtEsT PaTh tOP P1 for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type, smol)\n# graph = load_sgraph(path)\n# pr = vertex_df_map[vertex_type][['__id', 'pr_{}'.format(sgraph_idx_jdx_assign[idx])]].sort_values(by='pr_{}'.format(sgraph_idx_jdx_assign[idx]),ascending=False)\n# pr = pr['__id'].to_list()[:200]\n# count = 0\n# top_p1 = []\n# while len(top_p1) < 5:\n# if pr[count] in p1_companies_uuid:\n# top_p1.append(pr[count])\n# count += 1\n# lst_of_lst_of_frames = []\n# for jdx,uuid in enumerate(top_p1):\n# sp = shortest_path.create(graph, source_vid=uuid, weight_field='weight', verbose=False)\n# sp_df = pd.DataFrame(sp['distance'])\n# sp_df = 
sp_df[sp_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# sp_df = sp_df.rename({'distance': 'w_spath_top_{}_{}'.format(sgraph_idx_jdx_assign[idx],jdx)}, axis=1)\n# lst_of_lst_of_frames.append(sp_df)\n# sp_df = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_lst_of_frames)\n# sp_df['w_spath_top_min_{}'.format(sgraph_idx_jdx_assign[idx])] = sp_df.min(axis=1) \n# lst_of_frames.append(sp_df)\n\n# DIST_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], DIST_DF, on='__id', how='outer')\n\n#################################################################################\n# # Weighted pagerank\n# # Mapping for this function\n# sgraph_idx_assign = {0:'cb1w',1:'cb2w', 2:'cb3w', 3:'cb4w'}\n# if not len(sgraph_idx_assign.items())==len(feat_graph_map['pagerank_weight']):\n# print('THE ASSIGNMENT DOES NOT MATCH NUMBER OF GRAPHS')\n# for vertex_type in vertex_type_list:\n# lst_of_frames = []\n# for idx,smol in enumerate(feat_graph_map['pagerank_weight']):\n# print('CaLcUlAtInG wEiGhTeD pAgeRaNk for graph {}, in graph neighborhood {}'.format(sgraph_idx_assign[idx].upper(),vertex_type.upper()))\n# path = 'ModelGraphs/test/{}_{}'.format(vertex_type,smol)\n# graph = load_sgraph(path)\n# pr_w = pagerank_weighted(graph)\n# pr_w_sframe = pr_w['__id', 'pagerank']\n# # Modifying output SFrame\n# pr_w_df = pd.DataFrame(pr_w_sframe)\n# pr_w_df = pr_w_df[pr_w_df['__id'].isin(model_uuids_dict[vertex_type])].reset_index(drop=True)\n# pr_w_df = pr_w_df.rename({'pagerank_weight':'w_pr_{}'.format(idx)}, axis=1)\n# # Save to temp lst_of_frames\n# lst_of_frames.append(pr_w_df)\n# PR_W_DF = reduce(lambda df1,df2: pd.merge(df1,df2,on='__id'), lst_of_frames)\n# vertex_df_map[vertex_type] = pd.merge(vertex_df_map[vertex_type], PR_W_DF, on='__id', how='outer')", "CaLcUlAtInG pAgeRaNk for graph CB0, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG pAgeRaNk for graph CB1, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG pAgeRaNk for graph CB2, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG pAgeRaNk for graph CB3, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG pAgeRaNk for graph CB0, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG pAgeRaNk for graph CB1, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG pAgeRaNk for graph CB2, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG pAgeRaNk for graph CB3, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG pAgeRaNk for graph CB0, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG pAgeRaNk for graph CB1, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG pAgeRaNk for graph CB2, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG pAgeRaNk for graph CB3, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG kCoRe for graph CB0, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG kCoRe for graph CB1, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG kCoRe for graph CB2, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG kCoRe for graph CB3, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG kCoRe for graph CB0, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG kCoRe for graph CB1, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG kCoRe for graph CB2, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG kCoRe for graph CB3, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG kCoRe for graph CB0, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG kCoRe for graph CB1, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG kCoRe for graph CB2, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG kCoRe for graph CB3, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG dEgReEs 
for graph CB0, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG dEgReEs for graph CB1, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG dEgReEs for graph CB0, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG dEgReEs for graph CB1, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG dEgReEs for graph CB0, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG dEgReEs for graph CB1, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG TrIaNgLeS for graph CB0, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG TrIaNgLeS for graph CB1, in graph neighborhood CB_SMOL_ALL\nCaLcUlAtInG TrIaNgLeS for graph CB0, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG TrIaNgLeS for graph CB1, in graph neighborhood CB_SMOL_D4\nCaLcUlAtInG TrIaNgLeS for graph CB0, in graph neighborhood CB_SMOL_D2\nCaLcUlAtInG TrIaNgLeS for graph CB1, in graph neighborhood CB_SMOL_D2\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd58e61005ea0bfae2382324b8f96114c701cee
4,865
ipynb
Jupyter Notebook
archiv/0.1.0-tb-helpers.ipynb
bohniti/master-thesis
59541ceb46d30b105e8f5cdbdba74c0d3fc13a63
[ "BSD-3-Clause" ]
null
null
null
archiv/0.1.0-tb-helpers.ipynb
bohniti/master-thesis
59541ceb46d30b105e8f5cdbdba74c0d3fc13a63
[ "BSD-3-Clause" ]
1
2021-11-02T14:24:22.000Z
2021-11-02T14:24:22.000Z
archiv/0.1.0-tb-helpers.ipynb
bohniti/master-thesis
59541ceb46d30b105e8f5cdbdba74c0d3fc13a63
[ "BSD-3-Clause" ]
1
2021-11-02T12:42:50.000Z
2021-11-02T12:42:50.000Z
30.987261
111
0.570606
[ [ [ "# Helper Functions", "_____no_output_____" ] ], [ [ "from os import listdir\nfrom os.path import isfile, join\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Create csv-files for PyTorch Dataclass", "_____no_output_____" ] ], [ [ "def create_info_csv(path, split=True):\n fnames = [f for f in listdir(path) if isfile(join(path, f))]\n fnames = [f.split('.',1)[0] for f in fnames]\n fnames_frame = pd.DataFrame(fnames, columns=['fnames'])\n\n if split:\n new = fnames_frame['fnames'].str.split(\"_\", n = 2, expand = True)\n\n\n # making separate first name column from new data frame\n fnames_frame[\"wid\"]= new[0]\n\n # making separate last name column from new data frame\n fnames_frame[\"pid\"]= new[1]\n\n # making separate last name column from new data frame\n fnames_frame[\"fid\"]= new[2]\n\n # Dropping old Name columns\n fnames_frame.drop(columns =['fnames'], inplace = True)\n\n # save as csf\n info_path = join(path, 'info.csv')\n fnames_frame.to_csv(info_path)", "_____no_output_____" ], [ "create_info_csv('/Users/beantown/PycharmProjects/jigsaw-puzzle-solver/data/hisfrag20/raw/hisfrag20')\ncreate_info_csv('/Users/beantown/PycharmProjects/jigsaw-puzzle-solver/data/hisfrag20/raw/hisfrag20_test')", "_____no_output_____" ], [ "\n# Identifying prominent colors from image\n\n# Import Libraries\nfrom sklearn.cluster import KMeans\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport cv2\nfrom collections import Counter\nfrom skimage.color import rgb2lab, deltaE_cie76\nimport os\n\n# (R,G,B) to hexadecimal converter function\ndef RGB2HEX(color):\n return \"#{:02x}{:02x}{:02x}\".format(int(color[0]), int(color[1]), int(color[2]))\n\n# Image Path utility function\ndef get_image(image_path):\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n# Function to get prominent colors from image\ndef get_colors(image, number_of_colors, show_chart):\n\n # Modifies image to usable format\n modified_image = cv2.resize(image, (600, 400), interpolation = cv2.INTER_AREA)\n modified_image = modified_image.reshape(modified_image.shape[0]*modified_image.shape[1], 3)\n\n # Clusters color regions\n clf = KMeans(n_clusters = number_of_colors)\n labels = clf.fit_predict(modified_image)\n\n counts = Counter(labels)\n center_colors = clf.cluster_centers_\n\n # We get ordered colors by iterating through the keys\n ordered_colors = [center_colors[i] for i in counts.keys()]\n hex_colors = [RGB2HEX(ordered_colors[i]) for i in counts.keys()]\n rgb_colors = [ordered_colors[i] for i in counts.keys()]\n\n if (show_chart):\n plt.figure(figsize = (8, 6))\n plt.pie(counts.values(), labels = hex_colors, colors = hex_colors)\n\n return rgb_colors\n\n\n# Use this function - input image file, no. of colors to extract, plot pie chart\nrgb_colors = get_colors(image, 5, True)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
ecd58f07a5c1d6001fa3e956739e1f0043072e6e
30,966
ipynb
Jupyter Notebook
class07b_pagerank_python3.ipynb
curiositymap/Networks-in-Computational-Biology
c7734cf2c03c7a794ab6990d433b1614c1837b58
[ "Apache-2.0" ]
11
2020-09-17T14:59:30.000Z
2022-03-29T16:35:39.000Z
class07b_pagerank_python3.ipynb
curiositymap/Networks-in-Computational-Biology
c7734cf2c03c7a794ab6990d433b1614c1837b58
[ "Apache-2.0" ]
null
null
null
class07b_pagerank_python3.ipynb
curiositymap/Networks-in-Computational-Biology
c7734cf2c03c7a794ab6990d433b1614c1837b58
[ "Apache-2.0" ]
5
2020-03-12T19:21:56.000Z
2022-03-28T08:23:58.000Z
84.606557
12,024
0.753181
[ [ [ "# CSX46 - Class Session 10 - Pagerank centrality\n\nIn this class session we are going to compute the outgoing-edge PageRank centrality of each gene (vertex) in a human gene regulatory network (a directed graph) from a landmark paper on human gene regulation (Neph et al., Cell, volume 150, pages 1274-1286, 2012; see PDF on Canvas in `Files/Optional Reading`). We will also write our own PageRank function and compare it to the `igraph.Graph.pagerank` method.", "_____no_output_____" ], [ "Using Pandas `read_csv`, read in the ifle `shared/neph_gene_network.txt`, which has two columns of text (first column is the regulator gene, second column is the target gene), into a data frame. The file has no header and is tab-delimited. Assign the column names of the dataframe to be `regulator` and `target`, respectively.", "_____no_output_____" ], [ "Let's load the Python packages that we will need for this exercise", "_____no_output_____" ] ], [ [ "import pandas\nimport igraph\nimport numpy\nimport matplotlib.pyplot\nimport random", "_____no_output_____" ] ], [ [ "Using `pandas.read_csv`, read the file `shared/neph_gene_network.txt`; name the two columns of the resulting data frame, `regulator` and `target`.", "_____no_output_____" ] ], [ [ "edge_list_neph = pandas.read_csv(\"shared/neph_gene_network.txt\",\n sep=\"\\t\", \n names=[\"regulator\",\"target\"])", "_____no_output_____" ] ], [ [ "Load the edge-list data into a directed `igraph.Graph` object `neph_graph`, using `igraph.Graph.TupleList`. NOTE: igraph's `igraph.Graph.pagerank` computes only *incoming* pagerank centrality, and we want *outgoing* pagerank centrality. So: *make sure to reverse the columns of the data frame when you input the data frame into `Graph.TupleList` since we want the outgoing pagerank centrality not incoming pagerank centrality.* Print out a summary of the graph, using the `igraph.Graph.summary` method:", "_____no_output_____" ] ], [ [ "neph_graph = igraph.Graph.TupleList(edge_list_neph[[\"target\",\"regulator\"]].values.tolist(), directed=True)\nneph_graph.summary()", "_____no_output_____" ] ], [ [ "Compute the pagerank centrality measures of all vertices, using `igraph.Graph.pagerank`. Use the resulting object to initialize a `numpy.array`, `pageranks`.", "_____no_output_____" ] ], [ [ "pageranks = numpy.array(neph_graph.pagerank())", "_____no_output_____" ] ], [ [ "Which vertex has highest pagerank centrality in the gene regulatory network, and what is its pagerank centrality value? (think `numpy.max` and `numpy.argmax`). Get a `VertexSet` sequence using the `igraph.Graph.vs` property, and then index into that sequence using Python indexing:", "_____no_output_____" ] ], [ [ "print(numpy.max(pageranks))\nneph_graph.vs[numpy.argmax(pageranks)]", "0.00749112688039444\n" ] ], [ [ "Calculate the in-degree of all vertices in the graph, and scatter plot `log(degree)` vs. `log(pagerank)`. (Do you see why we used `in` here? Note the column swapping we did earlier). 
Note-- you will have to eliminate one vertex that has zero in-degree.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot\nax = matplotlib.pyplot.gca()\nax.set_xscale(\"log\")\nax.set_yscale(\"log\")\ndegrees = numpy.array(neph_graph.indegree())\ninds_keep = numpy.where(degrees > 0)\nax.scatter(degrees[inds_keep], \n pageranks[inds_keep])\n#ax.scatter(neph_graph.indegree(), pageranks)\nmatplotlib.pyplot.xlabel(\"degree\")\nmatplotlib.pyplot.ylabel(\"pagerank\")\nmatplotlib.pyplot.ylim(3*1e-4, 1e-2)\nmatplotlib.pyplot.xlim(0.8, 600)\nmatplotlib.pyplot.show()", "_____no_output_____" ] ], [ [ "See if you can calculate the pagerank centrality yourself, using the matrix inversion (Eq. 7.19 from Newman). Test your function on a small directed graph.", "_____no_output_____" ] ], [ [ "def pagerank(g):\n # N is the number of vertices\n N = len(g.vs)\n \n # alpha is the damping parameter\n alpha = 0.85\n \n # beta = (1-alpha)/N\n beta = (1-alpha)/N\n \n # compute the out-degree of each vertex\n degree_values = g.degree(mode=\"out\")\n \n # get a floating-point adjacency matrix M in the Newman format (take transpose from igraph format)\n M = numpy.matrix(g.get_adjacency().data).transpose().astype(float)\n \n # or each column in 0,N-1:\n for j in range(0,N):\n \n # get the out degree of the vertex as a float\n degree_value = float(degree_values[j])\n \n # if degree is nonzero, normalize the column of M\n if degree_value > 0:\n M[:,j] /= degree_value\n else:\n # set the column to zero\n M[:,j] = 0\n \n # compute pagerank following Newman Eq. 7.19, where M = AD^(-1)\n pr = numpy.linalg.inv(numpy.diag([1.]*N) - alpha * M) * numpy.matrix([beta]*N).transpose()\n \n # normalize pagerank centrality by its sum\n pr /= numpy.sum(pr)\n \n retlist = pr.transpose().tolist()[0]\n return(retlist)", "_____no_output_____" ] ], [ [ "Test out your function on a small 5-vertex Barabasi-Albert graph:", "_____no_output_____" ] ], [ [ "g = igraph.Graph.Barabasi(n=5, m=2) \nimport cairo\nigraph.drawing.plot(g)\nprint(pagerank(g))\nprint(g.pagerank())", "[0.2770649250864387, 0.21240875912408758, 0.21240875912408758, 0.14905877833269304, 0.14905877833269304]\n[0.2770649250864387, 0.21240875912408758, 0.21240875912408758, 0.14905877833269301, 0.14905877833269304]\n" ], [ "igraph.drawing.plot(g, bbox=[0,0,200,200], vertex_label=list(range(0,5)))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd5afa63078f3bb655f7ee9f3735cb3dab13bfc
1,955
ipynb
Jupyter Notebook
week10/PCA class.ipynb
AnaRita93/spiced_projects
64f0caec4008cc9ccb528e71ec16afba78728b8e
[ "MIT" ]
null
null
null
week10/PCA class.ipynb
AnaRita93/spiced_projects
64f0caec4008cc9ccb528e71ec16afba78728b8e
[ "MIT" ]
null
null
null
week10/PCA class.ipynb
AnaRita93/spiced_projects
64f0caec4008cc9ccb528e71ec16afba78728b8e
[ "MIT" ]
null
null
null
21.966292
108
0.560102
[ [ [ "#Packages\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n#from sklearn.cluster import Kmeans\n#from sklearn.preprocessing ", "_____no_output_____" ], [ "# get data \ndf = sns.load_dataset('penguins')", "_____no_output_____" ], [ "# Scale the numeric data\npen_stats = penguins[['flipper_length_mm', body_mass_g]]\nscaler = MinMaxScaler()\npen", "_____no_output_____" ], [ "# Instantiate the algorithm (set hyperparameter) # only works on numeric features \n# 6 centroids randomly located --> check techniques on how to decide the number of clusters \n# its going to iterate so that the dist around centroid is min and distance between clusters is max\nkmeans = KMeans(n_clusters=6)\nkmeans.fit(pens)", "_____no_output_____" ], [ "## Check hierarchical clustering ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
ecd5b3b2e627ff2cc3dd4d5537f9bd732e093a25
40,150
ipynb
Jupyter Notebook
Amarachukwu Eze-WT-018/Import Export Project.ipynb
ruthwaiharo/Week-5-Assessment
f320a9e553c9b723fff996128fcdca45bbe0f2b0
[ "MIT" ]
1
2021-06-18T22:08:40.000Z
2021-06-18T22:08:40.000Z
Amarachukwu Eze-WT-018/Import Export Project.ipynb
ruthwaiharo/Week-5-Assessment
f320a9e553c9b723fff996128fcdca45bbe0f2b0
[ "MIT" ]
4
2021-06-19T00:36:02.000Z
2021-07-05T08:48:08.000Z
Amarachukwu Eze-WT-018/Import Export Project.ipynb
ruthwaiharo/Week-5-Assessment
f320a9e553c9b723fff996128fcdca45bbe0f2b0
[ "MIT" ]
68
2021-06-12T09:24:30.000Z
2021-08-31T12:14:36.000Z
43.311758
12,112
0.58274
[ [ [ "A country's economy depends, sometimes heavily, on its exports and imports. The United Nations Comtrade database provides data on global trade. It will be used to analyse the UK's imports and exports of milk and cream in 2015:\n\nHow much does the UK export and import and is the balance positive (more exports than imports)?\nWhich are the main trading partners, i.e. from/to which countries does the UK import/export the most?\nWhich are the regular customers, i.e. which countries buy milk from the UK every month?\nWhich countries does the UK both import from and export to?", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.simplefilter('ignore', FutureWarning)\n\nfrom pandas import *\n%matplotlib inline\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "LOCATION = 'comtradee.csv'", "_____no_output_____" ] ], [ [ "Getting and preparing the data\nThe data is obtained from the United Nations Comtrade website, by selecting the following configuration:\n\nType of Product: goods\nFrequency: annual\nPeriods: 2020\nReporter: all\nPartners: World and USA\nFlows: imports, exports, re-imports and re-exports\nHS (as reported) commodity codes: TOTAL (All commodities, neither concentrated nor sweetened) and Not Total (Not All commodities, concentrated or sweetened)", "_____no_output_____" ] ], [ [ "transport = read_csv(LOCATION, dtype={'Commodity Code':str})\ntransport.tail(2)", "_____no_output_____" ], [ "def transportType(code):\n if code == 'TOTAL': # total of all HS commodities\n return 'All Commodities'\n if code == 'Not Total': # concentrated or sweetened\n return 'Not All commodities' \n return 'All CPCs'\n\nCOMMODITY = 'All Commodities'\ntransport[COMMODITY] = transport['Commodity Code'].apply(transportType)\nMONTH = 'Period'\nREPORTER = 'Reporter'\nFLOW = 'Trade Flow'\nVALUE = 'Trade Value (US$)'\nheadings = [MONTH, REPORTER, FLOW, COMMODITY, VALUE]\ntransport = transport[headings]\ntransport.head()", "_____no_output_____" ], [ "transport = transport[transport[REPORTER] != 'World']\ntransport.head()", "_____no_output_____" ] ], [ [ "Q1 How much does the UK export and import and is the balance positive (more exports than imports)?", "_____no_output_____" ] ], [ [ "grouped = transport.groupby([FLOW])\ngrouped[VALUE].aggregate(sum)", "_____no_output_____" ] ], [ [ "Q2 Which are the main trading partners, i.e. from/to which countries does the UK import/export the most? ", "_____no_output_____" ] ], [ [ "imports = transport[transport[FLOW] == 'Import']\ngrouped = imports.groupby([REPORTER])\nprint('The UK imports from', len(grouped), 'countries.')\nprint('The 5 biggest exporters to the UK are:')\ntotalImports = grouped[VALUE].aggregate(sum).sort_values(inplace=False,ascending=False)\ntotalImports.head()", "The UK imports from 81 countries.\nThe 5 biggest exporters to the UK are:\n" ], [ "totalImports.head(10).plot(kind='barh')", "_____no_output_____" ], [ "exports = transport[transport[FLOW] == 'Export']\ngrouped = exports.groupby([REPORTER])\nprint('The UK exports to', len(grouped), 'countries.')\nprint('The 5 biggest importers from the UK are:')\ngrouped[VALUE].aggregate(sum).sort_values(ascending=False,inplace=False).head()", "The UK exports to 81 countries.\nThe 5 biggest importers from the UK are:\n" ] ], [ [ "Q3 Which are the regular customers, i.e. 
which countries buy milk from the UK every month?", "_____no_output_____" ] ], [ [ "def buysEveryMonth(group):\n return len(group) == 10\n\ngrouped = exports.groupby([REPORTER])\nregular = grouped.filter(buysEveryMonth)\nregular[(regular[MONTH] == 2020) & (regular[COMMODITY] != 'All Commodities')]", "_____no_output_____" ], [ "regular[VALUE].sum() / exports[VALUE].sum()", "_____no_output_____" ], [ "Q4 Which countries does the UK both import from and export to?", "_____no_output_____" ], [ "countries = pivot_table(transport, index=[REPORTER], columns=[FLOW], \n values=VALUE, aggfunc=sum)\ncountries.head()", "_____no_output_____" ], [ "countries.dropna()", "_____no_output_____" ] ] ]
[ "raw", "code", "raw", "code", "raw", "code", "raw", "code", "raw", "code" ]
[ [ "raw" ], [ "code", "code" ], [ "raw" ], [ "code", "code", "code" ], [ "raw" ], [ "code" ], [ "raw" ], [ "code", "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code" ] ]
ecd5e295525c18c669a9c8c3a95a68058abb783f
4,526
ipynb
Jupyter Notebook
notebooks/pandas-11.ipynb
gileno/pyne2018
dcde5d6a61d7adbe39a7861321879cf33a3dc225
[ "MIT" ]
5
2018-05-25T17:07:05.000Z
2020-04-02T18:43:06.000Z
notebooks/pandas-11.ipynb
gileno/pyne2018
dcde5d6a61d7adbe39a7861321879cf33a3dc225
[ "MIT" ]
null
null
null
notebooks/pandas-11.ipynb
gileno/pyne2018
dcde5d6a61d7adbe39a7861321879cf33a3dc225
[ "MIT" ]
4
2018-05-25T18:06:00.000Z
2022-01-26T00:05:46.000Z
17.960317
97
0.488069
[ [ [ "import pandas as pd\nfrom db import DemoDB", "_____no_output_____" ], [ "database = DemoDB()", "_____no_output_____" ], [ "database.tables", "_____no_output_____" ], [ "album = database.tables.Album.all()", "_____no_output_____" ], [ "album.head()", "_____no_output_____" ], [ "artist = database.tables.Artist.all()", "_____no_output_____" ], [ "artist.head()", "_____no_output_____" ], [ "album_artist = pd.merge(artist, album)", "_____no_output_____" ], [ "album_artist.head()", "_____no_output_____" ], [ "album_artist = pd.merge(artist, album, on='ArtistId')", "_____no_output_____" ], [ "album_artist.head()", "_____no_output_____" ], [ "album.rename(columns={'ArtistId': 'Artist_Id'}, inplace=True)", "_____no_output_____" ], [ "album.head()", "_____no_output_____" ], [ "album_artist = pd.merge(album, artist, left_on='Artist_Id', right_on='ArtistId')", "_____no_output_____" ], [ "album_artist.head()", "_____no_output_____" ], [ "pd.merge(album, artist, left_on='Artist_Id', right_on='ArtistId').drop('Artist_Id', axis=1)", "_____no_output_____" ], [ "alunos1 = pd.DataFrame(\n {\n 'nome': ['Maria', 'Sofia'],\n 'nota': [8, 9],\n }\n)\nalunos2 = pd.DataFrame(\n {\n 'nome': ['João', 'Pedro', 'Maria'],\n 'cod': [1, 2, 3],\n }\n)", "_____no_output_____" ], [ "alunos_total = pd.merge(alunos1, alunos2, on='nome')", "_____no_output_____" ], [ "alunos_total", "_____no_output_____" ], [ "pd.merge(alunos1, alunos2, how='outer')", "_____no_output_____" ], [ "pd.merge(alunos1, alunos2, how='inner')", "_____no_output_____" ], [ "pd.merge(alunos1, alunos2, how='left')", "_____no_output_____" ], [ "pd.merge(alunos1, alunos2, how='right')", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd5e93be7053fdd86cc4784ff03de18d4a4d6cc
74,569
ipynb
Jupyter Notebook
Neural_Networks_and_Deep_Learning/4_Building_your_Deep_Neural_Network_Step_by_Step/Building_your_Deep_Neural_Network_Step_by_Step.ipynb
EricTsai83/deep_learning_specialization
17df65638f7da6a20c6c7936e4a012d74d8943bc
[ "MIT" ]
null
null
null
Neural_Networks_and_Deep_Learning/4_Building_your_Deep_Neural_Network_Step_by_Step/Building_your_Deep_Neural_Network_Step_by_Step.ipynb
EricTsai83/deep_learning_specialization
17df65638f7da6a20c6c7936e4a012d74d8943bc
[ "MIT" ]
null
null
null
Neural_Networks_and_Deep_Learning/4_Building_your_Deep_Neural_Network_Step_by_Step/Building_your_Deep_Neural_Network_Step_by_Step.ipynb
EricTsai83/deep_learning_specialization
17df65638f7da6a20c6c7936e4a012d74d8943bc
[ "MIT" ]
1
2021-11-24T08:39:23.000Z
2021-11-24T08:39:23.000Z
37.062127
490
0.540184
[ [ [ "# Building your Deep Neural Network: Step by Step\n\nWelcome to your week 4 assignment (part 1 of 2)! Previously you trained a 2-layer Neural Network with a single hidden layer. This week, you will build a deep neural network with as many layers as you want!\n\n- In this notebook, you'll implement all the functions required to build a deep neural network.\n- For the next assignment, you'll use these functions to build a deep neural network for image classification.\n\n**By the end of this assignment, you'll be able to:**\n\n- Use non-linear units like ReLU to improve your model\n- Build a deeper neural network (with more than 1 hidden layer)\n- Implement an easy-to-use neural network class\n\n**Notation**:\n- Superscript $[l]$ denotes a quantity associated with the $l^{th}$ layer. \n - Example: $a^{[L]}$ is the $L^{th}$ layer activation. $W^{[L]}$ and $b^{[L]}$ are the $L^{th}$ layer parameters.\n- Superscript $(i)$ denotes a quantity associated with the $i^{th}$ example. \n - Example: $x^{(i)}$ is the $i^{th}$ training example.\n- Lowerscript $i$ denotes the $i^{th}$ entry of a vector.\n - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the $l^{th}$ layer's activations).\n\nLet's get started!", "_____no_output_____" ], [ "## Table of Contents\n- [1 - Packages](#1)\n- [2 - Outline](#2)\n- [3 - Initialization](#3)\n - [3.1 - 2-layer Neural Network](#3-1)\n - [Exercise 1 - initialize_parameters](#ex-1)\n - [3.2 - L-layer Neural Network](#3-2)\n - [Exercise 2 - initialize_parameters_deep](#ex-2)\n- [4 - Forward Propagation Module](#4)\n - [4.1 - Linear Forward](#4-1)\n - [Exercise 3 - linear_forward](#ex-3)\n - [4.2 - Linear-Activation Forward](#4-2)\n - [Exercise 4 - linear_activation_forward](#ex-4)\n - [4.3 - L-Layer Model](#4-3)\n - [Exercise 5 - L_model_forward](#ex-5)\n- [5 - Cost Function](#5)\n - [Exercise 6 - compute_cost](#ex-6)\n- [6 - Backward Propagation Module](#6)\n - [6.1 - Linear Backward](#6-1)\n - [Exercise 7 - linear_backward](#ex-7)\n - [6.2 - Linear-Activation Backward](#6-2)\n - [Exercise 8 - linear_activation_backward](#ex-8)\n - [6.3 - L-Model Backward](#6-3)\n - [Exercise 9 - L_model_backward](#ex-9)\n - [6.4 - Update Parameters](#6-4)\n - [Exercise 10 - update_parameters](#ex-10)", "_____no_output_____" ], [ "<a name='1'></a>\n## 1 - Packages\n\nFirst, import all the packages you'll need during this assignment. \n\n- [numpy](www.numpy.org) is the main package for scientific computing with Python.\n- [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.\n- dnn_utils provides some necessary functions for this notebook.\n- testCases provides some test cases to assess the correctness of your functions\n- np.random.seed(1) is used to keep all the random function calls consistent. It helps grade your work. Please don't change the seed! 
", "_____no_output_____" ] ], [ [ "# !pygmentize public_tests.py", "_____no_output_____" ], [ "import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom testCases import *\nfrom dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward\nfrom public_tests import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n%load_ext autoreload\n%autoreload 2\n\nnp.random.seed(1)", "_____no_output_____" ] ], [ [ "<a name='2'></a>\n## 2 - Outline\n\nTo build your neural network, you'll be implementing several \"helper functions.\" These helper functions will be used in the next assignment to build a two-layer neural network and an L-layer neural network. \n\nEach small helper function will have detailed instructions to walk you through the necessary steps. Here's an outline of the steps in this assignment:\n\n- Initialize the parameters for a two-layer network and for an $L$-layer neural network\n- Implement the forward propagation module (shown in purple in the figure below)\n - Complete the LINEAR part of a layer's forward propagation step (resulting in $Z^{[l]}$).\n - The ACTIVATION function is provided for you (relu/sigmoid)\n - Combine the previous two steps into a new [LINEAR->ACTIVATION] forward function.\n - Stack the [LINEAR->RELU] forward function L-1 time (for layers 1 through L-1) and add a [LINEAR->SIGMOID] at the end (for the final layer $L$). This gives you a new L_model_forward function.\n- Compute the loss\n- Implement the backward propagation module (denoted in red in the figure below)\n - Complete the LINEAR part of a layer's backward propagation step\n - The gradient of the ACTIVATE function is provided for you(relu_backward/sigmoid_backward) \n - Combine the previous two steps into a new [LINEAR->ACTIVATION] backward function\n - Stack [LINEAR->RELU] backward L-1 times and add [LINEAR->SIGMOID] backward in a new L_model_backward function\n- Finally, update the parameters\n\n<img src=\"images/final outline.png\" style=\"width:800px;height:500px;\">\n<caption><center><b>Figure 1</b></center></caption><br>\n\n\n**Note**:\n\nFor every forward function, there is a corresponding backward function. This is why at every step of your forward module you will be storing some values in a cache. These cached values are useful for computing gradients. \n\nIn the backpropagation module, you can then use the cache to calculate the gradients. Don't worry, this assignment will show you exactly how to carry out each of these steps! ", "_____no_output_____" ], [ "<a name='3'></a>\n## 3 - Initialization\n\nYou will write two helper functions to initialize the parameters for your model. The first function will be used to initialize parameters for a two layer model. The second one generalizes this initialization process to $L$ layers.\n\n<a name='3-1'></a>\n### 3.1 - 2-layer Neural Network\n\n<a name='ex-1'></a>\n### Exercise 1 - initialize_parameters\n\nCreate and initialize the parameters of the 2-layer neural network.\n\n**Instructions**:\n\n- The model's structure is: *LINEAR -> RELU -> LINEAR -> SIGMOID*. 
\n- Use this random initialization for the weight matrices: `np.random.randn(shape)*0.01` with the correct shape\n- Use zero initialization for the biases: `np.zeros(shape)`", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_parameters\n\ndef initialize_parameters(n_x, n_h, n_y):\n \"\"\"\n Argument:\n n_x -- size of the input layer\n n_h -- size of the hidden layer\n n_y -- size of the output layer\n \n Returns:\n parameters -- python dictionary containing your parameters:\n W1 -- weight matrix of shape (n_h, n_x)\n b1 -- bias vector of shape (n_h, 1)\n W2 -- weight matrix of shape (n_y, n_h)\n b2 -- bias vector of shape (n_y, 1)\n \"\"\"\n \n np.random.seed(1)\n \n #(≈ 4 lines of code)\n # W1 = ...\n # b1 = ...\n # W2 = ...\n # b2 = ...\n # YOUR CODE STARTS HERE\n W1 = np.random.randn(n_h, n_x)*0.01\n b1 = np.zeros((n_h, 1))\n W2 = np.random.randn(n_y, n_h)*0.01\n b2 = np.zeros((n_y, 1))\n \n # YOUR CODE ENDS HERE\n \n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2}\n \n return parameters ", "_____no_output_____" ], [ "parameters = initialize_parameters(3,2,1)\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\ninitialize_parameters_test(initialize_parameters)", "W1 = [[ 0.01624345 -0.00611756 -0.00528172]\n [-0.01072969 0.00865408 -0.02301539]]\nb1 = [[0.]\n [0.]]\nW2 = [[ 0.01744812 -0.00761207]]\nb2 = [[0.]]\n\u001b[92m All tests passed.\n" ] ], [ [ "***Expected output***\n```\nW1 = [[ 0.01624345 -0.00611756 -0.00528172]\n [-0.01072969 0.00865408 -0.02301539]]\nb1 = [[0.]\n [0.]]\nW2 = [[ 0.01744812 -0.00761207]]\nb2 = [[0.]]\n```", "_____no_output_____" ], [ "<a name='3-2'></a>\n### 3.2 - L-layer Neural Network\n\nThe initialization for a deeper L-layer neural network is more complicated because there are many more weight matrices and bias vectors. When completing the `initialize_parameters_deep` function, you should make sure that your dimensions match between each layer. Recall that $n^{[l]}$ is the number of units in layer $l$. For example, if the size of your input $X$ is $(12288, 209)$ (with $m=209$ examples) then:\n\n<table style=\"width:100%\">\n <tr>\n <td> </td> \n <td> <b>Shape of W</b> </td> \n <td> <b>Shape of b</b> </td> \n <td> <b>Activation</b> </td>\n <td> <b>Shape of Activation</b> </td> \n <tr>\n <tr>\n <td> <b>Layer 1</b> </td> \n <td> $(n^{[1]},12288)$ </td> \n <td> $(n^{[1]},1)$ </td> \n <td> $Z^{[1]} = W^{[1]} X + b^{[1]} $ </td> \n <td> $(n^{[1]},209)$ </td> \n <tr>\n <tr>\n <td> <b>Layer 2</b> </td> \n <td> $(n^{[2]}, n^{[1]})$ </td> \n <td> $(n^{[2]},1)$ </td> \n <td>$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}$ </td> \n <td> $(n^{[2]}, 209)$ </td> \n <tr>\n <tr>\n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$ </td> \n <td> $\\vdots$</td> \n <td> $\\vdots$ </td> \n <tr> \n <tr>\n <td> <b>Layer L-1</b> </td> \n <td> $(n^{[L-1]}, n^{[L-2]})$ </td> \n <td> $(n^{[L-1]}, 1)$ </td> \n <td>$Z^{[L-1]} = W^{[L-1]} A^{[L-2]} + b^{[L-1]}$ </td> \n <td> $(n^{[L-1]}, 209)$ </td> \n <tr>\n <tr>\n <td> <b>Layer L</b> </td> \n <td> $(n^{[L]}, n^{[L-1]})$ </td> \n <td> $(n^{[L]}, 1)$ </td>\n <td> $Z^{[L]} = W^{[L]} A^{[L-1]} + b^{[L]}$</td>\n <td> $(n^{[L]}, 209)$ </td> \n <tr>\n</table>\n\nRemember that when you compute $W X + b$ in python, it carries out broadcasting. 
For example, if: \n\n$$ W = \\begin{bmatrix}\n w_{00} & w_{01} & w_{02} \\\\\n w_{10} & w_{11} & w_{12} \\\\\n w_{20} & w_{21} & w_{22} \n\\end{bmatrix}\\;\\;\\; X = \\begin{bmatrix}\n x_{00} & x_{01} & x_{02} \\\\\n x_{10} & x_{11} & x_{12} \\\\\n x_{20} & x_{21} & x_{22} \n\\end{bmatrix} \\;\\;\\; b =\\begin{bmatrix}\n b_0 \\\\\n b_1 \\\\\n b_2\n\\end{bmatrix}\\tag{2}$$\n\nThen $WX + b$ will be:\n\n$$ WX + b = \\begin{bmatrix}\n (w_{00}x_{00} + w_{01}x_{10} + w_{02}x_{20}) + b_0 & (w_{00}x_{01} + w_{01}x_{11} + w_{02}x_{21}) + b_0 & \\cdots \\\\\n (w_{10}x_{00} + w_{11}x_{10} + w_{12}x_{20}) + b_1 & (w_{10}x_{01} + w_{11}x_{11} + w_{12}x_{21}) + b_1 & \\cdots \\\\\n (w_{20}x_{00} + w_{21}x_{10} + w_{22}x_{20}) + b_2 & (w_{20}x_{01} + w_{21}x_{11} + w_{22}x_{21}) + b_2 & \\cdots\n\\end{bmatrix}\\tag{3} $$\n", "_____no_output_____" ], [ "<a name='ex-2'></a>\n### Exercise 2 - initialize_parameters_deep\n\nImplement initialization for an L-layer Neural Network. \n\n**Instructions**:\n- The model's structure is *[LINEAR -> RELU] $ \\times$ (L-1) -> LINEAR -> SIGMOID*. I.e., it has $L-1$ layers using a ReLU activation function followed by an output layer with a sigmoid activation function.\n- Use random initialization for the weight matrices. Use `np.random.randn(shape) * 0.01`.\n- Use zeros initialization for the biases. Use `np.zeros(shape)`.\n- You'll store $n^{[l]}$, the number of units in different layers, in a variable `layer_dims`. For example, the `layer_dims` for last week's Planar Data classification model would have been [2,4,1]: There were two inputs, one hidden layer with 4 hidden units, and an output layer with 1 output unit. This means `W1`'s shape was (4,2), `b1` was (4,1), `W2` was (1,4) and `b2` was (1,1). Now you will generalize this to $L$ layers! \n- Here is the implementation for $L=1$ (one layer neural network). 
It should inspire you to implement the general case (L-layer neural network).\n```python\n if L == 1:\n parameters[\"W\" + str(L)] = np.random.randn(layer_dims[1], layer_dims[0]) * 0.01\n parameters[\"b\" + str(L)] = np.zeros((layer_dims[1], 1))\n```", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: initialize_parameters_deep\n\ndef initialize_parameters_deep(layer_dims):\n \"\"\"\n Arguments:\n layer_dims -- python array (list) containing the dimensions of each layer in our network\n \n Returns:\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", ..., \"WL\", \"bL\":\n Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n bl -- bias vector of shape (layer_dims[l], 1)\n \"\"\"\n \n np.random.seed(3)\n parameters = {}\n L = len(layer_dims) # number of layers in the network\n\n for l in range(1, L):\n #(≈ 2 lines of code)\n # parameters['W' + str(l)] = ...\n # parameters['b' + str(l)] = ...\n # YOUR CODE STARTS HERE\n parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01\n parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n\n \n # YOUR CODE ENDS HERE\n \n assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))\n assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n\n \n return parameters", "_____no_output_____" ], [ "parameters = initialize_parameters_deep([5,4,3])\n\nprint(\"W1 = \" + str(parameters[\"W1\"]))\nprint(\"b1 = \" + str(parameters[\"b1\"]))\nprint(\"W2 = \" + str(parameters[\"W2\"]))\nprint(\"b2 = \" + str(parameters[\"b2\"]))\n\ninitialize_parameters_deep_test(initialize_parameters_deep)", "W1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]\nb1 = [[0.]\n [0.]\n [0.]\n [0.]]\nW2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]\nb2 = [[0.]\n [0.]\n [0.]]\n\u001b[92m All tests passed.\n" ] ], [ [ "***Expected output***\n```\nW1 = [[ 0.01788628 0.0043651 0.00096497 -0.01863493 -0.00277388]\n [-0.00354759 -0.00082741 -0.00627001 -0.00043818 -0.00477218]\n [-0.01313865 0.00884622 0.00881318 0.01709573 0.00050034]\n [-0.00404677 -0.0054536 -0.01546477 0.00982367 -0.01101068]]\nb1 = [[0.]\n [0.]\n [0.]\n [0.]]\nW2 = [[-0.01185047 -0.0020565 0.01486148 0.00236716]\n [-0.01023785 -0.00712993 0.00625245 -0.00160513]\n [-0.00768836 -0.00230031 0.00745056 0.01976111]]\nb2 = [[0.]\n [0.]\n [0.]]\n```", "_____no_output_____" ], [ "<a name='4'></a>\n## 4 - Forward Propagation Module\n\n<a name='4-1'></a>\n### 4.1 - Linear Forward \n\nNow that you have initialized your parameters, you can do the forward propagation module. Start by implementing some basic functions that you can use again later when implementing the model. Now, you'll complete three functions in this order:\n\n- LINEAR\n- LINEAR -> ACTIVATION where ACTIVATION will be either ReLU or Sigmoid. \n- [LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID (whole model)\n\nThe linear forward module (vectorized over all the examples) computes the following equations:\n\n$$Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}\\tag{4}$$\n\nwhere $A^{[0]} = X$. 
\n\n<a name='ex-3'></a>\n### Exercise 3 - linear_forward \n\nBuild the linear part of forward propagation.\n\n**Reminder**:\nThe mathematical representation of this unit is $Z^{[l]} = W^{[l]}A^{[l-1]} +b^{[l]}$. You may also find `np.dot()` useful. If your dimensions don't match, printing `W.shape` may help.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: linear_forward\n\ndef linear_forward(A, W, b):\n \"\"\"\n Implement the linear part of a layer's forward propagation.\n\n Arguments:\n A -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n\n Returns:\n Z -- the input of the activation function, also called pre-activation parameter \n cache -- a python tuple containing \"A\", \"W\" and \"b\" ; stored for computing the backward pass efficiently\n \"\"\"\n \n #(≈ 1 line of code)\n # Z = ...\n # YOUR CODE STARTS HERE\n Z = np.dot(W,A) + b\n \n # YOUR CODE ENDS HERE\n cache = (A, W, b)\n \n return Z, cache", "_____no_output_____" ], [ "t_A, t_W, t_b = linear_forward_test_case()\nt_Z, t_linear_cache = linear_forward(t_A, t_W, t_b)\nprint(\"Z = \" + str(t_Z))\n\nlinear_forward_test(linear_forward)", "Z = [[ 3.26295337 -1.23429987]]\n\u001b[92m All tests passed.\n" ] ], [ [ "***Expected output***\n```\nZ = [[ 3.26295337 -1.23429987]]\n```", "_____no_output_____" ], [ "<a name='4-2'></a>\n### 4.2 - Linear-Activation Forward\n\nIn this notebook, you will use two activation functions:\n\n- **Sigmoid**: $\\sigma(Z) = \\sigma(W A + b) = \\frac{1}{ 1 + e^{-(W A + b)}}$. You've been provided with the `sigmoid` function which returns **two** items: the activation value \"`a`\" and a \"`cache`\" that contains \"`Z`\" (it's what we will feed in to the corresponding backward function). To use it you could just call: \n``` python\nA, activation_cache = sigmoid(Z)\n```\n\n- **ReLU**: The mathematical formula for ReLu is $A = RELU(Z) = max(0, Z)$. You've been provided with the `relu` function. This function returns **two** items: the activation value \"`A`\" and a \"`cache`\" that contains \"`Z`\" (it's what you'll feed in to the corresponding backward function). To use it you could just call:\n``` python\nA, activation_cache = relu(Z)\n```", "_____no_output_____" ], [ "For added convenience, you're going to group two functions (Linear and Activation) into one function (LINEAR->ACTIVATION). Hence, you'll implement a function that does the LINEAR forward step, followed by an ACTIVATION forward step.\n\n<a name='ex-4'></a>\n### Exercise 4 - linear_activation_forward\n\nImplement the forward propagation of the *LINEAR->ACTIVATION* layer. Mathematical relation is: $A^{[l]} = g(Z^{[l]}) = g(W^{[l]}A^{[l-1]} +b^{[l]})$ where the activation \"g\" can be sigmoid() or relu(). 
Use `linear_forward()` and the correct activation function.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: linear_activation_forward\n\ndef linear_activation_forward(A_prev, W, b, activation):\n \"\"\"\n Implement the forward propagation for the LINEAR->ACTIVATION layer\n\n Arguments:\n A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)\n W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)\n b -- bias vector, numpy array of shape (size of the current layer, 1)\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n\n Returns:\n A -- the output of the activation function, also called the post-activation value \n cache -- a python tuple containing \"linear_cache\" and \"activation_cache\";\n stored for computing the backward pass efficiently\n \"\"\"\n \n if activation == \"sigmoid\":\n #(≈ 2 lines of code)\n # Z, linear_cache = ...\n # A, activation_cache = ...\n # YOUR CODE STARTS HERE\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = sigmoid(Z)\n \n # YOUR CODE ENDS HERE\n \n elif activation == \"relu\":\n #(≈ 2 lines of code)\n # Z, linear_cache = ...\n # A, activation_cache = ...\n # YOUR CODE STARTS HERE\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n \n # YOUR CODE ENDS HERE\n cache = (linear_cache, activation_cache)\n\n return A, cache", "_____no_output_____" ], [ "t_A_prev, t_W, t_b = linear_activation_forward_test_case()\n\nt_A, t_linear_activation_cache = linear_activation_forward(t_A_prev, t_W, t_b, activation = \"sigmoid\")\nprint(\"With sigmoid: A = \" + str(t_A))\n\nt_A, t_linear_activation_cache = linear_activation_forward(t_A_prev, t_W, t_b, activation = \"relu\")\nprint(\"With ReLU: A = \" + str(t_A))\n\nlinear_activation_forward_test(linear_activation_forward)", "With sigmoid: A = [[0.96890023 0.11013289]]\nWith ReLU: A = [[3.43896131 0. ]]\n\u001b[92m All tests passed.\n" ] ], [ [ "***Expected output***\n```\nWith sigmoid: A = [[0.96890023 0.11013289]]\nWith ReLU: A = [[3.43896131 0. ]]\n```", "_____no_output_____" ], [ "**Note**: In deep learning, the \"[LINEAR->ACTIVATION]\" computation is counted as a single layer in the neural network, not two layers. ", "_____no_output_____" ], [ "<a name='4-3'></a>\n### 4.3 - L-Layer Model \n\nFor even *more* convenience when implementing the $L$-layer Neural Net, you will need a function that replicates the previous one (`linear_activation_forward` with RELU) $L-1$ times, then follows that with one `linear_activation_forward` with SIGMOID.\n\n<img src=\"images/model_architecture_kiank.png\" style=\"width:600px;height:300px;\">\n<caption><center> <b>Figure 2</b> : *[LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model</center></caption><br>\n\n<a name='ex-5'></a>\n### Exercise 5 - L_model_forward\n\nImplement the forward propagation of the above model.\n\n**Instructions**: In the code below, the variable `AL` will denote $A^{[L]} = \\sigma(Z^{[L]}) = \\sigma(W^{[L]} A^{[L-1]} + b^{[L]})$. (This is sometimes also called `Yhat`, i.e., this is $\\hat{Y}$.) \n\n**Hints**:\n- Use the functions you've previously written \n- Use a for loop to replicate [LINEAR->RELU] (L-1) times\n- Don't forget to keep track of the caches in the \"caches\" list. 
To add a new value `c` to a `list`, you can use `list.append(c)`.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: L_model_forward\n\ndef L_model_forward(X, parameters):\n \"\"\"\n Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation\n \n Arguments:\n X -- data, numpy array of shape (input size, number of examples)\n parameters -- output of initialize_parameters_deep()\n \n Returns:\n AL -- last post-activation value\n caches -- list of caches containing:\n every cache of linear_activation_forward() (there are L-1 of them, indexed from 0 to L-1)\n \"\"\"\n\n caches = []\n A = X\n L = len(parameters) // 2 # number of layers in the neural network\n \n # Implement [LINEAR -> RELU]*(L-1). Add \"cache\" to the \"caches\" list.\n # The for loop starts at 1 because layer 0 is the input\n for l in range(1, L):\n A_prev = A \n #(≈ 2 lines of code)\n # A, cache = ...\n # caches ...\n # YOUR CODE STARTS HERE\n A, cache = linear_activation_forward(A_prev, parameters['W'+str(l)], parameters['b'+str(l)], activation=\"relu\")\n caches.append(cache)\n \n # YOUR CODE ENDS HERE\n \n # Implement LINEAR -> SIGMOID. Add \"cache\" to the \"caches\" list.\n #(≈ 2 lines of code)\n # AL, cache = ...\n # caches ...\n # YOUR CODE STARTS HERE\n AL, cache = linear_activation_forward(A, parameters['W'+str(L)], parameters['b'+str(L)], activation=\"sigmoid\")\n caches.append(cache)\n \n # YOUR CODE ENDS HERE\n \n return AL, caches", "_____no_output_____" ], [ "t_X, t_parameters = L_model_forward_test_case_2hidden()\nt_AL, t_caches = L_model_forward(t_X, t_parameters)\n\nprint(\"AL = \" + str(t_AL))\n\nL_model_forward_test(L_model_forward)", "AL = [[0.03921668 0.70498921 0.19734387 0.04728177]]\n\u001b[92m All tests passed.\n" ] ], [ [ "***Expected output***\n```\nAL = [[0.03921668 0.70498921 0.19734387 0.04728177]]\n```", "_____no_output_____" ], [ "**Awesome!** You've implemented a full forward propagation that takes the input X and outputs a row vector $A^{[L]}$ containing your predictions. It also records all intermediate values in \"caches\". Using $A^{[L]}$, you can compute the cost of your predictions.", "_____no_output_____" ], [ "<a name='5'></a>\n## 5 - Cost Function\n\nNow you can implement forward and backward propagation! You need to compute the cost, in order to check whether your model is actually learning.\n\n<a name='ex-6'></a>\n### Exercise 6 - compute_cost\nCompute the cross-entropy cost $J$, using the following formula: $$-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} (y^{(i)}\\log\\left(a^{[L] (i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right)) \\tag{7}$$\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost\n\ndef compute_cost(AL, Y):\n \"\"\"\n Implement the cost function defined by equation (7).\n\n Arguments:\n AL -- probability vector corresponding to your label predictions, shape (1, number of examples)\n Y -- true \"label\" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)\n\n Returns:\n cost -- cross-entropy cost\n \"\"\"\n \n m = Y.shape[1]\n\n # Compute loss from aL and y.\n # (≈ 1 lines of code)\n # cost = ...\n # YOUR CODE STARTS HERE\n cost = -(1/m)*sum( np.dot(Y, np.log(AL).T) + np.dot( (1-Y), np.log(1-AL).T ) )\n \n # YOUR CODE ENDS HERE\n \n cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. 
this turns [[17]] into 17).\n\n \n return cost", "_____no_output_____" ], [ "t_Y, t_AL = compute_cost_test_case()\nt_cost = compute_cost(t_AL, t_Y)\n\nprint(\"Cost: \" + str(t_cost))\n\ncompute_cost_test(compute_cost)", "Cost: 0.2797765635793422\n\u001b[92m All tests passed.\n" ] ], [ [ "**Expected Output**:\n\n<table>\n <tr>\n <td><b>cost</b> </td>\n <td> 0.2797765635793422</td> \n </tr>\n</table>", "_____no_output_____" ], [ "<a name='6'></a>\n## 6 - Backward Propagation Module\n\nJust as you did for the forward propagation, you'll implement helper functions for backpropagation. Remember that backpropagation is used to calculate the gradient of the loss function with respect to the parameters. \n\n**Reminder**: \n<img src=\"images/backprop_kiank.png\" style=\"width:650px;height:250px;\">\n<caption><center><font color='purple'><b>Figure 3</b>: Forward and Backward propagation for LINEAR->RELU->LINEAR->SIGMOID <br> <i>The purple blocks represent the forward propagation, and the red blocks represent the backward propagation.</font></center></caption>\n\n\n<!-- \nFor those of you who are experts in calculus (which you don't need to be to do this assignment!), the chain rule of calculus can be used to derive the derivative of the loss $\\mathcal{L}$ with respect to $z^{[1]}$ in a 2-layer network as follows:\n\n$$\\frac{d \\mathcal{L}(a^{[2]},y)}{{dz^{[1]}}} = \\frac{d\\mathcal{L}(a^{[2]},y)}{{da^{[2]}}}\\frac{{da^{[2]}}}{{dz^{[2]}}}\\frac{{dz^{[2]}}}{{da^{[1]}}}\\frac{{da^{[1]}}}{{dz^{[1]}}} \\tag{8} $$\n\nIn order to calculate the gradient $dW^{[1]} = \\frac{\\partial L}{\\partial W^{[1]}}$, use the previous chain rule and you do $dW^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial W^{[1]}}$. During backpropagation, at each step you multiply your current gradient by the gradient corresponding to the specific layer to get the gradient you wanted.\n\nEquivalently, in order to calculate the gradient $db^{[1]} = \\frac{\\partial L}{\\partial b^{[1]}}$, you use the previous chain rule and you do $db^{[1]} = dz^{[1]} \\times \\frac{\\partial z^{[1]} }{\\partial b^{[1]}}$.\n\nThis is why we talk about **backpropagation**.\n!-->\n\nNow, similarly to forward propagation, you're going to build the backward propagation in three steps:\n1. LINEAR backward\n2. LINEAR -> ACTIVATION backward where ACTIVATION computes the derivative of either the ReLU or sigmoid activation\n3. 
[LINEAR -> RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID backward (whole model)", "_____no_output_____" ], [ "For the next exercise, you will need to remember that:\n\n- `b` is a matrix(np.ndarray) with 1 column and n rows, i.e: b = [[1.0], [2.0]] (remember that `b` is a constant)\n- np.sum performs a sum over the elements of a ndarray\n- axis=1 or axis=0 specify if the sum is carried out by rows or by columns respectively\n- keepdims specifies if the original dimensions of the matrix must be kept.\n- Look at the following example to clarify:", "_____no_output_____" ] ], [ [ "A = np.array([[1, 2], [3, 4]])\n\nprint('axis=1 and keepdims=True')\nprint(np.sum(A, axis=1, keepdims=True))\nprint('axis=1 and keepdims=False')\nprint(np.sum(A, axis=1, keepdims=False))\nprint('axis=0 and keepdims=True')\nprint(np.sum(A, axis=0, keepdims=True))\nprint('axis=0 and keepdims=False')\nprint(np.sum(A, axis=0, keepdims=False))", "axis=1 and keepdims=True\n[[3]\n [7]]\naxis=1 and keepdims=False\n[3 7]\naxis=0 and keepdims=True\n[[4 6]]\naxis=0 and keepdims=False\n[4 6]\n" ] ], [ [ "<a name='6-1'></a>\n### 6.1 - Linear Backward\n\nFor layer $l$, the linear part is: $Z^{[l]} = W^{[l]} A^{[l-1]} + b^{[l]}$ (followed by an activation).\n\nSuppose you have already calculated the derivative $dZ^{[l]} = \\frac{\\partial \\mathcal{L} }{\\partial Z^{[l]}}$. You want to get $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$.\n\n<img src=\"images/linearback_kiank.png\" style=\"width:250px;height:300px;\">\n<caption><center><font color='purple'><b>Figure 4</b></font></center></caption>\n\nThe three outputs $(dW^{[l]}, db^{[l]}, dA^{[l-1]})$ are computed using the input $dZ^{[l]}$.\n\nHere are the formulas you need:\n$$ dW^{[l]} = \\frac{\\partial \\mathcal{J} }{\\partial W^{[l]}} = \\frac{1}{m} dZ^{[l]} A^{[l-1] T} \\tag{8}$$\n$$ db^{[l]} = \\frac{\\partial \\mathcal{J} }{\\partial b^{[l]}} = \\frac{1}{m} \\sum_{i = 1}^{m} dZ^{[l](i)}\\tag{9}$$\n$$ dA^{[l-1]} = \\frac{\\partial \\mathcal{L} }{\\partial A^{[l-1]}} = W^{[l] T} dZ^{[l]} \\tag{10}$$\n\n\n$A^{[l-1] T}$ is the transpose of $A^{[l-1]}$. ", "_____no_output_____" ], [ "<a name='ex-7'></a>\n### Exercise 7 - linear_backward \n\nUse the 3 formulas above to implement `linear_backward()`.\n\n**Hint**:\n\n- In numpy you can get the transpose of an ndarray `A` using `A.T` or `A.transpose()`", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: linear_backward\n\ndef linear_backward(dZ, cache):\n \"\"\"\n Implement the linear portion of backward propagation for a single layer (layer l)\n\n Arguments:\n dZ -- Gradient of the cost with respect to the linear output (of current layer l)\n cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer\n\n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n A_prev, W, b = cache\n m = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n # dW = ...\n # db = ... 
sum by the rows of dZ with keepdims=True\n # dA_prev = ...\n # YOUR CODE STARTS HERE\n dW = (1/m)*np.dot(dZ, A_prev.T) \n db = (1/m)*np.sum(dZ, axis=1, keepdims=True)\n dA_prev = np.dot(W.T, dZ)\n \n # YOUR CODE ENDS HERE\n \n return dA_prev, dW, db", "_____no_output_____" ], [ "t_dZ, t_linear_cache = linear_backward_test_case()\nt_dA_prev, t_dW, t_db = linear_backward(t_dZ, t_linear_cache)\n\nprint(\"dA_prev: \" + str(t_dA_prev))\nprint(\"dW: \" + str(t_dW))\nprint(\"db: \" + str(t_db))\n\nlinear_backward_test(linear_backward)", "dA_prev: [[-1.15171336 0.06718465 -0.3204696 2.09812712]\n [ 0.60345879 -3.72508701 5.81700741 -3.84326836]\n [-0.4319552 -1.30987417 1.72354705 0.05070578]\n [-0.38981415 0.60811244 -1.25938424 1.47191593]\n [-2.52214926 2.67882552 -0.67947465 1.48119548]]\ndW: [[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716]\n [ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808]\n [ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]]\ndb: [[-0.14713786]\n [-0.11313155]\n [-0.13209101]]\n\u001b[92m All tests passed.\n" ] ], [ [ "**Expected Output**:\n```\ndA_prev: [[-1.15171336 0.06718465 -0.3204696 2.09812712]\n [ 0.60345879 -3.72508701 5.81700741 -3.84326836]\n [-0.4319552 -1.30987417 1.72354705 0.05070578]\n [-0.38981415 0.60811244 -1.25938424 1.47191593]\n [-2.52214926 2.67882552 -0.67947465 1.48119548]]\ndW: [[ 0.07313866 -0.0976715 -0.87585828 0.73763362 0.00785716]\n [ 0.85508818 0.37530413 -0.59912655 0.71278189 -0.58931808]\n [ 0.97913304 -0.24376494 -0.08839671 0.55151192 -0.10290907]]\ndb: [[-0.14713786]\n [-0.11313155]\n [-0.13209101]]\n ```", "_____no_output_____" ], [ "<a name='6-2'></a>\n### 6.2 - Linear-Activation Backward\n\nNext, you will create a function that merges the two helper functions: **`linear_backward`** and the backward step for the activation **`linear_activation_backward`**. \n\nTo help you implement `linear_activation_backward`, two backward functions have been provided:\n- **`sigmoid_backward`**: Implements the backward propagation for SIGMOID unit. You can call it as follows:\n\n```python\ndZ = sigmoid_backward(dA, activation_cache)\n```\n\n- **`relu_backward`**: Implements the backward propagation for RELU unit. You can call it as follows:\n\n```python\ndZ = relu_backward(dA, activation_cache)\n```\n\nIf $g(.)$ is the activation function, \n`sigmoid_backward` and `relu_backward` compute $$dZ^{[l]} = dA^{[l]} * g'(Z^{[l]}). 
\\tag{11}$$ \n\n<a name='ex-8'></a>\n### Exercise 8 - linear_activation_backward\n\nImplement the backpropagation for the *LINEAR->ACTIVATION* layer.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: linear_activation_backward\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer.\n \n Arguments:\n dA -- post-activation gradient for current layer l \n cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently\n activation -- the activation to be used in this layer, stored as a text string: \"sigmoid\" or \"relu\"\n \n Returns:\n dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev\n dW -- Gradient of the cost with respect to W (current layer l), same shape as W\n db -- Gradient of the cost with respect to b (current layer l), same shape as b\n \"\"\"\n linear_cache, activation_cache = cache\n \n if activation == \"relu\":\n #(≈ 2 lines of code)\n # dZ = ...\n # dA_prev, dW, db = ...\n # YOUR CODE STARTS HERE\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n # YOUR CODE ENDS HERE\n \n elif activation == \"sigmoid\":\n #(≈ 2 lines of code)\n # dZ = ...\n # dA_prev, dW, db = ...\n # YOUR CODE STARTS HERE\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n \n # YOUR CODE ENDS HERE\n \n return dA_prev, dW, db", "_____no_output_____" ], [ "t_dAL, t_linear_activation_cache = linear_activation_backward_test_case()\n\nt_dA_prev, t_dW, t_db = linear_activation_backward(t_dAL, t_linear_activation_cache, activation = \"sigmoid\")\nprint(\"With sigmoid: dA_prev = \" + str(t_dA_prev))\nprint(\"With sigmoid: dW = \" + str(t_dW))\nprint(\"With sigmoid: db = \" + str(t_db))\n\nt_dA_prev, t_dW, t_db = linear_activation_backward(t_dAL, t_linear_activation_cache, activation = \"relu\")\nprint(\"With relu: dA_prev = \" + str(t_dA_prev))\nprint(\"With relu: dW = \" + str(t_dW))\nprint(\"With relu: db = \" + str(t_db))\n\nlinear_activation_backward_test(linear_activation_backward)", "With sigmoid: dA_prev = [[ 0.11017994 0.01105339]\n [ 0.09466817 0.00949723]\n [-0.05743092 -0.00576154]]\nWith sigmoid: dW = [[ 0.10266786 0.09778551 -0.01968084]]\nWith sigmoid: db = [[-0.05729622]]\nWith relu: dA_prev = [[ 0.44090989 -0. ]\n [ 0.37883606 -0. ]\n [-0.2298228 0. ]]\nWith relu: dW = [[ 0.44513824 0.37371418 -0.10478989]]\nWith relu: db = [[-0.20837892]]\n\u001b[92m All tests passed.\n" ] ], [ [ "**Expected output:**\n\n```\nWith sigmoid: dA_prev = [[ 0.11017994 0.01105339]\n [ 0.09466817 0.00949723]\n [-0.05743092 -0.00576154]]\nWith sigmoid: dW = [[ 0.10266786 0.09778551 -0.01968084]]\nWith sigmoid: db = [[-0.05729622]]\nWith relu: dA_prev = [[ 0.44090989 0. ]\n [ 0.37883606 0. ]\n [-0.2298228 0. ]]\nWith relu: dW = [[ 0.44513824 0.37371418 -0.10478989]]\nWith relu: db = [[-0.20837892]]\n```", "_____no_output_____" ], [ "<a name='6-3'></a>\n### 6.3 - L-Model Backward \n\nNow you will implement the backward function for the whole network! \n\nRecall that when you implemented the `L_model_forward` function, at each iteration, you stored a cache which contains (X,W,b, and z). In the back propagation module, you'll use those variables to compute the gradients. Therefore, in the `L_model_backward` function, you'll iterate through all the hidden layers backward, starting from layer $L$. 
On each step, you will use the cached values for layer $l$ to backpropagate through layer $l$. Figure 5 below shows the backward pass. \n\n\n<img src=\"images/mn_backward.png\" style=\"width:450px;height:300px;\">\n<caption><center><font color='purple'><b>Figure 5</b>: Backward pass</font></center></caption>\n\n**Initializing backpropagation**:\n\nTo backpropagate through this network, you know that the output is: \n$A^{[L]} = \\sigma(Z^{[L]})$. Your code thus needs to compute `dAL` $= \\frac{\\partial \\mathcal{L}}{\\partial A^{[L]}}$.\nTo do so, use this formula (derived using calculus which, again, you don't need in-depth knowledge of!):\n```python\ndAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) # derivative of cost with respect to AL\n```\n\nYou can then use this post-activation gradient `dAL` to keep going backward. As seen in Figure 5, you can now feed in `dAL` into the LINEAR->SIGMOID backward function you implemented (which will use the cached values stored by the L_model_forward function). \n\nAfter that, you will have to use a `for` loop to iterate through all the other layers using the LINEAR->RELU backward function. You should store each dA, dW, and db in the grads dictionary. To do so, use this formula : \n\n$$grads[\"dW\" + str(l)] = dW^{[l]}\\tag{15} $$\n\nFor example, for $l=3$ this would store $dW^{[l]}$ in `grads[\"dW3\"]`.\n\n<a name='ex-9'></a>\n### Exercise 9 - L_model_backward\n\nImplement backpropagation for the *[LINEAR->RELU] $\\times$ (L-1) -> LINEAR -> SIGMOID* model.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: L_model_backward\n\ndef L_model_backward(AL, Y, caches):\n \"\"\"\n Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group\n \n Arguments:\n AL -- probability vector, output of the forward propagation (L_model_forward())\n Y -- true \"label\" vector (containing 0 if non-cat, 1 if cat)\n caches -- list of caches containing:\n every cache of linear_activation_forward() with \"relu\" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)\n the cache of linear_activation_forward() with \"sigmoid\" (it's caches[L-1])\n \n Returns:\n grads -- A dictionary with the gradients\n grads[\"dA\" + str(l)] = ... \n grads[\"dW\" + str(l)] = ...\n grads[\"db\" + str(l)] = ... \n \"\"\"\n grads = {}\n L = len(caches) # the number of layers\n m = AL.shape[1]\n Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL\n \n # Initializing the backpropagation\n #(1 line of code)\n # dAL = ...\n # YOUR CODE STARTS HERE\n dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))\n \n # YOUR CODE ENDS HERE\n \n # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: \"dAL, current_cache\". Outputs: \"grads[\"dAL-1\"], grads[\"dWL\"], grads[\"dbL\"]\n #(approx. 5 lines)\n # current_cache = ...\n # dA_prev_temp, dW_temp, db_temp = ...\n # grads[\"dA\" + str(L-1)] = ...\n # grads[\"dW\" + str(L)] = ...\n # grads[\"db\" + str(L)] = ...\n # YOUR CODE STARTS HERE\n current_cache = caches[-1]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(dAL, current_cache, activation='sigmoid')\n grads[\"dA\" + str(L-1)] = dA_prev_temp\n grads[\"dW\" + str(L)] = dW_temp\n grads[\"db\" + str(L)] = db_temp\n \n # YOUR CODE ENDS HERE\n \n # Loop from l=L-2 to l=0\n for l in reversed(range(L-1)):\n # lth layer: (RELU -> LINEAR) gradients.\n # Inputs: \"grads[\"dA\" + str(l + 1)], current_cache\". Outputs: \"grads[\"dA\" + str(l)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n #(approx. 
5 lines)\n # current_cache = ...\n # dA_prev_temp, dW_temp, db_temp = ...\n # grads[\"dA\" + str(l)] = ...\n # grads[\"dW\" + str(l + 1)] = ...\n # grads[\"db\" + str(l + 1)] = ...\n # YOUR CODE STARTS HERE\n current_cache = caches[l]\n dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l+1)], current_cache, activation='relu')\n grads[\"dA\" + str(l)] = dA_prev_temp\n grads[\"dW\" + str(l + 1)] = dW_temp\n grads[\"db\" + str(l + 1)] = db_temp\n \n # YOUR CODE ENDS HERE\n\n return grads", "_____no_output_____" ], [ "t_AL, t_Y_assess, t_caches = L_model_backward_test_case()\ngrads = L_model_backward(t_AL, t_Y_assess, t_caches)\n\nprint(\"dA0 = \" + str(grads['dA0']))\nprint(\"dA1 = \" + str(grads['dA1']))\nprint(\"dW1 = \" + str(grads['dW1']))\nprint(\"dW2 = \" + str(grads['dW2']))\nprint(\"db1 = \" + str(grads['db1']))\nprint(\"db2 = \" + str(grads['db2']))\n\nL_model_backward_test(L_model_backward)", "dA0 = [[ 0. 0.52257901]\n [ 0. -0.3269206 ]\n [ 0. -0.32070404]\n [ 0. -0.74079187]]\ndA1 = [[ 0.12913162 -0.44014127]\n [-0.14175655 0.48317296]\n [ 0.01663708 -0.05670698]]\ndW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]\n [0. 0. 0. 0. ]\n [0.05283652 0.01005865 0.01777766 0.0135308 ]]\ndW2 = [[-0.39202432 -0.13325855 -0.04601089]]\ndb1 = [[-0.22007063]\n [ 0. ]\n [-0.02835349]]\ndb2 = [[0.15187861]]\n\u001b[92m All tests passed.\n" ] ], [ [ "**Expected output:**\n\n```\ndA0 = [[ 0. 0.52257901]\n [ 0. -0.3269206 ]\n [ 0. -0.32070404]\n [ 0. -0.74079187]]\ndA1 = [[ 0.12913162 -0.44014127]\n [-0.14175655 0.48317296]\n [ 0.01663708 -0.05670698]]\ndW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]\n [0. 0. 0. 0. ]\n [0.05283652 0.01005865 0.01777766 0.0135308 ]]\ndW2 = [[-0.39202432 -0.13325855 -0.04601089]]\ndb1 = [[-0.22007063]\n [ 0. ]\n [-0.02835349]]\ndb2 = [[0.15187861]]\n```", "_____no_output_____" ], [ "<a name='6-4'></a>\n### 6.4 - Update Parameters\n\nIn this section, you'll update the parameters of the model, using gradient descent: \n\n$$ W^{[l]} = W^{[l]} - \\alpha \\text{ } dW^{[l]} \\tag{16}$$\n$$ b^{[l]} = b^{[l]} - \\alpha \\text{ } db^{[l]} \\tag{17}$$\n\nwhere $\\alpha$ is the learning rate. \n\nAfter computing the updated parameters, store them in the parameters dictionary. ", "_____no_output_____" ], [ "<a name='ex-10'></a>\n### Exercise 10 - update_parameters\n\nImplement `update_parameters()` to update your parameters using gradient descent.\n\n**Instructions**:\nUpdate parameters using gradient descent on every $W^{[l]}$ and $b^{[l]}$ for $l = 1, 2, ..., L$. ", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: update_parameters\n\ndef update_parameters(params, grads, learning_rate):\n \"\"\"\n Update parameters using gradient descent\n \n Arguments:\n params -- python dictionary containing your parameters \n grads -- python dictionary containing your gradients, output of L_model_backward\n \n Returns:\n parameters -- python dictionary containing your updated parameters \n parameters[\"W\" + str(l)] = ... \n parameters[\"b\" + str(l)] = ...\n \"\"\"\n parameters = params.copy()\n L = len(parameters) // 2 # number of layers in the neural network\n\n # Update rule for each parameter. 
Use a for loop.\n #(≈ 2 lines of code)\n for l in range(L):\n # parameters[\"W\" + str(l+1)] = ...\n # parameters[\"b\" + str(l+1)] = ...\n # YOUR CODE STARTS HERE\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - np.dot(learning_rate, grads[\"dW\" + str(l+1)])\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate*grads[\"db\" + str(l+1)]\n \n # YOUR CODE ENDS HERE\n return parameters", "_____no_output_____" ], [ "t_parameters, grads = update_parameters_test_case()\nt_parameters = update_parameters(t_parameters, grads, 0.1)\n\nprint (\"W1 = \"+ str(t_parameters[\"W1\"]))\nprint (\"b1 = \"+ str(t_parameters[\"b1\"]))\nprint (\"W2 = \"+ str(t_parameters[\"W2\"]))\nprint (\"b2 = \"+ str(t_parameters[\"b2\"]))\n\nupdate_parameters_test(update_parameters)", "W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]\n [-1.76569676 -0.80627147 0.51115557 -1.18258802]\n [-1.0535704 -0.86128581 0.68284052 2.20374577]]\nb1 = [[-0.04659241]\n [-1.28888275]\n [ 0.53405496]]\nW2 = [[-0.55569196 0.0354055 1.32964895]]\nb2 = [[-0.84610769]]\n\u001b[92m All tests passed.\n" ] ], [ [ "**Expected output:**\n\n```\nW1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]\n [-1.76569676 -0.80627147 0.51115557 -1.18258802]\n [-1.0535704 -0.86128581 0.68284052 2.20374577]]\nb1 = [[-0.04659241]\n [-1.28888275]\n [ 0.53405496]]\nW2 = [[-0.55569196 0.0354055 1.32964895]]\nb2 = [[-0.84610769]]\n```", "_____no_output_____" ], [ "### Congratulations! \n\nYou've just implemented all the functions required for building a deep neural network, including: \n\n- Using non-linear units improve your model\n- Building a deeper neural network (with more than 1 hidden layer)\n- Implementing an easy-to-use neural network class\n\nThis was indeed a long assignment, but the next part of the assignment is easier. ;) \n\nIn the next assignment, you'll be putting all these together to build two models:\n\n- A two-layer neural network\n- An L-layer neural network\n\nYou will in fact use these models to classify cat vs non-cat images! (Meow!) Great work and see you next time. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
ecd5e9656dbb1aae6e3502575baaf60426fce06a
1,568
ipynb
Jupyter Notebook
Exercise06/Exercise06.ipynb
Develop-Packt/Advanced-Operations-on-Python-Data-Structures
108291c80f1a2cda89e4a5bdb98c3e22301394a2
[ "MIT" ]
22
2020-06-27T04:21:49.000Z
2022-03-08T04:39:44.000Z
Exercise06/Exercise06.ipynb
Develop-Packt/Advanced-Operations-on-Python-Data-Structures
108291c80f1a2cda89e4a5bdb98c3e22301394a2
[ "MIT" ]
2
2021-02-02T22:49:16.000Z
2021-06-02T02:09:21.000Z
Chapter02/Exercise 2.06/Exercise 2.06.ipynb
Hubertus444/The-Data-Wrangling-Workshop
ddad20f8676602ac6624e72e802769fcaff45b0f
[ "MIT" ]
46
2020-04-20T13:04:11.000Z
2022-03-22T05:23:52.000Z
17.818182
88
0.462372
[ [ [ "list_of_words = [\"Hello\", \"there.\", \"How\", \"are\", \"you\", \"doing?\"] \nlist_of_words", "_____no_output_____" ], [ "check_for = [\"How\", \"are\"] \ncheck_for", "_____no_output_____" ], [ "all(w in list_of_words for w in check_for) ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
ecd5fb687afcea038837c6b52d2977885795dc71
113,185
ipynb
Jupyter Notebook
Applying AI to 2D Medical Imaging Data/Models for Classification of 2D Medical Images/Exercise - Split Dataset for Model Development/Exercise.ipynb
mayank1101/AI-for-Healthcare
90a70863d8731cbac0662067030340c68374af15
[ "MIT" ]
2
2020-12-15T19:35:39.000Z
2021-09-21T19:47:28.000Z
Applying AI to 2D Medical Imaging Data/Models for Classification of 2D Medical Images/Exercise - Split Dataset for Model Development/Exercise.ipynb
mayank1101/AI-for-Healthcare
90a70863d8731cbac0662067030340c68374af15
[ "MIT" ]
null
null
null
Applying AI to 2D Medical Imaging Data/Models for Classification of 2D Medical Images/Exercise - Split Dataset for Model Development/Exercise.ipynb
mayank1101/AI-for-Healthcare
90a70863d8731cbac0662067030340c68374af15
[ "MIT" ]
2
2021-12-27T15:13:32.000Z
2022-03-27T22:43:25.000Z
194.142367
68,468
0.889977
[ [ [ "%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom random import sample\n\nfrom itertools import chain\nfrom random import sample \nimport scipy\n\nimport sklearn.model_selection", "_____no_output_____" ] ], [ [ "## 1. Read the Data\nFirst read in the dataframe. You'll notice it's similar to the dataframe that you ended the final solution with in Lesson 2, Exercise 4, only with more data:", "_____no_output_____" ] ], [ [ "d = pd.read_csv('findings_data_5000.csv')\nd.head()", "_____no_output_____" ] ], [ [ "## 2. Understand the Distribution\nJust like in Lesson 2, Exercise 4, we want to see how different diseases are distributed with our disease of interest, as well as how age and gender are distributed:", "_____no_output_____" ] ], [ [ "all_labels = np.unique(list(chain(*d['Finding Labels'].map(lambda x: x.split('|')).tolist())))\nall_labels = [x for x in all_labels if len(x)>0]", "_____no_output_____" ], [ "ax = d[all_labels].sum().plot(kind='bar')\nax.set(ylabel = 'Number of Images with Label')", "_____no_output_____" ] ], [ [ "**Since there are many combinations of potential findings, let's look at the 30 most common co-occurrences:**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(16,6))\nd[d.Pneumothorax==1]['Finding Labels'].value_counts()[0:30].plot(kind='bar')", "_____no_output_____" ], [ "plt.figure(figsize=(6,6))\nd[d.Pneumothorax ==1]['Patient Gender'].value_counts().plot(kind='bar')", "_____no_output_____" ], [ "plt.figure(figsize=(10,6))\nplt.hist(d[d.Pneumothorax==1]['Patient Age'])", "_____no_output_____" ] ], [ [ "## 3. To Do - Exercise - Split the Data into Train/Test Partitions\nNow, knowing what we know from above, let's create the appropriate training and validation sets for a model that we want to train to classify the presence of a Pneumothorax. Also, take care of the following conditions: \n1. To have _EQUAL_ amount of positive and negative cases of Pneumothorax in Training\n2. To have 20% positive cases of Pneumothorax in the Test Set\n\n**Hint: you can random sample the negative cases to obtain a balanced training set.**", "_____no_output_____" ] ], [ [ "# Your code goes here\ntrain_df, valid_df = sklearn.model_selection.train_test_split(d,\n test_size=0.2,\n stratify=d['Pneumothorax'])", "_____no_output_____" ], [ "print(train_df['Pneumothorax'].sum()/len(train_df), len(train_df), train_df['Pneumothorax'].sum()) ", "0.5 352 176.0\n" ], [ "print(valid_df['Pneumothorax'].sum()/len(valid_df), len(valid_df), valid_df['Pneumothorax'].sum())", "0.044 1000 44.0\n" ], [ "print(len(d))", "4999\n" ] ], [ [ "### 1. To have EQUAL amount of positive and negative cases of Pneumothorax in Training", "_____no_output_____" ] ], [ [ "p_inds = train_df[train_df.Pneumothorax == 1].index.tolist()\nnp_inds = train_df[train_df.Pneumothorax == 0].index.tolist()\n\nnp_inds = sample(np_inds, len(p_inds))\ntrain_df = train_df.loc[p_inds+np_inds]", "_____no_output_____" ], [ "train_df['Pneumothorax'].sum()/len(train_df)", "_____no_output_____" ] ], [ [ "### 2. To have 20% positive cases of Pneumothorax in the Test Set", "_____no_output_____" ] ], [ [ "p_inds = valid_df[valid_df.Pneumothorax == 1].index.tolist()\nnp_inds = valid_df[valid_df.Pneumothorax == 0].index.tolist()\nnp_inds = sample(np_inds, 4*len(p_inds))\nvalid_df = valid_df.loc[p_inds + np_inds]", "_____no_output_____" ], [ "valid_df['Pneumothorax'].sum()/len(valid_df)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd6033a4ab90fe1cdc99c65b5ba5fefe7d35f0c
236,159
ipynb
Jupyter Notebook
example_notebooks/1_gluoncv_finetune/TRAIN-gluon-ssd_512_resnet50_v1_voc.ipynb
Boltuzamaki/Monk_Object_Detection
baf113ef6db8b531d0ef6413538e49d422163a20
[ "Apache-2.0" ]
549
2020-01-02T05:14:57.000Z
2022-03-29T18:34:12.000Z
example_notebooks/1_gluoncv_finetune/TRAIN-gluon-ssd_512_resnet50_v1_voc.ipynb
nathnim/Monk_Object_Detection
1d7a07193ea3455221caa41d07c33c81d50c6b3f
[ "Apache-2.0" ]
98
2020-01-21T09:41:30.000Z
2022-03-12T00:53:06.000Z
example_notebooks/1_gluoncv_finetune/TRAIN-gluon-ssd_512_resnet50_v1_voc.ipynb
nathnim/Monk_Object_Detection
1d7a07193ea3455221caa41d07c33c81d50c6b3f
[ "Apache-2.0" ]
233
2020-01-18T03:46:27.000Z
2022-03-19T03:17:47.000Z
353.00299
217,264
0.934946
[ [ [ "<a href=\"https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/1_gluoncv_finetune/TRAIN-gluon-ssd_512_resnet50_v1_voc.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Installation\n\n - Run these commands\n \n - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git\n \n - cd Monk_Object_Detection/1_gluoncv_finetune/installation\n \n - Select the right requirements file and run\n \n - cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install", "_____no_output_____" ] ], [ [ "! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git", "_____no_output_____" ], [ "# For colab use the command below\n! cd Monk_Object_Detection/1_gluoncv_finetune/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install\n\n\n# For Local systems and cloud select the right CUDA version\n# !cd Monk_Object_Detection/1_gluoncv_finetune/installation && cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install", "_____no_output_____" ] ], [ [ "## Dataset Directory Structure\n\n Parent_Directory (root)\n |\n |-----------Images (img_dir)\n | |\n | |------------------img1.jpg\n | |------------------img2.jpg\n | |------------------.........(and so on)\n |\n |\n |-----------train_labels.csv (anno_file)\n \n \n## Annotation file format\n\n | Id | Labels |\n | img1.jpg | x1 y1 x2 y2 label1 x1 y1 x2 y2 label2 |\n \n- Labels: xmin ymin xmax ymax label\n- xmin, ymin - top left corner of bounding box\n- xmax, ymax - bottom right corner of bounding box", "_____no_output_____" ], [ "# About the Network\n\n1. Blog 1 on Resnet Network - https://medium.com/@14prakash/understanding-and-implementing-architectures-of-resnet-and-resnext-for-state-of-the-art-image-cf51669e1624\n\n2. Blog 2 on Resnet Network - https://neurohive.io/en/popular-networks/resnet/\n\n3. Blog 3 on Resnet Network - https://cv-tricks.com/keras/understand-implement-resnets/\n\n4. Blog 4 on Redisual blocks - https://d2l.ai/chapter_convolutional-modern/resnet.html\n\n5. Blog 1 on SSD - https://towardsdatascience.com/review-ssd-single-shot-detector-object-detection-851a94607d11\n\n6. Blog 2 on SSD-512 - https://medium.com/@jonathan_hui/ssd-object-detection-single-shot-multibox-detector-for-real-time-processing-9bd8deac0e06\n\n7. Blog 3 on SSD - https://towardsdatascience.com/understanding-ssd-multibox-real-time-object-detection-in-deep-learning-495ef744fab\n\n8. 
Reference Tutorial - https://gluon.mxnet.io/chapter08_computer-vision/object-detection.html", "_____no_output_____" ] ], [ [ "import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/1_gluoncv_finetune/lib/\");", "_____no_output_____" ], [ "from detector_prototype import Detector", "_____no_output_____" ], [ "gtf = Detector();", "_____no_output_____" ] ], [ [ "# Sample Dataset Credits\n\n- credits: https://github.com/experiencor/kangaroo", "_____no_output_____" ] ], [ [ "root = \"Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/\"; \nimg_dir = \"Images/\"; \nanno_file = \"train_labels.csv\";\nbatch_size=2;", "_____no_output_____" ], [ "gtf.Dataset(root, img_dir, anno_file, batch_size=batch_size);", "_____no_output_____" ], [ "pretrained = True; \ngpu=True;\nmodel_name = \"ssd_512_resnet50_v1_voc\"; ", "_____no_output_____" ], [ "gtf.Model(model_name, use_pretrained=pretrained, use_gpu=gpu);", "/home/abhi/.virtualenvs/monk_obj_1_gluoncv_finetune/lib/python3.6/site-packages/mxnet/gluon/block.py:1159: UserWarning: Cannot decide type for the following arguments. Consider providing them as input:\n\tdata: None\n input_sym_arg_type = in_param.infer_type()[0]\n" ], [ "gtf.Set_Learning_Rate(0.001);", "_____no_output_____" ], [ "epochs=10;\nparams_file = \"saved_model.params\";", "_____no_output_____" ], [ "gtf.Train(epochs, params_file);", "[Epoch 0][Batch 0], Speed: 0.191 samples/sec, CrossEntropy=9.256, SmoothL1=0.401\n[Epoch 0][Batch 20], Speed: 7.882 samples/sec, CrossEntropy=4.728, SmoothL1=0.952\n[Epoch 0][Batch 40], Speed: 11.296 samples/sec, CrossEntropy=3.764, SmoothL1=0.922\n[Epoch 0][Batch 60], Speed: 11.210 samples/sec, CrossEntropy=3.414, SmoothL1=1.069\n[Epoch 0][Batch 80], Speed: 11.811 samples/sec, CrossEntropy=3.189, SmoothL1=1.040\n[Epoch 1][Batch 0], Speed: 10.571 samples/sec, CrossEntropy=2.932, SmoothL1=1.341\n[Epoch 1][Batch 20], Speed: 9.558 samples/sec, CrossEntropy=2.346, SmoothL1=1.126\n[Epoch 1][Batch 40], Speed: 11.375 samples/sec, CrossEntropy=2.407, SmoothL1=1.117\n[Epoch 1][Batch 60], Speed: 5.440 samples/sec, CrossEntropy=2.383, SmoothL1=1.136\n[Epoch 1][Batch 80], Speed: 12.132 samples/sec, CrossEntropy=2.360, SmoothL1=1.141\n[Epoch 2][Batch 0], Speed: 7.703 samples/sec, CrossEntropy=2.405, SmoothL1=1.979\n[Epoch 2][Batch 20], Speed: 11.210 samples/sec, CrossEntropy=2.425, SmoothL1=0.980\n[Epoch 2][Batch 40], Speed: 8.836 samples/sec, CrossEntropy=2.417, SmoothL1=1.025\n[Epoch 2][Batch 60], Speed: 10.057 samples/sec, CrossEntropy=2.427, SmoothL1=1.117\n[Epoch 2][Batch 80], Speed: 12.241 samples/sec, CrossEntropy=2.406, SmoothL1=1.127\n[Epoch 3][Batch 0], Speed: 6.900 samples/sec, CrossEntropy=2.495, SmoothL1=0.615\n[Epoch 3][Batch 20], Speed: 4.541 samples/sec, CrossEntropy=2.291, SmoothL1=0.956\n[Epoch 3][Batch 40], Speed: 6.171 samples/sec, CrossEntropy=2.405, SmoothL1=1.091\n[Epoch 3][Batch 60], Speed: 8.157 samples/sec, CrossEntropy=2.376, SmoothL1=1.023\n[Epoch 3][Batch 80], Speed: 12.659 samples/sec, CrossEntropy=2.368, SmoothL1=1.068\n[Epoch 4][Batch 0], Speed: 10.006 samples/sec, CrossEntropy=2.050, SmoothL1=0.715\n[Epoch 4][Batch 20], Speed: 5.963 samples/sec, CrossEntropy=2.226, SmoothL1=0.999\n[Epoch 4][Batch 40], Speed: 5.050 samples/sec, CrossEntropy=2.268, SmoothL1=0.925\n[Epoch 4][Batch 60], Speed: 7.628 samples/sec, CrossEntropy=2.236, SmoothL1=0.964\n[Epoch 4][Batch 80], Speed: 9.803 samples/sec, CrossEntropy=2.219, SmoothL1=0.969\n[Epoch 5][Batch 0], Speed: 9.976 samples/sec, CrossEntropy=1.787, SmoothL1=0.779\n[Epoch 5][Batch 
20], Speed: 12.440 samples/sec, CrossEntropy=2.115, SmoothL1=1.098\n[Epoch 5][Batch 40], Speed: 8.785 samples/sec, CrossEntropy=2.167, SmoothL1=1.100\n[Epoch 5][Batch 60], Speed: 12.552 samples/sec, CrossEntropy=2.122, SmoothL1=1.036\n[Epoch 5][Batch 80], Speed: 11.691 samples/sec, CrossEntropy=2.176, SmoothL1=1.086\n[Epoch 6][Batch 0], Speed: 7.171 samples/sec, CrossEntropy=1.745, SmoothL1=0.878\n[Epoch 6][Batch 20], Speed: 13.138 samples/sec, CrossEntropy=1.919, SmoothL1=0.855\n[Epoch 6][Batch 40], Speed: 8.185 samples/sec, CrossEntropy=2.017, SmoothL1=1.023\n[Epoch 6][Batch 60], Speed: 8.078 samples/sec, CrossEntropy=2.018, SmoothL1=0.966\n[Epoch 6][Batch 80], Speed: 5.621 samples/sec, CrossEntropy=2.074, SmoothL1=0.962\n[Epoch 7][Batch 0], Speed: 7.584 samples/sec, CrossEntropy=1.835, SmoothL1=0.992\n[Epoch 7][Batch 20], Speed: 4.008 samples/sec, CrossEntropy=2.307, SmoothL1=0.979\n[Epoch 7][Batch 40], Speed: 11.833 samples/sec, CrossEntropy=2.232, SmoothL1=0.949\n[Epoch 7][Batch 60], Speed: 3.778 samples/sec, CrossEntropy=2.211, SmoothL1=1.012\n[Epoch 7][Batch 80], Speed: 12.047 samples/sec, CrossEntropy=2.132, SmoothL1=1.009\n[Epoch 8][Batch 0], Speed: 10.684 samples/sec, CrossEntropy=2.342, SmoothL1=1.578\n[Epoch 8][Batch 20], Speed: 10.537 samples/sec, CrossEntropy=1.956, SmoothL1=0.852\n[Epoch 8][Batch 40], Speed: 10.238 samples/sec, CrossEntropy=1.894, SmoothL1=0.826\n[Epoch 8][Batch 60], Speed: 10.280 samples/sec, CrossEntropy=1.938, SmoothL1=0.856\n[Epoch 8][Batch 80], Speed: 11.372 samples/sec, CrossEntropy=2.011, SmoothL1=0.891\n[Epoch 9][Batch 0], Speed: 5.307 samples/sec, CrossEntropy=1.510, SmoothL1=0.508\n[Epoch 9][Batch 20], Speed: 10.229 samples/sec, CrossEntropy=1.877, SmoothL1=0.920\n[Epoch 9][Batch 40], Speed: 7.743 samples/sec, CrossEntropy=1.953, SmoothL1=0.912\n[Epoch 9][Batch 60], Speed: 11.586 samples/sec, CrossEntropy=1.981, SmoothL1=0.921\n[Epoch 9][Batch 80], Speed: 10.698 samples/sec, CrossEntropy=1.999, SmoothL1=0.898\n" ] ], [ [ "# Running Inference", "_____no_output_____" ] ], [ [ "import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/1_gluoncv_finetune/lib/\");", "_____no_output_____" ], [ "from inference_prototype import Infer", "_____no_output_____" ], [ "model_name = \"ssd_512_resnet50_v1_voc\";\nparams_file = \"saved_model.params\";\nclass_list = [\"kangaroo\"];", "_____no_output_____" ], [ "gtf = Infer(model_name, params_file, class_list, use_gpu=True);", "_____no_output_____" ], [ "img_name = \"Monk_Object_Detection/example_notebooks/sample_dataset/kangaroo/test/kg5.jpeg\"; \nvisualize = True;\nthresh = 0.56;", "_____no_output_____" ], [ "output = gtf.run(img_name, visualize=visualize, thresh=thresh);", "_____no_output_____" ] ], [ [ "# Author - Tessellate Imaging - https://www.tessellateimaging.com/\n\n# Monk Library - https://github.com/Tessellate-Imaging/monk_v1\n\n Monk is an opensource low-code tool for computer vision and deep learning\n\n\n## Monk features\n - low-code\n - unified wrapper over major deep learning framework - keras, pytorch, gluoncv\n - syntax invariant wrapper\n\n## Enables\n\n - to create, manage and version control deep learning experiments\n - to compare experiments across training metrics\n - to quickly find best hyper-parameters\n\n## At present it only supports transfer learning, but we are working each day to incorporate\n\n - GUI based custom model creation\n - various object detection and segmentation algorithms\n - deployment pipelines to cloud and local platforms\n - acceleration libraries such as 
TensorRT\n   - preprocessing and post-processing libraries\n\n\n## To contribute to the Monk AI or Monk Object Detection repositories, raise an issue on the git repo or DM us on LinkedIn\n\n   - Abhishek - https://www.linkedin.com/in/abhishek-kumar-annamraju/\n   - Akash - https://www.linkedin.com/in/akashdeepsingh01/\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
ecd60bc5c15fa7d3bc80adf7108633278fa5c520
476,333
ipynb
Jupyter Notebook
example.ipynb
danielhey/Echelle
bf46f36c0a89336b1a78614e06e426fe5953ee1c
[ "MIT" ]
6
2020-02-13T22:44:19.000Z
2021-11-20T19:00:13.000Z
example.ipynb
danielhey/Echelle
bf46f36c0a89336b1a78614e06e426fe5953ee1c
[ "MIT" ]
1
2019-04-03T08:32:50.000Z
2019-04-03T11:35:22.000Z
example.ipynb
danielhey/Echelle
bf46f36c0a89336b1a78614e06e426fe5953ee1c
[ "MIT" ]
4
2019-11-07T14:37:22.000Z
2021-01-06T08:09:42.000Z
159.682534
129,891
0.832554
[ [ [ "import echelle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport lightkurve as lk", "_____no_output_____" ], [ "# np.savetxt('bla.txt', list(zip(frequency, amplitude)))\nfrequency, amplitude = np.loadtxt('bla.txt').T", "_____no_output_____" ], [ "c = echelle.interact_echelle(frequency, amplitude, 0.5, 2, \n return_coords=True,\n cmap='Blues', smooth=True, step=None, scale='sqrt', \n# backend='bokeh'\n )", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=[3, 3])\nechelle.plot_echelle(frequency, amplitude, 1.14, scale='sqrt', ax=ax,\n fmin=10, fmax=20, cmap='Blues', interpolation='bicubic', smooth=True)\nplt.xlim(0, 0.5)\nplt.axis('off')\nplt.savefig('logo.png', dpi=300)", "_____no_output_____" ], [ "plt.text?", "_____no_output_____" ] ], [ [ "I know the large separation is 1.14 cpd. Let's try it out", "_____no_output_____" ] ], [ [ "x, y, z = echelle.echelle(frequency, amplitude, 5)\nz.min()", "_____no_output_____" ] ], [ [ "Nice! But what if we didn't know? Thankfully, there's an interactive echelle module we can use to hone in on the correct value. Judging from the periodogram, the large separation is probably between 0.5 and 2.", "_____no_output_____" ] ], [ [ "from notebook import notebookapp\nservers = list(notebookapp.list_running_servers())\nprint(servers)", "[{'base_url': '/', 'hostname': 'localhost', 'notebook_dir': '/Users/daniel', 'password': True, 'pid': 56164, 'port': 8888, 'secure': False, 'sock': '', 'token': '', 'url': 'http://localhost:8888/'}]\n" ] ], [ [ "If you have a large amount of data, it is usually preferable to zoom in on the relevant regions to avoid the expensive re-plotting:", "_____no_output_____" ] ], [ [ "echelle.interact_echelle(frequency, amplitude, 0.5, 2, step=0.01, fmin=10, fmax=20)", "_____no_output_____" ] ], [ [ "There are a few features in interact_echelle that may be useful for you. One of them is an argument to return any frequencies that were clicked on. To do this, we must specify `return_coords=True`. We can see this in action below:", "_____no_output_____" ] ], [ [ "clicked_frequencies = echelle.interact_echelle(frequency, amplitude, 0.5, 2, step=0.01, return_coords=True)", "_____no_output_____" ] ], [ [ "You can't see it, but I clicked on a few of the frequencies along the strongest ridge. They're stored as a list and can be accessed like so", "_____no_output_____" ] ], [ [ "clicked_frequencies", "_____no_output_____" ] ], [ [ "Note that these are the x, y coordinates of the echelle diagram. The first column is the frequency modulo dnu, the second is the frequency. ", "_____no_output_____" ], [ "If you want to use your own plotting routines, or want to do something fancy with the echelle values (like plotting a collapsed echelle diagram!), you can just call `echelle.echelle`. Note that `echelle.echelle` is very barebones, and will not perform any smoothing of your data. If you want to do that, you must smooth your amplitudes before passing them in!", "_____no_output_____" ] ], [ [ "x, y, z = echelle.echelle(frequency, amplitude, 1.14)\n\nplt.plot(x, np.sum(z, axis=0))\nplt.xlabel('Frequency mod 1.14')\nplt.ylabel('Summed amplitudes')", "_____no_output_____" ] ], [ [ "And that's mostly it! There are a lot of extra parameters that can be passed into the functions. So check out the docs.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
ecd615e80cf8778714d216c7f2c565ae1552caff
665,570
ipynb
Jupyter Notebook
60_segregation_analysis.ipynb
quarcs-lab/pysal-env
b32eb30d45d8421cdcffe4aaaf230f805fcd1c5c
[ "MIT" ]
1
2021-01-17T04:49:48.000Z
2021-01-17T04:49:48.000Z
60_segregation_analysis.ipynb
quarcs-lab/pysal-env
b32eb30d45d8421cdcffe4aaaf230f805fcd1c5c
[ "MIT" ]
null
null
null
60_segregation_analysis.ipynb
quarcs-lab/pysal-env
b32eb30d45d8421cdcffe4aaaf230f805fcd1c5c
[ "MIT" ]
null
null
null
357.44898
375,756
0.934617
[ [ [ "# Segregation Analysis with PySAL", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "%watermark -v -a \"author: eli knaap\" -d -u -p segregation,libpysal,geopandas", "Author: author: eli knaap\n\nLast updated: 2021-01-02\n\nPython implementation: CPython\nPython version : 3.7.9\nIPython version : 7.19.0\n\nsegregation: 1.4.0\nlibpysal : 4.3.0\ngeopandas : 0.8.1\n\n" ] ], [ [ "Here, we'll use PySAL's `segregation` module to analyze racial segregation in southern california", "_____no_output_____" ] ], [ [ "import geopandas as gpd", "_____no_output_____" ] ], [ [ "## Data Prep", "_____no_output_____" ] ], [ [ "scag = gpd.read_file(\"data/scag_region.gpkg\", layer=\"tracts\")", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/geopandas/geodataframe.py:422: RuntimeWarning: Sequential read of iterator was interrupted. Resetting iterator. This can negatively impact the performance.\n for feature in features_lst:\n" ] ], [ [ "We need to reproject the data into a more appropriate coordinate system. UTM11 should work ", "_____no_output_____" ] ], [ [ "scag = scag.to_crs(epsg=26911)\nscag.crs", "_____no_output_____" ], [ "scag.dropna(subset=['p_hispanic_persons']).plot(column='p_hispanic_persons',\n scheme='quantiles', \n cmap='Blues',\n k=8, \n legend=True)", "_____no_output_____" ] ], [ [ "some background on [fips codes](https://www.policymap.com/2012/08/tips-on-fips-a-quick-guide-to-geographic-place-codes-part-iii/)", "_____no_output_____" ] ], [ [ "scag['county'] = scag.geoid.str[:5]", "_____no_output_____" ], [ "scag.county.unique()", "_____no_output_____" ], [ "county_names = [\"Los Angeles\", \"Imperial\", \"Orange\", \"San Bernadino\", \"San Diego\", \"Riverside\", \"Ventura\"]", "_____no_output_____" ], [ "namer = dict(zip(scag.county.unique(), county_names))", "_____no_output_____" ], [ "namer", "_____no_output_____" ] ], [ [ "Now that we know which county is which, we could just use these codes to divide up the region into pieces. But lets go ahead and replace the codes with their names. 
It's more to type, but if we want to subset later, we won't have to go look up the codes again", "_____no_output_____" ] ], [ [ "scag['county'] = scag.county.replace(to_replace=namer)", "_____no_output_____" ], [ "scag.county", "_____no_output_____" ], [ "coastal = scag[scag.county.isin([\"Los Angeles\", \"Orange\", \"San Diego\", \"Ventura\"])]", "_____no_output_____" ], [ "inland = scag[scag.county.isin(['Riverside', \"San Bernadino\", \"Imperial\"])]", "_____no_output_____" ], [ "coastal.plot(column='county')", "_____no_output_____" ], [ "inland.plot(column='county')", "_____no_output_____" ] ], [ [ "## Calculating Segregation Measures", "_____no_output_____" ], [ "### Classic (aspatial) Single-Group Indices", "_____no_output_____" ] ], [ [ "from segregation.aspatial import Dissim, GiniSeg, Entropy", "_____no_output_____" ], [ "dissim = Dissim(scag, \"n_hispanic_persons\", \"n_total_pop\")\ngini = GiniSeg(scag, \"n_hispanic_persons\", \"n_total_pop\")\nentropy = Entropy(scag, \"n_hispanic_persons\", \"n_total_pop\")", "_____no_output_____" ], [ "dissim.statistic", "_____no_output_____" ], [ "gini.statistic", "_____no_output_____" ], [ "entropy.statistic", "_____no_output_____" ] ], [ [ "### Multigroup Indices", "_____no_output_____" ] ], [ [ "from segregation.aspatial import MultiInformationTheory, MultiGiniSeg, MultiDiversity", "_____no_output_____" ], [ "pop_groups = ['n_asian_persons', 'n_hispanic_persons', 'n_nonhisp_black_persons', 'n_nonhisp_white_persons']", "_____no_output_____" ], [ "multi_div = MultiDiversity(scag, pop_groups)\nmulti_info = MultiInformationTheory(scag, pop_groups)", "_____no_output_____" ], [ "multi_div.statistic", "_____no_output_____" ], [ "multi_info.statistic", "_____no_output_____" ] ], [ [ "### Spatial Indices", "_____no_output_____" ] ], [ [ "from libpysal import weights", "_____no_output_____" ], [ "from segregation.spatial import SpatialDissim, SpatialInformationTheory", "_____no_output_____" ], [ "w_queen = weights.Queen.from_dataframe(scag)\nw_knn = weights.KNN.from_dataframe(scag, k=10)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 3 disconnected components.\n There is 1 island with id: 4285.\n warnings.warn(message)\n" ] ], [ [ "#### Single Group", "_____no_output_____" ] ], [ [ "spatial_dissim = SpatialDissim(scag, 'n_hispanic_persons', 'n_total_pop', w=w_queen)", "_____no_output_____" ], [ "spatial_dissim.statistic", "_____no_output_____" ], [ "spatial_dissim_dist = SpatialDissim(scag, 'n_hispanic_persons', 'n_total_pop', w=w_knn)", "_____no_output_____" ], [ "spatial_dissim_dist.statistic", "_____no_output_____" ] ], [ [ "#### Multi Group", "_____no_output_____" ], [ "we can also look at how different concepts of space influence the resulting index statistic", "_____no_output_____" ] ], [ [ "spatial_info_queen = SpatialInformationTheory(scag, pop_groups, w=w_queen)\nspatial_info_dist = SpatialInformationTheory(scag, pop_groups, w=w_knn)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 3 disconnected components.\n warnings.warn(message)\n" ], [ "spatial_info_queen.statistic", "_____no_output_____" ], [ "spatial_info_dist.statistic", "_____no_output_____" ] ], [ [ "#### Multiscalar Profile", "_____no_output_____" ], [ "The multiscalar segregation profile is a way of measuring how 
global versus local the segregation patterns are in a region. ", "_____no_output_____" ] ], [ [ "from segregation.spatial import compute_segregation_profile", "_____no_output_____" ], [ "distances = [1500., 2500., 3500., 4500., 5500.]", "_____no_output_____" ], [ "prof = compute_segregation_profile(scag, pop_groups, distances)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 1167 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 421 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 258 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 196 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 152 disconnected components.\n warnings.warn(message)\n" ], [ "import pandas as pd\npd.Series(prof).plot()", "_____no_output_____" ] ], [ [ "We can also look at how the segregation profiles differ by region. If we plot them all on the same graph, we can compare the slopes of the lines to see how the shape of segregation differs between places in the southern cal region", "_____no_output_____" ] ], [ [ "coastal_prof = compute_segregation_profile(coastal, pop_groups, distances)\ninland_prof = compute_segregation_profile(inland, pop_groups, distances)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 660 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 197 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 119 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 85 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 66 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 508 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully 
connected: \n There are 226 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 140 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 113 disconnected components.\n warnings.warn(message)\n/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 87 disconnected components.\n warnings.warn(message)\n" ], [ "\npd.Series(prof, name='socal').plot(legend=True)\npd.Series(coastal_prof, name='coastal').plot(legend=True)\npd.Series(inland_prof, name='inland').plot(legend=True)", "_____no_output_____" ] ], [ [ "This shows that segregation in the coastal region is considerably larger than the inland region at every scale, though have similar shapes to their overall segregation profiles.", "_____no_output_____" ], [ "## Single-Value Inference", "_____no_output_____" ] ], [ [ "from segregation.inference import SingleValueTest", "_____no_output_____" ], [ "entropy_test = SingleValueTest(entropy)", "_____no_output_____" ], [ "dissim_test = SingleValueTest(dissim)", "_____no_output_____" ], [ "entropy_test.p_value", "_____no_output_____" ], [ "entropy_test.plot()", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ], [ "dissim_test.plot()", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ] ], [ [ "## Comparative Inference", "_____no_output_____" ] ], [ [ "from segregation.inference import TwoValueTest", "_____no_output_____" ], [ "info_test = TwoValueTest(MultiInformationTheory(coastal, pop_groups),\n MultiInformationTheory(inland, pop_groups))", "_____no_output_____" ], [ "info_test.est_point_diff", "_____no_output_____" ], [ "info_test.plot()", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. 
Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).\n warnings.warn(msg, FutureWarning)\n" ] ], [ [ "## Decomposition", "_____no_output_____" ] ], [ [ "from segregation.decomposition import DecomposeSegregation", "_____no_output_____" ], [ "w_coastal = weights.DistanceBand.from_dataframe(coastal, 2000)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 304 disconnected components.\n There are 253 islands with ids: 209, 344, 393, 461, 481, 482, 539, 774, 796, 873, 923, 963, 1053, 1055, 1082, 1118, 1141, 1158, 1167, 1209, 1211, 1295, 1374, 1412, 1543, 1544, 1571, 1572, 1583, 1584, 1603, 1611, 1612, 1613, 1621, 1628, 1637, 1639, 1640, 1642, 1656, 1660, 1669, 1670, 1679, 1699, 1727, 1729, 1746, 1757, 1758, 1786, 1813, 1851, 1852, 1856, 1867, 1931, 1933, 1950, 2000, 2012, 2017, 2019, 2021, 2022, 2033, 2035, 2077, 2091, 2119, 2137, 2174, 2175, 2177, 2178, 2185, 2231, 2240, 2242, 2253, 2254, 2260, 2292, 2313, 2330, 2368, 2379, 2388, 2398, 2399, 2414, 2433, 2439, 2443, 2446, 2447, 2462, 2482, 2483, 2485, 2486, 2487, 2488, 2499, 2500, 2501, 2503, 2539, 2540, 2586, 2601, 2604, 2618, 2629, 2642, 2643, 2671, 2672, 2674, 2675, 2678, 2690, 2706, 2725, 2729, 2770, 2784, 2794, 2795, 2798, 2860, 2864, 2866, 2867, 2888, 2934, 3013, 3014, 3021, 3072, 3109, 3128, 3198, 3213, 3215, 3218, 3227, 3229, 3250, 3268, 3271, 3272, 3273, 3282, 3288, 3299, 3323, 3337, 3360, 3362, 3365, 3373, 3427, 3428, 3430, 3463, 3546, 3571, 3584, 3596, 3600, 3636, 3663, 3669, 3670, 3673, 3674, 3703, 3713, 3715, 3716, 3759, 3775, 3811, 3815, 3816, 3824, 3844, 3862, 3876, 3878, 3884, 3934, 3939, 3950, 3951, 3980, 3981, 3984, 3998, 4007, 4008, 4033, 4034, 4038, 4039, 4041, 4047, 4048, 4086, 4087, 4128, 4133, 4137, 4138, 4141, 4145, 4157, 4181, 4182, 4185, 4190, 4198, 4214, 4215, 4228, 4247, 4248, 4254, 4255, 4275, 4285, 4286, 4287, 4288, 4290, 4292, 4302, 4320, 4337, 4362, 4363, 4364, 4425, 4426, 4442, 4466, 4486, 4529, 4535, 4540, 4561.\n warnings.warn(message)\n" ], [ "w_inland = weights.DistanceBand.from_dataframe(inland, 2000)", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 324 disconnected components.\n There are 275 islands with ids: 157, 159, 161, 162, 163, 164, 165, 171, 236, 240, 241, 243, 245, 276, 289, 405, 406, 415, 437, 444, 447, 453, 457, 459, 484, 489, 490, 491, 492, 500, 518, 679, 877, 907, 908, 947, 970, 971, 972, 973, 1017, 1066, 1070, 1105, 1168, 1212, 1221, 1222, 1230, 1390, 1394, 1413, 1424, 1426, 1427, 1433, 1435, 1444, 1477, 1486, 1492, 1512, 1559, 1565, 1574, 1575, 1592, 1605, 1606, 1636, 1661, 1663, 1694, 1696, 1697, 1698, 1702, 1719, 1720, 1737, 1738, 1741, 1743, 1744, 1762, 1763, 1764, 1782, 1784, 1794, 1800, 1805, 1807, 1861, 1863, 1873, 1876, 1879, 1892, 1954, 1955, 1956, 1957, 1959, 1963, 1964, 1965, 1966, 1993, 2004, 2005, 2024, 2027, 2029, 2282, 2312, 2318, 2319, 2320, 2321, 2331, 2334, 2337, 2338, 2339, 2341, 2342, 2359, 2375, 2418, 2419, 2420, 2429, 2430, 2444, 2451, 2457, 2458, 2459, 2461, 2468, 2469, 2470, 2480, 2481, 2505, 2512, 2514, 2515, 2522, 2527, 2542, 2568, 2570, 2571, 2580, 2610, 2611, 2612, 2613, 2617, 2628, 2647, 2648, 2650, 2651, 2660, 2663, 2683, 2691, 2695, 2700, 2710, 2711, 2733, 2735, 2736, 2737, 2738, 2740, 2756, 2766, 2870, 
2871, 2873, 2894, 2897, 2907, 2944, 2949, 2956, 2969, 2971, 2972, 2976, 2985, 2996, 3002, 3009, 3015, 3018, 3027, 3103, 3190, 3220, 3593, 3609, 3613, 3614, 3615, 3616, 3667, 3680, 3684, 3694, 3695, 3699, 3760, 3931, 3935, 3936, 3937, 3938, 3959, 3970, 3971, 3972, 3975, 3976, 3977, 3978, 3979, 4014, 4018, 4022, 4023, 4024, 4035, 4066, 4067, 4174, 4175, 4224, 4245, 4257, 4260, 4264, 4303, 4308, 4323, 4331, 4341, 4388, 4390, 4393, 4403, 4412, 4418, 4445, 4446, 4447, 4451, 4456, 4467, 4469, 4471, 4472, 4474, 4475, 4478, 4489, 4516, 4550, 4568, 4571.\n warnings.warn(message)\n" ], [ "one = SpatialDissim(coastal, 'n_nonhisp_black_persons', 'n_total_pop', w=w_coastal)", "_____no_output_____" ], [ "decomp = DecomposeSegregation(SpatialDissim(coastal, 'n_nonhisp_black_persons', 'n_total_pop', w=w_coastal),\n SpatialDissim(inland,'n_nonhisp_black_persons', 'n_total_pop', w=w_inland))", "/Users/carlos/anaconda3/envs/pysal-spatialapi20/lib/python3.7/site-packages/libpysal/weights/weights.py:172: UserWarning: The weights matrix is not fully connected: \n There are 3 disconnected components.\n There is 1 island with id: 3502.\n warnings.warn(message)\n" ], [ "decomp.plot(plot_type='maps', scheme='equalinterval', k=10, city_a='Coastal', city_b='inland')", "_____no_output_____" ], [ "decomp.plot()", "_____no_output_____" ] ], [ [ "## Exercise", "_____no_output_____" ], [ "1. Which county in the socal region has the greatest level of multiracial segregation, (using the 4 categories above) according to the MultiInformationTheory index?\n\n2. According to the Gini index, is hispanic/latino segregation in Riverside County greater or less than Ventura County? Is that difference significant?\n\n3. According to the Spatial Dissimilarity index, does the difference in segregation between Riverside and Venture result from the demograpic structure or the spatial structure?", "_____no_output_____" ] ], [ [ "# %load solutions/06.py", "_____no_output_____" ], [ "# List of used packages to be added to the `requirements.txt` file (ref: https://stackoverflow.com/questions/40428931/package-for-listing-version-of-packages-used-in-a-jupyter-notebook)\nimport pkg_resources\nimport types\ndef get_imports():\n for name, val in globals().items():\n if isinstance(val, types.ModuleType):\n # Split ensures you get root package, \n # not just imported function\n name = val.__name__.split(\".\")[0]\n\n elif isinstance(val, type):\n name = val.__module__.split(\".\")[0]\n\n # Some packages are weird and have different\n # imported names vs. system/pip names. Unfortunately,\n # there is no systematic way to get pip names from\n # a package's imported name. You'll have to add\n # exceptions to this list manually!\n poorly_named_packages = {\n \"PIL\": \"Pillow\",\n \"sklearn\": \"scikit-learn\"\n }\n if name in poorly_named_packages.keys():\n name = poorly_named_packages[name]\n\n yield name\nimports = list(set(get_imports()))\n\n# The only way I found to get the version of the root package\n# from only the name of the package is to cross-check the names \n# of installed packages vs. imported packages\nrequirements = []\nfor m in pkg_resources.working_set:\n if m.project_name in imports and m.project_name!=\"pip\":\n requirements.append((m.project_name, m.version))\n\nfor r in requirements:\n print(\"{}=={}\".format(*r))", "segregation==1.4.0\npandas==1.2.0\nlibpysal==4.3.0\ngeopandas==0.8.1\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
ecd62a97999f27f19875a6940e1573558a0242de
21,832
ipynb
Jupyter Notebook
Spotify Playlist Homogenizer.ipynb
joebonneau/spotify-playlist-homogenizer
735eb2f8fc9465583769fcd7a4a5f72b6de412de
[ "MIT" ]
null
null
null
Spotify Playlist Homogenizer.ipynb
joebonneau/spotify-playlist-homogenizer
735eb2f8fc9465583769fcd7a4a5f72b6de412de
[ "MIT" ]
null
null
null
Spotify Playlist Homogenizer.ipynb
joebonneau/spotify-playlist-homogenizer
735eb2f8fc9465583769fcd7a4a5f72b6de412de
[ "MIT" ]
null
null
null
37.641379
194
0.455158
[ [ [ "import os\nimport spotipy\nimport spotipy.oauth2 as oauth2\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport spotipy.util as util\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport ipywidgets as widgets\n\nSPOTIPY_CLIENT_ID = ''\nSPOTIPY_CLIENT_SECRET = ''\nSPOTIPY_REDIRECT_URI = ''", "_____no_output_____" ], [ "username = 'hp5s41fd8dkrl0mi77gwptmco'\nscope = 'playlist-modify-public'\n\ntoken = util.prompt_for_user_token(username,scope,\n client_id= SPOTIPY_CLIENT_ID,\n client_secret = SPOTIPY_CLIENT_SECRET,\n redirect_uri = SPOTIPY_REDIRECT_URI)\nsp = spotipy.Spotify(auth=token)", "_____no_output_____" ], [ "user_playlists = sp.current_user_playlists()['items']\nuser_playlists_list = []\nuser_playlists_dict = {}\n\nfor i in range(0,len(user_playlists)):\n user_playlists_list.append(user_playlists[i]['name'])\n user_playlists_dict[user_playlists[i]['name']] = user_playlists[i]['id']", "_____no_output_____" ], [ "user_playlists_list.sort()\n\n# selecting 'tempo' will actually select a version of 'tempo' that is rounded to the nearest multiple of 5\naudio_feature_dict = {'danceability':'danceability',\n 'energy':'energy',\n 'key':'key',\n 'loudness':'loudness',\n 'mode':'mode',\n 'speechiness':'speechiness',\n 'acousticness':'acousticness',\n 'instrumentalness':'instrumentalness',\n 'liveness':'liveness',\n 'valence':'valence',\n 'tempo':'tempo_rounded'}\naudio_feature_list.sort()\n\nradio_list = [('ascending',True),('descending',False)]\nplaylist_layout = widgets.Layout(margin='0px 0px 10px 200px',\n padding='0px 0px 0px 100px')\ntiers_layout = widgets.Layout(margin='0px 75px 5px 150px')\n\nplaylist_dropdown = widgets.Dropdown(options=user_playlists_list,\n layout=playlist_layout)\nprimary_dropdown = widgets.Dropdown(options=audio_feature_dict,\n value='tempo_rounded',\n layout=tiers_layout)\nsecondary_dropdown = widgets.Dropdown(options=audio_feature_dict,\n value='danceability',\n layout=tiers_layout)\ntertiary_dropdown = widgets.Dropdown(options=audio_feature_dict,\n value='valence',\n layout=tiers_layout)\nprimary_radio = widgets.RadioButtons(options=radio_list,\n description='Sort by: ')\nsecondary_radio = widgets.RadioButtons(options=radio_list,\n description='Sort by: ')\ntertiary_radio = widgets.RadioButtons(options=radio_list,\n description='Sort by: ',)\n\nplaylist_selection = widgets.VBox([widgets.Label('Select the playlist to homogenize: ',layout=playlist_layout),playlist_dropdown,\n widgets.Label('Select which audio feature to perform the primary sort on: ',layout=tiers_layout),widgets.HBox([primary_dropdown,primary_radio]),\n widgets.Label('Select which audio feature to perform the secondary sort on: ',layout=tiers_layout),widgets.HBox([secondary_dropdown,secondary_radio]),\n widgets.Label('Select which audio feature to perform the tertiary sort on: ',layout=tiers_layout),widgets.HBox([tertiary_dropdown,tertiary_radio])])\ndisplay(playlist_selection)", "_____no_output_____" ], [ "playlist = sp.user_playlist(username, playlist_id=user_playlists_dict.get(playlist_dropdown.value))\ntracks = playlist['tracks']\nsongs = tracks['items']\n\ntrack_ids = []\ntrack_names = []\n\nfor idx,item in enumerate(songs):\n if songs[idx]['track']['id'] != None:\n track_ids.append(songs[idx]['track']['id'])\n track_names.append(songs[idx]['track']['name'])", "_____no_output_____" ], [ "danceability = []\nenergy = []\nkey = []\nloudness = []\nmode = []\nspeechiness = []\nacousticness = []\ninstrumentalness = []\nliveness = []\nvalence = []\ntempo 
= []\n\naudio_features = sp.audio_features(tracks=track_ids)\n\nfor idx,item in enumerate(audio_features):\n danceability.append(audio_features[idx]['danceability'])\n energy.append(audio_features[idx]['energy'])\n key.append(audio_features[idx]['key'])\n loudness.append(audio_features[idx]['loudness'])\n mode.append(audio_features[idx]['mode'])\n speechiness.append(audio_features[idx]['speechiness'])\n acousticness.append(audio_features[idx]['acousticness'])\n instrumentalness.append(audio_features[idx]['instrumentalness'])\n liveness.append(audio_features[idx]['liveness'])\n valence.append(audio_features[idx]['valence'])\n tempo.append(audio_features[idx]['tempo'])\n", "_____no_output_____" ], [ "data = list(zip(track_ids,track_names,danceability,energy,key,loudness,mode,speechiness,acousticness,instrumentalness,\n liveness,valence,tempo))\nplaylist_df = pd.DataFrame(data, \n columns=['id','track_name','danceability','energy','key','loudness','mode',\n 'speechiness','acousticness','instrumentalness','liveness','valence',\n 'tempo'])", "_____no_output_____" ], [ "#playlist_df = playlist_df.set_index('id')\nplaylist_df['tempo_rounded'] = playlist_df['tempo'].apply(lambda x: 5*round(x/5))\nplaylist_df = playlist_df.sort_values(by=[primary_dropdown.value,secondary_dropdown.value,tertiary_dropdown.value],\n ascending=[primary_radio.value,secondary_radio.value,tertiary_radio.value])", "_____no_output_____" ], [ "playlist_df.head(10)", "_____no_output_____" ], [ "playlist_stats_df = playlist_df.describe()\n\naverage_danceability = average_danceability\naverage_energy = average_energy\naverage_valence = playlist_stats_df['valence'].loc['mean'].round(2)\n\nprint('Your playlist is...')\nif average_danceability >= 0 and average_danceability <= 0.25:\n print('\\tReally hard to dance to! The average danceability score is:',average_danceability)\n\nelif average_danceability > 0.25 and average_danceability <= 0.50:\n print('\\tPretty hard to dance to! The average danceability score is:',average_danceability)\n\nelif average_danceability > 0.5 and average_danceability <= 0.75:\n print('\\tPretty easy to dance to! The average danceability score is:',average_danceability)\n\nelse:\n print('\\tA straight rug-cutter! The average danceability score is:',average_danceability)\n \nif average_energy >= 0 and average_energy <= 0.25:\n print('\\tVery low energy! The average energy score is:',average_energy)\n\nelif average_energy > 0.25 and average_energy <= 0.5:\n print('\\tPretty lethargic! The average energy score is:',average_energy)\n\nelif average_energy > 0.5 and average_energy <= 0.75:\n print(\"\\tGroovin'! The average energy score is:\",average_energy)\n\nelse:\n print('\\tElectric! The average energy score is:',average_energy)\n \nif average_valence >= 0 and average_valence <= 0.25:\n print('\\tExtremely melancholy... where are the tissues at?! The average valence score is:',average_valence)\n \nelif average_valence > 0.25 and average_valence <= 0.5:\n print('\\tKinda depressing! The average valence score is:',average_valence)\n \nelif average_valence > 0.5 and average_valence <= 0.75:\n print('\\tPositive and feel good! The average valence score is:',average_valence)\n \nelse:\n print('\\tEuphoric! The average valence score is:',average_valence)", "Your playlist is...\n\tPretty easy to dance to! The average danceability score is: 0.67\n\tGroovin'! The average energy score is: 0.64\n\tPositive and feel good! 
The average valence score is: 0.6\n" ], [ "sp.user_playlist_replace_tracks(user=username,\n playlist_id=user_playlists_dict.get(playlist_dropdown.value),\n tracks=playlist_df['id'].tolist())", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
ecd64460f9a2d3bb92fd63c840ae972ff4e6e8a7
80,857
ipynb
Jupyter Notebook
notebooks/Dfs3 - Temperature profiles.ipynb
liuli01/mikeio
9dc2f9800fd92057d728f4e9ed2f9848e792ba71
[ "BSD-3-Clause" ]
1
2020-08-06T21:39:11.000Z
2020-08-06T21:39:11.000Z
notebooks/Dfs3 - Temperature profiles.ipynb
mohm-dhi/mikeio
2d808d3a048fe949484089819a2c9437003f84ba
[ "BSD-3-Clause" ]
null
null
null
notebooks/Dfs3 - Temperature profiles.ipynb
mohm-dhi/mikeio
2d808d3a048fe949484089819a2c9437003f84ba
[ "BSD-3-Clause" ]
null
null
null
153.138258
22,892
0.886095
[ [ [ "# Global Forecasting System - Temperature profiles in the atmosphere", "_____no_output_____" ] ], [ [ "from datetime import datetime\nimport xarray\nimport numpy as np\nimport pandas as pd\nfrom mikeio import Dfs3", "_____no_output_____" ] ], [ [ "Let's try to download todays forecast from the OpenDAP server.", "_____no_output_____" ] ], [ [ "now = datetime.now()\n\nforecast = datetime(now.year,now.month,now.day)", "_____no_output_____" ], [ "dtstr = forecast.strftime(\"%Y%m%d\")\nhour = \"00\" # valid options are 00,06,12,18\nurl = f\"https://nomads.ncep.noaa.gov/dods/gfs_0p25/gfs{dtstr}/gfs_0p25_{hour}z\"\nds = xarray.open_dataset(url)", "_____no_output_____" ] ], [ [ "Create a small subset of the data to make it faster to download.", "_____no_output_____" ] ], [ [ "ds = ds.sel(lon=slice(10,15), lat=slice(54,58)).isel(time=slice(0,2))", "_____no_output_____" ] ], [ [ "Temperature on pressure levels in the atmosphere is named *tmpprs*", "_____no_output_____" ] ], [ [ "ds.tmpprs", "_____no_output_____" ], [ "ds.lev", "_____no_output_____" ] ], [ [ "Level 0 = 1000 mbar, i.e. close to the ground.", "_____no_output_____" ] ], [ [ "ds.tmpprs.isel(time=0,lev=0).plot()", "_____no_output_____" ] ], [ [ "Top level = 0.4 mbar, i.e. at the top of the atmosphere.", "_____no_output_____" ] ], [ [ "ds.tmpprs.isel(time=0,lev=-1).plot()", "_____no_output_____" ] ], [ [ "Let's look at a profile", "_____no_output_____" ] ], [ [ "ds.tmpprs.sel(lon=10,lat=56).isel(time=0).plot()", "_____no_output_____" ] ], [ [ "## Convert to dfs3\n\nDfs3 does not support irregularly spaced spatial axis as is used by the vertical coordinate axis in this case (pressure levels).\n\n**Thus, please note that the vertical coordinates are not correct in this example.**", "_____no_output_____" ] ], [ [ "lat = ds.lat.values\nlon = ds.lon.values\nlev = ds.lev.values\n\nnx = len(lon)\nny = len(lat)\nnz = len(lev)\n\nx0 = lon[0]\ny0 = lat[0]\n\ndx = np.round((lon[-1] - lon[0]) / (nx-1),2)\ndy = np.round((lat[-1] - lat[0]) / (ny-1),2)\n\nx0, y0, nx, ny, dx, dy", "_____no_output_____" ] ], [ [ "## Time", "_____no_output_____" ] ], [ [ "t = ds.time.values\nprint(t[0])\nstart_time = pd.to_datetime(t).to_pydatetime()[0]", "2020-09-11T00:00:00.000000000\n" ] ], [ [ "## Variable types", "_____no_output_____" ] ], [ [ "from mikeio.eum import EUMType\nEUMType.Temperature", "_____no_output_____" ], [ "EUMType.Temperature.units", "_____no_output_____" ] ], [ [ "# Data manipulation\nFlip upside / down", "_____no_output_____" ] ], [ [ "temperature = ds.tmpprs.values\n\ntemperature = np.flip(temperature,axis=1)", "_____no_output_____" ], [ "from mikeio.eum import ItemInfo, EUMUnit\n \ndfs = Dfs3()\ndfsfilename = f\"gfs_{dtstr}_{hour}_temperature_profile.dfs3\"\ndfs.write(filename=dfsfilename,\n data=[temperature],\n start_time = start_time,\n dt=3600,\n items=[ItemInfo(EUMType.Temperature, EUMUnit.degree_Kelvin)],\n coordinate=['LONG/LAT', x0, y0, 0],\n dx=dx, dy=dy\n)", "_____no_output_____" ] ], [ [ "## Clean up (don't run this)", "_____no_output_____" ] ], [ [ "import os\nos.remove(dfsfilename)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
ecd646f3771f465be0712ff3926e4e40ba65b572
182,700
ipynb
Jupyter Notebook
Pymaceuticals/pymaceuticals_starter.ipynb
AIndian/Matplotlib-challenge
50712c560a7b8489f9814557955a2fbf9653bd52
[ "ADSL" ]
null
null
null
Pymaceuticals/pymaceuticals_starter.ipynb
AIndian/Matplotlib-challenge
50712c560a7b8489f9814557955a2fbf9653bd52
[ "ADSL" ]
null
null
null
Pymaceuticals/pymaceuticals_starter.ipynb
AIndian/Matplotlib-challenge
50712c560a7b8489f9814557955a2fbf9653bd52
[ "ADSL" ]
null
null
null
106.097561
20,620
0.789677
[ [ [ "## Observations and Insights ", "_____no_output_____" ] ], [ [ "(1) The two drugs that showed the best final results is Capomulin and Ramicane \nboth showing a median under the 45.00 mm3 tumor start size.\n\n(2) There is a very positive correlation (r^2 = 0.76) between the average tumor volume \nand the weight of the mouse (speicifically for the Capomulin Regime)\nbut shows weight can be an important factor of treatment and care. \n\n(3) The distribution of female and male mice are even allowing for a removal of gender bias. \n\n(4) According to statistics, best drug for controlling and treating the Tumor is Ramicane\n(median = 40.673, mean = 40.217, var 23.48 ) ", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy.stats as st\nimport random\n\n# Study data files\nmouse_metadata_path = \"data/Mouse_metadata.csv\"\nstudy_results_path = \"data/Study_results.csv\"\n\n# Read the mouse data and the study results\nmouse_metadata = pd.read_csv(mouse_metadata_path)\nstudy_results = pd.read_csv(study_results_path)\n\n# Combine the data into a single dataset\nmousedata_df = pd.merge(mouse_metadata,study_results, on = 'Mouse ID')\n\n# Display the data table for preview\nmousedata_df\n", "_____no_output_____" ], [ "# Checking the number of mice.\nlen(mousedata_df['Mouse ID'].unique())\n", "_____no_output_____" ], [ "# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint. \nduplicateinfo = mousedata_df[mousedata_df.duplicated([\"Mouse ID\", \"Timepoint\"])]\n#saves all duplicate mouse information to a list\nduplicateID = duplicateinfo.iloc[:,0].tolist()\nduplicateID\n", "_____no_output_____" ], [ "# Optional: Get all the data for the duplicate mouse ID. \n# Gets all duplicate mouseID and Data printed here\nduplicatemouse = mousedata_df.loc[mousedata_df['Mouse ID'].isin(duplicateID)]\nduplicatemouse", "_____no_output_____" ], [ "# Create a clean DataFrame by dropping the duplicate mouse by its ID.\n# Code is used to automate, so whateever the found duplicate ID(s), it will drop\nmousedata = mousedata_df[~mousedata_df['Mouse ID'].isin(duplicateID)]\nmousedata", "_____no_output_____" ], [ "# Checking the number of mice in the clean DataFrame.\n# Unique mouse should be one less since the duplicate mouseID was dropped (bad data, no verification on which is the correct one)\n\nlen(mousedata['Mouse ID'].unique())\n", "_____no_output_____" ] ], [ [ "## Summary Statistics", "_____no_output_____" ] ], [ [ "# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n# Use groupby and summary statistical methods to calculate the following properties of each drug regimen: \n# mean, median, variance, standard deviation, and SEM of the tumor volume. 
\ndrug_means = mousedata[['Drug Regimen','Tumor Volume (mm3)']].rename(columns = \n {'Tumor Volume (mm3)': 'Mean'}).groupby(['Drug Regimen']).mean()\ndrug_median = mousedata[['Drug Regimen','Tumor Volume (mm3)']].rename(columns =\n {'Tumor Volume (mm3)': 'Median'}).groupby(['Drug Regimen']).median()\ndrug_var = mousedata[['Drug Regimen','Tumor Volume (mm3)']].rename(columns =\n {'Tumor Volume (mm3)': 'Variance'}).groupby(['Drug Regimen']).var()\ndrug_std = mousedata[['Drug Regimen','Tumor Volume (mm3)']].rename(columns =\n {'Tumor Volume (mm3)': 'Standard Deviation'}).groupby(['Drug Regimen']).std()\ndrug_sem = mousedata[['Drug Regimen','Tumor Volume (mm3)']].rename(columns =\n {'Tumor Volume (mm3)': 'SEM'}).groupby(['Drug Regimen']).sem()\n\n# Assemble the resulting series into a single summary dataframe.\nmerge1 = pd.merge(drug_means, drug_median, left_index=True, right_index=True)\nmerge2 = pd.merge(drug_var, drug_std, left_index=True, right_index=True)\nDrugStats = pd.merge(merge1, merge2, left_index=True, right_index=True)\nDrugStats = pd.merge(DrugStats, drug_sem, left_index=True, right_index=True)\nDrugStats\n\n\n", "_____no_output_____" ], [ "# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen\n# Using the aggregation method, produce the same summary statistics in a single line\nDrugStat = mousedata[['Drug Regimen','Tumor Volume (mm3)']].groupby('Drug Regimen').agg({'Tumor Volume (mm3)':['mean', 'median', 'var', 'std', 'sem']})\nDrugStat", "_____no_output_____" ] ], [ [ "## Bar and Pie Charts", "_____no_output_____" ] ], [ [ "# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.\n\n#Pandas DataFrame.Plot()\ndrugcount = mousedata['Drug Regimen'].value_counts().to_frame()\ndrugbar = drugcount.plot.bar(legend = None)\nplt.title('Total Number of Measurments of each Drug')\nplt.xticks(rotation = 45)\nplt.show()\n\n\n", "_____no_output_____" ], [ "# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.\n#matplotlib pyplot\ndrugnames = list(drugcount.index.values)\ndrugcounts = drugcount.iloc[:,0]\nplt.bar(drugnames, drugcounts)\nplt.title('Total Number of Measurments of each Drug')\nplt.xticks(rotation = 45)\nplt.show()\n", "_____no_output_____" ], [ "# Generate a pie plot showing the distribution of female versus male mice using pandas\ngendercount = mousedata.drop_duplicates('Mouse ID')['Sex'].value_counts().to_frame()\nexplode = (0.1,0)\ngenderpie = gendercount.plot(kind = 'pie', y = 'Sex', explode = explode, autopct = \"%1.2f%%\", startangle = 90, legend = None)\nplt.title('Distribution of Female versus Male Mice')\nplt.axis('off')\nplt.show()", "_____no_output_____" ], [ "# Generate a pie plot showing the distribution of female versus male mice using pyplot\ngendercounts = mousedata.drop_duplicates('Mouse ID')['Sex'].value_counts()\ngenderpies = plt.pie(gendercounts, explode = explode, labels = gendercounts.index.values,autopct = \"%1.2f%%\", startangle = 90)\nplt.title('Distribution of Female versus Male Mice')\nplt.axis('off')\nplt.show()", "_____no_output_____" ] ], [ [ "## Quartiles, Outliers and Boxplots", "_____no_output_____" ] ], [ [ "# Calculate the final tumor volume of each mouse across four of the treatment regimens: \n# Capomulin, Ramicane, Infubinol, and Ceftamin\ndrugs_4 = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']\ntumorVol = mousedata[mousedata['Drug Regimen'].isin(drugs_4)]\n\n# Start 
by getting the last (greatest) timepoint for each mouse\n# Merge this group df with the original dataframe to get the tumor volume at the last timepoint\n## Easier way of doing this is to sort it descending by , then remove duplicates by MouseID. \n## The highest timepoint of each Mouse ID will be kept\ntumorVol = tumorVol.sort_values(by = 'Timepoint', ascending = 0)\ntumorVol = tumorVol.drop_duplicates('Mouse ID')\ntumorVol\n\n\n\n\n", "_____no_output_____" ], [ "# Put treatments into a list for for loop (and later for plot labels)\n## Already done using drugs_4\n\n# Create empty list to fill with tumor vol data (for plotting)\ntumorDataRows= []\n\n# Calculate the IQR and quantitatively determine if there are any potential outliers. \n\nfor drug in drugs_4:\n # Locate the rows which contain mice on each drug and get the tumor volumes \n tumorData = tumorVol[tumorVol['Drug Regimen']==drug]\n tumorDataRows.append(tumorData)\n # add subset \n quartiles = tumorData['Tumor Volume (mm3)'].quantile([0.25,0.5,0.75])\n lowerq = quartiles[0.25]\n median = quartiles[0.50]\n upperq = quartiles[0.75]\n iqr = upperq-lowerq\n lowerb = lowerq - (1.5*iqr)\n upperb = upperq + (1.5*iqr)\n # Determine outliers using upper and lower bounds\n outliers_data = tumorData.loc[(tumorData['Tumor Volume (mm3)'] > upperb) | (tumorData['Tumor Volume (mm3)'] < lowerb)].set_index('Mouse ID')\n print(f'\\n{drug} Statistics:')\n print(f'The lower and upper quartile of is: {lowerq:.2f} and {upperq:.2f}')\n print(f'The IQR is {iqr:.2f} and median is {median:.2f}' )\n print(f'Any values outside of the range {lowerb:.2f} to {upperb:.2f} are outliers')\n print('Outliers:')\n if outliers_data.empty:\n print('None')\n else:\n print(outliers_data[['Tumor Volume (mm3)']])\n \n \n\n ", "\nCapomulin Statistics:\nThe lower and upper quartile of is: 32.38 and 40.16\nThe IQR is 7.78 and median is 38.13\nAny values outside of the range 20.70 to 51.83 are outliers\nOutliers:\nNone\n\nRamicane Statistics:\nThe lower and upper quartile of is: 31.56 and 40.66\nThe IQR is 9.10 and median is 36.56\nAny values outside of the range 17.91 to 54.31 are outliers\nOutliers:\nNone\n\nInfubinol Statistics:\nThe lower and upper quartile of is: 54.05 and 65.53\nThe IQR is 11.48 and median is 60.17\nAny values outside of the range 36.83 to 82.74 are outliers\nOutliers:\n Tumor Volume (mm3)\nMouse ID \nc326 36.321346\n\nCeftamin Statistics:\nThe lower and upper quartile of is: 48.72 and 64.30\nThe IQR is 15.58 and median is 59.85\nAny values outside of the range 25.36 to 87.67 are outliers\nOutliers:\nNone\n" ], [ "# Generate a box plot of the final tumor volume of each mouse across four regimens of interest\ntumorCl = tumorVol[['Drug Regimen','Tumor Volume (mm3)']]\ntumorCl.boxplot(by = 'Drug Regimen')\nplt.title('Tumor Volume of Each Mouse across Four Regimens of Interest')\nplt.suptitle('')\nplt.ylabel('Tumor Volume (mm3)')\nplt.xlabel('Drug Regimen')\n\n\n\n", "_____no_output_____" ] ], [ [ "## Line and Scatter Plots", "_____no_output_____" ] ], [ [ "# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin\nIDCapo = mousedata.loc[mousedata['Drug Regimen'] == 'Capomulin']['Mouse ID'].unique()\nCID= random.choices(IDCapo)\nCapo_df = mousedata.loc[mousedata['Mouse ID'].isin(CID)]\nx_axis = Capo_df.loc[:,'Timepoint'].tolist()\ny_axis = Capo_df.loc[:,'Tumor Volume (mm3)'].tolist()\nplt.plot(x_axis,y_axis, marker = 'o',markersize = 8)\nplt.title('Tumor Volume vs. 
Time Point for Mouse ID' + str(CID) + ' treated with Capomulin')\nplt.xlabel('Timepoint (Days)')\nplt.ylabel('Tumor Volume (mm3)')\n\n", "_____no_output_____" ], [ "# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen\nMouseCapo = tumorVol.loc[tumorVol['Drug Regimen'] == 'Capomulin']\nx_axis2 = MouseCapo['Weight (g)']\ny_axis2 = MouseCapo['Tumor Volume (mm3)']\nplt.scatter(x_axis2, y_axis2)\nplt.title('Average Tumor Volume vs. Mouse Weight for the Capomulin Regimen')\nplt.xlabel('Mouse Weight (g)')\nplt.ylabel('Tumor Volume (mm3)')\nplt.show()", "_____no_output_____" ] ], [ [ "## Correlation and Regression", "_____no_output_____" ] ], [ [ "# Calculate the correlation coefficient and linear regression model \n# for mouse weight and average tumor volume for the Capomulin regimen\n", "_____no_output_____" ], [ "(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x_axis2,y_axis2)\nregress_values = x_axis2 * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\nplt.scatter(x_axis2, y_axis2)\nplt.title('Average Tumor Volume vs. Mouse Weight for the Capomulin Regimen')\nplt.xlabel('Mouse Weight (g)')\nplt.ylabel('Tumor Volume (mm3)')\nplt.annotate(line_eq,(20,30),fontsize=15,color=\"black\")\nplt.plot(x_axis2,regress_values,\"r-\")\nplt.show()\nprint(f\"The r-squared is: {rvalue**2}\")", "_____no_output_____" ] ] ]
[ "markdown", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
ecd64a338195df5e0d2c2c359004bd32e1373a2d
16,309
ipynb
Jupyter Notebook
Twitter_bot_keywords_v6-Descarb.ipynb
dpastoresc/NarrativeDynamics
bc0c502744c215274d34a23cbce6ad6a9d39a333
[ "MIT" ]
1
2020-06-29T16:44:40.000Z
2020-06-29T16:44:40.000Z
Twitter_bot_keywords_v6-Descarb.ipynb
dpastoresc/NarrativeDynamics
bc0c502744c215274d34a23cbce6ad6a9d39a333
[ "MIT" ]
null
null
null
Twitter_bot_keywords_v6-Descarb.ipynb
dpastoresc/NarrativeDynamics
bc0c502744c215274d34a23cbce6ad6a9d39a333
[ "MIT" ]
1
2020-05-18T20:36:03.000Z
2020-05-18T20:36:03.000Z
43.959569
359
0.49396
[ [ [ "import tweepy\nimport json\nimport pandas as pd\nimport csv\n\nimport mysql.connector\nfrom mysql.connector import Error\n\n#imports for catching the errors\nfrom ssl import SSLError\nfrom requests.exceptions import Timeout, ConnectionError\nfrom urllib3.exceptions import ReadTimeoutError", "_____no_output_____" ], [ "#Twitter API credentials\nconsumer_key = 'pe7gsS8WNkANobhPvKU5q9PPv'\nconsumer_secret = 'EF8F0wezQz8HPBRp17DMXTEBrxTXJLBs1mzPnzjLLDlYsHql7D'\naccess_token = '468248438-aR7mCZ40MKTFq2TNIOGN0IWEegXUBGKDpN3QtDyT'\naccess_token_secret = 'K5dXjiphLnk17QfWTxigJHOJjgM3005T3c2OZj2X79FA8'", "_____no_output_____" ], [ "#Definicion palabras claves busqueda\nkeywords = ['descarbonización', 'clima', 'climático', 'combustible', 'CO2', 'climática', 'transición energética', 'renovable', 'energía', 'energético', 'energética']", "_____no_output_____" ], [ "\ndef connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,\n tweet_id,text,created_at,source,\n reply_id, reply_user_id,\n retweet_id,retweet_user_id,\n quote_id,quote_user_id,\n reply_count,retweet_count,favorite_count,quote_count, \n hashtags, mention_ids,\n place_id, place_name, coord):\n \"\"\"\n connect to MySQL database and insert twitter data\n \"\"\"\n \n con = mysql.connector.connect(host = 'localhost',\n database='twitterdb', user='david', password = 'password', charset = 'utf8mb4',auth_plugin='mysql_native_password')\n cursor = con.cursor()\n try:\n\n if con.is_connected():\n \"\"\"\n Insert twitter data\n \"\"\"\n \n query = \"INSERT INTO UsersCAR (user_id, tweet_id,user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count) VALUES (%s,%s, %s, %s, %s, %s, %s, %s)\"\n cursor.execute(query, (user_id, tweet_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count))\n \n \n query2 = \"INSERT INTO PostsCAR (tweet_id,user_id,text,created_at,source,reply_id, reply_user_id,retweet_id, retweet_user_id,quote_id,quote_user_id,reply_count,retweet_count,favorite_count,quote_count,place_id, place_name, coord,hashtags, mention_ids) VALUES (%s,%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s)\"\n cursor.execute(query2, (tweet_id,user_id,text,created_at,source,\n reply_id, reply_user_id,\n retweet_id, retweet_user_id,\n quote_id,quote_user_id,\n reply_count,retweet_count,favorite_count,quote_count,\n place_id, place_name, coord,\n hashtags, mention_ids))\n \n con.commit() \n \n except Error as e:\n print(e)\n print(text)\n #Carlota: He dejado este print, porque no era capaz de almacenar emojis por la codificacion. 
\n #Estoy casi segura de que se ha arreglado, pero por si acaso\n cursor.close()\n con.close()\n return", "_____no_output_____" ], [ "class MyStreamListener(tweepy.StreamListener):\n \n def on_data(self,data):\n # Twitter returns data in JSON format - we need to decode it first\n try:\n decoded = json.loads(data)\n \n except Exception as e:\n print (\"Error on_data: %s\" % str(e)) #we don't want the listener to stop\n return True\n \n #LOCATION METADATA\n \n #En caso de estar geolocalizado guardar la geolocalizacion\n #Si esta geolocalizado dentro de un bounding box (no exacta)\n if decoded.get('place') is not None:\n place_id = decoded.get('place').get('id')\n place_name =decoded.get('place').get('name')\n else:\n place_id = 'None'\n place_name = 'None'\n \n #Si es localizacion exacta\n #Geo is deprecated, they suggest to use simply coordinates\n if decoded.get('cordinates') is not None:\n m_coord = decoded.get('coordinates')\n c=0\n coord=''\n for i in range(0, len(m_coord)-1):\n mc=m_coord[i]\n m_coord=coord+mc+';'#use a different separator!\n c=c+1\n mc=m_coord[c]\n m_coord=coord+mc\n else:\n coord = 'None'\n \n \n #USER METADATA\n user_name = '@' + decoded.get('user').get('screen_name') #nombre cuenta @itdUPM \n user_id=decoded.get('user').get('id') #id de la cuenta (int)\n user_loc=decoded.get('user').get('location') \n user_follow_count=decoded.get('user').get('followers_count')\n user_friends_count=decoded.get('user').get('friends_count')\n user_fav_count=decoded.get('user').get('favourites_count')\n user_status_count=decoded.get('user').get('statuses_count')\n \n #POST METADATA\n created_at = decoded.get('created_at') #Fecha\n text = decoded['text'].replace('\\n',' ') #Contenido tweet\n tweet_id = decoded['id'] #tweet id (int64)\n source = decoded['source'] #string source (web client, android, iphone) interesante???\n \n\n #REPLY METADATA\n reply_id=decoded['in_reply_to_status_id']\n reply_user_id=decoded['in_reply_to_user_id']\n\n #RETWEET\n if decoded.get('retweeted_status') is not None:\n retweet_id = decoded['retweeted_status'] ['id']\n retweet_user_id = decoded['retweeted_status']['user']['id']\n \n #Carlota: Si es un retweet los campos de nº de retweets favs etc vienen dentro de retweeted status\n #David: ok bien visto, he añadido el id de usuario retweeteado\n reply_count = decoded['retweeted_status']['reply_count'] #Number of times this Tweet has been replied to\n retweet_count = decoded['retweeted_status']['retweet_count'] #Number of times this Tweet has been retweeted\n favorite_count = decoded['retweeted_status']['favorite_count'] #how many times this Tweet has been liked by Twitter users.\n quote_count = decoded['retweeted_status']['quote_count']\n \n #hashtags_list=decoded.get('retweeted_status').get('entities').get('hashtags') \n #mentions=decoded.get('retweeted_status').get('entities').get('user_mentions')\n #David: para esto hay que crear una cadena de texto recorriendo la lista, el\n #código estaba en la versión anterior...\n \n hashtags_list=decoded['retweeted_status']['entities']['hashtags'] \n mentions=decoded['retweeted_status']['entities']['user_mentions']\n \n hashtags=''\n c=0\n if len(hashtags_list)>0:\n for i in range(0, len(hashtags_list)-1):\n mh=hashtags_list[i].get('text')\n hashtags=hashtags+mh+';'\n c=c+1\n mh=hashtags_list[c].get('text')\n hashtags=hashtags+str(mh)\n else:\n hashtags='None'\n\n mention_ids=''\n c=0\n if len(mentions)>0:\n for i in range(0, len(mentions)-1):\n mid=mentions[i].get('id_str')\n mention_ids=mention_ids+mid+';'#use a 
different separator!\n c=c+1\n mid=mentions[c].get('id_str')\n mention_ids=mention_ids+str(mid)\n else:\n mention_ids='None'\n\n \n #David: esto no sé si haría falta... este justo es un retweet de un post que a su ves\n #es un quote de una noticia, osea que hay dos pasos de conexión, pero el retweet \n #con el quote ya existe... lo guardamos pero hay que tenerlo en cuenta que es redundante\n \n #Carlota: Lo quito, porque tienes razon y no habia caido...\n \n #David. lo podemos dejar porque no son campos adicionales\n \n if decoded['retweeted_status']['is_quote_status']:\n if 'quoted_status' not in decoded['retweeted_status']:\n quote_id='None'\n quote_user_id='None' \n else:\n quote_id=decoded['retweeted_status']['quoted_status']['id']\n quote_user_id=decoded['retweeted_status']['quoted_status']['user']['id']\n else:\n quote_id='None'\n quote_user_id='None'\n \n else:\n \n reply_count = decoded['reply_count'] #Number of times this Tweet has been replied to\n retweet_count = decoded['retweet_count'] #Number of times this Tweet has been retweeted\n favorite_count = decoded['favorite_count'] #how many times this Tweet has been liked by Twitter users.\n quote_count = decoded['quote_count']\n retweet_id = 'None'\n retweet_user_id = 'None'\n \n if decoded['is_quote_status']:\n if 'quoted_status' not in decoded:\n quote_id='None'\n quote_user_id='None' \n else:\n quote_id=decoded['quoted_status']['id']\n quote_user_id=decoded['quoted_status']['user']['id']\n else:\n quote_id='None'\n quote_user_id='None'\n \n hashtags_list=decoded.get('entities').get('hashtags') \n mentions=decoded.get('entities').get('user_mentions')\n \n hashtags=''\n c=0\n if len(hashtags_list)>0:\n for i in range(0, len(hashtags_list)-1):\n mh=hashtags_list[i].get('text')\n hashtags=hashtags+mh+';'\n c=c+1\n mh=hashtags_list[c].get('text')\n hashtags=hashtags+str(mh)\n else:\n hashtags='None'\n\n mention_ids=''\n c=0\n if len(mentions)>0:\n for i in range(0, len(mentions)-1):\n mid=mentions[i].get('id_str')\n mention_ids=mention_ids+mid+';'#use a different separator!\n c=c+1\n mid=mentions[c].get('id_str')\n mention_ids=mention_ids+str(mid)\n else:\n mention_ids='None'\n \n #insert data just collected into MySQL database\n connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,\n tweet_id,text,created_at,source,\n reply_id, reply_user_id,\n retweet_id,retweet_user_id,\n quote_id,quote_user_id,\n reply_count,retweet_count,favorite_count,quote_count, \n hashtags, mention_ids,\n place_id, place_name, coord)\n \n #print(\"Tweet colleted at: {} \".format(str(created_at)))\n \n def on_error(self, status_code):\n if status_code == 420:\n #returning False in on_error disconnects the stream\n return False\n\n # returning non-False reconnects the stream, with backoff.\n\nwhile True:\n if __name__ == '__main__':\n \n try: \n print ('Starting')\n #authorize twitter, initialize tweepy \n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n \n #create the api and the stream object\n myStreamListener = MyStreamListener()\n myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)\n #Filter the stream by keywords\n myStream.filter(track = keywords)\n \n except (Timeout, SSLError, ReadTimeoutError, ConnectionError) as e:\n #logging.warning(\"Network error occurred. Keep calm and carry on.\", str(e))\n print(\"Network error occurred. 
Keep calm and carry on.\")\n print(str(e))\n continue\n \n except Exception as e:\n #logging.error(\"Unexpected error!\", e)\n print(\"Unexpected error!\")\n print(str(e))\n continue\n \n", "Starting\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
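The record above streams tweets with tweepy and inserts them into MySQL, but its second cell hard-codes the Twitter API keys and the `connect` function hard-codes the database password. A minimal sketch of reading those secrets from environment variables instead; the environment-variable names below are assumptions chosen for illustration, not names used by the notebook:

```python
import os

# Hypothetical environment-variable names; failing early with KeyError is
# preferable to starting the stream with missing credentials.
consumer_key = os.environ["TWITTER_CONSUMER_KEY"]
consumer_secret = os.environ["TWITTER_CONSUMER_SECRET"]
access_token = os.environ["TWITTER_ACCESS_TOKEN"]
access_token_secret = os.environ["TWITTER_ACCESS_TOKEN_SECRET"]

# Database password with an explicit (empty) fallback for local testing.
mysql_password = os.environ.get("MYSQL_PASSWORD", "")
```

Keeping credentials out of the cells also keeps them out of dumps like this one.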
ecd65149722e5f658e8412777c577551dfa9472a
2,035
ipynb
Jupyter Notebook
Calculadora.ipynb
Marcosddf/programacaojava
52ba3148721c6a9b09af0f3acc764680221c8826
[ "Unlicense" ]
2
2020-10-28T21:24:07.000Z
2021-03-09T20:29:52.000Z
Calculadora.ipynb
Marcosddf/programacaojava
52ba3148721c6a9b09af0f3acc764680221c8826
[ "Unlicense" ]
4
2020-03-24T18:17:03.000Z
2021-02-02T22:32:31.000Z
Calculadora.ipynb
Marcosddf/programacaojava
52ba3148721c6a9b09af0f3acc764680221c8826
[ "Unlicense" ]
1
2021-06-12T15:26:25.000Z
2021-06-12T15:26:25.000Z
22.611111
243
0.486486
[ [ [ "## Polymorphic calculator\n\nThe polymorphic calculator example is frequently used to illustrate polymorphism. Using polymorphism makes it possible to implement a generic calculator, where the operation to be executed is passed in as a parameter at run time.\n", "_____no_output_____" ] ], [ [ "abstract class Operacao {\n abstract int executa (int a, int b);\n}\n\nclass Soma extends Operacao {\n int executa (int a, int b){\n return a + b;\n }\n}\n\nclass Subtracao extends Operacao {\n int executa (int a, int b){\n return a - b;\n } \n}\n\n\nclass Calculadora {\n int calcula (Operacao opt, int a, int b){\n return opt.executa(a,b);\n }\n}\n\nclass Programa {\n public static void main (){\n Calculadora calc = new Calculadora ();\n \n System.out.println( calc.calcula (new Soma(),2,3) );\n \n System.out.println( calc.calcula (new Subtracao(),2,3) );\n \n }\n}\n\nPrograma.main();\n", "5\n-1\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
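The record above illustrates polymorphism with an abstract Java `Operacao` class whose `executa` method is overridden by `Soma` and `Subtracao`. For readers following the rest of this dump in Python, here is a hedged sketch of the same pattern using `abc`; the English class names are illustrative translations, not part of the original notebook:

```python
from abc import ABC, abstractmethod


class Operation(ABC):
    @abstractmethod
    def execute(self, a: int, b: int) -> int:
        ...


class Add(Operation):
    def execute(self, a: int, b: int) -> int:
        return a + b


class Subtract(Operation):
    def execute(self, a: int, b: int) -> int:
        return a - b


class Calculator:
    # The calculator never needs to know which concrete operation it was handed.
    def calculate(self, op: Operation, a: int, b: int) -> int:
        return op.execute(a, b)


calc = Calculator()
print(calc.calculate(Add(), 2, 3))       # 5, matching the Java output
print(calc.calculate(Subtract(), 2, 3))  # -1
```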
ecd651875226c6fd0187493c36ee62e5558258d6
131,572
ipynb
Jupyter Notebook
Assignment9ipynb.ipynb
purple0607/Linear-Algebra_2nd-Sem
35f1b22bd59957bb696ba387291e8ac27bff912d
[ "Apache-2.0" ]
null
null
null
Assignment9ipynb.ipynb
purple0607/Linear-Algebra_2nd-Sem
35f1b22bd59957bb696ba387291e8ac27bff912d
[ "Apache-2.0" ]
null
null
null
Assignment9ipynb.ipynb
purple0607/Linear-Algebra_2nd-Sem
35f1b22bd59957bb696ba387291e8ac27bff912d
[ "Apache-2.0" ]
null
null
null
78.691388
15,101
0.772231
[ [ [ "<a href=\"https://colab.research.google.com/github/purple0607/Linear-Algebra_2nd-Sem/blob/main/Assignment9ipynb.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ " # Linear Algebra for ChE\n## Laboratory 4 : Plotting Vector using NumPy and MatPlotLib\n", "_____no_output_____" ], [ "### Objectives\n In the previous laboratory activity, the students have expanded their knowledge and skills about fundamental matrix operations. Applying those matrix operations in Python programming has further improved their understanding of the different operations and their importance in Linear Algebra. However, in this laboratory activity, it focuses on using Python programming to represent and visualize vectors. Using Python to represent vectors is somehow similar to representing matrices. Additionally, this laboratory activity will also focus on learning the python libraries for numerical and scientific programming as well as the different vector operations. ", "_____no_output_____" ], [ "## Discussion", "_____no_output_____" ], [ "### NumPy\nNumPy, also known as Numerical Python, refers to a library containing multidimensional array objects and various functions for manipulating them. NumPy allows you to conduct mathematical and logical operations on arrays. [1] Jim Hugunin developed Numeric, the precursor of NumPy. Numarray, a package with some extra features, was also created. Travis Oliphant introduced the NumPy package in 2005 by combining the functionality of Numarray with Numeric. This open-source project has a large number of contributors. [2]\n", "_____no_output_____" ], [ "$Scalars$\n\nare those that have only a single value concerned with them. These can be fully stated in a single number. Scalars comprise magnitude only, and the same algebraic rules apply when working with them. In addition, it is possible to perform operations in scalars such as addition and subtraction in the same manner as the numbers can be. [3]\n\n$Vectors$\n\nare those that have a direction attached to them. These have magnitude and direction and follow the triangle law of vector addition in addition and subtraction. Using the standard algebraic rules results in vectors that cannot perform addition. The magnitude and direction of two vectors must be considered when adding them. [3]\n\n", "_____no_output_____" ], [ "#### Representing Vectors\nVectors can be presented with the use of importing the library NumPy as \"import numpy as np\" first before declaring the values of the vectors. The code used here is the \"np.array\" function. ", "_____no_output_____" ], [ "Now that you know how to represent vectors using their component and matrix form we can now hard-code them in Python. 
Let's say that you have the vectors:", "_____no_output_____" ], [ "$$ N = -5\\hat{x} + 7\\hat{y}\\\\\nU = 16\\hat{x} - 9\\hat{y} + 2\\hat{z}\\\\\nM = 12\\hat{w} + 6\\hat{x} - 17\\hat{y} - 3\\hat{z}\\\\\nP = -9ax - 4ay + 13az \\\\\nY = 5\\hat{i} - 10\\hat{j} + 8\\hat{k}$$", "_____no_output_____" ], [ "In which it's matrix equivalent is:", "_____no_output_____" ], [ "$$ N = \\begin{bmatrix} -5 \\\\ 7\\end{bmatrix} , U = \\begin{bmatrix} 16 \\\\ -9\\\\ 2\\end{bmatrix} , M = \\begin{bmatrix} 12 \\\\ 6 \\\\ -17 \\\\ -3\\end{bmatrix}, P = \\begin{bmatrix} -9 \\\\ -4 \\\\ 13\\end{bmatrix}, Y = \\begin{bmatrix} 5 \\\\ -10 \\\\ 8 \\end{bmatrix},\n$$\n$$ N = \\begin{bmatrix} -5 & 7\\end{bmatrix} , U = \\begin{bmatrix} 16 & -9 & 2\\end{bmatrix} , M = \\begin{bmatrix} 12 & 6 & -17 & -3\\end{bmatrix} , P = \\begin{bmatrix} -9 & -4 & 13\\end{bmatrix}, Y = \\begin{bmatrix} 5 & -10 & 8\\end{bmatrix} \n$$", "_____no_output_____" ], [ "We can then start doing numpy code with this by:", "_____no_output_____" ] ], [ [ "## Importing necessary libraries\nimport numpy as np", "_____no_output_____" ], [ "N = np.array([-5, 7])\nU = np.array([16, -9, 2])\nM = np.array([\n [12],\n [6],\n [-17],\n [-3]\n])\nP = np.array ([[-9],\n [-4],\n [13]])\nY = np.array ([[5],\n [-10],\n [8] \n])\nprint('Vector P is ')\nprint (N)\nprint('Vector O is ')\nprint (U)\nprint('Vector R is ')\nprint (M)\nprint('Vector S is ')\nprint (P)\nprint('Vector H is ')\nprint (Y)", "Vector P is \n[-5 7]\nVector O is \n[16 -9 2]\nVector R is \n[[ 12]\n [ 6]\n [-17]\n [ -3]]\nVector S is \n[[-9]\n [-4]\n [13]]\nVector H is \n[[ 5]\n [-10]\n [ 8]]\n" ], [ "N = np.array([\n [-5, 7],\n [-2, 3],\n])\n\nU = np.array([\n [16, -9, 2, 4, 11],\n [13, 1, -3, 5, 19],\n [4, 0, 12, -2, 0]\n])\nM = np.array([\n [12, 11],\n [6, 18],\n [-17, 21],\n [-3, -5]\n])\nP = np.array ([[-9, 17, 8, 9],\n [-4, 5, -16, 18],\n [13, 15, -20, 19],\n [23, -19, -3, 5],\n [18, -14, 25, 7]\n])\nprint('Vector G is ')\nprint (N)\nprint('Vector I is ')\nprint (U)\nprint('Vector A is ')\nprint (M)\nprint('Vector N is ')\nprint (P)", "Vector G is \n[[-5 7]\n [-2 3]]\nVector I is \n[[16 -9 2 4 11]\n [13 1 -3 5 19]\n [ 4 0 12 -2 0]]\nVector A is \n[[ 12 11]\n [ 6 18]\n [-17 21]\n [ -3 -5]]\nVector N is \n[[ -9 17 8 9]\n [ -4 5 -16 18]\n [ 13 15 -20 19]\n [ 23 -19 -3 5]\n [ 18 -14 25 7]]\n" ] ], [ [ "#### Describing vectors in NumPy\nFor the users to execute basic to complex operations using vectors, it is a must to define them first. Determining the shape, size, and dimensions of vectors is the primary approach to describing them. The code used to display them were \".shape,\" \".size,\" and \".ndim.\"", "_____no_output_____" ] ], [ [ "### Checking shapes\n### Shape refers to the number of components present on the rows and columns\n\nN = np.array([[14, 20, -31, 6, -1, 8, 11]])\nN.shape", "_____no_output_____" ], [ "### Checking size\n### Array/Vector size provides the exact number of components that a vector contains\n\nN.size", "_____no_output_____" ], [ "### Checking dimensions\n### The dimensions of a vector indicates the dimension of the vector, also called rank\n\nN.ndim", "_____no_output_____" ], [ "U = np.array([[[1, -10, 17, 63, 41, -25, 18, 99]]])\nU.shape", "_____no_output_____" ], [ "U.size", "_____no_output_____" ], [ "U.ndim", "_____no_output_____" ] ], [ [ "Great! Now let's try to explore in performing operations with these vectors.", "_____no_output_____" ], [ "#### $Addition$\n\nIn addition, two vectors are added and display the sum of them. 
The codes used were \"np.add\" and the \"+\" sign between the two vectors.\n\n#### $Subtraction$\n\nTo perform this operation, the codes used were \"np.subtract\" and the \"-\" sign in the middle of the two vectors. This operation gives the difference between the two vectors.\n\n#### $Multiplication$\n\nThis operation allows providing the product of two vectors. With the use of the codes \"np.multiply\" and \"*\" sign between the two vectors, they can be multiplied.\n\n#### $Division$\n\nDivision is an operation that divides two vectors using the codes \"np.divide\" and \"/\" sign in the middle of the two vectors. This presents the quotient of two vectors.\n", "_____no_output_____" ], [ "#### Addition", "_____no_output_____" ], [ "The addition rule is simple, the we just need to add the elements of the matrices according to their index. So in this case if we add vector $P$ and vector $Y$ we will have a resulting vector:", "_____no_output_____" ], [ "$$A = -4\\hat{x}-14\\hat{y}+21\\hat{z} \\\\ \\\\or \\\\ \\\\ A = \\begin{bmatrix} -4 \\\\ -14 \\\\ 21\\end{bmatrix} $$", "_____no_output_____" ], [ "So let's try to do that in NumPy in several number of ways:", "_____no_output_____" ] ], [ [ "A = np.add(P, Y) ## this is the functional method usisng the numpy library\nA", "_____no_output_____" ], [ "A = np.subtract(P, Y)\nA", "_____no_output_____" ], [ "A = np.multiply(P, Y)\nA", "_____no_output_____" ], [ "A = np.divide(P, Y)\nA", "_____no_output_____" ], [ "A = P + Y ## this is the explicit method, since Python does a value-reference so it can \n ## know that these variables would need to do array operations.\nA", "_____no_output_____" ], [ "pos1 = np.array([1,2,13,4,5])\npos2 = np.array([-4,15,6,8,7])\npos3 = np.array([0,-1,7,19,11])\npos4 = np.array([-22,7,18,0,12])\npos5 = np.array([14,4,-16,1,0])\npos6 = np.array([2,0,-9,3,14])\nB = pos1 + pos2 + pos3 + pos4 + pos5 + pos6\nB", "_____no_output_____" ], [ "C = pos6 - pos5 - pos4\nC", "_____no_output_____" ], [ "D = pos3 * pos4 * pos5\nD", "_____no_output_____" ], [ "E = pos1 / pos2 \nE", "_____no_output_____" ] ], [ [ "##### Try for yourself!", "_____no_output_____" ], [ "Try to implement subtraction, multiplication, and division with vectors $G$ and $P$!", "_____no_output_____" ] ], [ [ "G = np.array([\n [16, -8, -20, 7],\n])\nP = np.array([\n [-12, 11, 23, 4],\n])\nprint('Vector G is ')\nprint (G)\nprint('Vector P is ')\nprint (P)", "Vector G is \n[[ 16 -8 -20 7]]\nVector P is \n[[-12 11 23 4]]\n" ], [ "## Addition\nL = G + P\nL", "_____no_output_____" ], [ "## Subtraction\nO = P - G\nO", "_____no_output_____" ], [ "## Multiplication\nV = P * G\nV", "_____no_output_____" ], [ "## Division\nE = G / P\nE", "_____no_output_____" ], [ "Q = np.array([\n [1, 18, 0, -27],\n [3, -14, -1, 22]\n])\nR = np.array([\n [-2, 31, 26, 7],\n [6, -19, 9, 10]\n])\nprint('Vector Q is ')\nprint (Q)\nprint('Vector R is ')\nprint (R)", "Vector Q is \n[[ 1 18 0 -27]\n [ 3 -14 -1 22]]\nVector R is \n[[ -2 31 26 7]\n [ 6 -19 9 10]]\n" ], [ "## Addition\nT = np.add(Q, R) \nT", "_____no_output_____" ], [ "## Subtraction\nT = np.subtract(Q, R) \nT", "_____no_output_____" ], [ "## Multiplication \nT = np.multiply(Q, R) \nT", "_____no_output_____" ], [ "## Division\nT = np.divide(Q, R) \nT", "_____no_output_____" ] ], [ [ "### Scaling", "_____no_output_____" ], [ "This is done by using the code \"np.multiply\" and the declared value in a vector will be multiplied by the specific number. 
[5]\n\nLet's take the example below:", "_____no_output_____" ], [ "$$S = 7 \\cdot N$$", "_____no_output_____" ], [ "We can do this in numpy through:", "_____no_output_____" ] ], [ [ "#S = 7 * N\nS = np.multiply(7,N)\nS", "_____no_output_____" ] ], [ [ "Try to implement scaling with two vectors.", "_____no_output_____" ] ], [ [ "O = np.array([\n [1, 9, -14, 28],\n])\nY = np.array([\n [33, -61, 7, 15],\n])\nprint('Vector O is ')\nprint (O)\nprint('Vector Y is ')\nprint (Y)", "Vector O is \n[[ 1 9 -14 28]]\nVector Y is \n[[ 33 -61 7 15]]\n" ], [ "#S = 9 * O\nS = np.multiply(9,O)\nS", "_____no_output_____" ], [ "#S = 9 * Y \nS = np.multiply(9,Y)\nS", "_____no_output_____" ] ], [ [ "### MatPlotLib", "_____no_output_____" ], [ "In python, it is a plotting library that allows producing static, animated, and interactive visualization. [6] Also defined as a multi-platform data visualization package based on NumPy arrays and intended to operate with the SciPy stack, first developed in 2002 by John Hunter. Matplotlib has a large number of plots. Plots aid in the understanding of trends, patterns, and relationships. Thus, it is often used to make decisions based on numerical data. [7] Figure 8 shows what to import first in MatPlotLib.", "_____no_output_____" ], [ "#### Visualizing Data", "_____no_output_____" ], [ "One of the most significant advantages of visualization is that it provides access to visual/graphical to massive volumes of data in simply understandable graphics. Line, bar, scatter, histogram, and more graphs are available in Matplotlib. [7] In order to execute this, it is a must to import necessary programming libraries first.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib\n%matplotlib inline", "_____no_output_____" ], [ "X = [2, 4]\nY = [-7, 3]\nplt.scatter(X[0], X[1], label='X', c='blue')\nplt.scatter(Y[0], Y[1], label='Y', c='yellow')\nplt.grid()\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "VV = [-5, 1]\nWW = [9, 11]\nXX = [0, 6]\nYY = [-2, 7]\nZZ = [3, 4]\nplt.scatter(VV[0], VV[1], label='VV', c='orange')\nplt.scatter(WW[0], WW[1], label='WW', c='navy')\nplt.scatter(XX[0], XX[1], label='XX', c='purple')\nplt.scatter(YY[0], YY[1], label='YY', c='lime')\nplt.scatter(ZZ[0], ZZ[1], label='ZZ', c='skyblue')\nplt.grid()\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "W = np.array([-4, 1])\nZ = np.array([3, -5])\nR = W + Z\nMagnitude = np.sqrt(np.sum(R**2))\nprint(Magnitude)\nplt.title(\"Resultant Vector\\nMagnitude:{}\" .format(Magnitude))\nplt.xlim(-5, 5)\nplt.ylim(-5, 5)\nplt.quiver(0, 0, W[0], W[1], angles='xy', scale_units='xy', scale=1, color='green')\nplt.quiver(A[0], W[1], Z[0], Z[1], angles='xy', scale_units='xy', scale=1, color='purple')\nplt.quiver(0, 0, R[0], R[1], angles='xy', scale_units='xy', scale=1, color='yellow')\nplt.grid()\nplt.show()\nprint(R)\nSlope = R[1]/R[0]\nprint(Slope)\nAngle = (np.arctan(Slope))*(180/np.pi)\nprint(Angle)", "4.123105625617661\n" ], [ "n = W.shape[0]\nplt.xlim(-10, 10)\nplt.ylim(-10, 10)\nplt.quiver(0,0, W[0], W[1], angles='xy', scale_units='xy',scale=1)\nplt.quiver(W[0],W[1], Z[0], Z[1], angles='xy', scale_units='xy',scale=1)\nplt.quiver(0,0, R[0], R[1], angles='xy', scale_units='xy',scale=1)\nplt.show()", "_____no_output_____" ] ], [ [ "Try plotting Three Vectors and show the Resultant Vector as a result.\nUse Head to Tail Method.", "_____no_output_____" ] ], [ [ "AAA = np.array([2, -8])\nBBB = np.array([3, -7])\nCCC = AAA + BBB\nDDD = np.array([-5, 4])\nEEE = AAA + BBB + DDD\nMagnitude 
= np.sqrt(np.sum(EEE**2))\nplt.title(\"Resultant Vector\\nMagnitude: {} \\n Resultant: {}\" .format(Magnitude, EEE))\nplt.xlim(9, -19)\nplt.ylim(11, -19)\nplt.quiver(0, 0, AAA[0], AAA[1], angles='xy', scale_units='xy', scale=1, color='khaki')\nplt.quiver(AAA[0], AAA[1], BBB[0], BBB[1], angles='xy', scale_units='xy', scale=1, color='royalblue')\nplt.quiver(CCC[0], CCC[1], DDD[0], DDD[1], angles='xy', scale_units='xy', scale=1, color='maroon')\nplt.quiver(0, 0, EEE[0], EEE[1], angles='xy', scale_units='xy', scale=1, color='palegreen')\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "FFF = np.array([-4, -6])\nGGG = np.array([2, -8])\nHHH = FFF + GGG\nIII = np.array([6, 7])\nJJJ = FFF + GGG + III\nMagnitude = np.sqrt(np.sum(JJJ**2))\nplt.title(\"Resultant Vector\\nMagnitude: {} \\n Resultant: {}\" .format(Magnitude, JJJ))\nplt.xlim(12, -23)\nplt.ylim(11, -23)\nplt.quiver(0, 0, FFF[0], FFF[1], angles='xy', scale_units='xy', scale=1, color='cyan')\nplt.quiver(FFF[0], FFF[1], GGG[0], GGG[1], angles='xy', scale_units='xy', scale=1, color='orange')\nplt.quiver(HHH[0], HHH[1], III[0], III[1], angles='xy', scale_units='xy', scale=1, color='pink')\nplt.quiver(0, 0, JJJ[0], JJJ[1], angles='xy', scale_units='xy', scale=1, color='silver')\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "FFF = np.array([-4, -6])\nGGG = np.array([2, -8])\nHHH = FFF + GGG\nIII = np.array([6, 7])\nJJJ = FFF + GGG + III\nMagnitude = np.sqrt(np.sum(JJJ**2))\nplt.title(\"Resultant Vector\\nMagnitude: {} \\n Resultant: {}\" .format(Magnitude, JJJ))\nplt.xlim(12, -23)\nplt.ylim(11, -23)\nplt.quiver(0, 0, FFF[0], FFF[1], angles='xy', scale_units='xy', scale=1, color='cyan')\nplt.quiver(FFF[0], FFF[1], GGG[0], GGG[1], angles='xy', scale_units='xy', scale=1, color='orange')\nplt.quiver(HHH[0], HHH[1], III[0], III[1], angles='xy', scale_units='xy', scale=1, color='pink')\nplt.quiver(0, 0, JJJ[0], JJJ[1], angles='xy', scale_units='xy', scale=1, color='silver')\nplt.grid()\nplt.show()\nSlope = JJJ[1]/JJJ[0]\nprint('Slope:{}'.format(Slope))\nAngle = (np.arctan(Slope))*(180/np.pi)\nprint('Angle:{}'.format(Angle))", "_____no_output_____" ] ], [ [ "#Conclusion\n\nIn this laboratory report, the researchers have fully utilized the features of Google Colab in line with understanding the principles and application of python programming. With the topics discussed, the researchers have enhanced their knowledge and skills and performed the activity with ease and confidence. Upon doing the activity, the researchers learned the significance of importing the necessary programming libraries like \"import numpy as np\" and \"import matplotlib.pyplot as plt\" to execute commands and for the succeeding codes to work their functions. The objective of this laboratory activity has been achieved wherein the researchers have familiarized themselves with python libraries for numerical and scientific programming. In addition, the researchers were able to represent a vector using the code \"np.array\" and describe a vector according to its shape, size, and dimension with the codes \".shape,\" \".size,\" and \".ndim.\" \n\nAlong with representing and describing vectors, the researchers have also tackled performing operations with vectors, including addition, subtraction, multiplication, and division. There were two ways of executing the operations: the functional method and the explicit method. 
The functional method involves the signs such as \"np.add,\" \"np.subtract,\" \"np.multiply,\" and \"np.divide\" for the addition, subtraction, multiplication, and addition, respectively. While in the explicit method, the codes used were \"+,\" \"-,\" \"*,\" and \"/\" for the same arrangement of operations. Scaling has been discussed as well with the use of the code \"np.multiply\" for scaling a vector. For the MatPlotLib, it was comprehended its significance in python programming and how helpful it is in presenting a plot of data to be utilized in the interpretation of trends, patterns, and relationships. In line with this, the codes introduced allowed the researchers to display the resultant vector, magnitude, slope, and angle of the given vectors.\n\nThrough this laboratory activity, the researchers have further improved their proficiency in python programming with the use of the platform Google Colab. The different commands taught were recognized and acquainted with their usage in the activity, especially in doing the given task. Moreover, everything that has been discussed in this laboratory report will help the researchers to establish a good foundation regarding vectors and which they can use for future purposes. As a final point, the researchers have gained more knowledge and desire to apply everything they have learned in a real-life situation and field of work.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
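The vector-plotting cells in the record above compute the resultant's magnitude as `np.sqrt(np.sum(R**2))` and its angle as `arctan(slope)`, which drops the quadrant when both components are negative. A short sketch of the same calculation with `np.linalg.norm` and `np.arctan2`, reusing the notebook's `W` and `Z` values:

```python
import numpy as np

W = np.array([-4, 1])
Z = np.array([3, -5])
R = W + Z                      # resultant vector, here [-1, -4]

magnitude = np.linalg.norm(R)  # same value as np.sqrt(np.sum(R**2))
angle_deg = np.degrees(np.arctan2(R[1], R[0]))  # quadrant-aware angle

print(R, magnitude, angle_deg)
```

For `R = [-1, -4]` the magnitude matches the notebook's 4.1231..., while `arctan2` reports a third-quadrant angle instead of the first-quadrant value that `arctan(slope)` returns.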
ecd66588c65b79721832f5d4450ce7a09836094c
53,508
ipynb
Jupyter Notebook
notebooks/Modeling/Regression_Model_8_Features_CH.ipynb
tomfox1/airbnb_pricing_ds
8e1525600c72e65240667a6e1bef0b5c5281231e
[ "MIT" ]
null
null
null
notebooks/Modeling/Regression_Model_8_Features_CH.ipynb
tomfox1/airbnb_pricing_ds
8e1525600c72e65240667a6e1bef0b5c5281231e
[ "MIT" ]
1
2021-06-01T23:53:12.000Z
2021-06-01T23:53:12.000Z
notebooks/Modeling/Regression_Model_8_Features_CH.ipynb
tomfox1/airbnb_pricing_ds
8e1525600c72e65240667a6e1bef0b5c5281231e
[ "MIT" ]
2
2019-06-23T16:25:00.000Z
2019-06-25T23:22:40.000Z
60.053872
145
0.348378
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('model_ready.csv')", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df = df.drop(['amenities', 'id'], axis=1)", "_____no_output_____" ], [ "import numpy as np\nimport seaborn as sb\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nimport sklearn.metrics as metrics\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression as Lin_Reg\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport scipy.stats as stats\nimport math\n%matplotlib inline", "_____no_output_____" ], [ "# Split data into X and y\n\nX = df.drop(['total_price', 'price_log'], axis=1)\ny_log = df['price_log']\ny = df['total_price']", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y_log, test_size=0.35)", "_____no_output_____" ], [ "reg_params = 10.**np.linspace(-10, 5, 10)\nmodel = RidgeCV(alphas=reg_params, fit_intercept=True, cv=5)", "_____no_output_____" ], [ "model.fit(X_train, y_train)", "/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number6.870460e-19\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number7.907628e-19\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number1.153704e-17\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number1.155169e-17\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number8.167800e-18\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. Result is not guaranteed to be accurate.\nReciprocal condition number6.155547e-17\n overwrite_a=True).T\n/home/ec2-user/anaconda3/envs/python3/lib/python3.6/site-packages/sklearn/linear_model/ridge.py:125: LinAlgWarning: scipy.linalg.solve\nIll-conditioned matrix detected. 
Result is not guaranteed to be accurate.\nReciprocal condition number6.116357e-17\n overwrite_a=True).T\n" ], [ "y_pred = model.predict(X_test)", "_____no_output_____" ], [ "r2 = r2_score(y_test, y_pred)\nrmse = (np.sqrt(mean_squared_error(y_test, y_pred)))", "_____no_output_____" ], [ "r2, rmse", "_____no_output_____" ], [ "y_pred = pd.DataFrame(data=y_pred.flatten())", "_____no_output_____" ], [ "y_pred = y_pred[0].apply(lambda x: math.exp(x))", "_____no_output_____" ], [ "y_test = y_test.apply(lambda x: math.exp(x))", "_____no_output_____" ], [ "y_test = pd.DataFrame(y_test)\ny_test = y_test.reset_index()\ny_test = y_test.drop('index', axis=1)", "_____no_output_____" ], [ "y_pred = pd.DataFrame(y_pred)", "_____no_output_____" ], [ "pred_df = pd.concat((y_test, y_pred), axis=1)", "_____no_output_____" ], [ "# WARNING -- Do Not Run outside of Sagemaker", "_____no_output_____" ], [ "import statsmodels.api as sm\n# Create OLS model with robust standard errors.\nmodel = sm.OLS(y_log, X)\nresults = model.fit(cov_type='HC3')\nprint(results.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: price_log R-squared: 0.748\nModel: OLS Adj. R-squared: 0.746\nMethod: Least Squares F-statistic: nan\nDate: Tue, 25 Jun 2019 Prob (F-statistic): nan\nTime: 03:41:19 Log-Likelihood: -25340.\nNo. Observations: 43365 AIC: 5.138e+04\nDf Residuals: 43015 BIC: 5.442e+04\nDf Model: 349 \nCovariance Type: HC3 \n==========================================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------------------\n 9176 -0.7423 3.651 -0.203 0.839 -7.898 6.414\n0 0.6123 7.001 0.087 0.930 -13.109 14.334\n10019 0.6338 5.083 0.125 0.901 -9.329 10.596\n10023 -0.0806 4.048 -0.020 0.984 -8.014 7.853\n139 S -1.1401 320.946 -0.004 0.997 -630.183 627.902\n37738 -0.1918 3.551 -0.054 0.957 -7.151 6.768\n60601 0.5288 3.431 0.154 0.878 -6.196 7.254\n90 -0.4826 4.064 -0.119 0.905 -8.447 7.482\n900 1.5376 7.772 0.198 0.843 -13.695 16.770\n90001 -0.0922 3.425 -0.027 0.979 -6.806 6.621\n90002 -0.2942 3.430 -0.086 0.932 -7.018 6.429\n90003 -0.2298 3.425 -0.067 0.946 -6.943 6.483\n90004 0.1279 3.424 0.037 0.970 -6.584 6.839\n90005 0.0170 3.424 0.005 0.996 -6.695 6.729\n90006 -0.1274 3.424 -0.037 0.970 -6.839 6.584\n90007 -0.1076 3.424 -0.031 0.975 -6.819 6.604\n90008 0.0409 3.425 0.012 0.990 -6.671 6.753\n90010 0.3760 3.425 0.110 0.913 -6.337 7.089\n90011 -0.2535 3.425 -0.074 0.941 -6.966 6.459\n90012 0.2556 3.424 0.075 0.941 -6.456 6.967\n90013 0.2931 3.424 0.086 0.932 -6.419 7.005\n90014 0.3028 3.424 0.088 0.930 -6.409 7.014\n90015 0.4154 3.424 0.121 0.903 -6.296 7.127\n90016 0.0579 3.424 0.017 0.986 -6.654 6.770\n90017 0.3513 3.424 0.103 0.918 -6.360 7.063\n90018 0.0304 3.424 0.009 0.993 -6.681 6.742\n90019 0.0288 3.424 0.008 0.993 -6.683 6.740\n90020 0.0632 3.424 0.018 0.985 -6.648 6.775\n90021 0.0343 3.425 0.010 0.992 -6.679 6.747\n90022 -0.1572 3.425 -0.046 0.963 -6.871 6.556\n90023 -0.2501 3.425 -0.073 0.942 -6.963 6.463\n90024 0.5017 3.424 0.147 0.884 -6.210 7.213\n90025 0.3277 3.424 0.096 0.924 -6.384 7.039\n90026 0.1408 3.424 0.041 0.967 -6.571 6.852\n90027 0.1829 3.424 0.053 0.957 -6.529 6.894\n90028 0.2886 3.424 0.084 0.933 -6.423 7.000\n90029 0.1840 3.424 0.054 0.957 -6.528 6.896\n90030 0.3249 3.660 0.089 0.929 -6.849 7.499\n90031 -0.0377 3.424 -0.011 0.991 -6.750 6.674\n90032 -0.0800 3.425 -0.023 0.981 -6.792 6.632\n90033 
-0.1198 3.424 -0.035 0.972 -6.832 6.592\n90034 0.1381 3.424 0.040 0.968 -6.574 6.850\n90035 0.1842 3.424 0.054 0.957 -6.527 6.896\n90036 0.2678 3.424 0.078 0.938 -6.444 6.979\n90037 -0.1048 3.425 -0.031 0.976 -6.817 6.607\n90038 0.1494 3.424 0.044 0.965 -6.562 6.861\n90039 0.1910 3.424 0.056 0.956 -6.521 6.903\n90040 0.1645 3.425 0.048 0.962 -6.548 6.877\n90041 0.0316 3.424 0.009 0.993 -6.680 6.743\n90042 -0.0035 3.424 -0.001 0.999 -6.715 6.708\n90043 -0.0813 3.424 -0.024 0.981 -6.793 6.630\n90044 -0.0854 3.425 -0.025 0.980 -6.798 6.627\n90045 0.1390 3.424 0.041 0.968 -6.573 6.851\n90046 0.4160 3.424 0.121 0.903 -6.296 7.128\n90047 -0.0201 3.425 -0.006 0.995 -6.733 6.692\n90048 0.4285 3.424 0.125 0.900 -6.283 7.140\n90049 0.4415 3.424 0.129 0.897 -6.270 7.153\n90056 0.2160 3.425 0.063 0.950 -6.496 6.928\n90057 -0.0718 3.425 -0.021 0.983 -6.784 6.640\n90058 0.1158 3.434 0.034 0.973 -6.615 6.846\n90059 -0.4839 3.425 -0.141 0.888 -7.198 6.230\n90061 -0.4606 3.431 -0.134 0.893 -7.185 6.264\n90062 -0.0133 3.425 -0.004 0.997 -6.726 6.700\n90063 -0.1001 3.425 -0.029 0.977 -6.812 6.612\n90064 0.2573 3.424 0.075 0.940 -6.454 6.969\n90065 0.0543 3.424 0.016 0.987 -6.657 6.766\n90066 0.2222 3.424 0.065 0.948 -6.489 6.934\n90067 1.4300 3.458 0.414 0.679 -5.348 8.208\n90068 0.3940 3.424 0.115 0.908 -6.318 7.106\n90069 0.6281 3.424 0.183 0.854 -6.084 7.340\n90071 0.5355 3.426 0.156 0.876 -6.180 7.251\n90077 0.6678 3.425 0.195 0.845 -6.044 7.380\n90094 0.4703 3.425 0.137 0.891 -6.242 7.182\n90201 -0.3328 3.426 -0.097 0.923 -7.048 6.383\n90210 0.7752 3.424 0.226 0.821 -5.937 7.487\n90211 0.4282 3.424 0.125 0.900 -6.284 7.140\n90212 0.5168 3.424 0.151 0.880 -6.195 7.228\n90220 -0.2183 3.426 -0.064 0.949 -6.933 6.496\n90221 -0.5351 3.446 -0.155 0.877 -7.289 6.219\n90222 0.2455 3.518 0.070 0.944 -6.650 7.141\n90230 0.0890 3.424 0.026 0.979 -6.623 6.801\n90232 0.2286 3.424 0.067 0.947 -6.483 6.940\n90240 -0.0686 3.426 -0.020 0.984 -6.784 6.647\n90241 -0.2171 3.425 -0.063 0.949 -6.930 6.496\n90242 -0.2185 3.425 -0.064 0.949 -6.931 6.494\n90245 0.1010 3.424 0.030 0.976 -6.611 6.813\n90247 -0.1802 3.425 -0.053 0.958 -6.892 6.532\n90248 -0.1838 3.428 -0.054 0.957 -6.903 6.536\n90249 -0.0346 3.425 -0.010 0.992 -6.747 6.678\n90250 -0.0613 3.424 -0.018 0.986 -6.773 6.650\n90254 0.5535 3.425 0.162 0.872 -6.158 7.265\n90255 -0.2543 3.428 -0.074 0.941 -6.972 6.464\n90260 -0.1952 3.425 -0.057 0.955 -6.907 6.517\n90262 -0.3837 3.425 -0.112 0.911 -7.097 6.329\n90265 0.9705 3.424 0.283 0.777 -5.741 7.682\n90266 0.5857 3.424 0.171 0.864 -6.126 7.297\n90270 -0.1478 3.425 -0.043 0.966 -6.861 6.565\n90272 0.5273 3.424 0.154 0.878 -6.185 7.239\n90274 0.5244 3.426 0.153 0.878 -6.191 7.239\n90275 0.3713 3.425 0.108 0.914 -6.342 7.085\n90277 0.2736 3.424 0.080 0.936 -6.438 6.985\n90278 0.1878 3.424 0.055 0.956 -6.524 6.900\n90280 -0.1449 3.426 -0.042 0.966 -6.860 6.570\n90290 0.3868 3.424 0.113 0.910 -6.325 7.099\n90291 0.4759 3.424 0.139 0.889 -6.236 7.187\n90292 0.5020 3.424 0.147 0.883 -6.210 7.214\n90293 0.3580 3.424 0.105 0.917 -6.354 7.070\n90301 -0.0977 3.424 -0.029 0.977 -6.810 6.614\n90302 -0.0383 3.425 -0.011 0.991 -6.750 6.674\n90303 -0.0746 3.425 -0.022 0.983 -6.787 6.638\n90304 -0.0820 3.425 -0.024 0.981 -6.794 6.630\n90305 -0.0495 3.425 -0.014 0.988 -6.762 6.662\n90401 0.5289 3.424 0.154 0.877 -6.183 7.241\n90402 0.6191 3.424 0.181 0.857 -6.093 7.331\n90403 0.5003 3.424 0.146 0.884 -6.211 7.212\n90404 0.4246 3.424 0.124 0.901 -6.287 7.136\n90405 0.5200 3.424 0.152 0.879 -6.192 7.232\n90501 -0.0431 3.425 -0.013 0.990 
-6.755 6.669\n90502 -0.0895 3.425 -0.026 0.979 -6.802 6.623\n90503 0.0086 3.425 0.003 0.998 -6.704 6.721\n90504 -0.0831 3.425 -0.024 0.981 -6.795 6.629\n90505 -0.0134 3.425 -0.004 0.997 -6.726 6.699\n90601 -0.0975 3.425 -0.028 0.977 -6.810 6.616\n90602 -0.0910 3.425 -0.027 0.979 -6.804 6.622\n90603 0.0468 3.426 0.014 0.989 -6.668 6.761\n90604 -0.1361 3.426 -0.040 0.968 -6.851 6.578\n90605 -0.2321 3.453 -0.067 0.946 -7.000 6.536\n90606 -0.2112 3.425 -0.062 0.951 -6.924 6.502\n90630 -0.3636 964.783 -0.000 1.000 -1891.303 1890.576\n90631 -0.1366 3.426 -0.040 0.968 -6.852 6.579\n90638 -0.1913 3.425 -0.056 0.955 -6.904 6.521\n90640 -0.2979 3.425 -0.087 0.931 -7.010 6.415\n90650 -0.1834 3.425 -0.054 0.957 -6.896 6.530\n90660 -0.2761 3.425 -0.081 0.936 -6.988 6.436\n90701 0.0293 3.426 0.009 0.993 -6.686 6.745\n90703 -0.1695 3.425 -0.050 0.961 -6.883 6.544\n90704 0.5774 3.424 0.169 0.866 -6.134 7.289\n90706 -0.0884 3.426 -0.026 0.979 -6.803 6.626\n90710 0.1043 3.426 0.030 0.976 -6.611 6.819\n90712 -0.1472 3.425 -0.043 0.966 -6.861 6.566\n90713 -0.0434 3.426 -0.013 0.990 -6.759 6.672\n90715 -0.1526 3.426 -0.045 0.964 -6.868 6.563\n90717 -0.0019 3.425 -0.001 1.000 -6.715 6.712\n90723 -0.2278 3.426 -0.066 0.947 -6.943 6.488\n90731 0.0845 3.425 0.025 0.980 -6.627 6.797\n90732 -0.1441 3.425 -0.042 0.966 -6.858 6.569\n90744 -0.4694 3.428 -0.137 0.891 -7.187 6.249\n90745 -0.2266 3.425 -0.066 0.947 -6.939 6.486\n90746 0.0146 3.425 0.004 0.997 -6.699 6.728\n90755 -0.1059 3.426 -0.031 0.975 -6.820 6.608\n90802 0.1742 3.424 0.051 0.959 -6.537 6.886\n90803 0.2741 3.424 0.080 0.936 -6.437 6.986\n90804 -0.0455 3.424 -0.013 0.989 -6.757 6.666\n90805 -0.2172 3.425 -0.063 0.949 -6.929 6.495\n90806 -0.0308 3.424 -0.009 0.993 -6.743 6.681\n90807 -0.0423 3.424 -0.012 0.990 -6.754 6.670\n90808 0.0338 3.425 0.010 0.992 -6.679 6.746\n90810 -0.2233 3.425 -0.065 0.948 -6.936 6.490\n90813 -0.0020 3.424 -0.001 1.000 -6.714 6.710\n90814 0.1069 3.425 0.031 0.975 -6.605 6.819\n90815 -0.0904 3.425 -0.026 0.979 -6.803 6.622\n91001 0.0835 3.424 0.024 0.981 -6.628 6.795\n91006 -0.1805 3.425 -0.053 0.958 -6.893 6.532\n91007 -0.0368 3.424 -0.011 0.991 -6.749 6.675\n91008 0.3270 3.433 0.095 0.924 -6.402 7.056\n91010 -0.0708 3.426 -0.021 0.984 -6.785 6.643\n91011 0.0973 3.425 0.028 0.977 -6.616 6.810\n91016 -0.0862 3.425 -0.025 0.980 -6.798 6.626\n91020 0.0927 3.425 0.027 0.978 -6.621 6.806\n91024 0.0658 3.425 0.019 0.985 -6.647 6.779\n91030 0.1583 3.425 0.046 0.963 -6.555 6.871\n91040 -0.0134 3.426 -0.004 0.997 -6.728 6.701\n91042 -0.0654 3.425 -0.019 0.985 -6.779 6.648\n91101 0.1464 3.425 0.043 0.966 -6.566 6.858\n91103 0.1620 3.425 0.047 0.962 -6.551 6.875\n91104 0.1394 3.424 0.041 0.968 -6.572 6.851\n91105 0.4882 3.425 0.143 0.887 -6.224 7.200\n91106 0.0821 3.425 0.024 0.981 -6.630 6.794\n91107 -0.0187 3.424 -0.005 0.996 -6.731 6.693\n91108 0.3569 3.428 0.104 0.917 -6.361 7.075\n91201 0.0664 3.425 0.019 0.985 -6.646 6.778\n91202 0.2223 3.425 0.065 0.948 -6.490 6.934\n91203 0.3873 3.425 0.113 0.910 -6.326 7.100\n91204 0.1650 3.425 0.048 0.962 -6.547 6.877\n91205 0.0890 3.425 0.026 0.979 -6.623 6.801\n91206 0.0549 3.425 0.016 0.987 -6.658 6.767\n91207 0.1841 3.425 0.054 0.957 -6.529 6.897\n91208 0.2331 3.425 0.068 0.946 -6.481 6.947\n91210 0.5872 3.426 0.171 0.864 -6.127 7.301\n91214 0.0262 3.425 0.008 0.994 -6.686 6.739\n91301 0.1366 3.425 0.040 0.968 -6.576 6.849\n91302 0.3642 3.425 0.106 0.915 -6.349 7.077\n91303 0.0247 3.425 0.007 0.994 -6.688 6.737\n91304 0.0627 3.425 0.018 0.985 -6.649 6.775\n91306 -0.0251 3.425 -0.007 0.994 
-6.737 6.687\n91307 0.0511 3.425 0.015 0.988 -6.661 6.763\n91311 0.1606 3.425 0.047 0.963 -6.553 6.874\n91316 0.1855 3.425 0.054 0.957 -6.527 6.898\n91321 -0.0803 3.427 -0.023 0.981 -6.797 6.636\n91324 -0.0528 3.425 -0.015 0.988 -6.766 6.661\n91325 0.1109 3.425 0.032 0.974 -6.601 6.823\n91326 0.0367 3.425 0.011 0.991 -6.677 6.750\n91331 -0.1299 3.425 -0.038 0.970 -6.843 6.584\n91335 -0.0685 3.424 -0.020 0.984 -6.780 6.643\n91340 -0.0496 3.425 -0.014 0.988 -6.763 6.664\n91342 -0.1108 3.425 -0.032 0.974 -6.824 6.602\n91343 -0.1051 3.425 -0.031 0.976 -6.818 6.608\n91344 0.0420 3.425 0.012 0.990 -6.671 6.754\n91345 -0.3674 3.425 -0.107 0.915 -7.081 6.346\n91350 0.0243 3.427 0.007 0.994 -6.693 6.741\n91351 -0.2181 3.428 -0.064 0.949 -6.937 6.501\n91352 -0.1368 3.425 -0.040 0.968 -6.849 6.575\n91354 0.0287 3.425 0.008 0.993 -6.685 6.742\n91355 0.0155 3.425 0.005 0.996 -6.698 6.729\n91356 0.0800 3.425 0.023 0.981 -6.632 6.792\n91361 0.2246 3.427 0.066 0.948 -6.492 6.941\n91362 0.0770 3.455 0.022 0.982 -6.694 6.848\n91364 0.2235 3.424 0.065 0.948 -6.488 6.935\n91367 0.0559 3.424 0.016 0.987 -6.656 6.768\n91377 -0.1865 8.819 -0.021 0.983 -17.471 17.098\n91381 0.1226 3.425 0.036 0.971 -6.591 6.836\n91384 -0.2638 3.425 -0.077 0.939 -6.977 6.449\n91387 -0.0411 3.426 -0.012 0.990 -6.755 6.673\n91390 0.0306 3.426 0.009 0.993 -6.685 6.746\n91401 0.0848 3.424 0.025 0.980 -6.627 6.797\n91402 0.1053 3.425 0.031 0.975 -6.608 6.819\n91403 0.2041 3.424 0.060 0.952 -6.508 6.916\n91405 -0.1742 3.425 -0.051 0.959 -6.886 6.538\n91406 0.0358 3.424 0.010 0.992 -6.676 6.748\n91411 0.1082 3.425 0.032 0.975 -6.604 6.820\n91423 0.1431 3.424 0.042 0.967 -6.569 6.855\n91436 0.4705 3.425 0.137 0.891 -6.243 7.184\n91501 0.0611 3.425 0.018 0.986 -6.651 6.773\n91502 0.2341 3.426 0.068 0.946 -6.481 6.949\n91504 0.0674 3.425 0.020 0.984 -6.645 6.780\n91505 0.1289 3.424 0.038 0.970 -6.583 6.841\n91506 0.2216 3.425 0.065 0.948 -6.491 6.934\n91523 0.3829 32.870 0.012 0.991 -64.040 64.806\n91601 0.1089 3.424 0.032 0.975 -6.603 6.821\n91602 0.1889 3.424 0.055 0.956 -6.523 6.901\n91603 -0.4148 32.798 -0.013 0.990 -64.697 63.867\n91604 0.3185 3.424 0.093 0.926 -6.393 7.030\n91605 -0.0813 3.425 -0.024 0.981 -6.794 6.631\n91606 -0.0743 3.424 -0.022 0.983 -6.786 6.638\n91607 0.0852 3.424 0.025 0.980 -6.627 6.797\n91702 -0.2263 3.425 -0.066 0.947 -6.939 6.486\n91706 -0.1045 3.425 -0.031 0.976 -6.818 6.609\n91711 0.0007 3.425 0.000 1.000 -6.712 6.713\n91722 -0.5106 3.426 -0.149 0.882 -7.225 6.204\n91723 -0.4019 3.426 -0.117 0.907 -7.116 6.312\n91724 -0.0012 3.425 -0.000 1.000 -6.714 6.712\n91731 -0.1862 3.425 -0.054 0.957 -6.899 6.526\n91732 -0.1475 3.425 -0.043 0.966 -6.861 6.566\n91733 -0.1999 3.425 -0.058 0.953 -6.913 6.514\n91740 -0.4194 3.427 -0.122 0.903 -7.135 6.297\n91741 -0.3270 3.425 -0.095 0.924 -7.040 6.386\n91744 -0.2219 3.426 -0.065 0.948 -6.937 6.494\n91745 -0.2649 3.424 -0.077 0.938 -6.977 6.447\n91746 -0.5428 3.426 -0.158 0.874 -7.257 6.171\n91748 -0.3683 3.424 -0.108 0.914 -7.080 6.343\n91750 -0.1343 3.425 -0.039 0.969 -6.846 6.578\n91754 0.0425 3.425 0.012 0.990 -6.670 6.755\n91755 -0.2805 3.424 -0.082 0.935 -6.992 6.431\n91759 0.5997 7.691 0.078 0.938 -14.474 15.673\n91765 -0.2594 3.425 -0.076 0.940 -6.971 6.453\n91766 -0.3061 3.425 -0.089 0.929 -7.019 6.407\n91767 -0.5493 3.425 -0.160 0.873 -7.262 6.163\n91768 -0.0811 3.426 -0.024 0.981 -6.795 6.633\n91770 -0.3105 3.424 -0.091 0.928 -7.022 6.401\n91773 -0.1818 3.426 -0.053 0.958 -6.896 6.533\n91775 0.0128 3.425 0.004 0.997 -6.700 6.725\n91776 -0.2407 3.424 -0.070 0.944 
-6.952 6.471\n91780 -0.2630 3.424 -0.077 0.939 -6.975 6.449\n91786 -0.5183 4.031 -0.129 0.898 -8.418 7.382\n91789 -0.3282 3.425 -0.096 0.924 -7.040 6.384\n91790 -0.2768 3.425 -0.081 0.936 -6.989 6.436\n91791 -0.0494 3.425 -0.014 0.988 -6.763 6.664\n91792 -0.4145 3.425 -0.121 0.904 -7.127 6.298\n91801 -0.2103 3.424 -0.061 0.951 -6.922 6.501\n91803 -0.3133 3.424 -0.092 0.927 -7.025 6.398\n92192 -0.1416 9.244 -0.015 0.988 -18.260 17.977\n92262 0.4049 113.672 0.004 0.997 -222.388 223.198\n92397 -0.4371 3.438 -0.127 0.899 -7.175 6.301\n92683 0.5645 3.453 0.163 0.870 -6.203 7.332\n92821 -0.1964 68.429 -0.003 0.998 -134.314 133.922\n93063 0.5263 3.509 0.150 0.881 -6.351 7.404\n93510 0.1426 3.442 0.041 0.967 -6.603 6.888\n93532 -0.0659 3.429 -0.019 0.985 -6.786 6.654\n93534 -0.5524 3.429 -0.161 0.872 -7.273 6.168\n93535 -0.4878 3.425 -0.142 0.887 -7.202 6.226\n93536 -0.5909 3.425 -0.173 0.863 -7.304 6.122\n93543 -0.4239 3.425 -0.124 0.902 -7.137 6.289\n93544 -0.2151 3.427 -0.063 0.950 -6.933 6.502\n93550 -0.3989 3.425 -0.116 0.907 -7.113 6.315\n93551 -0.2562 3.425 -0.075 0.940 -6.969 6.457\n93552 -0.4844 3.427 -0.141 0.888 -7.201 6.232\n93563 -0.0466 3.447 -0.014 0.989 -6.803 6.710\n93591 0.0397 3.517 0.011 0.991 -6.854 6.933\nNear -0.3503 5.256 -0.067 0.947 -10.651 9.950\nAparthotel 0.3928 3.287 0.119 0.905 -6.050 6.835\nApartment 0.0498 3.287 0.015 0.988 -6.392 6.491\nBarn 0.0327 3.293 0.010 0.992 -6.421 6.487\nBed and breakfast 0.2103 3.287 0.064 0.949 -6.232 6.652\nBoat 0.0111 3.289 0.003 0.997 -6.436 6.458\nBoutique hotel 0.6222 3.287 0.189 0.850 -5.820 7.065\nBungalow 0.1357 3.287 0.041 0.967 -6.306 6.577\nBus -0.2723 3.324 -0.082 0.935 -6.787 6.243\nCabin 0.1002 3.287 0.030 0.976 -6.342 6.543\nCamper/RV -0.1548 3.287 -0.047 0.962 -6.597 6.287\nCampsite 0.4662 3.325 0.140 0.888 -6.050 6.982\nCasa particular (Cuba) -0.0345 3.311 -0.010 0.992 -6.524 6.455\nCastle 0.2689 3.292 0.082 0.935 -6.183 6.721\nCave 0.3011 3.419 0.088 0.930 -6.401 7.003\nChalet 0.3917 3.288 0.119 0.905 -6.053 6.836\nCondominium 0.1477 3.287 0.045 0.964 -6.294 6.589\nCottage 0.1627 3.287 0.049 0.961 -6.279 6.604\nDome house 0.7599 3.381 0.225 0.822 -5.866 7.386\nDorm -0.3965 3.293 -0.120 0.904 -6.850 6.057\nEarth house 0.4255 3.288 0.129 0.897 -6.020 6.871\nFarm stay 0.3058 3.288 0.093 0.926 -6.138 6.750\nGuest suite 0.0447 3.287 0.014 0.989 -6.397 6.486\nGuesthouse 0.0774 3.287 0.024 0.981 -6.364 6.519\nHostel -0.2850 3.287 -0.087 0.931 -6.727 6.157\nHotel 0.1493 3.287 0.045 0.964 -6.293 6.591\nHouse 0.1748 3.287 0.053 0.958 -6.267 6.616\nHouseboat -0.1170 3.313 -0.035 0.972 -6.611 6.377\nHut 0.3763 3.450 0.109 0.913 -6.386 7.139\nIsland 1.3482 3.301 0.408 0.683 -5.121 7.818\nLoft 0.1767 3.287 0.054 0.957 -6.265 6.618\nMinsu (Taiwan) 0.1225 3.288 0.037 0.970 -6.322 6.567\nOther 0.1684 3.287 0.051 0.959 -6.274 6.611\nResort 1.3202 3.305 0.399 0.690 -5.157 7.797\nServiced apartment 0.1769 3.287 0.054 0.957 -6.265 6.618\nTent -0.5527 3.289 -0.168 0.867 -6.999 5.894\nTiny house 0.0659 3.287 0.020 0.984 -6.377 6.508\nTipi 0.0349 3.293 0.011 0.992 -6.419 6.489\nTownhouse 0.1733 3.287 0.053 0.958 -6.268 6.615\nTrain 0.5863 140.717 0.004 0.997 -275.214 276.387\nTreehouse 0.2251 3.290 0.068 0.945 -6.224 6.674\nVilla 0.4109 3.287 0.125 0.901 -6.031 6.852\nYurt -0.2418 3.289 -0.074 0.941 -6.689 6.205\nEntire home/apt 3.4260 2.891 1.185 0.236 -2.240 9.092\nPrivate room 2.8061 2.891 0.971 0.332 -2.859 8.472\nShared room 2.1291 2.891 0.737 0.461 -3.536 7.795\nAirbed 1.5778 1.735 0.909 0.363 -1.822 4.978\nCouch 1.8695 1.736 1.077 
0.281 -1.532 5.271\nFuton 1.5714 1.735 0.906 0.365 -1.828 4.971\nPull-out Sofa 1.6870 1.735 0.972 0.331 -1.713 5.087\nReal Bed 1.6555 1.734 0.955 0.340 -1.744 5.055\naccommodates 0.1735 0.007 26.327 0.000 0.161 0.186\nbathrooms 0.1017 0.005 20.455 0.000 0.092 0.111\nbedrooms 0.2255 0.006 38.089 0.000 0.214 0.237\nbeds -0.0652 0.005 -12.173 0.000 -0.076 -0.055\n==============================================================================\nOmnibus: 9537.002 Durbin-Watson: 1.941\nProb(Omnibus): 0.000 Jarque-Bera (JB): 73989.505\nSkew: 0.849 Prob(JB): 0.00\nKurtosis: 9.170 Cond. No. 6.07e+16\n==============================================================================\n\nWarnings:\n[1] Standard Errors are heteroscedasticity robust (HC3)\n[2] The smallest eigenvalue is 3.71e-29. This might indicate that there are\nstrong multicollinearity problems or that the design matrix is singular.\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
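In the regression record above, `RidgeCV` is fit on `price_log`, so the reported `r2` and `rmse` are on the log scale even though later cells exponentiate `y_pred` and `y_test`. A hedged sketch of reporting the metrics on the original price scale as well; the helper below is an illustration, not code from the notebook:

```python
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score


def price_scale_metrics(y_test_log, y_pred_log):
    """Return (r2, rmse) after mapping log-price values back to prices."""
    y_true = np.exp(np.asarray(y_test_log, dtype=float))
    y_hat = np.exp(np.asarray(y_pred_log, dtype=float))
    rmse = np.sqrt(mean_squared_error(y_true, y_hat))
    return r2_score(y_true, y_hat), rmse
```

An RMSE in price units is usually easier to interpret than one in log units.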
ecd666f119447695cf3beee32f223bad35d56a42
9,078
ipynb
Jupyter Notebook
Assignment-1-Copy1 (1).ipynb
Bhavani263/LetsUpgrade-Python
8322352a1ce6f5523825b4388b7afbf5bd925552
[ "Apache-2.0" ]
null
null
null
Assignment-1-Copy1 (1).ipynb
Bhavani263/LetsUpgrade-Python
8322352a1ce6f5523825b4388b7afbf5bd925552
[ "Apache-2.0" ]
null
null
null
Assignment-1-Copy1 (1).ipynb
Bhavani263/LetsUpgrade-Python
8322352a1ce6f5523825b4388b7afbf5bd925552
[ "Apache-2.0" ]
null
null
null
17.593023
98
0.445362
[ [ [ "# List", "_____no_output_____" ] ], [ [ "pouch = [\"pen\",\"pencil\",\"eraser\"]#creating a list\npouch.append('scale') #To append a list item\nprint(pouch)", "['pen', 'pencil', 'eraser', 'scale']\n" ], [ "pouch.remove(\"pencil\") #To remove a list item\nprint(pouch)", "['pen', 'eraser', 'scale']\n" ], [ "pouch.reverse() #reverse the list items\nprint(pouch)", "['scale', 'eraser', 'pen']\n" ], [ "pouch.sort() #sort the list items\nprint(pouch)", "['eraser', 'pen', 'scale']\n" ], [ "x = pouch.index(\"pen\") #to find index of an item\nprint(x)", "1\n" ] ], [ [ "# Dictionaries", "_____no_output_____" ] ], [ [ "dict = {\"name\":\"sony\",\"age\":\"25\",\"phnno\":\"90989765\"} #creating a dictionaries\nx = dict.items() #to display items\nprint(x)", "dict_items([('name', 'sony'), ('age', '25'), ('phnno', '90989765')])\n" ], [ "x = dict.keys() #to find keys\nprint(x)", "dict_keys(['name', 'age', 'phnno'])\n" ], [ "x = dict.get(\"name\") #to get value of specified key\nprint(x)", "sony\n" ], [ "dict.update({\"location\":\"hyd\"}) #to insert an item to the dictionary\nprint(dict)", "{'name': 'sony', 'age': '25', 'phnno': '90989765', 'location': 'hyd'}\n" ], [ "dict.pop(\"age\")\nprint(dict)", "{'name': 'sony', 'phnno': '90989765', 'location': 'hyd'}\n" ] ], [ [ "# Tuple", "_____no_output_____" ] ], [ [ "pouch = (\"pen\",\"pencil\",\"scale\") #creating a tuple\nprint(pouch)", "('pen', 'pencil', 'scale')\n" ], [ "print(len(pouch)) #finding no of items in tuple", "3\n" ], [ "if \"pen\" in pouch: #check if item exists\n print(\"yes\")", "yes\n" ], [ "print(pouch[1]) #finding item using index", "pencil\n" ], [ "print(pouch[0:2]) #returning value within specific range", "('pen', 'pencil')\n" ] ], [ [ "# Sets", "_____no_output_____" ] ], [ [ "pouch = {\"pen\",\"pencil\",\"scale\",\"pen\"} #creating a set\nprint(pouch)", "{'pen', 'pencil', 'scale'}\n" ], [ "print(\"pencil\" in pouch) #check if item exists", "True\n" ], [ "pouch.add(\"eraser\") #to add an item\nprint(pouch)", "{'pen', 'eraser', 'pencil', 'scale'}\n" ], [ "pouch.update([\"sharpener\"]) #to update an item\nprint(pouch)", "{'eraser', 'pencil', 'sharpener', 'pen', 'scale'}\n" ], [ "print(len(pouch)) #count of items", "5\n" ] ], [ [ "# Strings", "_____no_output_____" ] ], [ [ "a = \"Hello, World! \"\nprint(a.lower()) #returns string in lowercase letters", "hello, world! \n" ], [ "a = \"Hello, World!\"\nprint(a.upper()) #returns the string in uppercase letters", "HELLO, WORLD!\n" ], [ "a = \"Hello, World!\"\nprint(a.split(\",\")) #split the string into substrings", "['Hello', ' World!']\n" ], [ "a = \"Hello, World!\"\nprint(a.replace(\"H\", \"J\")) #replace string with another string", "Jello, World!\n" ], [ "age = 36\ntxt = \"My name is John, and I am {}\"\nprint(txt.format(age)) #to insert numbers into strings", "My name is John, and I am 36\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
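The dictionary cells in the record above bind the name `dict`, which shadows Python's built-in `dict` type for the rest of that notebook. A small sketch of the same operations under a non-shadowing name; `contact` is an arbitrary choice:

```python
# Same data and operations as the record above, without shadowing dict.
contact = {"name": "sony", "age": "25", "phnno": "90989765"}
contact.update({"location": "hyd"})  # insert an item
contact.pop("age")                   # remove an item by key
print(contact.keys(), contact.get("name"))
```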
ecd66d12bf250098ea46d44cb05bdfbd69f862af
1,503
ipynb
Jupyter Notebook
notebooks/ea-data-explore.ipynb
evanaze/captcha
62d226742be7f4091e54a7ea960703812bd44fd5
[ "MIT" ]
null
null
null
notebooks/ea-data-explore.ipynb
evanaze/captcha
62d226742be7f4091e54a7ea960703812bd44fd5
[ "MIT" ]
3
2021-03-26T18:14:39.000Z
2021-09-20T14:09:05.000Z
notebooks/ea-data-explore.ipynb
evanaze/captcha
62d226742be7f4091e54a7ea960703812bd44fd5
[ "MIT" ]
null
null
null
18.329268
70
0.51497
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "train = pd.read_csv(\"../input/train.csv\")\ntest = pd.read_csv(\"../input/test.csv\")", "_____no_output_____" ], [ "train_files = set(train.filename)\ntest_files = set(test.filename)", "_____no_output_____" ], [ "train_files in test_files", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
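The last cell of the record above evaluates `train_files in test_files`, which asks whether the entire train set is a single element of the test set (and actually raises `TypeError`, because sets are unhashable) rather than whether the two splits share filenames. A sketch of an overlap check, assuming that was the intent; the CSV paths simply mirror the notebook's:

```python
import pandas as pd

train_files = set(pd.read_csv("../input/train.csv").filename)
test_files = set(pd.read_csv("../input/test.csv").filename)

shared = train_files & test_files  # filenames present in both splits
print(len(shared), train_files.isdisjoint(test_files))
```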
ecd680b9a5a74aa87c336f9d0cabb58a2c86aec6
18,126
ipynb
Jupyter Notebook
functions-conditionals/functions.ipynb
uob-ds/cfd2021
bbf8dc1a3938def910c8173cf353efd14dca66e5
[ "CC-BY-4.0" ]
1
2021-09-01T07:46:49.000Z
2021-09-01T07:46:49.000Z
functions-conditionals/functions.ipynb
uob-ds/cfd2021
bbf8dc1a3938def910c8173cf353efd14dca66e5
[ "CC-BY-4.0" ]
null
null
null
functions-conditionals/functions.ipynb
uob-ds/cfd2021
bbf8dc1a3938def910c8173cf353efd14dca66e5
[ "CC-BY-4.0" ]
1
2021-09-28T09:09:29.000Z
2021-09-28T09:09:29.000Z
26.269565
399
0.556659
[ [ [ "# Functions\n\nWe have already used functions.\n\nFor example, we have used the `round` function:", "_____no_output_____" ] ], [ [ "a = 3.1415", "_____no_output_____" ], [ "# Call the \"round\" function\nb = round(a, 2)", "_____no_output_____" ], [ "b", "_____no_output_____" ] ], [ [ "We often need to define our own functions. Before we do, we need to go into\nmore detail about what functions are for, and what they are.", "_____no_output_____" ], [ "## Functions are like named recipes\n\nA function is a named recipe. It is a name we give to a set of steps to\nfollow, a piece of code to run.\n\nThanks to [the Berkeley\nteam](https://www.inferentialthinking.com/chapters/08/Functions_and_Tables)\nfor this metaphor.\n\nA recipe is the *procedure* to go from *ingredients* to a *meal*.\n\nA function is the *procedure* to go from the *arguments* to the\n*return value*.\n\nFor example, I might have a recipe with the procedure to go from the\ningredients: two eggs; butter; and cheese \\- to the meal \\- a cheese\nomelette.\n\nThe function `round` has the procedure to go from the arguments - two\nnumbers, to the return value, which is the value of the first argument\nrounded to the number of digits specified in the second.\n\nI could call my recipe \"two egg cheese omelette\", or \"recipe number\n4\". Whatever I called it, it would be the same recipe. I might\nprefer a name that describes what the recipe makes, to help me\nremember.\n\nLikewise, the name `round` refers to a procedure above. I could give it\nanother name, like `my_function`, but `round` is a good name, because it helps\nme remember what the procedure does.\n\nI say `round` has a procedure, but we can't see what that procedure is, it's\nburied inside the internal workings of Python.\n\nNow we are going to write our own function, where we can see the\nprocedure.", "_____no_output_____" ], [ "## Revision on variables\n\nHere is an *assignment statement*:", "_____no_output_____" ] ], [ [ "a = 2", "_____no_output_____" ] ], [ [ "As we know, we can read this as \"The variable 'a' gets the value 2\".\n\nWe also know that we have, on the left, a variable name, 'a', and on\nthe right, an expression, that gives a value.\n\nIn this case, the expression on the right is `2`. Python evaluates\nthis expression, to make its own internal computer representation of the\ninteger 2. Call this: Computer Representation (CR) of int 2.\n\nAfter Python executes this statement, the name \"a\" points to the CR of int 2.\n\nTo continue the revision:", "_____no_output_____" ] ], [ [ "b = a * 4", "_____no_output_____" ] ], [ [ "The right side `a * 4` is an expression. Python evaluates the\nexpression. First it gets the value of `a`. This is the CR of int 2.\nNext it gets the value of `4`. This is the CR of int 4. Then it\nmultiplies these results to get an CR of int 8.\n\n\"b\" now points to the CR of int 8.\n\nFinally:", "_____no_output_____" ] ], [ [ "a = 3", "_____no_output_____" ] ], [ [ "\"a\" no longer points the CR of int 2, it points to the CR of int 3.\n\nWhat value does \"b\" have now?\n\nThe same value as it had before. It pointed to the CR of int\n8 before. Changing `a` has no effect on `b`.\n\n## Defining a function\n\nWe define our function called `double`. It accepts one argument\n(ingredient), call that `x`. It's procedure is to multiply the\nargument by 2. 
The return value is the argument multiplied by 2.\n\nHere it is:", "_____no_output_____" ] ], [ [ "def double(x):\n d = x * 2\n return d", "_____no_output_____" ] ], [ [ "Let's look at the first line:\n\n```\ndef double(x):\n```\n\nThe first word `def` tells Python we are defining a function.\n\nThe next word `double` is the name we will give to our function.\n\nBetween the parentheses, we have the function signature. This specifies how many arguments the function has. In our case, there is only one argument, named `x`.\n\nFinally there is a colon `:` signifying the end of the signature.\n\nAs in [for loops](../iteration/iteration), the colon signifies that the next\nbit of code must be [indented](../iteration/indentation).\n\nHere is the indented part:\n\n```\n d = x * 2\n return d\n```\n\nThis is the *body* of the function. It gives the function procedure; it defines what the function will do to its arguments, and what result it should return.\n\nFor example, here we *call* the function we just created:", "_____no_output_____" ] ], [ [ "double(4)", "_____no_output_____" ] ], [ [ "Notice that `double(4)` is a [call expression](../code-basics/Calls).\n\nSo, what just happened?\n\n1. Python finds what `double` points to. It points to internal\n representation of our function (procedure).\n1. Next it sees the parenthesis `(` and sees that we want to *call*\n our function.\n1. Now Python knows we want to call the function, it knows that\n there are one or more *expressions* inside the parentheses. In\n our case there is one, `4`. As usual, it evaluates this\n expression to the CR of int 4.\n1. Now Python does the *call*. To do this it:\n\n 1. Puts itself into *function world* (more on this later).\n 2. Sets the new variable `x` to have the value CR of int 4, from\n above.\n 3. Executes the code in the function body (procedure).\n 1. The first line `d = x * 2` is an assignment statement. `x`\n evaluates to CR of int 4, 2 evaluates to CR of int 2, so `d`\n has the value CR of int 8. This is how the statement would\n work anywhere in Python, function body or not.\n 1. The next line starts with `return`. This is a *return\n statement*. When Python sees a `return` statement, it evaluates\n the expression to the right, to get the *return value*, then\n 5. Pulls itself out of *function world*.\n 6. Gives the *return value* as the final result of the call\n expression. This is CR of int 8.\n\nWe can run the function with any values for the argument.", "_____no_output_____" ] ], [ [ "double(2)", "_____no_output_____" ] ], [ [ "This time round, everything happened in the same way as before, except\nPython found the *argument* inside the parentheses evaluated to CR of\nint 2. Thus, in *function world*, `x` gets the value CR of 2, and the *return value* becomes CR of int 4.\n\n## Function world\n\nI cryptically used the term *function world* for the state that Python goes into when it calls a function.\n\nThis state has two important features.\n\n### Variables defined in functions have local scope\n\nThe first feature of function world is that all variables defined\ninside function world, get thrown away when we leave function world.\n\nWe can see this if we run the following code in a notebook cell. This\ncode runs in our usual top-level world, and so, not inside a function.", "_____no_output_____" ] ], [ [ "d", "_____no_output_____" ] ], [ [ "Notice that, in the function, we set `d` to point to the result of `x * 2`. 
We called the function a couple of times, so we executed this statement a couple of times. But the `d` in the function, gets thrown away, when we come back from function world.\n\nIn technical terms, this is called *scope*. The *scope* of\na variable, is the pieces of code in which the variable is visible.\n`d` can only be seen inside the function. Its scope is the function. We can also say that its scope is the piece of code where it is defined, that is, it has *local* scope.\n\nThe same is true for `x`, the argument variable:", "_____no_output_____" ] ], [ [ "x", "_____no_output_____" ] ], [ [ "### The function has limited access to variables outside the function\n\nWe have not seen this yet, but function world has limited access to variables defined at the top level.\n\nWe won't go into much detail here, but the summary is that functions can see the values of variables defined at the top level, but they can't change what top level variables point to. For example, say you have a variable `a` at the top level. A function can see and use the value of `a`, but it cannot change top-level `a` to point to a different value. We will come back to this later.\n\n## Python checks the function signature\n\nThe signature for `double` is `(x)`. That tells Python to expect one\nand only one argument. If we try to call it with no arguments\n(nothing inside the parentheses), we get an error:", "_____no_output_____" ] ], [ [ "double()", "_____no_output_____" ] ], [ [ "If we try and call it with more than one argument, we get an error. We separate arguments with commas.", "_____no_output_____" ] ], [ [ "double(2, 3)", "_____no_output_____" ], [ "double(2, 3, 4)", "_____no_output_____" ] ], [ [ "## Function arguments are expressions\n\nRemember that Python knows that the arguments to a function are\nexpressions, and evaluates them, before running the function.\n\nFor example:", "_____no_output_____" ] ], [ [ "double(2 + 3)", "_____no_output_____" ] ], [ [ "All the procedure is the same as above. Python evaluates the\nexpression `2 + 3`, to get CR of int 5, then goes into function world,\nsets `x` to have the value CR of int 5, and continues from there.\n\n## Functions can have many arguments\n\nNow we define a new function:\n", "_____no_output_____" ] ], [ [ "def multiply(a, b):\n    return a * b", "_____no_output_____" ] ], [ [ "The new thing here is that the function signature `(a, b)` has two arguments, separated by commas. We need to give the function two values, when we call it:", "_____no_output_____" ] ], [ [ "multiply(2, 3)", "_____no_output_____" ] ], [ [ "If we do not give it exactly two arguments, we get an error.", "_____no_output_____" ] ], [ [ "multiply(2)", "_____no_output_____" ], [ "multiply(2, 3, 4)", "_____no_output_____" ] ], [ [ "## Functions can have no arguments\n\nPerhaps the recipe analogy breaks down here, but sometimes functions take no arguments. 
For example:", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ "# Notice - nothing between the parentheses\n\ndef biased_coin():\n # A single random number\n r = np.random.uniform()\n # A biased coin\n result = r < 0.45\n return result", "_____no_output_____" ] ], [ [ "When we call the function, we have no arguments, so no expressions between the parentheses.", "_____no_output_____" ] ], [ [ "biased_coin()", "_____no_output_____" ] ], [ [ "As you would expect by now, if we try and send an argument, Python will complain:", "_____no_output_____" ] ], [ [ "biased_coin(0.45)", "_____no_output_____" ] ], [ [ "## Without a return statement, functions return None\n\nOur functions so far all have a `return` statement. This is not true of every function.\n\nIf your function does not have a return statement, the function returns the value [None](none).", "_____no_output_____" ] ], [ [ "def silent_addition(first, second):\n result = first + second", "_____no_output_____" ] ], [ [ "Notice that the body of this function has no `return` statement. When we call it, it returns `None`:", "_____no_output_____" ] ], [ [ "result = silent_addition(10, 12)\nresult", "_____no_output_____" ], [ "result is None", "_____no_output_____" ] ], [ [ "## End of the introduction\n\nThat's it for the introduction. For a less basic description, have\na look at the [Berkeley introduction to\nfunctions](../extra/data8_functions).\n\nNow try the [exercises](../exercises/function_exercises).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecd6821a1ef8c5c3d409dc48709d459cc28d561c
302,893
ipynb
Jupyter Notebook
day05/IN CLASS - Full - Preprocessing and visualizing tweets.ipynb
flight505/Applied_AI_IT_Uni
b1d766eccdd964d5f7d9315a215ba810930ba003
[ "MIT" ]
null
null
null
day05/IN CLASS - Full - Preprocessing and visualizing tweets.ipynb
flight505/Applied_AI_IT_Uni
b1d766eccdd964d5f7d9315a215ba810930ba003
[ "MIT" ]
null
null
null
day05/IN CLASS - Full - Preprocessing and visualizing tweets.ipynb
flight505/Applied_AI_IT_Uni
b1d766eccdd964d5f7d9315a215ba810930ba003
[ "MIT" ]
null
null
null
302,893
302,893
0.950098
[ [ [ "# Text normalization using Gensim", "_____no_output_____" ], [ "In this notebook we will load and preprocess the data from the `six_thousand_tweets.csv` file using the normalization techniques we talked about in the lecture.", "_____no_output_____" ] ], [ [ "# Importing the libraries\nimport pandas as pd\nfrom gensim.utils import simple_preprocess\nimport nltk\nnltk.download(\"wordnet\")\nnltk.download(\"stopwords\")\nfrom nltk import WordNetLemmatizer\nfrom wordcloud import WordCloud", "_____no_output_____" ] ], [ [ "## Loading up the data", "_____no_output_____" ], [ "Upload the `six_thousand_tweets.csv` file to this environment, and load it up using Pandas.", "_____no_output_____" ] ], [ [ "tweets = pd.read_csv(\"six_thousand_tweets.csv\")", "_____no_output_____" ], [ "tweets.head()", "_____no_output_____" ] ], [ [ "The column `target` represents whether the tweet is saying something \"positive\" or something \"negative\".", "_____no_output_____" ], [ "## Removing usernames and hashtags", "_____no_output_____" ], [ "For the purposes of the analysis we plan to do, let's remove all usernames (i.e. all strings that contain an `@`)", "_____no_output_____" ] ], [ [ "def clean_usernames(tweet_text):\n words = tweet_text.split()\n clean_words = [w for w in words if not (\"@\" in w or \"#\" in w)]\n return \" \".join(clean_words)", "_____no_output_____" ], [ "tweets[\"clean_text\"] = tweets[\"text\"].map(clean_usernames)", "_____no_output_____" ], [ "# Sanity check.\nfor t in tweets[\"clean_text\"]:\n if \"@\" in t:\n print(t)\n \n if \"#\" in t:\n print(t)", "_____no_output_____" ] ], [ [ "## Tokenizing", "_____no_output_____" ], [ "We will use a library called `gensim`. `gensim` comes with a series of utilities, and among them is a simple tokenizer:", "_____no_output_____" ] ], [ [ "preprocessed_texts = [\n simple_preprocess(t) for t in tweets[\"clean_text\"]\n]", "_____no_output_____" ], [ "for text, clean_text in zip(tweets[\"text\"].iloc[:5], preprocessed_texts[:5]):\n print(text, clean_text)", "At home and watching Ellen - after a quite long time! ['at', 'home', 'and', 'watching', 'ellen', 'after', 'quite', 'long', 'time']\n@thehulkster thanx ['thanx']\n@lrobertsglobal I voted many times for BTO TCOB o would have gone to nuit blanche for TCOB. Oh well ['voted', 'many', 'times', 'for', 'bto', 'tcob', 'would', 'have', 'gone', 'to', 'nuit', 'blanche', 'for', 'tcob', 'oh', 'well']\nFYI - Wonderland's water WILL infect your piercings. ['fyi', 'wonderland', 'water', 'will', 'infect', 'your', 'piercings']\n@shineonmedia There's something wrong with your site. It's not letting me go on it. 
It said its temporarily unavailable ['there', 'something', 'wrong', 'with', 'your', 'site', 'it', 'not', 'letting', 'me', 'go', 'on', 'it', 'it', 'said', 'its', 'temporarily', 'unavailable']\n" ] ], [ [ "## Lemmatizing", "_____no_output_____" ], [ "For lemmatizing, we can use several different tools:\n- The neural networks inside SpaCy...\n- NLTK and its lemmatizing utilities...\n\nFor now, we will use `NLTK`'s WordNet lemmatizer", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download(\"wordnet\")\nnltk.download(\"stopwords\")\n\nfrom nltk import WordNetLemmatizer", "[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n" ], [ "lemmatizer = WordNetLemmatizer()\ncorpus = [\n [lemmatizer.lemmatize(token) for token in doc] for doc in preprocessed_texts\n]", "_____no_output_____" ], [ "for text, doc in zip(tweets[\"text\"].iloc[:5], corpus[:5]):\n print(text, doc)", "At home and watching Ellen - after a quite long time! ['at', 'home', 'and', 'watching', 'ellen', 'after', 'quite', 'long', 'time']\n@thehulkster thanx ['thanx']\n@lrobertsglobal I voted many times for BTO TCOB o would have gone to nuit blanche for TCOB. Oh well ['voted', 'many', 'time', 'for', 'bto', 'tcob', 'would', 'have', 'gone', 'to', 'nuit', 'blanche', 'for', 'tcob', 'oh', 'well']\nFYI - Wonderland's water WILL infect your piercings. ['fyi', 'wonderland', 'water', 'will', 'infect', 'your', 'piercings']\n@shineonmedia There's something wrong with your site. It's not letting me go on it. It said its temporarily unavailable ['there', 'something', 'wrong', 'with', 'your', 'site', 'it', 'not', 'letting', 'me', 'go', 'on', 'it', 'it', 'said', 'it', 'temporarily', 'unavailable']\n" ] ], [ [ "## Removing stopwords", "_____no_output_____" ], [ "For this particular application (which is just a visualization of positive and negative tweets), let's remove the stopwords.\n\n**Each library has its own family of stopwords.** In this example we will use the list inside NLTK.", "_____no_output_____" ] ], [ [ "english_stopwords = set(nltk.corpus.stopwords.words(\"english\"))", "_____no_output_____" ], [ "corpus = [\n [word for word in doc if word not in english_stopwords] for doc in corpus\n]", "_____no_output_____" ], [ "corpus_positive = [\n doc for i, doc in enumerate(corpus) if tweets.loc[i, \"target\"] == 1\n]\n\ncorpus_negative = [\n doc for i, doc in enumerate(corpus) if tweets.loc[i, \"target\"] == 0\n]", "_____no_output_____" ] ], [ [ "## Visualizing the tweets using wordclouds", "_____no_output_____" ] ], [ [ "def count_words(corpus):\n \"\"\"\n Returns {word: frequency} for all words in\n all documents of the corpus.\n \"\"\"\n counts = {}\n for doc in corpus:\n for word in doc:\n if word not in counts:\n counts[word] = 1\n else:\n counts[word] += 1\n \n return counts", "_____no_output_____" ] ], [ [ "A wordcloud is a great tool for visualizing words in terms of their frequency.", "_____no_output_____" ] ], [ [ "from wordcloud import WordCloud\npositive_counts = count_words(corpus_positive)\nnegative_counts = count_words(corpus_negative)\n\nwordcount = WordCloud(\n background_color=\"white\",\n max_words=100,\n width=500,\n height=400).generate_from_frequencies(positive_counts)\nwordcount.to_image()", "_____no_output_____" ], [ "wordcount = WordCloud(\n background_color=\"white\",\n max_words=100,\n width=500,\n 
height=400).generate_from_frequencies(negative_counts)\nwordcount.to_image()", "_____no_output_____" ] ], [ [ "Notice how some of the words there (like `wa`, `http`, `com`...) **shouldn't** belong there. We can remove them by re-running the whole process after **adding** them to the stopwords! ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
ecd69e73f67dde0b8c7ed24bf050dfbb80589176
163,133
ipynb
Jupyter Notebook
Untitled.ipynb
misocorea/smile1ms
15b89631e5e1c379d4ad8e31f76a1462caa284b1
[ "MIT" ]
4
2019-01-15T02:41:40.000Z
2019-12-25T12:46:49.000Z
Untitled.ipynb
misocorea/smile1ms
15b89631e5e1c379d4ad8e31f76a1462caa284b1
[ "MIT" ]
null
null
null
Untitled.ipynb
misocorea/smile1ms
15b89631e5e1c379d4ad8e31f76a1462caa284b1
[ "MIT" ]
null
null
null
428.170604
46,388
0.943292
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom pandas import DataFrame,merge\nstyle.use(\"ggplot\")\n%matplotlib inline", "_____no_output_____" ], [ "animals = pd.read_csv(\"../Downloads/train.csv\")", "_____no_output_____" ], [ "AnimalType = animals['AnimalType'].value_counts() \nAnimalType.plot(kind='bar',color='#34ABD8',rot=0)", "_____no_output_____" ], [ "AnimalType = animals.OutcomeType.value_counts().sort_values() \nAnimalType.plot(kind='barh',color='#34ABD8',rot=0)", "_____no_output_____" ], [ "AnimalType = animals[['AnimalType','OutcomeType']].groupby(['OutcomeType','AnimalType']).size().unstack()\nAnimalType.plot(kind='bar',color=['#34ABD8','#E98F85'],rot=-30)", "_____no_output_____" ], [ "SexuponOutcome = animals['SexuponOutcome'].value_counts()\nSexuponOutcome.plot(kind='bar',color=['#34ABD8'],rot=-30)", "_____no_output_____" ], [ "sexType = animals['SexuponOutcome'].unique()\nprint(sexType)", "['Neutered Male' 'Spayed Female' 'Intact Male' 'Intact Female' 'Unknown'\n nan]\n" ], [ "M_F = {'Neutered Male':'Male','Spayed Female':'Female','Intact Male':'Male','Intact Female':'Female','Unknown':'Unknown'}\nN_T = {'Neutered Male':'Neutered','Spayed Female':'Neutered','Intact Male':'Intact','Intact Female':'Intact','Unknown':'Unknown'}\n\nanimals['Sex'] = animals.SexuponOutcome.map(M_F)\nanimals['Neutered'] = animals.SexuponOutcome.map(N_T)", "_____no_output_____" ], [ "Sex = DataFrame(animals.Sex.value_counts())\nNeutered = DataFrame(animals.Neutered.value_counts())\nfig = plt.figure()\nax1 = fig.add_subplot(1,2,1)\nax2 = fig.add_subplot(1,2,2)\n\nax1.bar([1,2,3],Sex['Sex'],align='center')\nax1.set_xticks([1,2,3])\nax1.set_xticklabels(Sex.index)\nax2.bar([1,2,3],Neutered['Neutered'],align='center')\nax2.set_xticks([1,2,3])\nax2.set_xticklabels(Neutered.index)", "_____no_output_____" ], [ "df = DataFrame(animals[['Sex','OutcomeType']])\n#df.plot(kind='bar')\nOutcomeSex = df.groupby(['Sex','OutcomeType']).size().unstack()\nOutcomeSex.plot(kind='bar',color=['#34ABD8','#E98F85','r'],rot=-30)", "_____no_output_____" ], [ "df = DataFrame(animals[['Sex','OutcomeType']])\nSexOutcome = df.groupby(['OutcomeType','Sex']).size().unstack()\nSexOutcome.plot(kind='bar',rot=-30)", "_____no_output_____" ], [ "OT_N = animals[['OutcomeType','Neutered']].groupby(['Neutered','OutcomeType']).size().unstack()\nOT_N.plot(kind='bar',rot=-30)", "_____no_output_____" ], [ "DC = animals[['OutcomeType','Neutered','AnimalType']].groupby(['AnimalType','OutcomeType','Neutered']).size().unstack().unstack()\nDC.plot(kind='bar',stacked=False,figsize=(10,8),rot=-30)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]