hexsha
stringlengths
40
40
size
int64
6
14.9M
ext
stringclasses
1 value
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
6
260
max_stars_repo_name
stringlengths
6
119
max_stars_repo_head_hexsha
stringlengths
40
41
max_stars_repo_licenses
list
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
6
260
max_issues_repo_name
stringlengths
6
119
max_issues_repo_head_hexsha
stringlengths
40
41
max_issues_repo_licenses
list
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
6
260
max_forks_repo_name
stringlengths
6
119
max_forks_repo_head_hexsha
stringlengths
40
41
max_forks_repo_licenses
list
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
avg_line_length
float64
2
1.04M
max_line_length
int64
2
11.2M
alphanum_fraction
float64
0
1
cells
list
cell_types
list
cell_type_groups
list
cb20bd01b9cee004d7a292e018a58a16390faaea
39,054
ipynb
Jupyter Notebook
analysis/01__mpra/00__preprocess_counts/00__make_GEO_files.ipynb
Mele-Lab/2020_GenomeBiology_CisTransMPRA
55da814dee39f232b746deb6c8110cd15d77c3dd
[ "MIT" ]
2
2020-08-03T15:54:42.000Z
2020-12-22T09:37:46.000Z
analysis/01__mpra/00__preprocess_counts/00__make_GEO_files.ipynb
kmattioli/2019__cis_trans_MPRA
55da814dee39f232b746deb6c8110cd15d77c3dd
[ "MIT" ]
null
null
null
analysis/01__mpra/00__preprocess_counts/00__make_GEO_files.ipynb
kmattioli/2019__cis_trans_MPRA
55da814dee39f232b746deb6c8110cd15d77c3dd
[ "MIT" ]
null
null
null
28.157174
127
0.410432
[ [ [ "import pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "HUES64_rep1_tfxn1_fs = [\"../../../data/02__mpra/01__counts/07__HUES64_rep6_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/07__HUES64_rep6_lib2_BARCODES.txt\"]\nHUES64_rep1_tfxn2_fs = [\"../../../data/02__mpra/01__counts/08__HUES64_rep7_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/08__HUES64_rep7_lib2_BARCODES.txt\"]\nHUES64_rep1_tfxn3_fs = [\"../../../data/02__mpra/01__counts/09__HUES64_rep8_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/09__HUES64_rep8_lib2_BARCODES.txt\"]", "_____no_output_____" ], [ "HUES64_rep2_tfxn1_fs = [\"../../../data/02__mpra/01__counts/10__HUES64_rep9_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/10__HUES64_rep9_lib2_BARCODES.txt\"]\nHUES64_rep2_tfxn2_fs = [\"../../../data/02__mpra/01__counts/11__HUES64_rep10_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/11__HUES64_rep10_lib2_BARCODES.txt\"]\nHUES64_rep2_tfxn3_fs = [\"../../../data/02__mpra/01__counts/12__HUES64_rep11_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/12__HUES64_rep11_lib2_BARCODES.txt\"]", "_____no_output_____" ], [ "HUES64_rep3_tfxn1_fs = [\"../../../data/02__mpra/01__counts/16__HUES64_rep12_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/16__HUES64_rep12_lib2_BARCODES.txt\"]\nHUES64_rep3_tfxn2_fs = [\"../../../data/02__mpra/01__counts/17__HUES64_rep13_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/17__HUES64_rep13_lib2_BARCODES.txt\"]\nHUES64_rep3_tfxn3_fs = [\"../../../data/02__mpra/01__counts/18__HUES64_rep14_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/18__HUES64_rep14_lib2_BARCODES.txt\"]", "_____no_output_____" ], [ "mESC_rep1_tfxn1_fs = [\"../../../data/02__mpra/01__counts/15__mESC_rep3_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/15__mESC_rep3_lib2_BARCODES.txt\"]", "_____no_output_____" ], [ "mESC_rep2_tfxn1_fs = 
[\"../../../data/02__mpra/01__counts/19__mESC_rep4_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/19__mESC_rep4_lib2_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/19__mESC_rep4_lib3_BARCODES.txt\"]", "_____no_output_____" ], [ "mESC_rep3_tfxn1_fs = [\"../../../data/02__mpra/01__counts/20__mESC_rep5_lib1_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/20__mESC_rep5_lib2_BARCODES.txt\",\n \"../../../data/02__mpra/01__counts/20__mESC_rep5_lib3_BARCODES.txt\"]", "_____no_output_____" ] ], [ [ "## 1. import, merge, sum", "_____no_output_____" ], [ "### HUES64 rep 1", "_____no_output_____" ] ], [ [ "for i, f in enumerate(HUES64_rep1_tfxn1_fs):\n if i == 0:\n HUES64_rep1_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep1_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep1_tfxn1 = HUES64_rep1_tfxn1.merge(tmp, on=\"barcode\")\nHUES64_rep1_tfxn1[\"count\"] = HUES64_rep1_tfxn1[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep1_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep1_tfxn1.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep1_tfxn2_fs):\n if i == 0:\n HUES64_rep1_tfxn2 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep1_tfxn2))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep1_tfxn2 = HUES64_rep1_tfxn2.merge(tmp, on=\"barcode\")\nHUES64_rep1_tfxn2[\"count\"] = HUES64_rep1_tfxn2[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep1_tfxn2.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep1_tfxn2.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep1_tfxn3_fs):\n if i == 0:\n HUES64_rep1_tfxn3 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep1_tfxn3))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep1_tfxn3 = HUES64_rep1_tfxn3.merge(tmp, on=\"barcode\")\nHUES64_rep1_tfxn3[\"count\"] = HUES64_rep1_tfxn3[[\"count_x\", 
\"count_y\"]].sum(axis=1)\nHUES64_rep1_tfxn3.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep1_tfxn3.head()", "239653\n239653\n" ] ], [ [ "### HUES64 rep 2", "_____no_output_____" ] ], [ [ "for i, f in enumerate(HUES64_rep2_tfxn1_fs):\n if i == 0:\n HUES64_rep2_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep2_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep2_tfxn1 = HUES64_rep2_tfxn1.merge(tmp, on=\"barcode\")\nHUES64_rep2_tfxn1[\"count\"] = HUES64_rep2_tfxn1[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep2_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep2_tfxn1.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep2_tfxn2_fs):\n if i == 0:\n HUES64_rep2_tfxn2 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep2_tfxn2))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep2_tfxn2 = HUES64_rep2_tfxn2.merge(tmp, on=\"barcode\")\nHUES64_rep2_tfxn2[\"count\"] = HUES64_rep2_tfxn2[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep2_tfxn2.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep2_tfxn2.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep2_tfxn3_fs):\n if i == 0:\n HUES64_rep2_tfxn3 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep2_tfxn3))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep2_tfxn3 = HUES64_rep2_tfxn3.merge(tmp, on=\"barcode\")\nHUES64_rep2_tfxn3[\"count\"] = HUES64_rep2_tfxn3[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep2_tfxn3.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep2_tfxn3.head()", "239653\n239653\n" ] ], [ [ "### HUES64 rep 3", "_____no_output_____" ] ], [ [ "for i, f in enumerate(HUES64_rep3_tfxn1_fs):\n if i == 0:\n HUES64_rep3_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep3_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep3_tfxn1 = HUES64_rep3_tfxn1.merge(tmp, 
on=\"barcode\")\nHUES64_rep3_tfxn1[\"count\"] = HUES64_rep3_tfxn1[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep3_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep3_tfxn1.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep3_tfxn2_fs):\n if i == 0:\n HUES64_rep3_tfxn2 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep3_tfxn2))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep3_tfxn2 = HUES64_rep3_tfxn2.merge(tmp, on=\"barcode\")\nHUES64_rep3_tfxn2[\"count\"] = HUES64_rep3_tfxn2[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep3_tfxn2.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep3_tfxn2.head()", "239653\n239653\n" ], [ "for i, f in enumerate(HUES64_rep3_tfxn3_fs):\n if i == 0:\n HUES64_rep3_tfxn3 = pd.read_table(f, sep=\"\\t\")\n print(len(HUES64_rep3_tfxn3))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n HUES64_rep3_tfxn3 = HUES64_rep3_tfxn3.merge(tmp, on=\"barcode\")\nHUES64_rep3_tfxn3[\"count\"] = HUES64_rep3_tfxn3[[\"count_x\", \"count_y\"]].sum(axis=1)\nHUES64_rep3_tfxn3.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nHUES64_rep3_tfxn3.head()", "239653\n239653\n" ] ], [ [ "## mESC rep 1", "_____no_output_____" ] ], [ [ "for i, f in enumerate(mESC_rep1_tfxn1_fs):\n if i == 0:\n mESC_rep1_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(mESC_rep1_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n mESC_rep1_tfxn1 = mESC_rep1_tfxn1.merge(tmp, on=\"barcode\")\nmESC_rep1_tfxn1[\"count\"] = mESC_rep1_tfxn1[[\"count_x\", \"count_y\"]].sum(axis=1)\nmESC_rep1_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nmESC_rep1_tfxn1.head()", "239653\n239653\n" ] ], [ [ "## mESC rep 2", "_____no_output_____" ] ], [ [ "for i, f in enumerate(mESC_rep2_tfxn1_fs):\n if i == 0:\n mESC_rep2_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(mESC_rep2_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n 
mESC_rep2_tfxn1 = mESC_rep2_tfxn1.merge(tmp, on=\"barcode\")\nmESC_rep2_tfxn1[\"count\"] = mESC_rep2_tfxn1[[\"count_x\", \"count_y\", \"count\"]].sum(axis=1)\nmESC_rep2_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nmESC_rep2_tfxn1.head()", "239653\n239653\n239653\n" ] ], [ [ "## mESC rep 3", "_____no_output_____" ] ], [ [ "for i, f in enumerate(mESC_rep3_tfxn1_fs):\n if i == 0:\n mESC_rep3_tfxn1 = pd.read_table(f, sep=\"\\t\")\n print(len(mESC_rep3_tfxn1))\n else:\n tmp = pd.read_table(f, sep=\"\\t\")\n print(len(tmp))\n mESC_rep3_tfxn1 = mESC_rep3_tfxn1.merge(tmp, on=\"barcode\")\nmESC_rep3_tfxn1[\"count\"] = mESC_rep3_tfxn1[[\"count_x\", \"count_y\", \"count\"]].sum(axis=1)\nmESC_rep3_tfxn1.drop([\"count_x\", \"count_y\"], axis=1, inplace=True)\nmESC_rep3_tfxn1.head()", "239653\n239653\n239653\n" ] ], [ [ "## 2. write files", "_____no_output_____" ] ], [ [ "HUES64_rep1_tfxn1.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep1__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep1_tfxn2.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep1__tfxn2.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep1_tfxn3.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep1__tfxn3.BARCODES.txt\", sep=\"\\t\", index=False)", "_____no_output_____" ], [ "HUES64_rep2_tfxn1.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep2__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep2_tfxn2.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep2__tfxn2.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep2_tfxn3.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep2__tfxn3.BARCODES.txt\", sep=\"\\t\", index=False)", "_____no_output_____" ], [ "HUES64_rep3_tfxn1.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep3__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep3_tfxn2.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep3__tfxn2.BARCODES.txt\", sep=\"\\t\", index=False)\nHUES64_rep3_tfxn3.to_csv(\"../../../GEO_submission/MPRA__HUES64__rep3__tfxn3.BARCODES.txt\", 
sep=\"\\t\", index=False)", "_____no_output_____" ], [ "mESC_rep1_tfxn1.to_csv(\"../../../GEO_submission/MPRA__mESC__rep1__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)", "_____no_output_____" ], [ "mESC_rep2_tfxn1.to_csv(\"../../../GEO_submission/MPRA__mESC__rep2__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)", "_____no_output_____" ], [ "mESC_rep3_tfxn1.to_csv(\"../../../GEO_submission/MPRA__mESC__rep3__tfxn1.BARCODES.txt\", sep=\"\\t\", index=False)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb20c06cc1c4a9831c06c53bef6a52ddf434776d
12,763
ipynb
Jupyter Notebook
docs/tutorials/tutorial_01_preparing_data.ipynb
BioFreak95/schnetpack
da17f05ddbaa79321316502136dad6cf0ea1132e
[ "MIT" ]
null
null
null
docs/tutorials/tutorial_01_preparing_data.ipynb
BioFreak95/schnetpack
da17f05ddbaa79321316502136dad6cf0ea1132e
[ "MIT" ]
null
null
null
docs/tutorials/tutorial_01_preparing_data.ipynb
BioFreak95/schnetpack
da17f05ddbaa79321316502136dad6cf0ea1132e
[ "MIT" ]
1
2020-02-27T00:49:50.000Z
2020-02-27T00:49:50.000Z
31.9075
493
0.5827
[ [ [ "# Preparing and loading your data\nThis tutorial introduces how SchNetPack stores and loads data.\nBefore we can start training neural networks with SchNetPack, we need to prepare our data.\nThis is because SchNetPack has to stream the reference data from disk during training in order to be able to handle large datasets.\nTherefore, it is crucial to use data format that allows for fast random read access.\nWe found that the [ASE database format](https://wiki.fysik.dtu.dk/ase/ase/db/db.html) fulfills perfectly.\nTo further improve the performance, we internally encode properties in binary.\nHowever, as long as you only access the ASE database via the provided SchNetPack `AtomsData` class, you don't have to worry about that.", "_____no_output_____" ] ], [ [ "from schnetpack import AtomsData", "_____no_output_____" ] ], [ [ "## Predefined datasets\nSchNetPack supports several benchmark datasets that can be used without preparation.\nEach one can be accessed using a corresponding class that inherits from `DownloadableAtomsData`, which supports automatic download and conversion. Here, we show how to use these data sets at the example of the QM9 benchmark.\n\nFirst, we have to import the dataset class and instantiate it. 
This will automatically download the data to the specified location.", "_____no_output_____" ] ], [ [ "from schnetpack.datasets import QM9\n\nqm9data = QM9('./qm9.db', download=True)", "_____no_output_____" ] ], [ [ "Let's have a closer look at this dataset.\nWe can find out how large it is and which properties it supports:", "_____no_output_____" ] ], [ [ "print('Number of reference calculations:', len(qm9data))\nprint('Available properties:')\n\nfor p in qm9data.available_properties:\n print('-', p)", "Number of reference calculations: 133885\nAvailable properties:\n- rotational_constant_A\n- rotational_constant_B\n- rotational_constant_C\n- dipole_moment\n- isotropic_polarizability\n- homo\n- lumo\n- gap\n- electronic_spatial_extent\n- zpve\n- energy_U0\n- energy_U\n- enthalpy_H\n- free_energy\n- heat_capacity\n" ] ], [ [ "We can load data points using zero-base indexing. The result is a dictionary containing the geometry and properties:", "_____no_output_____" ] ], [ [ "example = qm9data[0]\nprint('Properties:')\n\nfor k, v in example.items():\n print('-', k, ':', v.shape)", "Properties:\n- rotational_constant_A : torch.Size([1])\n- rotational_constant_B : torch.Size([1])\n- rotational_constant_C : torch.Size([1])\n- dipole_moment : torch.Size([1])\n- isotropic_polarizability : torch.Size([1])\n- homo : torch.Size([1])\n- lumo : torch.Size([1])\n- gap : torch.Size([1])\n- electronic_spatial_extent : torch.Size([1])\n- zpve : torch.Size([1])\n- energy_U0 : torch.Size([1])\n- energy_U : torch.Size([1])\n- enthalpy_H : torch.Size([1])\n- free_energy : torch.Size([1])\n- heat_capacity : torch.Size([1])\n- _atomic_numbers : torch.Size([5])\n- _positions : torch.Size([5, 3])\n- _cell : torch.Size([3, 3])\n- _neighbors : torch.Size([5, 4])\n- _cell_offset : torch.Size([5, 4, 3])\n- _idx : torch.Size([1])\n" ] ], [ [ "We see that all available properties have been loaded as torch tensors with the given shapes. 
Keys with an underscore indicate that these names are reserved for internal use. This includes the geometry (`_atomic_numbers`, `_positions`, `_cell`), the index within the dataset (`_idx`) as well as information about neighboring atoms and periodic boundary conditions (`_neighbors`, `_cell_offset`). \n\n<div class=\"alert alert-info\">\n**Note:** Neighbors are collected using an `EnvironmentProvider`, that can be passed to the `AtomsData` constructor. The default is the `SimpleEnvironmentProvider`, which constructs the neighbor list using a full distance matrix. This is suitable for small molecules. We supply environment providers using a cutoff (`AseEnvironmentProvider`, `TorchEnvironmentProvider`) that are able to handle larger molecules and periodic boundary conditions.\n</div>\n\nWe can directly obtain an ASE atoms object as follows:", "_____no_output_____" ] ], [ [ "at = qm9data.get_atoms(idx=0)\nprint('Atoms object:', at)\n\nat2, props = qm9data.get_properties(idx=0)\nprint('Atoms object (not the same):', at2)\nprint('Equivalent:', at2 == at, '; not the same object:', at2 is at)", "Atoms object: Atoms(symbols='CH4', pbc=False)\nAtoms object (not the same): Atoms(symbols='CH4', pbc=False)\nEquivalent: True ; not the same object: False\n" ] ], [ [ "Alternatively, all property names are pre-defined as class-variable for convenient access:", "_____no_output_____" ] ], [ [ "print('Total energy at 0K:', props[QM9.U0])\nprint('HOMO:', props[QM9.homo])", "Total energy at 0K: tensor([-1101.4878])\nHOMO: tensor([-10.5499])\n" ] ], [ [ "## Preparing your own data\nIn the following we will create an ASE database from our own data.\nFor this tutorial, we will use a dataset containing a molecular dynamics (MD) trajectory of ethanol, which can be downloaded [here](http://quantum-machine.org/gdml/data/xyz/ethanol_dft.zip).", "_____no_output_____" ] ], [ [ "import os\nif not os.path.exists('./ethanol_dft.zip'):\n !wget 
http://quantum-machine.org/gdml/data/xyz/ethanol_dft.zip\n \nif not os.path.exists('./ethanol.xyz'):\n !unzip ./ethanol_dft.zip", "_____no_output_____" ] ], [ [ "The data set is in xyz format with the total energy given in the comment row. For this kind of data, we supply a script that converts it into the SchNetPack ASE DB format.\n```\nschnetpack_parse.py ./ethanol.xyz ./ethanol.db\n```\n\nIn the following, we show how this can be done in general, so that you apply this to any other data format.\n\nFirst, we need to parse our data. For this we use the IO functionality supplied by ASE.\nIn order to create a SchNetPack DB, we require a **list of ASE `Atoms` objects** as well as a corresponding **list of dictionaries** `[{property_name1: property1_molecule1}, {property_name1: property1_molecule2}, ...]` containing the mapping from property names to values.", "_____no_output_____" ] ], [ [ "from ase.io import read\nimport numpy as np\n\n# load atoms from xyz file. Here, we only parse the first 10 molecules\natoms = read('./ethanol.xyz', index=':10')\n\n# comment line is weirdly stored in the info dictionary as key by ASE. 
here it corresponds to the energy\nprint('Energy:', atoms[0].info)\nprint()\n\n# parse properties as list of dictionaries\nproperty_list = []\nfor at in atoms:\n # All properties need to be stored as numpy arrays.\n # Note: The shape for scalars should be (1,), not ()\n # Note: GPUs work best with float32 data\n energy = np.array([float(list(at.info.keys())[0])], dtype=np.float32) \n property_list.append(\n {'energy': energy}\n )\n \nprint('Properties:', property_list)", "Energy: {'-97208.40600498248': True}\n\nProperties: [{'energy': array([-97208.41], dtype=float32)}, {'energy': array([-97208.375], dtype=float32)}, {'energy': array([-97208.04], dtype=float32)}, {'energy': array([-97207.5], dtype=float32)}, {'energy': array([-97206.84], dtype=float32)}, {'energy': array([-97206.1], dtype=float32)}, {'energy': array([-97205.266], dtype=float32)}, {'energy': array([-97204.29], dtype=float32)}, {'energy': array([-97203.16], dtype=float32)}, {'energy': array([-97201.875], dtype=float32)}]\n" ] ], [ [ "Once we have our data in this format, it is straightforward to create a new SchNetPack DB and store it.", "_____no_output_____" ] ], [ [ "%rm './new_dataset.db'\nnew_dataset = AtomsData('./new_dataset.db', available_properties=['energy'])\nnew_dataset.add_systems(atoms, property_list)", "_____no_output_____" ] ], [ [ "Now we can have a look at the data in the same way we did before for QM9:", "_____no_output_____" ] ], [ [ "print('Number of reference calculations:', len(new_dataset))\nprint('Available properties:')\n\nfor p in new_dataset.available_properties:\n print('-', p)\nprint() \n\nexample = new_dataset[0]\nprint('Properties of molecule with id 0:')\n\nfor k, v in example.items():\n print('-', k, ':', v.shape)", "Number of reference calculations: 10\nAvailable properties:\n- energy\n\nProperties of molecule with id 0:\n- energy : torch.Size([1])\n- _atomic_numbers : torch.Size([9])\n- _positions : torch.Size([9, 3])\n- _cell : torch.Size([3, 3])\n- _neighbors : 
torch.Size([9, 8])\n- _cell_offset : torch.Size([9, 8, 3])\n- _idx : torch.Size([1])\n" ] ], [ [ "The same way, we can store multiple properties, including atomic properties such as forces, or tensorial properties such as polarizability tensors.\n\nIn the following tutorials, we will describe how these datasets can be used to train neural networks.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb20c0d8795fe2ede2eda29a48dc514980fae80e
213,835
ipynb
Jupyter Notebook
ApplicationLab6/ATOC5860_applicationlab6_supervised_ML.ipynb
jshaw35/ATOC5860_Spring2022
b73573df41fb3994df6741a5e6995fcb8194aeae
[ "MIT" ]
null
null
null
ApplicationLab6/ATOC5860_applicationlab6_supervised_ML.ipynb
jshaw35/ATOC5860_Spring2022
b73573df41fb3994df6741a5e6995fcb8194aeae
[ "MIT" ]
null
null
null
ApplicationLab6/ATOC5860_applicationlab6_supervised_ML.ipynb
jshaw35/ATOC5860_Spring2022
b73573df41fb3994df6741a5e6995fcb8194aeae
[ "MIT" ]
null
null
null
81.33701
24,740
0.762644
[ [ [ "### ATOC5860 Application Lab #6 - supervised machine learning\n### Coded by Eleanor Middlemas (Jupiter, formerly University of Colorado, elmiddlemas at gmail.com)\n### Additional code/commenting by Jennifer Kay (University of Colorado) \n### Last updated April 6, 2022\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time", "_____no_output_____" ] ], [ [ "*In this notebook, we will use supervised machine learning models to:*\n\n**1) Predict the likelihood of rainfall given certain atmospheric conditions.**\nAfter prepping the data, we will build and train four machine learning models to make the predictions: Logistic regression, Random Forest, Singular vector machines/classifier, Neural Network\n\n**2) Determine which variable (\"feature\") is the best predictor of rainfall, i.e., \"feature importance\"**", "_____no_output_____" ], [ "## STEP 1: Read in the Data into a pandas dataframe and Look At It", "_____no_output_____" ] ], [ [ "# read in the data\ndf = pd.read_csv(\"christman_2016.csv\")\n# preview data (also through df.head() & df.tail())\ndf", "_____no_output_____" ], [ "df.day.nunique() ## Print the answer to: How many days are in this dataset?", "_____no_output_____" ], [ "##Optional: transform the day column into a readable date. Run this ONCE.\ndf['day'] = [datetime.date.fromordinal(day+693594) for day in df['day']]", "_____no_output_____" ] ], [ [ "## STEP 2: Data and Function Preparation\n\nData preparation is a huge part of building Machine Learning model \"pipelines\". Carefully think through building & training a Machine Learning model before you run it. There are a few statistical \"gotchas\" that may result in your model being biased, inaccurate, or not suitable for the problem at hand. Address these 6 questions!\n\n**Q1: What exactly are we trying to predict? A value, an outcome, a category?** Define your predictors and predictand. Relate these to your hypothesis or overarching question. 
In our case, our predictand is the likelihood of precipitation. We will build models to predict the likelihood that it's currently precipitating, given current atmospheric conditions.\n\n**Q2: Do you have any missing data? If so, how will you handle them?** Keep in mind, decreasing the number of input observations may bias your model. Using the Christman dataset, we have no missing data.\n\n**Q3: Do you have any categorical or non-numeric variables or features?** If so, you must figure out how to encode them into numbers. Luckily, in the geosciences, we rarely run into this problem.\n\n**Q4: How will we validate our model?** Typically, people split their existing data into training data and testing data, or perform \"cross-validation\" or a \"test-train split\". That is, we will \"hold out\" some data and call it our \"testing data\", while using the rest of the data to train our model (i.e., \"training data\"). Once our model is trained, we will evaluate its performance with the holdout testing data. Note: This could be problematic if there is limited data.\n\n**Q5: Do your features have the same variance?** You need to consider this to ensure your model doesn't overly depend on one variable with large variance. This step is called \"feature scaling\". Features of the same size also speed up the Gradient Descent algorithm.\n\n**Q6: If classification is the goal, are there the same number of observations for each feature and outcome? If not, how will you rebalance?** Here, the Christman dataset has same number of observations (8784) for each feature. But, times with no precipitation are way more common than times with precipitation. To deal with this issue, we will oversample the observations associated with precip so that the two outcomes (or \"classes\") are equal. 
Note: It's important that feature scaling or normalization is performed before any rebalancing so that the qualitative statistics (mean, stddev, etc) remain the same.\n\n**Q7: Which metrics are appropriate for assessing your model?** Consider the bias-variance trade-off, and whether having false positives or false negatives is more impactful. In our case, predicting no rain when there is rain (false negative) is probably more frustrating and potentially more impactful than the other way around (a false positive).", "_____no_output_____" ], [ "**Q1. What exactly are you trying to predict?**\n\nFirst, split data into predictor & predictands. ", "_____no_output_____" ] ], [ [ "##Create a new feature that indicates whether precipitation occurred. Perform this step ONCE.\n#print(df.columns) # print if you need to see what is the variable called that indicates precipitation amount?\ndf['prec_occur'] = np.array(df.Prec_inches!=0).astype(int)", "_____no_output_____" ], [ "#Next, select the data that will be predictors.\npredictors = df.copy(deep=True) # here, we use \"deep = True\" so that changes to predictors won't be made to the df.\n\n#Next, we drop some variables that shouldn't be used to predict whether or not there is rain.\npredictors = df.drop(['day','hour','Prec_inches'],axis=1) \npredictors", "_____no_output_____" ], [ "## Great, that worked. Now I will assign everything but \"prec\" to be the predictor array \"x\", \n## and prec will be the predictand vector \"y\".\n\nx = predictors.drop('prec_occur',axis=1)\ny = predictors.prec_occur", "_____no_output_____" ] ], [ [ "**Q2 & Q3 do not need to be addressed in our dataset.**", "_____no_output_____" ], [ "**Q4. How will you validate your model?**\n\nWe will perform a test-train split to validate our trained model. This step must be performed before each time the model is trained to ensure we are not baking in any bias among the models we train. 
# --------------------------------------------------------------------------
# Data-preparation and evaluation helpers.
# These are called before training EACH model so every model gets a fresh
# random split, freshly fitted scaler, and a freshly rebalanced dataset.
# NOTE(review): x, y, df are notebook globals defined in earlier cells
# (predictor DataFrame, binary 'prec_occur' Series, and the raw hourly
# DataFrame) — confirm against the data-loading cells.
# --------------------------------------------------------------------------
from random import randint

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics, preprocessing
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.utils import resample


def define_holdout_data(x, y, verbose):
    """Perform an 80/20 train/test split (80% training, 20% testing).

    The split is re-randomized on every call via a fresh random_state.

    Parameters
    ----------
    x : pandas.DataFrame
        Predictor features.
    y : pandas.Series
        Binary predictand (1 = precipitating hour, 0 = not).
    verbose : bool
        If True, print the shapes of the resulting splits.

    Returns
    -------
    x_train, x_test, y_train, y_test
    """
    random_state = randint(0, 1000)
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.20, random_state=random_state)
    if verbose:
        print("Prior to scaling and rebalancing...")
        print("Shape of training predictors: " + str(np.shape(x_train)))
        print("Shape of testing predictors: " + str(np.shape(x_test)))
        print("Shape of training predictands: " + str(np.shape(y_train)))
        print("Shape of testing predictands: " + str(np.shape(y_test)))
        print(" ")
    return x_train, x_test, y_train, y_test


def scale_data(x_train, x_test):
    """Min-max scale features so no feature dominates by sheer variance.

    The scaler is fitted on the TRAINING data only; the same fitted scaler
    is then applied to the testing data (and should be applied to any future
    input), so it is returned alongside the scaled frames.

    Returns
    -------
    scaler : fitted sklearn MinMaxScaler
    x_train_scaled, x_test_scaled : pandas.DataFrame
        Scaled copies preserving the original index and columns.
    """
    scaler = preprocessing.MinMaxScaler()
    x_train_scaled = pd.DataFrame(data=scaler.fit_transform(x_train),
                                  index=x_train.index, columns=x_train.columns)
    x_test_scaled = pd.DataFrame(data=scaler.transform(x_test),
                                 index=x_test.index, columns=x_test.columns)
    return scaler, x_train_scaled, x_test_scaled


def balance_data(x, y, verbose):
    """Randomly oversample precipitating hours to balance the two classes.

    Non-precipitating hours outnumber precipitating hours ~30:1, which would
    bias the model; the minority class is resampled (with replacement) up to
    the majority-class count.

    Parameters
    ----------
    x : pandas.DataFrame
        Scaled predictors.
    y : pandas.Series
        Binary 'prec_occur' predictand aligned with x.
    verbose : bool
        If True, print the shapes of the rebalanced data.

    Returns
    -------
    x_bal, y_bal : rebalanced predictors and predictand.
    """
    # Recombine so predictors and predictand are resampled from the same rows.
    dataset = pd.concat([x, y], axis=1)

    # Separate the classes.
    raining = dataset[dataset['prec_occur'] == 1]
    not_raining = dataset[dataset['prec_occur'] == 0]

    random_state = randint(0, 1000)
    oversample = resample(raining,
                          replace=True,
                          # match the majority-class count
                          n_samples=len(not_raining),
                          random_state=random_state)

    # New balanced dataset, then re-separate into X and y.
    oversample_dataset = pd.concat([not_raining, oversample])
    x_bal = oversample_dataset.drop(['prec_occur'], axis=1)
    y_bal = oversample_dataset['prec_occur']

    if verbose:
        print("After scaling and rebalancing...")
        print("Shape of predictors: " + str(np.shape(x_bal)))
        print("Shape of predictands: " + str(np.shape(y_bal)))
        print(" ")
    return x_bal, y_bal


def dataprep_pipeline(x, y, verbose):
    """Run the full pre-processing pipeline in one call.

    Steps: (1) randomized 80/20 split, (2) min-max feature scaling fitted on
    the training set, (3) class rebalancing of training and testing sets
    separately.

    Returns
    -------
    x_train_bal, y_train_bal, x_test_bal, y_test_bal
    """
    # 1. Split into training & testing data.
    x_train, x_test, y_train, y_test = define_holdout_data(x, y, verbose)

    # 2. Feature scaling (scaler itself is not needed downstream here).
    scaler, x_train_scaled, x_test_scaled = scale_data(x_train, x_test)

    # 3. Rebalance outcomes so precip / no-precip counts are equal.
    if verbose:
        print("for training data... ")
    x_train_bal, y_train_bal = balance_data(x_train_scaled, y_train, verbose)
    if verbose:
        print("for testing data... ")
    x_test_bal, y_test_bal = balance_data(x_test_scaled, y_test, verbose)

    return x_train_bal, y_train_bal, x_test_bal, y_test_bal


def bin_metrics(x, y):
    """Print and return accuracy and recall for binary predictions.

    Parameters
    ----------
    x : true labels
    y : predicted labels

    Returns
    -------
    (accuracy, recall) : tuple of floats
    """
    accuracy = metrics.accuracy_score(x, y)
    recall = metrics.recall_score(x, y)

    print('Accuracy:', round(accuracy, 4))
    print('Recall:', round(recall, 4))

    return accuracy, recall


def plot_cm(x, y):
    """Plot a confusion matrix heatmap of true/false positives & negatives.

    Parameters
    ----------
    x : true labels
    y : predicted labels
    """
    cm = confusion_matrix(x, y)
    df_cm = pd.DataFrame(cm, columns=np.unique(x), index=np.unique(x))
    df_cm.index.name = 'Actual'
    df_cm.columns.name = 'Predicted'
    sns.heatmap(df_cm, cmap="Blues", annot=True,
                annot_kws={"size": 25}, fmt='g')
    plt.ylim([0, 2])
    plt.xticks([0.5, 1.5], ['Negatives', 'Positives'])
    plt.yticks([0.5, 1.5], ['Negatives', 'Positives'])


def rand_atmos_conditions_precip(index='rand'):
    """Pick one precipitating observation and its scaled model input.

    Returns the raw atmospheric conditions (a row of the notebook-global
    DataFrame ``df``) plus the scaled predictor row ready to feed a model.
    If no ``index`` is passed, an index is chosen randomly from the
    precipitating observations in a fresh training split; otherwise pass an
    integer index into that subset.

    Returns
    -------
    (raw_row, scaled_row)
    """
    # Fresh test-train split (relies on notebook globals x and y).
    x_train, x_test, y_train, _ = define_holdout_data(x, y, verbose=False)

    # Scale with a scaler fitted on this training split.
    _, x_train_scaled, _ = scale_data(x_train, x_test)

    # Choose a precipitating observation. BUGFIX: randint is inclusive of
    # both endpoints, so the upper bound must be len-1 to avoid an
    # occasional IndexError.
    if index == 'rand':
        index = randint(0, len(y_train[y_train == 1].index) - 1)
    precipindex = y_train[y_train == 1].index.values[index]
    testpredictor = x_train_scaled.loc[precipindex]

    return df.iloc[precipindex], testpredictor
# ======================= Model 1: Logistic Regression =======================
from sklearn.linear_model import LogisticRegression

# 1. Test-train split, feature scaling, and class rebalancing.
x_train_bal, y_train_bal, x_test_bal, y_test_bal = dataprep_pipeline(x, y, verbose=True)

# 2. Train the Logistic Regression model.
# The 'lbfgs' solver is chosen because no feature regularization/penalty is
# applied here. Fitting is where gradient descent happens.
lr = LogisticRegression(solver='lbfgs')
lr.fit(x_train_bal, y_train_bal)

# 3. Assess performance on the holdout data the model has never seen.
y_pred = lr.predict(x_test_bal)
lr_acc, lr_rec = bin_metrics(y_test_bal, y_pred)

# 4. Overfitting check: training metrics should not beat testing metrics.
#    testing > training = underfitting (model too simple)
#    testing < training = overfitting (model too complex)
print("Training metrics:")
pred_train = lr.predict(x_train_bal)
bin_metrics(y_train_bal, pred_train)

print(" ")
print("Testing metrics:")
bin_metrics(y_test_bal, y_pred)

# 5. Make a prediction for one randomly chosen precipitating hour. These
# conditions are reused for every model built below.
origvals, testpredictor = rand_atmos_conditions_precip()
print(testpredictor)  # scaled observation

# predict_proba output is [P(no rain), P(rain)].
lr_prediction = lr.predict_proba(np.array(testpredictor).reshape(1, -1))[0][1]*100
print("The meteorological conditions are: ")
print(origvals)
print(" ")
print("There is a {0:.{digits}f}% chance of precipitation given those meteorological conditions.".format(lr_prediction, digits=2))
# ========================= Model 2: Random Forest ===========================
from sklearn.ensemble import RandomForestClassifier

# 1. Test-train split, feature scaling, and class rebalancing.
x_train_bal, y_train_bal, x_test_bal, y_test_bal = dataprep_pipeline(x, y, verbose=False)

# 2. Hyperparameter sweep: number of trees x tree depth, scored on the
# holdout data for each combination.
acc_scores = []
rec_scores = []

num_est = [10, 50, 500]  # number of trees
depth = [2, 10, 100]     # number of decisions until convergence
for n_trees in num_est:
    start = time.time()
    print("Number of estimators is " + str(n_trees))

    for tree_depth in depth:
        print("depth is " + str(tree_depth))
        forest = RandomForestClassifier(n_estimators=n_trees, max_depth=tree_depth)
        forest.fit(x_train_bal, y_train_bal)

        # Cross-validate: evaluate metrics on the testing data.
        pred_test = forest.predict(x_test_bal)
        acc_scores.append(metrics.accuracy_score(y_test_bal, pred_test))
        rec_scores.append(metrics.recall_score(y_test_bal, pred_test))

    end = time.time()
    print("Random Forest took " + str(end - start) + " seconds.")

# Visualize accuracy and recall across the hyperparameter choices.
plt.plot(acc_scores, marker='o', color='black', label='accuracy')
plt.plot(rec_scores, marker='o', color='blue', label='recall')
plt.xlabel('Hyperparameter Choice')
plt.ylabel('Score')
plt.legend()
print("Max Accuracy (black):", round(max(acc_scores), 4))
print("Max Recall (blue):", round(max(rec_scores), 4))

# Retrain with the chosen hyperparameters (x=0 above: 10 trees, depth 2 —
# good for both accuracy and recall).
forest = RandomForestClassifier(n_estimators=10, max_depth=2)
forest.fit(x_train_bal, y_train_bal)

# 3. Assess performance using the testing data.
pred_test = forest.predict(x_test_bal)
forest_acc, forest_rec = bin_metrics(y_test_bal, pred_test)
plot_cm(y_test_bal, pred_test)

# 4. Overfitting check (training metrics should not beat testing metrics).
print("Training metrics:")
rf_pred_train = forest.predict(x_train_bal)
bin_metrics(y_train_bal, rf_pred_train)

print(" ")
print("Testing metrics:")
bin_metrics(y_test_bal, pred_test)
# 5. Make a prediction with the Random Forest for the same held-out
# atmospheric conditions. predict_proba output is [P(no rain), P(rain)].
forest_prediction = forest.predict_proba(np.array(testpredictor).reshape(1, -1))[0][1]*100
print("The meteorological conditions are: ")
print(origvals)
print(" ")
print("There is a {0:.{digits}f}% chance of precipitation given those meteorological conditions.".format(forest_prediction, digits=2))

# ================= Model 3: Support Vector Machine (SVM) ===================
# A non-linear 'rbf' kernel is used since the relationships between the
# atmospheric variables and precipitation are inherently non-linear.
from sklearn import svm

# 1. Test-train split, feature scaling, and class rebalancing.
x_train_bal, y_train_bal, x_test_bal, y_test_bal = dataprep_pipeline(x, y, verbose=False)

# 2. Sweep the regularization parameter C (lower C = stronger penalty on
# higher-order coefficients) and score each on the testing data.
acc_scores = []
rec_scores = []

C_range = [0.01, 1, 100]
for c_val in C_range:
    start = time.time()
    print("C is... " + str(c_val))
    svmclassifier = svm.SVC(C=c_val, kernel='rbf', gamma='scale',
                            max_iter=20000, probability=True)
    svmclassifier.fit(x_train_bal, y_train_bal)

    # Save model metrics in order to choose the best hyperparameter.
    pred_test = svmclassifier.predict(x_test_bal)
    acc_scores.append(metrics.accuracy_score(y_test_bal, pred_test))
    rec_scores.append(metrics.recall_score(y_test_bal, pred_test))

    end = time.time()
    print("Took " + str(end - start) + " seconds to train.")

# Visualize scores vs. C (log scale).
plt.plot(C_range, acc_scores, marker='o', color='black', label='accuracy')
plt.plot(C_range, rec_scores, marker='o', color='blue', label='recall')
plt.xlabel('Hyperparameter Choice')
plt.xscale('log')
plt.ylabel('Score')
plt.legend()
print("Max Accuracy (black):", round(max(acc_scores), 4))
print("Max Recall (blue):", round(max(rec_scores), 4))

# Final SVM with C=1 (a medium penalty balances accuracy and recall).
svmclassifier = svm.SVC(C=1, kernel='rbf', gamma='scale',
                        max_iter=20000, probability=True)
svmclassifier.fit(x_train_bal, y_train_bal)

# 3. Assess SVM performance using the testing data.
pred_test = svmclassifier.predict(x_test_bal)
svm_acc, svm_rec = bin_metrics(y_test_bal, pred_test)
plot_cm(y_test_bal, pred_test)

# 4. Overfitting check (training metrics should not beat testing metrics).
# Overfitting here can be addressed with a simpler kernel or by tuning C.
print("Training metrics:")
svm_pred_train = svmclassifier.predict(x_train_bal)
bin_metrics(y_train_bal, svm_pred_train)

print(" ")
print("Testing metrics:")
bin_metrics(y_test_bal, pred_test)
# 5. Make a prediction with the SVM for the same held-out atmospheric
# conditions. predict_proba output is [P(no rain), P(rain)].
svm_prediction = svmclassifier.predict_proba(np.array(testpredictor).reshape(1, -1))[0][1]*100
print("The meteorological conditions are: ")
print(origvals)
print(" ")
print("There is a {0:.{digits}f}% chance of precipitation given those meteorological conditions.".format(svm_prediction, digits=2))

# ========================= Model 4: Neural Network ==========================
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# 1. Test-train split, feature scaling, and class rebalancing.
x_train_bal, y_train_bal, x_test_bal, y_test_bal = dataprep_pipeline(x, y, verbose=False)

# 2. Build and compile a very simple feed-forward network. There are many
# hyperparameters here — see the commented alternatives to experiment.
number_inputs = len(x_train_bal.columns)

nn = Sequential()
nn.add(Dense(number_inputs, input_dim=number_inputs, activation='relu'))

# Try uncommenting this to address overfitting:
# from keras.regularizers import l2
# reg = l2(0.001)
# nn.add(Dense(number_inputs, activation='relu', bias_regularizer=reg, activity_regularizer=reg))

# Output layer — try swapping the activation:
nn.add(Dense(1, activation='sigmoid'))
# nn.add(Dense(1, activation='softmax'))

# Compile. learning_rate is only used by the (commented) SGD optimizer.
learning_rate = 0.001
nn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# nn.compile(loss='binary_crossentropy', optimizer=keras.optimizers.SGD(lr=learning_rate), metrics=['accuracy'])

# Actually train the model.
batch_size = 24  # samples the network sees before it backpropagates
epochs = 100     # full passes through the dataset
shuffle = True   # shuffle training data so the model doesn't see it sequentially
verbose = 2      # 0 = no output; 2 = output accuracy every epoch

start = time.time()

history = nn.fit(x_train_bal, y_train_bal, validation_data=(x_test_bal, y_test_bal),
                 batch_size=batch_size, epochs=epochs, shuffle=shuffle, verbose=verbose)

end = time.time()
print("Neural Network took " + str(end - start) + " seconds to train.")
val_accuracy: 0.8005\nEpoch 3/100\n568/568 - 0s - loss: 0.4639 - accuracy: 0.7883 - val_loss: 0.4350 - val_accuracy: 0.8259\nEpoch 4/100\n568/568 - 0s - loss: 0.4395 - accuracy: 0.8008 - val_loss: 0.4128 - val_accuracy: 0.8221\nEpoch 5/100\n568/568 - 0s - loss: 0.4262 - accuracy: 0.8058 - val_loss: 0.4009 - val_accuracy: 0.8236\nEpoch 6/100\n568/568 - 1s - loss: 0.4177 - accuracy: 0.8081 - val_loss: 0.3952 - val_accuracy: 0.8505\nEpoch 7/100\n568/568 - 0s - loss: 0.4114 - accuracy: 0.8127 - val_loss: 0.3909 - val_accuracy: 0.8469\nEpoch 8/100\n568/568 - 0s - loss: 0.4066 - accuracy: 0.8161 - val_loss: 0.3797 - val_accuracy: 0.8629\nEpoch 9/100\n568/568 - 0s - loss: 0.4023 - accuracy: 0.8199 - val_loss: 0.3782 - val_accuracy: 0.8576\nEpoch 10/100\n568/568 - 0s - loss: 0.3985 - accuracy: 0.8233 - val_loss: 0.3751 - val_accuracy: 0.8620\nEpoch 11/100\n568/568 - 1s - loss: 0.3960 - accuracy: 0.8238 - val_loss: 0.3720 - val_accuracy: 0.8599\nEpoch 12/100\n568/568 - 0s - loss: 0.3928 - accuracy: 0.8263 - val_loss: 0.3695 - val_accuracy: 0.8626\nEpoch 13/100\n568/568 - 0s - loss: 0.3909 - accuracy: 0.8259 - val_loss: 0.3721 - val_accuracy: 0.8558\nEpoch 14/100\n568/568 - 1s - loss: 0.3888 - accuracy: 0.8294 - val_loss: 0.3639 - val_accuracy: 0.8738\nEpoch 15/100\n568/568 - 0s - loss: 0.3871 - accuracy: 0.8274 - val_loss: 0.3632 - val_accuracy: 0.8747\nEpoch 16/100\n568/568 - 0s - loss: 0.3848 - accuracy: 0.8307 - val_loss: 0.3646 - val_accuracy: 0.8632\nEpoch 17/100\n568/568 - 0s - loss: 0.3823 - accuracy: 0.8306 - val_loss: 0.3610 - val_accuracy: 0.8717\nEpoch 18/100\n568/568 - 0s - loss: 0.3800 - accuracy: 0.8350 - val_loss: 0.3573 - val_accuracy: 0.8685\nEpoch 19/100\n568/568 - 0s - loss: 0.3763 - accuracy: 0.8346 - val_loss: 0.3554 - val_accuracy: 0.8726\nEpoch 20/100\n568/568 - 1s - loss: 0.3732 - accuracy: 0.8367 - val_loss: 0.3494 - val_accuracy: 0.8809\nEpoch 21/100\n568/568 - 0s - loss: 0.3691 - accuracy: 0.8386 - val_loss: 0.3507 - val_accuracy: 0.8715\nEpoch 
22/100\n568/568 - 0s - loss: 0.3645 - accuracy: 0.8419 - val_loss: 0.3419 - val_accuracy: 0.8836\nEpoch 23/100\n568/568 - 0s - loss: 0.3596 - accuracy: 0.8443 - val_loss: 0.3407 - val_accuracy: 0.8877\nEpoch 24/100\n568/568 - 0s - loss: 0.3556 - accuracy: 0.8486 - val_loss: 0.3358 - val_accuracy: 0.8868\nEpoch 25/100\n568/568 - 0s - loss: 0.3526 - accuracy: 0.8489 - val_loss: 0.3335 - val_accuracy: 0.8915\nEpoch 26/100\n568/568 - 0s - loss: 0.3499 - accuracy: 0.8492 - val_loss: 0.3305 - val_accuracy: 0.8845\nEpoch 27/100\n568/568 - 0s - loss: 0.3471 - accuracy: 0.8501 - val_loss: 0.3277 - val_accuracy: 0.8839\nEpoch 28/100\n568/568 - 0s - loss: 0.3449 - accuracy: 0.8498 - val_loss: 0.3265 - val_accuracy: 0.8862\nEpoch 29/100\n568/568 - 1s - loss: 0.3429 - accuracy: 0.8498 - val_loss: 0.3276 - val_accuracy: 0.8794\nEpoch 30/100\n568/568 - 0s - loss: 0.3406 - accuracy: 0.8500 - val_loss: 0.3225 - val_accuracy: 0.8883\nEpoch 31/100\n568/568 - 1s - loss: 0.3392 - accuracy: 0.8498 - val_loss: 0.3220 - val_accuracy: 0.8836\nEpoch 32/100\n568/568 - 1s - loss: 0.3380 - accuracy: 0.8505 - val_loss: 0.3210 - val_accuracy: 0.8830\nEpoch 33/100\n568/568 - 0s - loss: 0.3362 - accuracy: 0.8527 - val_loss: 0.3198 - val_accuracy: 0.8845\nEpoch 34/100\n568/568 - 0s - loss: 0.3354 - accuracy: 0.8530 - val_loss: 0.3183 - val_accuracy: 0.8856\nEpoch 35/100\n568/568 - 0s - loss: 0.3340 - accuracy: 0.8552 - val_loss: 0.3190 - val_accuracy: 0.8883\nEpoch 36/100\n568/568 - 1s - loss: 0.3329 - accuracy: 0.8539 - val_loss: 0.3175 - val_accuracy: 0.8806\nEpoch 37/100\n568/568 - 1s - loss: 0.3321 - accuracy: 0.8533 - val_loss: 0.3143 - val_accuracy: 0.8957\nEpoch 38/100\n568/568 - 1s - loss: 0.3318 - accuracy: 0.8557 - val_loss: 0.3174 - val_accuracy: 0.8880\nEpoch 39/100\n568/568 - 1s - loss: 0.3314 - accuracy: 0.8536 - val_loss: 0.3152 - val_accuracy: 0.8877\nEpoch 40/100\n568/568 - 1s - loss: 0.3307 - accuracy: 0.8545 - val_loss: 0.3197 - val_accuracy: 0.8815\nEpoch 41/100\n568/568 - 1s - 
loss: 0.3301 - accuracy: 0.8530 - val_loss: 0.3147 - val_accuracy: 0.8877\nEpoch 42/100\n568/568 - 1s - loss: 0.3298 - accuracy: 0.8536 - val_loss: 0.3156 - val_accuracy: 0.8865\nEpoch 43/100\n568/568 - 1s - loss: 0.3295 - accuracy: 0.8554 - val_loss: 0.3119 - val_accuracy: 0.8862\nEpoch 44/100\n568/568 - 0s - loss: 0.3285 - accuracy: 0.8555 - val_loss: 0.3150 - val_accuracy: 0.8732\nEpoch 45/100\n568/568 - 0s - loss: 0.3282 - accuracy: 0.8574 - val_loss: 0.3134 - val_accuracy: 0.8771\nEpoch 46/100\n568/568 - 1s - loss: 0.3281 - accuracy: 0.8553 - val_loss: 0.3124 - val_accuracy: 0.8907\nEpoch 47/100\n568/568 - 1s - loss: 0.3278 - accuracy: 0.8564 - val_loss: 0.3156 - val_accuracy: 0.8853\nEpoch 48/100\n568/568 - 1s - loss: 0.3273 - accuracy: 0.8554 - val_loss: 0.3165 - val_accuracy: 0.8765\nEpoch 49/100\n568/568 - 0s - loss: 0.3266 - accuracy: 0.8562 - val_loss: 0.3143 - val_accuracy: 0.8788\nEpoch 50/100\n568/568 - 1s - loss: 0.3264 - accuracy: 0.8562 - val_loss: 0.3157 - val_accuracy: 0.8744\nEpoch 51/100\n568/568 - 1s - loss: 0.3268 - accuracy: 0.8549 - val_loss: 0.3149 - val_accuracy: 0.8830\nEpoch 52/100\n568/568 - 1s - loss: 0.3259 - accuracy: 0.8559 - val_loss: 0.3148 - val_accuracy: 0.8706\nEpoch 53/100\n568/568 - 0s - loss: 0.3257 - accuracy: 0.8569 - val_loss: 0.3132 - val_accuracy: 0.8797\nEpoch 54/100\n568/568 - 1s - loss: 0.3249 - accuracy: 0.8581 - val_loss: 0.3124 - val_accuracy: 0.8824\nEpoch 55/100\n568/568 - 0s - loss: 0.3255 - accuracy: 0.8576 - val_loss: 0.3231 - val_accuracy: 0.8735\nEpoch 56/100\n568/568 - 1s - loss: 0.3254 - accuracy: 0.8565 - val_loss: 0.3143 - val_accuracy: 0.8771\nEpoch 57/100\n568/568 - 0s - loss: 0.3249 - accuracy: 0.8570 - val_loss: 0.3124 - val_accuracy: 0.8824\nEpoch 58/100\n568/568 - 0s - loss: 0.3248 - accuracy: 0.8577 - val_loss: 0.3119 - val_accuracy: 0.8833\nEpoch 59/100\n568/568 - 1s - loss: 0.3247 - accuracy: 0.8575 - val_loss: 0.3119 - val_accuracy: 0.8833\nEpoch 60/100\n568/568 - 1s - loss: 0.3247 - 
accuracy: 0.8589 - val_loss: 0.3144 - val_accuracy: 0.8712\nEpoch 61/100\n568/568 - 0s - loss: 0.3246 - accuracy: 0.8569 - val_loss: 0.3160 - val_accuracy: 0.8679\nEpoch 62/100\n568/568 - 1s - loss: 0.3238 - accuracy: 0.8582 - val_loss: 0.3123 - val_accuracy: 0.8774\nEpoch 63/100\n568/568 - 0s - loss: 0.3244 - accuracy: 0.8575 - val_loss: 0.3124 - val_accuracy: 0.8797\nEpoch 64/100\n568/568 - 1s - loss: 0.3237 - accuracy: 0.8583 - val_loss: 0.3133 - val_accuracy: 0.8768\nEpoch 65/100\n568/568 - 1s - loss: 0.3231 - accuracy: 0.8593 - val_loss: 0.3092 - val_accuracy: 0.8824\nEpoch 66/100\n568/568 - 1s - loss: 0.3224 - accuracy: 0.8584 - val_loss: 0.3142 - val_accuracy: 0.8771\nEpoch 67/100\n568/568 - 1s - loss: 0.3222 - accuracy: 0.8594 - val_loss: 0.3151 - val_accuracy: 0.8685\nEpoch 68/100\n568/568 - 1s - loss: 0.3218 - accuracy: 0.8595 - val_loss: 0.3106 - val_accuracy: 0.8788\nEpoch 69/100\n568/568 - 1s - loss: 0.3208 - accuracy: 0.8610 - val_loss: 0.3097 - val_accuracy: 0.8806\nEpoch 70/100\n568/568 - 1s - loss: 0.3210 - accuracy: 0.8600 - val_loss: 0.3111 - val_accuracy: 0.8821\nEpoch 71/100\n568/568 - 1s - loss: 0.3206 - accuracy: 0.8601 - val_loss: 0.3090 - val_accuracy: 0.8726\nEpoch 72/100\n568/568 - 0s - loss: 0.3210 - accuracy: 0.8604 - val_loss: 0.3129 - val_accuracy: 0.8800\nEpoch 73/100\n568/568 - 0s - loss: 0.3203 - accuracy: 0.8591 - val_loss: 0.3106 - val_accuracy: 0.8679\nEpoch 74/100\n568/568 - 0s - loss: 0.3197 - accuracy: 0.8604 - val_loss: 0.3139 - val_accuracy: 0.8729\nEpoch 75/100\n568/568 - 1s - loss: 0.3195 - accuracy: 0.8607 - val_loss: 0.3154 - val_accuracy: 0.8685\nEpoch 76/100\n568/568 - 1s - loss: 0.3195 - accuracy: 0.8593 - val_loss: 0.3129 - val_accuracy: 0.8750\nEpoch 77/100\n568/568 - 1s - loss: 0.3195 - accuracy: 0.8589 - val_loss: 0.3091 - val_accuracy: 0.8703\nEpoch 78/100\n568/568 - 1s - loss: 0.3199 - accuracy: 0.8577 - val_loss: 0.3089 - val_accuracy: 0.8703\nEpoch 79/100\n568/568 - 1s - loss: 0.3187 - accuracy: 0.8591 - 
val_loss: 0.3119 - val_accuracy: 0.8726\nEpoch 80/100\n568/568 - 1s - loss: 0.3181 - accuracy: 0.8594 - val_loss: 0.3134 - val_accuracy: 0.8679\nEpoch 81/100\n568/568 - 1s - loss: 0.3188 - accuracy: 0.8601 - val_loss: 0.3099 - val_accuracy: 0.8667\nEpoch 82/100\n568/568 - 0s - loss: 0.3181 - accuracy: 0.8586 - val_loss: 0.3099 - val_accuracy: 0.8661\nEpoch 83/100\n568/568 - 1s - loss: 0.3175 - accuracy: 0.8592 - val_loss: 0.3101 - val_accuracy: 0.8664\nEpoch 84/100\n568/568 - 1s - loss: 0.3173 - accuracy: 0.8592 - val_loss: 0.3086 - val_accuracy: 0.8750\nEpoch 85/100\n568/568 - 1s - loss: 0.3174 - accuracy: 0.8598 - val_loss: 0.3120 - val_accuracy: 0.8700\nEpoch 86/100\n568/568 - 1s - loss: 0.3168 - accuracy: 0.8586 - val_loss: 0.3091 - val_accuracy: 0.8732\nEpoch 87/100\n568/568 - 1s - loss: 0.3165 - accuracy: 0.8596 - val_loss: 0.3078 - val_accuracy: 0.8599\nEpoch 88/100\n568/568 - 1s - loss: 0.3168 - accuracy: 0.8580 - val_loss: 0.3069 - val_accuracy: 0.8611\nEpoch 89/100\n568/568 - 1s - loss: 0.3161 - accuracy: 0.8589 - val_loss: 0.3110 - val_accuracy: 0.8756\nEpoch 90/100\n568/568 - 1s - loss: 0.3163 - accuracy: 0.8591 - val_loss: 0.3064 - val_accuracy: 0.8632\nEpoch 91/100\n568/568 - 0s - loss: 0.3155 - accuracy: 0.8588 - val_loss: 0.3080 - val_accuracy: 0.8629\nEpoch 92/100\n568/568 - 0s - loss: 0.3156 - accuracy: 0.8592 - val_loss: 0.3069 - val_accuracy: 0.8605\nEpoch 93/100\n568/568 - 1s - loss: 0.3151 - accuracy: 0.8592 - val_loss: 0.3088 - val_accuracy: 0.8558\nEpoch 94/100\n568/568 - 1s - loss: 0.3151 - accuracy: 0.8628 - val_loss: 0.3062 - val_accuracy: 0.8712\nEpoch 95/100\n568/568 - 1s - loss: 0.3143 - accuracy: 0.8596 - val_loss: 0.3113 - val_accuracy: 0.8558\nEpoch 96/100\n568/568 - 1s - loss: 0.3139 - accuracy: 0.8610 - val_loss: 0.3079 - val_accuracy: 0.8576\nEpoch 97/100\n568/568 - 1s - loss: 0.3132 - accuracy: 0.8603 - val_loss: 0.3055 - val_accuracy: 0.8709\nEpoch 98/100\n568/568 - 1s - loss: 0.3125 - accuracy: 0.8628 - val_loss: 0.3029 - 
val_accuracy: 0.8682\nEpoch 99/100\n568/568 - 1s - loss: 0.3113 - accuracy: 0.8640 - val_loss: 0.3034 - val_accuracy: 0.8812\nEpoch 100/100\n568/568 - 1s - loss: 0.3108 - accuracy: 0.8612 - val_loss: 0.3108 - val_accuracy: 0.8611\nNeural Network took 52.48426294326782 seconds to train.\n" ], [ "#Accuracy & loss with epochs\n#Neural networks train in epochs. During each epoch, the model trains by sweeping over each layer, \n#adjusting weights based on their resulting errors, through processes called forward propagation and backpropagation. \n#By plotting the model accuracy & loss which each epoch, we can visualize how the model error evolves with training.\n\nfigure, axes = plt.subplots(nrows=2,ncols=1)\nfigure.tight_layout(pad=3.0)\n\n# plot accuracy during training\nplt.subplot(211)\nplt.title('Accuracy')\nplt.plot(history.history['accuracy'], label='train')\nplt.plot(history.history['val_accuracy'], label='test')\nplt.legend();\n\n# plot loss during training\nplt.subplot(212)\nplt.title('Loss')\nplt.plot(history.history['loss'], label='train')\nplt.plot(history.history['val_loss'], label='test')\nplt.xlabel(\"Epoch\");\nplt.legend()\nplt.show();", "_____no_output_____" ], [ "##3. Assess Neural Network's performance using testing data\n## Though the accuracy is pictured above, additionally quantify recall on testing data with the \n## same functions used previously to remain consistent\n\npred_test= (nn.predict(x_test_bal)>0.5).astype(\"int32\")\nnn_acc, nn_rec = bin_metrics(y_test_bal, pred_test)\nplot_cm(y_test_bal, pred_test)", "Accuracy: 0.8611\nRecall: 0.909\n" ], [ "## 4. 
Check to see if the Neural Network is overfitting (or underfitting)\n#Remember:\n#testing metrics > training metrics = underfitting, model is too simple\n#testing metrics < training metrics = overfitting, model is too complex\n\n#Note: Neural networks can easily overfit because they are complex and can fit to the training data extremely well, \n# Overfitting prevents neural networks from generalizing to other data (like the testing data).\n\n# Compare testing data metrics to data training metrics.\nprint(\"Training metrics:\")\nnn_pred_train= (nn.predict(x_train_bal)>0.5).astype(\"int32\")\nbin_metrics(y_train_bal,nn_pred_train);\n\n# As a reminder, display testing metrics:\nprint(\" \")\nprint(\"Testing metrics:\")\nbin_metrics(y_test_bal, pred_test);", "Training metrics:\nAccuracy: 0.8664\nRecall: 0.9163\n \nTesting metrics:\nAccuracy: 0.8611\nRecall: 0.909\n" ], [ "## 5. Make a prediction with the Neural Network\n\n# prediction output is in the format [probability no rain, probability rain]\nnn_prediction = nn.predict(np.array(testpredictor).reshape(1, -1))[0][0]*100\nprint(\"The meteorological conditions are: \")\nprint(origvals)\nprint(\"There is a {0:.{digits}f}% chance of precipitation given those meteorological conditions.\".format(nn_prediction, digits=2))\n", "The meteorological conditions are: \nday 2016-04-26\nhour 0.0416667\ntemp_F 45.4\nRH 78\ndewtemp_F 38.9\nwind_mph 11.8\nwind_dir 295\nwindgust 22.1\nwindgust_dir 283\npres_Hg 829.48\nSOLIN_Wm2 0\nPrec_inches 0.04\nprec_occur 1\nName: 2785, dtype: object\nThere is a 97.77% chance of precipitation given those meteorological conditions.\n" ] ], [ [ "## SUMMARY: Compare all Four Machine Learning Models", "_____no_output_____" ] ], [ [ "model_metrics = pd.DataFrame({'Metrics':['Accuracy','Recall','Prediction example'],\n 'Logistic Regression':[lr_acc, lr_rec, lr_prediction],\n 'Random Forest':[forest_acc, forest_rec, forest_prediction],\n 'Singular Vector Machine':[svm_acc, svm_rec, svm_prediction],\n 
'Neural Network':[nn_acc, nn_rec, nn_prediction]})\nmodel_metrics = model_metrics.set_index('Metrics')\nmodel_metrics", "_____no_output_____" ] ], [ [ "## STEP 4: Assess Feature Importance\n\nNote: Feature Importance is not possible with non-linear Singular Vector Machines because the data is transformed by the kernel into another space that is unrelated to the input space.", "_____no_output_____" ] ], [ [ "## Feature importance in Logistical Regression Model\n\npd.DataFrame(abs(lr.coef_[0]),\n index = x.columns,\n columns=['importance']).sort_values('importance',ascending=False)", "_____no_output_____" ], [ "## Feature importance in Random Forest Model\n\npd.DataFrame(forest.feature_importances_,\n index = x.columns, \n columns=['importance']).sort_values('importance', ascending=False)", "_____no_output_____" ], [ "## Feature importance in Neural Network\n\ncols = x.columns.values\nnn_featimportance = []\nfor var in cols:\n # create a vector corresponding to a 1 where the feature is located:\n inputvector = np.array((cols==var).astype(int).reshape(1, -1))\n nn_featimportance.append(nn.predict(inputvector)[0][0]*100)\n\npd.DataFrame( nn_featimportance,\n index = x.columns,\n columns=['importance']).sort_values('importance',ascending=False) ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb20c1dfd5fc82a87e49f40df48d443a88eeee63
34,700
ipynb
Jupyter Notebook
CLRS/ch02-getting_started/03-design_algo/merge_sort_implementation.ipynb
phunc20/algorithms
04674829311cde7bb173252b8a41620aae4b14ba
[ "MIT" ]
null
null
null
CLRS/ch02-getting_started/03-design_algo/merge_sort_implementation.ipynb
phunc20/algorithms
04674829311cde7bb173252b8a41620aae4b14ba
[ "MIT" ]
null
null
null
CLRS/ch02-getting_started/03-design_algo/merge_sort_implementation.ipynb
phunc20/algorithms
04674829311cde7bb173252b8a41620aae4b14ba
[ "MIT" ]
null
null
null
25.108538
369
0.503256
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb20c440047f40e9622523841b38a6cd845f2016
5,869
ipynb
Jupyter Notebook
docs/python/data_wrangling/pandas_dataframe_load_xls.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-03-18T21:13:25.000Z
2020-03-18T21:13:25.000Z
docs/python/data_wrangling/pandas_dataframe_load_xls.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
null
null
null
docs/python/data_wrangling/pandas_dataframe_load_xls.ipynb
revgizmo-forks/ds_notes
ffc73d06b07fb2b137e7e679d3c99dab53580afa
[ "CC0-1.0" ]
1
2020-08-28T11:03:18.000Z
2020-08-28T11:03:18.000Z
5,869
5,869
0.40961
[ [ [ "---\ntitle: \"Load Excel Spreadsheet As pandas Dataframe\"\nauthor: \"Chris Albon\"\ndate: 2017-12-20T11:53:49-07:00\ndescription: \"Load Excel spreadsheet as pandas dataframe.\"\ntype: technical_note\ndraft: false\n---", "_____no_output_____" ] ], [ [ "# import modules\nimport pandas as pd", "_____no_output_____" ], [ "# Import the excel file and call it xls_file\nxls_file = pd.ExcelFile('../data/example.xls')\nxls_file", "_____no_output_____" ], [ "# View the excel file's sheet names\nxls_file.sheet_names", "_____no_output_____" ], [ "# Load the xls file's Sheet1 as a dataframe\ndf = xls_file.parse('Sheet1')\ndf", "_____no_output_____" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code", "code", "code", "code" ] ]
cb20c7b4774d1fc0b7bc029f66e854c0b1f75b2b
54,682
ipynb
Jupyter Notebook
IIMB-Assignments/Assgn-5/Assignment-5-v2-Copy2.ipynb
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
IIMB-Assignments/Assgn-5/Assignment-5-v2-Copy2.ipynb
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
IIMB-Assignments/Assgn-5/Assignment-5-v2-Copy2.ipynb
rahasayantan/Work-For-Reference
e052da538df84034ec5a0fe3b19c4287de307286
[ "MIT" ]
null
null
null
34.34799
365
0.284609
[ [ [ "# Q-PART C-15\n", "_____no_output_____" ] ], [ [ "from pulp import *\nimport pyomo.environ as pe\nimport logging\n\nlogging.getLogger('pyomo.core').setLevel(logging.ERROR)\n\n", "_____no_output_____" ], [ "from pyomo.environ import *\nfrom math import pi\nimport warnings\nwarnings.filterwarnings('ignore')\nm = ConcreteModel()\n\nm.a = pe.Set(initialize=[1, 2, 3, 4])\nm.demand = pe.Var(m.a, bounds=(1e-20,2500))\nm.disc= pe.Var(m.a, bounds=(.1,.6))\nm.saleVal = pe.Var(m.a, bounds=(.6*606,606))\nm.inventory = pe.Var(m.a, bounds=(1e-20,2500))\n\nm.age = Param([1,2,3,4], initialize={1:96, 2:97, 3:98, 4:99})\n\ndef inv_rule(m, i):\n if i ==1:\n return 2476 >= m.inventory[i] - m.demand[i]\n else:\n return m.inventory[i-1] >= m.inventory[i] - m.demand[i]\n \nm.c1 = pe.Constraint(m.a, rule=inv_rule)\n\ndef demand_rule(m, i):\n if i==1:\n return m.demand[i] <= ((4.41125 + 3.89091 * m.disc[i] - 0.18602 * (m.age[i]**.5-1)/.5 \n -3.19977 * .579 \n + 0.56468 * (48**.2 -1)/.2+ 0.80126)*.2+1)**5\n else:\n return m.demand[i] <= ((4.41125 + 3.89091 * m.disc[i] - 0.18602 * (m.age[i]**.5-1)/.5 \n -3.19977 * m.disc[i-1] \n + 0.56468 * (m.demand[i-1]**.2 -1)/.2+ 0.80126)*.2+1)**5\n \nm.c2 = pe.Constraint(m.a, rule=demand_rule)\n\ndef disc_rule(m, i):\n if i ==1:\n return m.disc[i] >= .1\n else:\n return m.disc[i] >= m.disc[i-1]\n \nm.c3 = pe.Constraint(m.a, rule=disc_rule)\n\n\ndef d_i_rule(m, i):\n if i ==1:\n return m.demand[i] <= 2476\n else:\n return m.demand[i] <= m.inventory[i-1]\n \nm.c4 = pe.Constraint(m.a, rule=d_i_rule)\n\ndef d_i_rule2(m):\n return sum([m.demand[i] for i in [1,2,3,4]]) <= 2476\nm.c5 = pe.Constraint(rule=d_i_rule2)\n\n\nm.o = Objective(expr= \n (sum([m.demand[i]*(1-m.disc[i])*606 for i in [1,2,3,4]])), sense=maximize)\n\nsolver = SolverFactory('ipopt')\nstatus = solver.solve(m)\n\nprint(\"Status = %s\" % status.solver.termination_condition)\n\nfor i in [1,2,3,4]:\n print(\"%s = %f\" % (m.demand, value(m.demand[i])))\n \nfor i in [1,2,3,4]:\n print(\"%s 
= %f\" % (m.disc, value(m.disc[i])))\n \n#print(\"%s = %f\" % (m.disc, value(m.y)))\nprint(\"Objective = %f\" % value(m.o))", "Status = optimal\ndemand = 16.659688\ndemand = 22.480693\ndemand = 33.425632\ndemand = 41.424099\ndisc = 0.100000\ndisc = 0.150475\ndisc = 0.315890\ndisc = 0.458748\nObjective = 48103.861923\n" ], [ "#.pprint()", "_____no_output_____" ] ], [ [ "##### Analysis\nWe have used non-linear optimization to derive the outcome.\n- The discounts offerred as per the plan:\n - Week 1 - 10%\n - Week 2 - 15%\n - Week 3 - 31%\n - Week 4 - 41%\n\n- Units sold would be:\n - Week 1 - 16\n - Week 2 - 22\n - Week 3 - 33\n - Week 4 - 41\n \n- Revenue from Sales: 48103, which is higher than 41302 which he made in reality.\n\nStore would have sold less units 112, compared to the 159 the shop sold, but the revenue generated would have been close to 7000 more (without taking into consideration the extra profit he would make by selling the additional units for 60% discount approximately 11000). So in total, he would have made an extra 18000 by using the above optimization strategy.", "_____no_output_____" ], [ "# Q-3\n\n", "_____no_output_____" ] ], [ [ "data = [\"Service was very good. Excellent breakfast in beautiful restaurant included in price. I was happy there and extended my stay for extra two days.\",\n \"Really helpful staff, the room was clean, beds really comfortable. Great roof top restaurant with yummy food and very friendly staff.\",\n \"Good location. The Cleanliness part was superb.\",\n \"I stayed for two days in deluxe A/C room (Room no. 404). I think it is renovated recently. Staff behaviour, room cleanliness all are fine.\",\n \"The room and public spaces were infested with mosquitoes. 
I killed a dozen or so in my room prior to sleeping but still woke up covered in bites.\",\n \"Unfriendly staff with no care for guests.\",\n \"Very worst and bad experience, Service I got from the hotel reception is too worst and typical.\",\n \"Good location but the staff was unfriendly\"\n ]\n\ndata = pd.DataFrame(data)\ndata.columns = ['text']\ndata['sentiment'] = [1,1,1,1,0,0,0,-3]", "_____no_output_____" ], [ "def clean_text(x):\n splchars = re.compile(r'[^A-Za-z ]',re.IGNORECASE)\n x = splchars.sub('', x)\n x = word_tokenize(x.lower())\n x = [w for w in x if w not in stopwords]\n return(' '.join(x))\n\ndata.fillna('NA', inplace=True)\ndata['text_clean'] = data['text'].apply(lambda x: clean_text(x.lower()))\n \ncount_vec_v1 = CountVectorizer(stop_words=stopwords, \n ngram_range=(1,2), max_features=5000)\n \ncount_vec_dict = count_vec_v1.fit(data.text_clean)\nreviews_text_vec = count_vec_v1.transform(data.text)\ndf_reviews = pd.DataFrame(reviews_text_vec.toarray())\ndf_reviews.columns = count_vec_dict.get_feature_names()\n\nprint(\"Data with all possible 1,2 Grams.\")\ndf_reviews.head() ", "Data with all possible 1,2 Grams.\n" ], [ "columns = ['Beautiful', 'Good Service', 'Good Location', 'Superb', 'Cleanliness', 'Mosquitoes', \n 'Unfriendly', 'bad experience']\ncolumns = [c.lower() for c in columns]\n\nprint(\"Phrases / Words to consider\")\ncolumns", "Phrases / Words to consider\n" ], [ "df_reviews = df_reviews[list(set(columns).intersection(set(df_reviews.columns)))]\n\nprint(\"Train data\")\ndf_reviews", "Train data\n" ], [ "y_train = data.sentiment[:-1]\nX_train = df_reviews.iloc[:-1,]\nX_test = pd.DataFrame(df_reviews.iloc[-1,:]).T\n\nprint(\"Test Data\")\nX_test\n", "Test Data\n" ], [ "print(\"As we can see from above 'good service' is not available in the training set / test set and hence it will not be considered for model building\")\n\nprint(\"building Naive Bayes Model..\")\n\nbayes_clf = BernoulliNB()\nbayes_clf.fit(X_train, y_train)\n\n", "As we 
can see from above 'good service' is not available in the training set / test set and hence it will not be considered for model building\nbuilding Naive Bayes Model..\n" ], [ "pred = bayes_clf.predict_proba(X_test)\nprint(\"Probability of Negative Sentiment is : {}\".format(pred[0, 0]))\nprint(\"Probability of Positive Sentiment is : {}\".format(pred[0, 1]))", "Probability of Negative Sentiment is : 0.5633083058452802\nProbability of Positive Sentiment is : 0.43669169415472003\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb20caa1019050e071c1e8c59ec9c217018f9990
50,392
ipynb
Jupyter Notebook
notebooks/Hand Keypoint.ipynb
MeMihir/MasterGestures
2d797bff45d5dfe7c75adc98878baced355634df
[ "MIT" ]
null
null
null
notebooks/Hand Keypoint.ipynb
MeMihir/MasterGestures
2d797bff45d5dfe7c75adc98878baced355634df
[ "MIT" ]
null
null
null
notebooks/Hand Keypoint.ipynb
MeMihir/MasterGestures
2d797bff45d5dfe7c75adc98878baced355634df
[ "MIT" ]
null
null
null
20.393363
81
0.460093
[ [ [ "import cv2\nimport mediapipe as mp\nfrom gestureClassifier import classifyGesture\nfrom handKeypoint import detectKeypoints, drawSkeleton", "_____no_output_____" ], [ "def test(source=0, display=False):\n cap = cv2.VideoCapture(0)\n try:\n while cap.isOpened():\n success, frame = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n points = detectKeypoints(frame)\n drawSkeleton(frame, points)\n if cv2.waitKey(5) & 0xFF == 27:\n break\n if points != None and points.multi_hand_landmarks != None:\n print(classifyGesture(points))\n \n except KeyboardInterrupt:\n print('Keyboard Interrupt')\n finally:\n cap.release()", "_____no_output_____" ], [ "test()", "RLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRPoint\nRPoint\nRLeftClick\nRLeftClick\nRLeftClick\nRPoint\nRPoint\nRLeftClick\nRRightClick\nRPoint\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRLeftClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRRightClick\nRPoint\nRLeftClick\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRP
oint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRLeftClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRLeftClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nScroll\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRRightClick\nRPoint\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRLeftClick\nRPoint\nRPoint\nRPoint\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRPoint\nRPoint\nRRightClick\nRRightClick\nRPoint\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRLeftClick\nRRightClick\nRLeftClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRPoint\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRPoint\nRRigh
tClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRLeftClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRPoint\nRPoint\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRPoint\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRLeftClick\nRRightClick\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRPoint\nRLeftClick\nRPoint\nRRightClick\nRRightClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRLeftClick\nRRightClick\nRRightClick\nRLeftClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRLeftClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRLeftClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nR
RightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRLeftClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nScroll\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRDoubleClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nScroll\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRight
Click\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nScroll\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nScroll\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRPoint\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nScroll\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRRightClick\nRPoint\nRRightClick\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb20d387c4ec6962349e53e2712f08db3b7064e4
77
ipynb
Jupyter Notebook
html/Dog Breed Classification Project - Lijuan Zhang - Medium_files/a_007.ipynb
geooeg/DogApp
b9dc8bf5acb0b83fbeb963f7e94443e1b3af63c6
[ "MIT" ]
1
2020-03-11T02:03:00.000Z
2020-03-11T02:03:00.000Z
html/Dog Breed Classification Project - Lijuan Zhang - Medium_files/a_008.ipynb
geooeg/DogApp
b9dc8bf5acb0b83fbeb963f7e94443e1b3af63c6
[ "MIT" ]
13
2020-09-25T22:33:44.000Z
2022-03-12T00:16:10.000Z
html/Dog Breed Classification Project - Lijuan Zhang - Medium_files/a_008.ipynb
geooeg/DogApp
b9dc8bf5acb0b83fbeb963f7e94443e1b3af63c6
[ "MIT" ]
null
null
null
77
77
0.792208
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb20d9d4afc6ff58e8ca9c98be0a48145c710f01
384,906
ipynb
Jupyter Notebook
notebooks/fundamentals/pixels_and_neighbors/divergence.ipynb
ahartikainen/computation
2b7f0fd2fe2d9f1fc494cb52f57764a09ba0617e
[ "MIT" ]
13
2017-03-09T06:01:04.000Z
2021-12-15T07:40:40.000Z
notebooks/fundamentals/pixels_and_neighbors/divergence.ipynb
ahartikainen/computation
2b7f0fd2fe2d9f1fc494cb52f57764a09ba0617e
[ "MIT" ]
14
2016-03-29T18:08:09.000Z
2017-03-07T16:34:22.000Z
notebooks/fundamentals/pixels_and_neighbors/divergence.ipynb
simpeg/tutorials
2b7f0fd2fe2d9f1fc494cb52f57764a09ba0617e
[ "MIT" ]
6
2017-06-19T15:42:02.000Z
2020-03-02T03:29:21.000Z
468.255474
135,542
0.927853
[ [ [ "# Building our operators: the Face Divergence", "_____no_output_____" ], [ "The divergence is the integral of a flux through a closed surface as that enclosed volume shrinks to a point. Since we have discretized and no longer have continuous functions, we cannot fully take the limit to a point; instead, we approximate it around some (finite!) volume: *a cell*. The flux out of the surface ($\\vec{j} \\cdot \\vec{n}$) is actually how we discretized $\\vec{j}$ onto our mesh (i.e. $\\bf{j}$) except that the face normal points out of the cell (rather than in the axes direction). After fixing the direction of the face normal (multiplying by $\\pm 1$), we only need to calculate the face areas and cell volume to create the discrete divergence matrix.\n\n<img src=\"./images/Divergence.png\" width=80% align=\"center\">\n\n<h4 align=\"center\">Figure 4. Geometrical definition of the divergence and the discretization.</h4>", "_____no_output_____" ], [ "## Implementation\n\nAlthough this is a really helpful way to think about conceptually what is happening, the implementation of that would be a huge for loop over each cell. In practice, this would be slow, so instead, we will take advantage of linear algebra. Let's start by looking at this in 1 dimension using the SimPEG Mesh class. ", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom SimPEG import Mesh\nimport matplotlib.pyplot as plt\n%matplotlib inline\nplt.set_cmap(plt.get_cmap('viridis')) # use a nice colormap!", "_____no_output_____" ], [ "# define a 1D mesh\nmesh1D = Mesh.TensorMesh([5]) # with 5 cells \n\nfig, ax = plt.subplots(1,1, figsize=(12,2))\nax.plot(mesh1D.gridN, np.zeros(mesh1D.nN),'-k',marker='|',markeredgewidth=2, markersize=16)\nax.plot(mesh1D.gridCC,np.zeros(mesh1D.nC),'o')\nax.plot(mesh1D.gridFx,np.zeros(mesh1D.nFx),'>')\nax.set_title('1D Mesh')", "_____no_output_____" ], [ "# and define a vector of fluxes that live on the faces of the 1D mesh\nface_vec = np.r_[0., 1., 2., 2., 1., 0.] 
# vector of fluxes that live on the faces of the mesh\nprint(\"The flux on the faces is {}\".format(face_vec))\n\nplt.plot(mesh1D.gridFx, face_vec, '-o')\nplt.ylim([face_vec.min()-0.5, face_vec.max()+0.5])\nplt.grid(which='both')\nplt.title('face_vec');", "The flux on the faces is [ 0. 1. 2. 2. 1. 0.]\n" ] ], [ [ "Over a single cell, the divergence is \n\n$$\n\\nabla \\cdot \\vec{j}(p) = \\lim_{v \\to \\{p\\}} = \\int \\int_{S(v)} \\frac{\\vec{j}\\cdot \\vec{n}}{v} dS\n$$\n\nin 1D, this collapses to taking a single difference - how much is going out of the cell vs coming in? \n\n$$\n\\nabla \\cdot \\vec{j} \\approx \\frac{1}{v}(-j_{\\text{left}} + j_{\\text{right}})\n$$\n\nSince the normal of the x-face on the left side of the cell points in the positive x-direction, we multiply by -1 to get the flux going out of the cell. On the right, the normal defining the x-face is point out of the cell, so it is positive. ", "_____no_output_____" ] ], [ [ "# We can take the divergence over the entire mesh by looping over each cell\ndiv_face_vec = np.zeros(mesh1D.nC) # allocate for each cell\n\nfor i in range(mesh1D.nC): # loop over each cell and \n div_face_vec[i] = 1.0/mesh1D.vol[i] * (-face_vec[i] + face_vec[i+1])\n\nprint(\"The face div of the 1D flux is {}\".format(div_face_vec))", "The face div of the 1D flux is [ 5. 5. 0. -5. -5.]\n" ] ], [ [ "Doing it as a for loop is easy to program for the first time, \nbut is difficult to see what is going on and could be slow! \nInstead, we can build a faceDiv matrix (note: this is a silly way to do this!)", "_____no_output_____" ] ], [ [ "faceDiv = np.zeros([mesh1D.nC, mesh1D.nF]) # allocate space for a face div matrix\nfor i in range(mesh1D.nC): # loop over each cell\n faceDiv[i, [i, i+1]] = 1.0/mesh1D.vol[i] * np.r_[-1,+1]\n\nprint(\"The 1D face div matrix for this mesh is \\n{}\".format(faceDiv))\n\nassert np.all( faceDiv.dot(face_vec) == div_face_vec ) # make sure we get the same result! 
\n\nprint(\"\\nThe face div of the 1D flux is still {}!\".format(div_face_vec))", "The 1D face div matrix for this mesh is \n[[-5. 5. 0. 0. 0. 0.]\n [ 0. -5. 5. 0. 0. 0.]\n [ 0. 0. -5. 5. 0. 0.]\n [ 0. 0. 0. -5. 5. 0.]\n [ 0. 0. 0. 0. -5. 5.]]\n\nThe face div of the 1D flux is still [ 5. 5. 0. -5. -5.]!\n" ] ], [ [ "the above is still a loop... (and python is not a fan of loops). \nAlso, if the mesh gets big, we are storing a lot of unnecessary zeros", "_____no_output_____" ] ], [ [ "\"There are {nnz} zeros (too many!) that we are storing\".format(nnz = np.sum(faceDiv == 0))", "_____no_output_____" ] ], [ [ "### Working in Sparse\n\nWe will use instead *sparse* matrices instead. These are in scipy and act almost the same as numpy arrays (except they default to matrix multiplication), and they don't store all of those pesky zeros! We use [scipy.sparse](http://docs.scipy.org/doc/scipy/reference/sparse.html) to build these matrices. ", "_____no_output_____" ] ], [ [ "import scipy.sparse as sp\nfrom SimPEG.Utils import sdiag # we are often building sparse diagonal matrices, so we made a functio in SimPEG!", "_____no_output_____" ], [ "# construct differencing matrix with diagonals -1, +1\nsparse_diff = sp.spdiags((np.ones((mesh1D.nC+1, 1))*[-1, 1]).T, [0, 1], mesh1D.nC, mesh1D.nC+1, format=\"csr\")\nprint(\"the sparse differencing matrix is \\n{}\".format(sparse_diff.todense()))\n\n# account for the volume\nfaceDiv_sparse = sdiag(1./mesh1D.vol) * sparse_diff # account for volume \nprint(\"\\n and the face divergence is \\n{}\".format(faceDiv_sparse.todense()))\n\nprint(\"\\n but now we are only storing {nnz} nonzeros\".format(nnz=faceDiv_sparse.nnz))\n\nassert np.all(faceDiv_sparse.dot(face_vec) == div_face_vec) \nprint(\"\\n and we get the same answer! {}\".format(faceDiv_sparse * face_vec))", "the sparse differencing matrix is \n[[-1. 1. 0. 0. 0. 0.]\n [ 0. -1. 1. 0. 0. 0.]\n [ 0. 0. -1. 1. 0. 0.]\n [ 0. 0. 0. -1. 1. 0.]\n [ 0. 0. 0. 0. -1. 
1.]]\n\n and the face divergence is \n[[-5. 5. 0. 0. 0. 0.]\n [ 0. -5. 5. 0. 0. 0.]\n [ 0. 0. -5. 5. 0. 0.]\n [ 0. 0. 0. -5. 5. 0.]\n [ 0. 0. 0. 0. -5. 5.]]\n\n but now we are only storing 10 nonzeros\n\n and we get the same answer! [ 5. 5. 0. -5. -5.]\n" ] ], [ [ "In SimPEG, this is stored as the `faceDiv` property on the mesh", "_____no_output_____" ] ], [ [ "print(mesh1D.faceDiv * face_vec) # and still gives us the same answer!", "[ 5. 5. 0. -5. -5.]\n" ] ], [ [ "## Moving to 2D", "_____no_output_____" ], [ "To move up in dimensionality, we build a 2D mesh which has both x and y faces", "_____no_output_____" ] ], [ [ "mesh2D = Mesh.TensorMesh([100,80])\nmesh2D.plotGrid()\nplt.axis('tight');", "_____no_output_____" ] ], [ [ "We define 2 face functions, one in the x-direction and one in the y-direction. Here, we choose to work with sine functions as the continuous divergence is easy to compute, meaning we can test it!", "_____no_output_____" ] ], [ [ "jx_fct = lambda x, y: -np.sin(2.*np.pi*x)\njy_fct = lambda x, y: -np.sin(2.*np.pi*y)\n\njx_vec = jx_fct(mesh2D.gridFx[:,0], mesh2D.gridFx[:,1])\njy_vec = jy_fct(mesh2D.gridFy[:,0], mesh2D.gridFy[:,1])\n\nj_vec = np.r_[jx_vec, jy_vec]\n\nprint(\"There are {nFx} x-faces and {nFy} y-faces, so the length of the \"\n \"face function, j, is {lenj}\".format(\n nFx=mesh2D.nFx, \n nFy=mesh2D.nFy,\n lenj=len(j_vec)\n ))\n\nplt.colorbar(mesh2D.plotImage(j_vec, 'F', view='vec')[0])", "There are 8080 x-faces and 8100 y-faces, so the length of the face function, j, is 16180\n" ] ], [ [ "### But first... what does the matrix look like?\n\nNow, we know that we do not want to loop over each of the cells and instead want to work with matrix-vector products. In this case, each row of the divergence matrix should pick out the two relevant faces in the x-direction and two in the y-direction (4 total). 
\n\nWhen we unwrap our face function, we unwrap using column major ordering, so all of the x-faces are adjacent to one another, while the y-faces are separated by the number of cells in the x-direction (see [mesh.ipynb](mesh.ipynb) for more details!). \n\nWhen we plot the divergence matrix, there will be 4 \"diagonals\", \n- 2 that are due to the x-contribution\n- 2 that are due to the y-contribution\n\nHere, we define a small 2D mesh so that it is easier to see the matrix structure. ", "_____no_output_____" ] ], [ [ "small_mesh2D = Mesh.TensorMesh([3,4])\n\nprint(\"Each y-face is {} entries apart\".format(small_mesh2D.nCx))\nprint(\"and the total number of x-faces is {}\".format(small_mesh2D.nFx))\nprint(\"So in the first row of the faceDiv, we have non-zero entries at \\n{}\".format(\n small_mesh2D.faceDiv[0,:]))", "Each y-face is 3 entries apart\nand the total number of x-faces is 16\nSo in the first row of the faceDiv, we have non-zero entries at \n (0, 0)\t-3.0\n (0, 1)\t3.0\n (0, 16)\t-4.0\n (0, 19)\t4.0\n" ] ], [ [ "Now, lets look at the matrix structure", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1,2, figsize=(12,4))\n\n# plot the non-zero entries in the faceDiv\nax[0].spy(small_mesh2D.faceDiv, ms=2)\nax[0].set_xlabel('2D faceDiv')\nsmall_mesh2D.plotGrid(ax=ax[1])\n\n# Number the faces and plot. (We should really add this to SimPEG... pull request anyone!?)\nxys = zip(\n small_mesh2D.gridFx[:,0], \n small_mesh2D.gridFx[:,1], \n range(small_mesh2D.nFx)\n)\nfor x,y,ii in xys:\n ax[1].plot(x, y, 'r>')\n ax[1].text(x+0.01, y-0.02, ii, color='r')\n\nxys = zip(\n small_mesh2D.gridFy[:,0], \n small_mesh2D.gridFy[:,1], \n range(small_mesh2D.nFy)\n)\nfor x,y,ii in xys:\n ax[1].plot(x, y, 'g^')\n ax[1].text(x-0.02, y+0.02, ii+small_mesh2D.nFx, color='g')\nax[1].set_xlim((-0.1,1.1));\nax[1].set_ylim((-0.1,1.1));", "_____no_output_____" ] ], [ [ "How did we construct the matrix? - Kronecker products. 
\nThere is a handy identity that relates the vectorized face function to its matrix form (<a href = \"https://en.wikipedia.org/wiki/Vectorization_(mathematics)#Compatibility_with_Kronecker_products\">wikipedia link!</a>)\n$$\n\\text{vec}(AUB^\\top) = (B \\otimes A) \\text{vec}(U)\n$$\n\nFor the x-contribution:\n- A is our 1D differential operator ([-1, +1] on the diagonals)\n- U is $j_x$ (the x-face function as a matrix) \n- B is just an identity\nso \n$$\n\\text{Div}_x \\text{vec}(j_x) = (I \\otimes Div_{1D}) \\text{vec}(j_x)\n$$\n\nFor the y-contribution: \n- A is just an identity!\n- U is $j_y$ (the y-face function as a matrix) \n- B is our 1D differential operator ([-1, +1] on the diagonals)\nso\n$$\n\\text{Div}_y \\text{vec}(j_y) = (\\text{Div}_{1D} \\otimes I) \\text{vec}(j_y)\n$$\n\n$$\n\\text{Div} \\cdot j = \\text{Div}_x \\cdot j_x + \\text{Div}_y \\cdot j_y = [\\text{Div}_x, \\text{Div}_y] \\cdot [j_x; j_y]\n$$\n\nAnd $j$ is just $[j_x; j_y]$, so we can horizontally stack $\\text{Div}_x$, $\\text{Div}_y$\n\n$$\n\\text{Div} = [\\text{Div}_x, \\text{Div}_y]\n$$\n\nYou can check this out in the SimPEG docs by running **small_mesh2D.faceDiv??**", "_____no_output_____" ] ], [ [ "# small_mesh2D.faceDiv?? # check out the code!", "_____no_output_____" ] ], [ [ "Now that we have a discrete divergence, lets check out the divergence of the face function we defined earlier. 
", "_____no_output_____" ] ], [ [ "Div_j = mesh2D.faceDiv * j_vec\n\nfig, ax = plt.subplots(1,2, figsize=(8,4))\nplt.colorbar(mesh2D.plotImage(j_vec, 'F', view='vec', ax=ax[0])[0],ax=ax[0])\nplt.colorbar(mesh2D.plotImage(Div_j, ax=ax[1])[0],ax=ax[1])\n\nax[0].set_title('j')\nax[1].set_title('Div j')\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "### Are we right??\n\nSince we chose a simple function,\n\n$$\n\\vec{j} = - \\sin(2\\pi x) \\hat{x} - \\sin(2\\pi y) \\hat{y} \n$$\n\nwe know the continuous divergence...\n\n$$\n\\nabla \\cdot \\vec{j} = -2\\pi (\\cos(2\\pi x) + \\cos(2\\pi y))\n$$\n\nSo lets plot it and take a look", "_____no_output_____" ] ], [ [ "# from earlier\n# jx_fct = lambda x, y: -np.sin(2*np.pi*x)\n# jy_fct = lambda x, y: -np.sin(2*np.pi*y)\n\nsol = lambda x, y: -2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))\n\ncont_div_j = sol(mesh2D.gridCC[:,0], mesh2D.gridCC[:,1])\n\nDiv_j = mesh2D.faceDiv * j_vec\n\nfig, ax = plt.subplots(1,2, figsize=(8,4))\nplt.colorbar(mesh2D.plotImage(Div_j, ax=ax[0])[0],ax=ax[0])\nplt.colorbar(mesh2D.plotImage(cont_div_j, ax=ax[1])[0],ax=ax[1])\n\nax[0].set_title('Discrete Div j')\nax[1].set_title('Continuous Div j')\nplt.tight_layout()", "_____no_output_____" ] ], [ [ "Those look similar :)", "_____no_output_____" ], [ "### Order Test\n\nWe can do better than just an eye-ball comparison - since we are using a a staggered grid, with centered differences, the discretization should be second-order ($\\mathcal{O}(h^2)$). That is, as we refine the mesh, our approximation of the divergence should improve by a factor of 2. 
\n\nSimPEG has a number of testing functions for \n[derivatives](http://docs.simpeg.xyz/content/api_core/api_Tests.html#SimPEG.Tests.checkDerivative)\nand \n[order of convergence](http://docs.simpeg.xyz/content/api_core/api_Tests.html#SimPEG.Tests.OrderTest) \nto make our lives easier!", "_____no_output_____" ] ], [ [ "import unittest\nfrom SimPEG.Tests import OrderTest\n\njx = lambda x, y: -np.sin(2*np.pi*x)\njy = lambda x, y: -np.sin(2*np.pi*y)\nsol = lambda x, y: -2*np.pi*(np.cos(2*np.pi*x)+np.cos(2*np.pi*y))\n\nclass Testify(OrderTest):\n meshDimension = 2\n \n def getError(self):\n j = np.r_[jx(self.M.gridFx[:,0], self.M.gridFx[:,1]),\n jy(self.M.gridFy[:,0], self.M.gridFy[:,1])]\n num = self.M.faceDiv * j # numeric answer\n ans = sol(self.M.gridCC[:,0], self.M.gridCC[:,1]) # note M is a 2D mesh\n return np.linalg.norm((num - ans), np.inf) # look at the infinity norm \n # (as we refine the mesh, the number of cells \n # changes, so need to be careful if using a 2-norm)\n def test_order(self):\n self.orderTest()\n\n# This just runs the unittest:\nsuite = unittest.TestLoader().loadTestsFromTestCase( Testify )\nunittest.TextTestRunner().run( suite );", "." ] ], [ [ "Looks good - Second order convergence!", "_____no_output_____" ], [ "## Next up ... \n\nIn the [next notebook](weakformulation.ipynb), we will explore how to use the weak formulation to discretize the DC equations. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb20f15ac5247ea64d5588dbf2faac103a8d214b
12,405
ipynb
Jupyter Notebook
notebooks/Synthetic_seismogram.ipynb
michael-scarn/geocomp-0118
935ab9cb04f5af8cf12445fda2962d2e961fbdc1
[ "Apache-2.0" ]
3
2020-03-04T15:37:09.000Z
2020-11-28T16:34:00.000Z
notebooks/Synthetic_seismogram.ipynb
helgegn/geocomp-0118
935ab9cb04f5af8cf12445fda2962d2e961fbdc1
[ "Apache-2.0" ]
null
null
null
notebooks/Synthetic_seismogram.ipynb
helgegn/geocomp-0118
935ab9cb04f5af8cf12445fda2962d2e961fbdc1
[ "Apache-2.0" ]
4
2018-02-01T18:55:32.000Z
2021-07-21T11:40:22.000Z
12,405
12,405
0.622975
[ [ [ "# Synthetic seismogram\n\nThis notebook looks at the convolutional model of a seismic trace.\n\nFor a fuller example, see [Bianco, E (2004)](https://github.com/seg/tutorials-2014/blob/master/1406_Make_a_synthetic/how_to_make_synthetic.ipynb) in *The Leading Edge*.\n\nFirst, the usual preliminaries.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Load geophysical data\n\nWe'll use `lasio` to faciliate loading curves from an LAS file.", "_____no_output_____" ] ], [ [ "from welly import Well\nw = Well.from_las('../data/L-30.las')", "_____no_output_____" ], [ "dt = w.data[\"DT\"]\nrhob = w.data[\"RHOB\"]", "_____no_output_____" ], [ "dt", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>- Convert the logs to SI units</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "dt = \nrhob = ", "_____no_output_____" ] ], [ [ "Compute velocity and thus acoustic impedance.", "_____no_output_____" ] ], [ [ "from utils import vp_from_dt, impedance, rc_series\n\nvp = vp_from_dt(dt)\nai = impedance(vp, rhob)\nz = dt.basis", "_____no_output_____" ], [ "plt.figure(figsize=(16, 2))\nplt.plot(z, ai, lw=0.5)\nplt.show()", "_____no_output_____" ] ], [ [ "## Depth to time conversion\n\nThe logs are in depth, but the seismic is in travel time. So we need to convert the well data to time.\n\nWe don't know the seismic time, but we can model it from the DT curve: since DT is 'elapsed time', in microseconds per metre, we can just add up all these time intervals for 'total elapsed time'. 
Then we can use that to 'look up' the time of a given depth.\n\nWe use the step size to scale the DT values to 'seconds per step' (instead of µs/m).", "_____no_output_____" ] ], [ [ "scaled_dt = dt.step * np.nan_to_num(dt) / 1e6 # Convert to seconds per step", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>- Do the arithmetic to find the timing of the top of the log.</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "dt.start, w.las.header['Well']['STRT']", "_____no_output_____" ], [ "kb = 0.3048 * w.las.header['Well']['KB'].value\ngl = 0.3048 * w.las.header['Well']['GL'].value\nstart = dt.start\n\nv_water = 1480\nv_repl = 1800\n\nwater_layer = # Depth of water\nrepl_layer = # Thickness of replacement layer\n\nwater_twt = # TWT in water, using water_layer and v_water\nrepl_twt = # TWT in replacement layer, using repl_layer and v_repl\n\nprint(\"Water time: {:.3f} ms\\nRepl time: {:.3f} ms\".format(water_twt, repl_twt))", "_____no_output_____" ] ], [ [ "You should get\n\n Water time: 0.186 ms\n Repl time: 0.233 ms", "_____no_output_____" ], [ "Now finally we can compute the cumulative time elapsed on the DT log:", "_____no_output_____" ] ], [ [ "dt_time = water_twt + repl_twt + 2*np.cumsum(scaled_dt)", "_____no_output_____" ], [ "dt_time[-1]", "_____no_output_____" ] ], [ [ "And then use this to convert the logs to a time basis:", "_____no_output_____" ] ], [ [ "delt = 0.004 # Sample interval.\nmaxt = np.ceil(dt_time[-1]) # Max time that we need; just needs to be longer than the log.\n\n# Make a regular time basis: the seismic time domain.\nseis_time = np.arange(0, maxt, delt) \n\n# Interpolate the AI log onto this basis.\nai_t = np.interp(seis_time, dt_time, ai)", "_____no_output_____" ], [ "# Let's do the depth 'log' too while we're at it.\nz_t = np.interp(seis_time, dt_time, z)", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>- Make a time-conversion function to 
get time-converted logs from `delt`, `maxt`, `dt_time`, and a log.</li>\n<li>- Make a function to get `dt_time` from `kb`, `gl`, `dt`, `v_water`, `v_repl`.</li>\n<li>- Recompute `ai_t` by calling your new functions.</li>\n<li>- Plot the DT log in time.</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "def time_convert(log, dt_time, delt=0.004, maxt=3.0):\n \"\"\"\n Converts log to the time domain, given dt_time, delt, and maxt.\n \n dt_time is elapsed time regularly sampled in depth. log must\n be sampled on the same depth basis.\n \"\"\"\n \n # Your code here!\n \n return log_t", "_____no_output_____" ], [ "def compute_dt_time(dt, kb, gl, v_repl, v_water=1480):\n \"\"\"\n Compute DT time from the dt log and some other variables.\n \n The DT log must be a welly curve object.\n \"\"\"\n\n # Your code here!\n \n return dt_time", "_____no_output_____" ] ], [ [ "Now, at last, we can compute the reflection coefficients in time.", "_____no_output_____" ] ], [ [ "from utils import rc_vector", "_____no_output_____" ], [ "rc = rc_vector(ai_t)\nrc[np.isnan(rc)] = 0", "_____no_output_____" ] ], [ [ "Plotting these is a bit more fiddly, because we would like to show them as a sequence of spikes, rather than as a continuous curve, and matplotlib's `axvline` method wants everything in terms of fractions of the plot's dimensions, not as values in the data space.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(16, 2))\npts, stems, base = plt.stem(seis_time[1:], rc)\nplt.setp(pts, markersize=0)\nplt.setp(stems, lw=0.5)\nplt.setp(base, lw=0.75)\nplt.show()", "_____no_output_____" ] ], [ [ "## Impulsive wavelet", "_____no_output_____" ], [ "Convolve with a wavelet.", "_____no_output_____" ] ], [ [ "from bruges.filters import ricker\n \nf = 25\nw, t = ricker(0.128, 0.004, f, return_t=True)", "_____no_output_____" ], [ "plt.plot(t, w)\nplt.show()", "_____no_output_____" ], [ "syn = np.convolve(rc, w, mode='same')", "_____no_output_____" ], [ 
"plt.figure(figsize=(16,2))\nplt.plot(seis_time[1:], syn)\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>- Try to plot the RC series with the synthetic.</li>\n<li>- You'll need to zoom in a bit to see much, try using a slice of `[300:350]` on all x's and y's.</li>\n</ul>\n</div>", "_____no_output_____" ], [ "If the widgets don't show up, you might need to do this:\n\n jupyter nbextension enable --py widgetsnbextension\n", "_____no_output_____" ], [ "If we are recording with dynamite or even an airgun, this might be an acceptable model of the seismic. But if we're using Vibroseis, things get more complicated. To get a flavour, try another wavelet in `bruges.filters`, or check out the notebooks:\n\n- [Vibroseis data](../notebooks/Vibroseis_data.ipynb)\n- [Wavelets and sweeps](../notebooks/Wavelets_and_sweeps.ipynb)", "_____no_output_____" ], [ "## Compare with the seismic", "_____no_output_____" ] ], [ [ "seismic = np.loadtxt('../data/Penobscot_xl1155.txt')", "_____no_output_____" ], [ "syn.shape", "_____no_output_____" ] ], [ [ "The synthetic is at trace number 77. 
We need to make a shifted version of the synthetic to overplot.", "_____no_output_____" ] ], [ [ "tr = 77\ngain = 50\ns = tr + gain*syn", "_____no_output_____" ] ], [ [ "And we can define semi-real-world cordinates of the seismic data:", "_____no_output_____" ] ], [ [ "extent = (0, 400, 4.0, 0)", "_____no_output_____" ], [ "plt.figure(figsize=(10,20))\nplt.imshow(seismic.T, cmap='Greys', extent=extent, aspect='auto')\nplt.plot(s, seis_time[1:])\nplt.fill_betweenx(seis_time[1:], tr, s, where=syn>0, lw=0)\nplt.xlim(0, 400)\nplt.ylim(3.2, 0)\nplt.show()", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>Load your tops data from `Reading data from files.ipynb` (using `from utils import tops` perhaps), or using the function you made in [`Practice functions`](Practice_functions.ipynb).</li>\n<li>- Use the time-converted 'depth', `z_t`, to convert depths to time.</li>\n<li>- Plot the tops on the seismic.</li>\n</ul>\n</div>", "_____no_output_____" ] ], [ [ "from utils import get_tops_from_file\n\ntops = get_tops_from_file('../data/L-30_tops.txt')", "_____no_output_____" ] ], [ [ "<div class=\"alert alert-success\">\n<b>Exercise</b>:\n<ul>\n<li>- Make functions for the wavelet creation, synthetic generation, and synthetic plotting steps.</li>\n<li>- Make a master function that takes the name of an LAS file, plus any other required info (such as `delt`), and returns a tuple of arrays: a time basis, and the synthetic amplitudes. You could make saving a plot optional.</li>\n<li>- Copy this notebook and make an offset synthetic for `R-39.las`, which has a shear-wave DT.</li>\n</ul>\n</div>", "_____no_output_____" ], [ "<hr />\n\n<div>\n<img src=\"https://avatars1.githubusercontent.com/u/1692321?s=50\"><p style=\"text-align:center\">© Agile Geoscience 2016</p>\n</div>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb2106ebe7cbb8fa09e52f3d60398d2203a15ed3
679,185
ipynb
Jupyter Notebook
LastDensenet121Model/Detection5C_NormNew_v2.ipynb
Serbeld/RX-COVID-19
d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237
[ "MIT" ]
1
2020-07-24T15:28:17.000Z
2020-07-24T15:28:17.000Z
LastDensenet121Model/Detection5C_NormNew_v2.ipynb
Serbeld/RX-COVID-19
d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237
[ "MIT" ]
null
null
null
LastDensenet121Model/Detection5C_NormNew_v2.ipynb
Serbeld/RX-COVID-19
d5936dbccdeed7dc80fbdbcc5b19c4c7eefcc237
[ "MIT" ]
2
2020-05-19T02:49:25.000Z
2020-07-30T00:01:31.000Z
304.977548
117,750
0.90712
[ [ [ "<a href=\"https://colab.research.google.com/github/Serbeld/RX-COVID-19/blob/master/Detection5C_NormNew_v2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip install lime", "_____no_output_____" ], [ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.applications import inception_v3\nfrom tensorflow.keras.layers import Dense,Dropout,Flatten,Input,AveragePooling2D,BatchNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport os\nimport lime\nfrom lime import lime_image\nfrom skimage.segmentation import mark_boundaries\nimport pandas as pd\n\nplt.rcParams[\"figure.figsize\"] = (10,5)\n", "_____no_output_____" ], [ "#Loading the dataset\n!pip install h5py\nimport h5py\n\nfrom google.colab import drive,files\ndrive.mount('/content/drive')\n\nhdf5_path = '/content/drive/My Drive/Dataset5C/Dataset5C.hdf5'\n\ndataset = h5py.File(hdf5_path, \"r\")", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pylab as plt\n\n#train\ntrain_img = dataset[\"train_img\"]\n\nxt = np.array(train_img)\nyt = np.array(dataset[\"train_labels\"])\n\n#test\ntestX = np.array(dataset[\"test_img\"])\ntestY = np.array(dataset[\"test_labels\"])\n\n#Validation\nxval = np.array(dataset[\"val_img\"])\nyval = np.array(dataset[\"val_labels\"])", "_____no_output_____" ], [ "print(\"Training Shape: \"+ str(xt.shape))\nprint(\"Validation Shape: \"+ str(xval.shape))\nprint(\"Testing Shape: \"+ str(testX.shape))", "Training Shape: (4690, 512, 512, 
3)\nValidation Shape: (1005, 512, 512, 3)\nTesting Shape: (1005, 512, 512, 3)\n" ], [ "#Categorical values or OneHot\nimport keras\n\nnum_classes = 5\n\nyt = keras.utils.to_categorical(yt,num_classes)\ntestY = keras.utils.to_categorical(testY,num_classes)\nyval = keras.utils.to_categorical(yval,num_classes)\n\n#Image\nnum_image = 15\n\nprint()\nprint('Healthy: [1 0 0 0 0]')\nprint('Pneumonia & Covid-19: [0 1 0 0 0]')\nprint('Cardiomegaly: [0 0 1 0 0]')\nprint('Other respiratory disease: [0 0 0 1 0]')\nprint('Pleural Effusion: [0 0 0 0 1]')\n\nprint()\nprint(\"Output: \"+ str(yt[num_image]))\n\nimagen = train_img[num_image]\nplt.imshow(imagen)\nplt.show()", "\nHealthy: [1 0 0 0 0]\nPneumonia & Covid-19: [0 1 0 0 0]\nCardiomegaly: [0 0 1 0 0]\nOther respiratory disease: [0 0 0 1 0]\nPleural Effusion: [0 0 0 0 1]\n\nOutput: [1. 0. 0. 0. 0.]\n" ], [ "## global params\nINIT_LR = 1e-5 # learning rate\nEPOCHS = 10 # training epochs\nBS = 4 # batch size", "_____no_output_____" ], [ "## build network\nfrom tensorflow.keras.models import load_model\n\n#Inputs\ninputs = Input(shape=(512, 512, 3), name='images')\n\ninputs2 = BatchNormalization()(inputs)\n\n#Inception Model\noutput1 = inception_v3.InceptionV3(include_top=False,weights= \"imagenet\", \n input_shape=(512, 512, 3),\n classes = 5)(inputs2)\n\n#AveragePooling2D\noutput = AveragePooling2D(pool_size=(2, 2), strides=None, \n padding='valid',name='AvgPooling')(output1)\n\n#Flattened\noutput = Flatten(name='Flatten')(output)\n\n#Dropout\noutput = Dropout(0.2,name='Dropout')(output)\n\n#ReLU layer\noutput = Dense(10, activation = 'relu',name='ReLU')(output)\n\n#Dense layer\noutput = Dense(5, activation='softmax',name='softmax')(output)\n\n# the actual model train)\nmodel = Model(inputs=inputs, outputs=output)\n\nprint(\"[INFO] compiling model...\")\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=opt,\n metrics=[\"accuracy\"])\n\nmodel.summary()", "[INFO] 
compiling model...\nModel: \"model\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nimages (InputLayer) [(None, 512, 512, 3)] 0 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 512, 512, 3) 12 \n_________________________________________________________________\ninception_v3 (Model) (None, 14, 14, 2048) 21802784 \n_________________________________________________________________\nAvgPooling (AveragePooling2D (None, 7, 7, 2048) 0 \n_________________________________________________________________\nFlatten (Flatten) (None, 100352) 0 \n_________________________________________________________________\nDropout (Dropout) (None, 100352) 0 \n_________________________________________________________________\nReLU (Dense) (None, 10) 1003530 \n_________________________________________________________________\nsoftmax (Dense) (None, 5) 55 \n=================================================================\nTotal params: 22,806,381\nTrainable params: 22,771,943\nNon-trainable params: 34,438\n_________________________________________________________________\n" ], [ "from tensorflow.keras.callbacks import ModelCheckpoint\n\nmodel_checkpoint = ModelCheckpoint(filepath=\"/content/drive/My Drive/Dataset5C/Model\",\n monitor='val_loss', save_best_only=True)\n\n## train\nprint(\"[INFO] training head...\")\nH = model.fit({'images': xt}, \n {'softmax': yt}, \n batch_size = BS,\n epochs = EPOCHS,\n validation_data=(xval, yval),\n callbacks=[model_checkpoint],\n shuffle=True)\n", "[INFO] training head...\nEpoch 1/10\n1173/1173 [==============================] - ETA: 0s - loss: 0.9260 - accuracy: 0.6079WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:1817: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with 
constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nINFO:tensorflow:Assets written to: /content/drive/My Drive/Dataset5C/Model/assets\n1173/1173 [==============================] - 603s 514ms/step - loss: 0.9260 - accuracy: 0.6079 - val_loss: 0.6811 - val_accuracy: 0.7274\nEpoch 2/10\n1173/1173 [==============================] - ETA: 0s - loss: 0.5126 - accuracy: 0.8156INFO:tensorflow:Assets written to: /content/drive/My Drive/Dataset5C/Model/assets\n1173/1173 [==============================] - 605s 516ms/step - loss: 0.5126 - accuracy: 0.8156 - val_loss: 0.4918 - val_accuracy: 0.8279\nEpoch 3/10\n1173/1173 [==============================] - ETA: 0s - loss: 0.3161 - accuracy: 0.8949INFO:tensorflow:Assets written to: /content/drive/My Drive/Dataset5C/Model/assets\n1173/1173 [==============================] - 604s 515ms/step - loss: 0.3161 - accuracy: 0.8949 - val_loss: 0.4802 - val_accuracy: 0.8249\nEpoch 4/10\n1173/1173 [==============================] - ETA: 0s - loss: 0.1936 - accuracy: 0.9424INFO:tensorflow:Assets written to: /content/drive/My Drive/Dataset5C/Model/assets\n1173/1173 [==============================] - 602s 513ms/step - loss: 0.1936 - accuracy: 0.9424 - val_loss: 0.4552 - val_accuracy: 0.8468\nEpoch 5/10\n1173/1173 [==============================] - ETA: 0s - loss: 0.1096 - accuracy: 0.9699INFO:tensorflow:Assets written to: /content/drive/My Drive/Dataset5C/Model/assets\n1173/1173 [==============================] - 599s 511ms/step - loss: 0.1096 - accuracy: 0.9699 - val_loss: 0.4519 - val_accuracy: 0.8438\nEpoch 6/10\n1173/1173 [==============================] - 538s 459ms/step - loss: 0.0631 - accuracy: 0.9840 - val_loss: 0.4964 - val_accuracy: 0.8507\nEpoch 7/10\n1173/1173 [==============================] - 538s 459ms/step - loss: 0.0403 - accuracy: 0.9921 - val_loss: 0.5453 - val_accuracy: 0.8517\nEpoch 8/10\n1173/1173 
[==============================] - 538s 458ms/step - loss: 0.0318 - accuracy: 0.9930 - val_loss: 0.5901 - val_accuracy: 0.8408\nEpoch 9/10\n1173/1173 [==============================] - 538s 459ms/step - loss: 0.0223 - accuracy: 0.9951 - val_loss: 0.6561 - val_accuracy: 0.8338\nEpoch 10/10\n1173/1173 [==============================] - 537s 458ms/step - loss: 0.0233 - accuracy: 0.9936 - val_loss: 0.6977 - val_accuracy: 0.8498\n" ], [ "#Load the best model trained\nmodel = load_model(\"/content/drive/My Drive/Dataset5C/Model\")", "_____no_output_____" ], [ "## eval\nprint(\"[INFO] evaluating network...\")\nprint()\nprint(\"Loss: \"+ str(round(model.evaluate(testX,testY,verbose=0)[0],2))+ \" Acc: \"+ str(round(model.evaluate(testX,testY,verbose=1)[1],2)))\nprint()\n\npredIdxs = model.predict(testX)\npredIdxs = np.argmax(predIdxs, axis=1) # argmax for the predicted probability\n#print(classification_report(testY.argmax(axis=1), predIdxs,target_names=lb.classes_))\n\ncm = confusion_matrix(testY.argmax(axis=1), predIdxs)\ntotal = sum(sum(cm))\n#print(total) #60\n\nacc = (cm[0, 0] + cm[1, 1] + cm[2, 2] + cm[3,3]+ cm[4,4]) / total\n\n#sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])\n#specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])\n# show the confusion matrix, accuracy, sensitivity, and specificity\nprint(cm)\nprint(\"acc: {:.4f}\".format(acc))\n#print(\"sensitivity: {:.4f}\".format(sensitivity))\n#print(\"specificity: {:.4f}\".format(specificity))\n", "[INFO] evaluating network...\n\n32/32 [==============================] - 23s 732ms/step - loss: 0.4641 - accuracy: 0.8408\nLoss: 0.46 Acc: 0.84\n\n[[190 6 0 0 0]\n [ 7 184 2 9 5]\n [ 0 0 175 9 2]\n [ 0 4 11 153 54]\n [ 0 6 3 42 143]]\nacc: 0.8408\n" ], [ "## explain\nN = EPOCHS\nplt.style.use(\"ggplot\")\nplt.figure(1)\nplt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, N), H.history[\"accuracy\"], 
label=\"train_acc\")\nplt.plot(np.arange(0, N), H.history[\"val_accuracy\"], label=\"val_acc\")\nplt.title(\"Precision of COVID-19 detection.\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend(loc=\"lower left\")\n#plt.axis([0, EPOCHS, 0.3, 0.9])\nplt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_cero_plot_Inception_2nd_time.png\")\nplt.show()", "_____no_output_____" ], [ "import cv2\n\nplt.figure(2)\nfor ind in range(1): \n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(testX[-ind], model.predict,\n hide_color=0, num_samples=42)\n print(\"> label:\", testY[ind].argmax(), \"- predicted:\", predIdxs[ind])\n \n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=True)\n mask = np.array(mark_boundaries(temp/2 +1, mask))\n #print(mask.shape)\n imagen = testX[ind]\n imagen[:,:,0] = imagen[:,:,2]\n imagen[:,:,1] = imagen[:,:,2]\n mask[:,:,0] = mask[:,:,2]\n mask[:,:,1] = mask[:,:,2]\n plt.imshow((mask +imagen)/255)\n plt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_pulmons_inception_Normal\"+str(ind)+\".png\")\n plt.show()", "_____no_output_____" ], [ "plt.figure(3)\nfor ind in range(1): \n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(testX[-ind], model.predict,\n hide_color=0, num_samples=42)\n print(\"> label:\", testY[ind].argmax(), \"- predicted:\", predIdxs[ind])\n \n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=True)\n mask = np.array(mark_boundaries(temp/2 +1, mask))\n #print(mask.shape)\n imagen = testX[ind]\n imagen[:,:,0] = imagen[:,:,2]\n imagen[:,:,1] = imagen[:,:,2]\n mask[:,:,0] = mask[:,:,2]\n mask[:,:,1] = mask[:,:,2]\n kernel = np.ones((50,50),np.uint8)\n mask = cv2.dilate(mask,kernel,iterations = 1)\n mask = cv2.blur(mask,(30,30))\n plt.imshow((mask +imagen)/255)\n 
plt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_pulmons_inception_Light\"+str(ind)+\".png\")\n plt.show()", "_____no_output_____" ], [ "plt.figure(4)\n\nfor ind in range(1): \n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(testX[-ind], model.predict,\n hide_color=0, num_samples=42)\n print(\"> label:\", testY[ind].argmax(), \"- predicted:\", predIdxs[ind])\n \n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0], positive_only=True, num_features=3, hide_rest=True)\n mask = np.array(mark_boundaries(temp/2 +1, mask))\n #print(mask.shape)\n imagen = testX[ind]\n imagen[:,:,0] = imagen[:,:,2]\n imagen[:,:,1] = imagen[:,:,2]\n mask[:,:,0] = mask[:,:,2]\n mask[:,:,1] = mask[:,:,2]\n kernel = np.ones((50,50),np.uint8)\n mask = cv2.dilate(mask,kernel,iterations = 1)\n mask = cv2.blur(mask,(30,30))\n mask = np.array(mask, dtype=np.uint8)\n mask = cv2.medianBlur(mask,5)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n mask = cv2.applyColorMap(mask, cv2.COLORMAP_HOT) #heatmap\n end = cv2.addWeighted((imagen/255), 0.7, mask/255, 0.3, 0)\n plt.imshow((end))\n plt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_pulmons_inception_Heat_map_purple\"+str(ind)+\".png\")\n plt.show()", "_____no_output_____" ], [ "plt.figure(4)\nfor ind in range(1): \n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(testX[-ind], model.predict,\n hide_color=0, num_samples=42)\n print(\"> label:\", testY[ind].argmax(), \"- predicted:\", predIdxs[ind])\n \n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0], positive_only=True, num_features=2, hide_rest=True)\n mask = np.array(mark_boundaries(temp/2 +1, mask))\n #print(mask.shape)\n imagen = testX[ind]\n imagen[:,:,0] = imagen[:,:,2]\n imagen[:,:,1] = imagen[:,:,2]\n mask[:,:,0] = mask[:,:,2]\n mask[:,:,1] = mask[:,:,2]\n kernel = np.ones((30,30),np.uint8)\n mask = cv2.dilate(mask,kernel,iterations = 2)\n mask 
= cv2.blur(mask,(30,30))\n mask = cv2.blur(mask,(30,30))\n mask = np.array(mask, dtype=np.uint8)\n mask = cv2.medianBlur(mask,5)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n mask2 = cv2.applyColorMap((mask), cv2.COLORMAP_JET) #heatmap\n\n mask = cv2.blur(mask,(60,60))\n mask = cv2.applyColorMap(mask, cv2.COLORMAP_HOT) #heatmap\n\n mask = ((mask*1.1 + mask2*0.7)/255)*(3/2)\n end = cv2.addWeighted(imagen/255, 0.8, mask2/255, 0.3, 0)\n #end = cv2.addWeighted(end, 0.8, mask/255, 0.2, 0)\n plt.imshow((end))\n cv2.imwrite(\"/content/drive/My Drive/Maps/Heat_map\"+str(ind)+\".png\",end*255)\n\n plt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_pulmons_inception_Heat_map\"+str(ind)+\".png\")\n plt.show()", "_____no_output_____" ], [ "plt.figure(5)\nfor ind in range(1): \n explainer = lime_image.LimeImageExplainer()\n explanation = explainer.explain_instance(testX[-ind], model.predict,\n hide_color=0, num_samples=42)\n print(\"> label:\", testY[ind].argmax(), \"- predicted:\", predIdxs[ind])\n \n temp, mask = explanation.get_image_and_mask(\n explanation.top_labels[0], positive_only=True, num_features=1, hide_rest=True)\n mask = np.array(mark_boundaries(temp/2 +1, mask))\n #print(mask.shape)\n imagen = testX[ind]\n imagen[:,:,0] = imagen[:,:,2]\n imagen[:,:,1] = imagen[:,:,2]\n mask[:,:,0] = mask[:,:,2]\n mask[:,:,1] = mask[:,:,2]\n kernel = np.ones((30,30),np.uint8)\n mask = cv2.dilate(mask,kernel,iterations = 2)\n mask = cv2.blur(mask,(30,30))\n mask = cv2.blur(mask,(30,30))\n mask = np.array(mask, dtype=np.uint8)\n mask = cv2.medianBlur(mask,5)\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n mask2 = cv2.applyColorMap((mask), cv2.COLORMAP_JET) #heatmap\n\n mask = cv2.blur(mask,(60,60))\n mask = cv2.applyColorMap(mask, cv2.COLORMAP_HOT) #heatmap\n\n mask = ((mask*1.1 + mask2*0.7)/255)*(3/2)\n end = cv2.addWeighted(imagen/255, 0.8, mask2/255, 0.3, 0)\n #end = cv2.addWeighted(end, 0.8, mask/255, 0.2, 0)\n deep = 
np.reshape(end,newshape=(512,512,3),order='C')\n CHANNEL1=deep[:,:,2]\n CHANNEL2=deep[:,:,0]\n deep[:,:,0] = CHANNEL1\n #deep[:,:,2] = CHANNEL2\n plt.imshow((deep))\n plt.savefig(\"/content/drive/My Drive/Dataset5C/Model/trained_pulmons_inception_Heat_map_ma\"+str(ind)+\".png\")\n plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb21097d0d1fefece4dae50f4f845759a1499c03
4,464
ipynb
Jupyter Notebook
workshop.ipynb
YangfanPeng/git-tutorial
8df6e1dfaaa9b3a92267378c1b4f9d5ebcd147ee
[ "MIT" ]
null
null
null
workshop.ipynb
YangfanPeng/git-tutorial
8df6e1dfaaa9b3a92267378c1b4f9d5ebcd147ee
[ "MIT" ]
null
null
null
workshop.ipynb
YangfanPeng/git-tutorial
8df6e1dfaaa9b3a92267378c1b4f9d5ebcd147ee
[ "MIT" ]
null
null
null
24.26087
346
0.537186
[ [ [ "# File Versioning for Code with Git and GitHub\n\nWelcome to the workshop on \"File Versioning for Code with Git and GitHub\"! If you have managed to open this jupyter notebook inside your browser you have already learned how to: install and set up python, jupyter, git and gitkraken. Furthermore you have forked and cloned a repository and started jupyter successfully! Congratulations!\n\nUsing this jupyter notebook we'll implement some scripts that manipulate .csv files and learn the git basics.\n\nHave fun :)!", "_____no_output_____" ] ], [ [ "import csv\n\ndef readCsvData(path):\n data = []\n with open(path, newline='') as csvFile:\n csvReader = csv.reader(csvFile, delimiter=';', quotechar='|')\n for line in csvReader:\n data.append(line)\n return data\n \ndef writeCsvData(path, data):\n with open(path, 'w', newline='') as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n for line in data:\n csvWriter.writerow(line)", "_____no_output_____" ], [ "csvData = readCsvData('raw-data2.csv');\nprint(csvData)", "_____no_output_____" ], [ "def sum(a, b):\n return a + b\n\ndef sumLine(line):\n firstValueOfLine = int(line[0])\n secondValueOfLine = int(line[1])\n return sum(firstValueOfLine, secondValueOfLine)", "_____no_output_____" ], [ "import copy\n\ncsvDataWithSums = copy.deepcopy(csvData)\n\nfor line in csvDataWithSums:\n sumOfLine = sumLine(line)\n line.append(sumOfLine)\n \nprint(csvDataWithSums)", "_____no_output_____" ], [ "def prod(a, b):\n return a * b\n\ndef prodLine(line):\n firstValueOfLine = int(line[0])\n secondValueOfLine = int(line[1])\n return prod(firstValueOfLine, secondValueOfLine)", "_____no_output_____" ], [ "import copy\n\ncsvDataWithSums = copy.deepcopy(csvData)\n\nfor line in csvDataWithSums:\n prodOfLine = prodLine(line)\n line.append(prodOfLine)\n \nprint(csvDataWithSums)", "_____no_output_____" ], [ "def div(a, b):\n return a / b\n\ndef divLine(line):\n firstValueOfLine = int(line[0])\n 
secondValueOfLine = int(line[1])\n return div(firstValueOfLine, secondValueOfLine)", "_____no_output_____" ], [ "import copy\n\ncsvDataWithSums = copy.deepcopy(csvData)\n\nfor line in csvDataWithSums:\n divOfLine = divLine(line)\n line.append(divOfLine)\n \nprint(csvDataWithSums)", "_____no_output_____" ], [ "writeCsvData('result-data.csv', csvDataWithSums)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb2117da0b3b2579de6866037d22f0740ce8c0b3
104,575
ipynb
Jupyter Notebook
research/models_selection.ipynb
qangelot/projet_Nantes
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
[ "MIT" ]
null
null
null
research/models_selection.ipynb
qangelot/projet_Nantes
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
[ "MIT" ]
null
null
null
research/models_selection.ipynb
qangelot/projet_Nantes
6cd63aec0acc5de77683832dd20f66ec3aa9e3eb
[ "MIT" ]
null
null
null
66.650733
692
0.636691
[ [ [ "# Models selection\n\nMaintenant que nous avons créé des features grâce à l'étude du domaine métier et à l'EDA, et que nous les avons sélectionnées grâce à Boruta, nous pouvons passer à la phase de sélection du ou des modèles les plus adaptées à notre dataset.\n\nL'EDA soulève quelques interrogations à ce sujet mais le meilleur moyen reste de tester une variété de modèles avec quelques combinaisons de leurs hyperparamètres.\n\nUn mot sur les hyperparamètres : pour le learning rate ou la régularisation, ajouter 0.1 à 0.01 à un effet important sur le comportement du modèle, en revanche ajouter 0.1 à 10 n'a pratiquement aucune conséquence. Ainsi, pour le learning rate, nous devrions privilégier une distribution logarithmique du paramètre.\n", "_____no_output_____" ] ], [ [ "from model_selector.run_regressors import run_linear_models, run_svm_models, run_neighbor_models, run_gaussian_models, run_nn_models, run_tree_models\nfrom sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone\nfrom sklearn.model_selection import KFold, cross_val_score, train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport pickle\nimport xgboost as xgb\nimport seaborn as sns\nfrom typing import List\nimport scipy.stats as stats\nimport pandas as pd\nimport sqlite3 as sql\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set_theme(style=\"darkgrid\")\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)", "_____no_output_____" ], [ "train = pd.read_csv('../data/train.csv', index_col='date')\ntest = pd.read_csv('../data/test.csv', index_col='date')", "_____no_output_____" ], [ "y_train = train.reel\nX_train = train.drop(['reel'], axis=1)\ny_test = test.reel\nX_test = test.drop(['reel'], axis=1)", "_____no_output_____" ], [ "X_test.head()", "_____no_output_____" ] ], [ [ "## 1. 
Les modèles linéaires", "_____no_output_____" ] ], [ [ "# run_linear_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/linear.png)", "_____no_output_____" ], [ "## 2. Les modèles support vector machines (SVM)\n\nDe la documentation de Scikit-learn : \n\nLa complexité du temps d'ajustement du modèle SVR est plus que quadratique avec le nombre d'échantillons, ce qui rend difficile l'adaptation à des ensembles de données de plus de 10 000 échantillons. Pour les grands ensembles de données, envisagez d'utiliser LinearSVR ou SGDRegressor à la place, éventuellement après un transformateur Nystroem.\n\nIci, vu la taille de notre dataset, nous utilisons le modèle LinearSVR uniquement.", "_____no_output_____" ] ], [ [ "# run_svm_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/svm.png)\n\nLinearSVR n'apporte pas d'amélioration par rapport à LassoLars", "_____no_output_____" ], [ "## 3. Les modèles basés sur les distances", "_____no_output_____" ] ], [ [ "# run_neighbor_models(X_train, y_train, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/knn.png)\n\nKNN est, dans le cadre de ce jeu de données, relativement mauvais par rapport aux modèles précédement testés.", "_____no_output_____" ], [ "## 4. Les modèles gaussiens", "_____no_output_____" ] ], [ [ "# run_gaussian_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/gaussian.png)\n\nTout comme SVM avant, ce modèle nécessite une grande quantité de RAM car son implémentation dans Scikit-learn demande de calculer une matrice de covarriance sur l'ensemble du train set. Au vu de notre large dataset, cela n'est pas réalisable ici. Tout comme SVM, nous pourrions probablement tenter une approche d'entraînement par batch.", "_____no_output_____" ], [ "## 5. 
Les réseaux de neuronnes", "_____no_output_____" ] ], [ [ "# run_nn_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/mlp.png)\n\nMLPRegressor obtient des résultats solides dans la veine des modèles linéaires.", "_____no_output_____" ], [ "## 6. Les modèles à base d'arbres de décision", "_____no_output_____" ] ], [ [ "# run_tree_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/tree.png)\n\nDe façon assez suprenante un simple arbre de décision obtient le meilleur score jusqu'à présent. Une bonne idée est de continuer dans la direction de modèles plus perfectionnés à base d'arbres. ", "_____no_output_____" ], [ "## 7. Les modèles ensemblistes (bagging)", "_____no_output_____" ] ], [ [ "run_ensemble_models(X_train, y_train, small = True, normalize_x = False)", "_____no_output_____" ] ], [ [ "![title](img/bagging.png)\n\n<br>\n\nLa tendance est confirmée par ce run, il semblerait que les modèles à base d'arbres soient de bon candidats sur ce jeu de données. Voyons maintenant la technique du boosting.", "_____no_output_____" ], [ "## 8. 
Les modèles ensemblistes (boosting) : XGBoost et LGBM\n\nXgboost et LGBM ne sont pas encore implémentés dans mon package de sélection de modèle, on opère donc une randomized search manuelle.", "_____no_output_____" ] ], [ [ "from xgboost import XGBRegressor\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import TimeSeriesSplit, cross_val_score\nimport time\n\n# n_estimators doit être tune plus tard \n# car les paramètres échantillonnés avec des n_estimators plus élevés\n# obtiendront un avantage injuste (default=100)\nparams = {\n 'n_estimators': [100, 150, 250, 500],\n 'min_child_weight': [2, 4, 6],\n 'gamma': [i/10.0 for i in range(2, 6)],\n 'max_depth': [3, 5, 8],\n 'learning_rate': [0.01, 0.05, 0.1, 0.2]\n}\n\nreg = XGBRegressor(seed=42)\n\nn_iter_search = 100\ncv_ts = TimeSeriesSplit(n_splits=3)\nrandom_search = RandomizedSearchCV(reg, param_distributions=params, verbose = 2,\n n_iter=n_iter_search, cv=cv_ts, scoring='neg_mean_squared_error', random_state=42)\n\nstart = time.time()\nrandom_search.fit(X_train, y_train)\nprint(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n \" parameter settings.\" % ((time.time() - start), n_iter_search))", "Fitting 3 folds for each of 100 candidates, totalling 300 fits\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 1.2s\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 2.6s\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 3.5s\n[CV] END gamma=0.2, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=250; total time= 0.8s\n[CV] END gamma=0.2, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=250; total time= 1.5s\n[CV] END gamma=0.2, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=250; total time= 2.1s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=5, min_child_weight=6, 
n_estimators=250; total time= 1.6s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=5, min_child_weight=6, n_estimators=250; total time= 2.4s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=5, min_child_weight=6, n_estimators=250; total time= 4.2s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=8, min_child_weight=4, n_estimators=250; total time= 2.7s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=8, min_child_weight=4, n_estimators=250; total time= 5.5s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=8, min_child_weight=4, n_estimators=250; total time= 7.4s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=250; total time= 0.8s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=250; total time= 1.7s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=250; total time= 2.5s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=100; total time= 0.5s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=100; total time= 0.7s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=100; total time= 0.9s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=500; total time= 1.8s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=500; total time= 3.2s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=3, min_child_weight=4, n_estimators=500; total time= 4.5s\n[CV] END gamma=0.2, learning_rate=0.01, max_depth=8, min_child_weight=4, n_estimators=150; total time= 1.4s\n[CV] END gamma=0.2, learning_rate=0.01, max_depth=8, min_child_weight=4, n_estimators=150; total time= 2.6s\n[CV] END gamma=0.2, learning_rate=0.01, max_depth=8, min_child_weight=4, n_estimators=150; total time= 4.1s\n[CV] END gamma=0.2, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 0.8s\n[CV] END 
gamma=0.2, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 1.7s\n[CV] END gamma=0.2, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 2.6s\n[CV] END gamma=0.4, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 0.6s\n[CV] END gamma=0.4, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 1.1s\n[CV] END gamma=0.4, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 1.9s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=3, min_child_weight=2, n_estimators=250; total time= 1.0s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=3, min_child_weight=2, n_estimators=250; total time= 1.7s\n[CV] END gamma=0.5, learning_rate=0.05, max_depth=3, min_child_weight=2, n_estimators=250; total time= 2.5s\n[CV] END gamma=0.5, learning_rate=0.01, max_depth=5, min_child_weight=2, n_estimators=500; total time= 3.2s\n[CV] END gamma=0.5, learning_rate=0.01, max_depth=5, min_child_weight=2, n_estimators=500; total time= 5.9s\n[CV] END gamma=0.5, learning_rate=0.01, max_depth=5, min_child_weight=2, n_estimators=500; total time= 8.6s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=8, min_child_weight=4, n_estimators=150; total time= 1.5s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=8, min_child_weight=4, n_estimators=150; total time= 3.3s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=8, min_child_weight=4, n_estimators=150; total time= 4.6s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=8, min_child_weight=6, n_estimators=100; total time= 1.3s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=8, min_child_weight=6, n_estimators=100; total time= 2.8s\n[CV] END gamma=0.4, learning_rate=0.05, max_depth=8, min_child_weight=6, n_estimators=100; total time= 3.3s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=8, min_child_weight=4, n_estimators=250; total time= 2.6s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=8, 
min_child_weight=4, n_estimators=250; total time= 5.2s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=8, min_child_weight=4, n_estimators=250; total time= 7.1s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=5, min_child_weight=6, n_estimators=500; total time= 3.3s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=5, min_child_weight=6, n_estimators=500; total time= 6.1s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=5, min_child_weight=6, n_estimators=500; total time= 8.8s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=5, min_child_weight=2, n_estimators=150; total time= 1.0s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=5, min_child_weight=2, n_estimators=150; total time= 1.8s\n[CV] END gamma=0.3, learning_rate=0.2, max_depth=5, min_child_weight=2, n_estimators=150; total time= 2.5s\n[CV] END gamma=0.2, learning_rate=0.05, max_depth=5, min_child_weight=4, n_estimators=500; total time= 3.1s\n[CV] END gamma=0.2, learning_rate=0.05, max_depth=5, min_child_weight=4, n_estimators=500; total time= 6.3s\n[CV] END gamma=0.2, learning_rate=0.05, max_depth=5, min_child_weight=4, n_estimators=500; total time= 9.2s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 0.6s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 1.5s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=5, min_child_weight=6, n_estimators=100; total time= 1.7s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=3, min_child_weight=6, n_estimators=150; total time= 0.5s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=3, min_child_weight=6, n_estimators=150; total time= 1.0s\n[CV] END gamma=0.3, learning_rate=0.01, max_depth=3, min_child_weight=6, n_estimators=150; total time= 1.4s\n[CV] END gamma=0.5, learning_rate=0.2, max_depth=8, min_child_weight=2, n_estimators=150; total time= 1.4s\n[CV] END gamma=0.5, learning_rate=0.2, max_depth=8, min_child_weight=2, n_estimators=150; total 
time= 3.0s\n[CV] END gamma=0.5, learning_rate=0.2, max_depth=8, min_child_weight=2, n_estimators=150; total time= 4.4s\n[CV] END gamma=0.2, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 1.6s\n[CV] END gamma=0.2, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 2.7s\n[CV] END gamma=0.2, learning_rate=0.1, max_depth=5, min_child_weight=4, n_estimators=250; total time= 4.2s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=150; total time= 0.6s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=150; total time= 1.0s\n[CV] END gamma=0.4, learning_rate=0.2, max_depth=3, min_child_weight=6, n_estimators=150; total time= 1.6s\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=2, n_estimators=100; total time= 0.8s\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=2, n_estimators=100; total time= 1.3s\n[CV] END gamma=0.3, learning_rate=0.1, max_depth=5, min_child_weight=2, n_estimators=100; total time= 1.9s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 0.8s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 1.8s\n[CV] END gamma=0.5, learning_rate=0.1, max_depth=3, min_child_weight=6, n_estimators=250; total time= 2.6s\n" ], [ "best_xgbr = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1.0, gamma=0.5, gpu_id=-1,\n importance_type='gain', interaction_constraints='',\n learning_rate=0.1, max_delta_step=0, max_depth=3,\n min_child_weight=4, monotone_constraints='()',\n n_estimators=95, n_jobs=8, num_parallel_tree=1, random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1,\n tree_method='exact', validate_parameters=1, verbosity=None)\n\nbest_xgbr.fit(X_train, y_train)", "_____no_output_____" ], [ "from 
sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error\n\ndef print_metrics(y_true, y_predicted):\n\n print('Root Mean Square Error = ' +\n str(np.sqrt(mean_squared_error(y_true, y_predicted))))\n print('Mean Absolute Error = ' +\n str(mean_absolute_error(y_true, y_predicted)))\n print('Median Absolute Error = ' +\n str(median_absolute_error(y_true, y_predicted))) \n\ny_pred = best_xgbr.predict(X_test)\nprint_metrics(y_test**2, y_pred**2)", "Root Mean Square Error = 22.122820724498332\nMean Absolute Error = 16.154971986274795\nMedian Absolute Error = 12.76448690834031\n" ], [ "pickle.dump(best_xgbr, open('models/best_xgbr.sav', 'wb'))", "_____no_output_____" ], [ "import lightgbm as lgb\n\n# n_estimators doit être tune plus tard\n# car les paramètres échantillonnés avec des n_estimators plus élevés\n# obtiendront un avantage injuste (default=100)\nparams = {\n 'n_estimators': [100, 150, 250, 500],\n 'num_leaves': [8, 32, 64, 128, 256],\n 'max_depth': [3, 5, 8],\n 'boosting_type': ['gbdt', 'dart', 'goss'],\n 'learning_rate': [0.01, 0.05, 0.1, 0.2]}\n \nlgbr = lgb.LGBMRegressor(seed=42)\n\nn_iter_search = 100\nrandom_search = RandomizedSearchCV(lgbr, param_distributions=params, verbose=2,\n n_iter=n_iter_search, cv=cv_ts, scoring='neg_mean_squared_error', random_state=42)\n\nstart = time.time()\nrandom_search.fit(X_train, y_train)\nprint(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n \" parameter settings.\" % ((time.time() - start), n_iter_search))", "Fitting 3 folds for each of 100 candidates, totalling 300 fits\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=8; total time= 0.2s\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=8; total time= 0.3s\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=8; total time= 0.4s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, 
num_leaves=8; total time= 0.6s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=8; total time= 1.3s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=8; total time= 1.7s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=256; total time= 1.6s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=256; total time= 2.0s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=256; total time= 2.4s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=3, n_estimators=500, num_leaves=128; total time= 0.4s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=3, n_estimators=500, num_leaves=128; total time= 0.5s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=3, n_estimators=500, num_leaves=128; total time= 0.7s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 1.0s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 1.8s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 2.8s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=150, num_leaves=8; total time= 0.3s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=150, num_leaves=8; total time= 0.2s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=150, num_leaves=8; total time= 0.5s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=100, num_leaves=128; total time= 0.1s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=100, num_leaves=128; total time= 0.3s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=3, n_estimators=100, num_leaves=128; total time= 0.2s\n[CV] END boosting_type=gbdt, 
learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=8; total time= 0.1s\n[CV] END boosting_type=gbdt, learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=8; total time= 0.2s\n[CV] END boosting_type=gbdt, learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=8; total time= 0.3s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=64; total time= 1.5s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=64; total time= 2.9s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=8, n_estimators=250, num_leaves=64; total time= 3.7s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=5, n_estimators=250, num_leaves=8; total time= 0.6s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=5, n_estimators=250, num_leaves=8; total time= 1.1s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=5, n_estimators=250, num_leaves=8; total time= 1.5s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=8, n_estimators=150, num_leaves=32; total time= 0.3s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=8, n_estimators=150, num_leaves=32; total time= 0.4s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=8, n_estimators=150, num_leaves=32; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=128; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=128; total time= 0.8s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=128; total time= 1.1s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=500, num_leaves=256; total time= 0.8s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=500, num_leaves=256; total time= 1.2s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=500, num_leaves=256; total time= 1.6s\n[CV] 
END boosting_type=gbdt, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=32; total time= 0.2s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=32; total time= 0.3s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=8, n_estimators=100, num_leaves=32; total time= 0.4s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=3, n_estimators=250, num_leaves=8; total time= 0.6s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=3, n_estimators=250, num_leaves=8; total time= 1.0s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=3, n_estimators=250, num_leaves=8; total time= 1.5s\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=128; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=128; total time= 1.3s\n[CV] END boosting_type=dart, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=128; total time= 1.2s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.3s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.4s\n[CV] END boosting_type=gbdt, learning_rate=0.05, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.4s\n[CV] END boosting_type=goss, learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=64; total time= 0.3s\n[CV] END boosting_type=goss, learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=64; total time= 0.4s\n[CV] END boosting_type=goss, learning_rate=0.1, max_depth=5, n_estimators=150, num_leaves=64; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=3, n_estimators=500, num_leaves=256; total time= 1.7s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=3, n_estimators=500, num_leaves=256; total time= 3.4s\n[CV] END boosting_type=dart, learning_rate=0.01, max_depth=3, n_estimators=500, 
num_leaves=256; total time= 4.8s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.3s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.5s\n[CV] END boosting_type=gbdt, learning_rate=0.2, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.6s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.8s\n[CV] END boosting_type=dart, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 1.1s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=3, n_estimators=150, num_leaves=64; total time= 0.3s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=3, n_estimators=150, num_leaves=64; total time= 0.5s\n[CV] END boosting_type=dart, learning_rate=0.1, max_depth=3, n_estimators=150, num_leaves=64; total time= 0.7s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.5s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.7s\n[CV] END boosting_type=gbdt, learning_rate=0.01, max_depth=5, n_estimators=250, num_leaves=128; total time= 0.8s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.3s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.4s\n[CV] END boosting_type=goss, learning_rate=0.2, max_depth=5, n_estimators=150, num_leaves=32; total time= 0.5s\n" ], [ "best_lgbr = lgb.LGBMRegressor(max_depth=5, n_estimators=150, num_leaves=8)\nbest_lgbr.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred = best_lgbr.predict(X_test)\nprint_metrics(y_test**2, y_pred**2)", "Root Mean Square Error = 22.094410468949704\nMean 
Absolute Error = 15.99914128977054\nMedian Absolute Error = 12.415289455391928\n" ], [ "pickle.dump(best_lgbr, open('models/best_lgbr.sav', 'wb'))", "_____no_output_____" ] ], [ [ "On observe des résulats très solides de la part de XGBoost et LGBM.", "_____no_output_____" ], [ "## Conclusion\n\nMon package de sélection de modèle a rendu ses conclusions. Encore une fois, celui-ci n'est pas omniscient et est encore très perfectible. En effet, il est tout simplement impossible de créer un package qui balaye de façon exhaustive tous les espaces d'hyperparamètres et ce pour chaque jeu de données. Cependant, c'est un outil riche d'enseignement quant au type de modèle à utiliser pour tel ou tel dataset. Ici très clairement les modèles à base d'arbres sont les grands gagnants et cela n'est pas si surprenant car on l'a constaté lors de l'EDA, les relations entre les prédicteurs et la variable indépendantes ne sont la plupart du temps pas linéaires, mais bien plus complexes.\n\nAfin d'aller au bout des choses, il serait intéressant de retenir les principaux modèles sélectionnés et des les tuner avec une autre technique que Gridsearch : l'optimisation Bayésienne. De plus, il serait utile de tenter des méthodes d'agrégation de modèles afin de voir si l'on peut encore progresser en termes de performance.\n\nLes modèles avec lesquels je souhaite poursuivre l'exploration :\n- ElasticNet et LassoLars\n- Multilayer perceptron\n- Random Forest\n- XGBoost et LightGBM\n\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
cb2128f9d6a098c3fb508f64b6dcb7e419ca0a84
659,970
ipynb
Jupyter Notebook
module4-decision-trees/decision-trees.ipynb
standroidbeta/DS-Unit-2-Sprint-2-Regression
fe427477ae1a49ffd239a2d37fcd2b32d924557c
[ "MIT" ]
null
null
null
module4-decision-trees/decision-trees.ipynb
standroidbeta/DS-Unit-2-Sprint-2-Regression
fe427477ae1a49ffd239a2d37fcd2b32d924557c
[ "MIT" ]
null
null
null
module4-decision-trees/decision-trees.ipynb
standroidbeta/DS-Unit-2-Sprint-2-Regression
fe427477ae1a49ffd239a2d37fcd2b32d924557c
[ "MIT" ]
null
null
null
127.407336
140,875
0.804009
[ [ [ "_Lambda School Data Science — Tree Ensembles_ \n\n# Decision Trees\n\n### Links\n- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)\n- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)\n- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)\n- [How a Russian mathematician constructed a decision tree - by hand - to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)\n- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._", "_____no_output_____" ], [ "### Libraries to install\n\n#### graphviz (to visualize trees)\nAnaconda: \n```conda install python-graphviz```\n\nGoogle Colab: \n```!pip install graphviz\n!apt-get install graphviz\n```\n\n#### ipywidgets (optional, for interactive widgets)\nAnaconda: Already installed\nGoogle Colab: [Doesn't work](https://github.com/googlecolab/colabtools/issues/60#issuecomment-462529981)\n\n#### mlxtend (to plot decision regions)\n[mlxtend.plotting.plot_decision_regions](http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/): `pip install mlxtend`", "_____no_output_____" ], [ "### Imports and helper functions", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport graphviz\nfrom IPython.display import display\nfrom ipywidgets import interact\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import 
LinearRegression\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor, export_graphviz\n\n\ndef viztree(decision_tree, feature_names):\n \"\"\"Visualize a decision tree\"\"\"\n dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, \n filled=True, rounded=True) \n return graphviz.Source(dot_data)\n\n\ndef viz3D(fitted_model, df, feature1, feature2, target='', num=100):\n \"\"\"\n Visualize model predictions in 3D, for regression or binary classification\n \n Parameters\n ----------\n fitted_model : scikit-learn model, already fitted\n df : pandas dataframe, which was used to fit model\n feature1 : string, name of feature 1\n feature2 : string, name of feature 2\n target : string, name of target\n num : int, number of grid points for each feature\n \n References\n ----------\n https://jakevdp.github.io/PythonDataScienceHandbook/04.12-three-dimensional-plotting.html\n https://scikit-learn.org/stable/auto_examples/tree/plot_iris.html \n \"\"\"\n x1 = np.linspace(df[feature1].min(), df[feature1].max(), num)\n x2 = np.linspace(df[feature2].min(), df[feature2].max(), num)\n X1, X2 = np.meshgrid(x1, x2)\n X = np.c_[X1.flatten(), X2.flatten()]\n if hasattr(fitted_model, 'predict_proba'):\n predicted = fitted_model.predict_proba(X)[:,1]\n else:\n predicted = fitted_model.predict(X)\n Z = predicted.reshape(num, num)\n \n fig = plt.figure()\n ax = plt.axes(projection='3d')\n ax.plot_surface(X1, X2, Z, cmap='viridis')\n ax.set_xlabel(feature1)\n ax.set_ylabel(feature2)\n ax.set_zlabel(target)\n return fig", "_____no_output_____" ] ], [ [ "# Golf Putts (1 feature, non-linear)\n\nhttps://statmodeling.stat.columbia.edu/2008/12/04/the_golf_puttin/", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\ncolumns = ['distance', 'tries', 'successes']\ndata = [[2, 1443, 1346],\n [3, 694, 577],\n [4, 455, 337],\n 
[5, 353, 208],\n [6, 272, 149],\n [7, 256, 136],\n [8, 240, 111],\n [9, 217, 69],\n [10, 200, 67],\n [11, 237, 75],\n [12, 202, 52],\n [13, 192, 46],\n [14, 174, 54],\n [15, 167, 28],\n [16, 201, 27],\n [17, 195, 31],\n [18, 191, 33],\n [19, 147, 20],\n [20, 152, 24]]\n\nputts = pd.DataFrame(columns=columns, data=data)\nputts['rate of success'] = putts['successes'] / putts['tries']\nputts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts');", "_____no_output_____" ] ], [ [ "### OLS Regression", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\nputts_X = putts[['distance']]\nputts_y = putts['rate of success']\nlr = LinearRegression()\nlr.fit(putts_X, putts_y)\nprint('R^2 Score', lr.score(putts_X, putts_y))\nax = putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts')\nax.plot(putts_X, lr.predict(putts_X));", "R^2 Score 0.8695850610243295\n" ] ], [ [ "### Decision Tree", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndef viztree(decision_tree, feature_names):\n dot_data = export_graphviz(decision_tree, out_file=None, feature_names=feature_names, \n filled=True, rounded=True) \n return graphviz.Source(dot_data)\n\ndef putts_tree(max_depth=1):\n tree = DecisionTreeRegressor(max_depth=max_depth)\n tree.fit(putts_X, putts_y)\n print('R^2 Score', tree.score(putts_X, putts_y))\n ax = putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts')\n ax.step(putts_X, tree.predict(putts_X), where='mid')\n plt.show()\n display(viztree(tree, feature_names=['distance']))\n\ninteract(putts_tree, max_depth=(1,6,1));", "_____no_output_____" ], [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\npredictions = []\nfor distance in putts['distance']:\n samples = putts.copy()\n if distance <= 8.5:\n samples = samples.query('distance <= 8.5')\n if distance <= 4.5:\n samples = samples.query('distance <= 4.5')\n else:\n samples = samples.query('distance 
> 4.5')\n else:\n samples = samples.query('distance > 8.5')\n if distance <= 14.5:\n samples = samples.query('distance <= 14.5')\n else:\n samples = samples.query('distance > 14.5')\n prediction = samples['rate of success'].mean()\n predictions.append(prediction)\n \nprint('R^2 Score', r2_score(putts_y, predictions))\nax = putts.plot('distance', 'rate of success', kind='scatter', title='Golf Putts')\nax.step(putts_X, predictions, where='mid');", "R^2 Score 0.9676848646593703\n" ] ], [ [ "# Wave (1 feature, non-monotonic, train/test split)", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport matplotlib.pyplot as plt\n\n# Based on http://scikit-learn.org/stable/auto_examples/tree/plot_tree_regression.html\ndef make_data():\n import numpy as np\n rng = np.random.RandomState(1)\n X = np.sort(5 * rng.rand(80, 1), axis=0)\n y = np.sin(X).ravel()\n y[::5] += 2 * (0.5 - rng.rand(16))\n return X, y\n\nwave_X, wave_y = make_data()\nwave_X_train, wave_X_test, wave_y_train, wave_y_test = train_test_split(\n wave_X, wave_y, test_size=0.25, random_state=42)\n\ndef regress_wave(max_depth=1):\n tree = DecisionTreeRegressor(max_depth=max_depth)\n tree.fit(wave_X_train, wave_y_train)\n print('Train R^2 score:', tree.score(wave_X_train, wave_y_train))\n print('Test R^2 score:', tree.score(wave_X_test, wave_y_test))\n plt.scatter(wave_X_train, wave_y_train)\n plt.scatter(wave_X_test, wave_y_test)\n plt.step(wave_X, tree.predict(wave_X), where='mid')\n plt.show()\n \ninteract(regress_wave, max_depth=(1,8,1));", "_____no_output_____" ] ], [ [ "# Simple housing (2 features)\n\nhttps://christophm.github.io/interpretable-ml-book/interaction.html#feature-interaction", "_____no_output_____" ] ], [ [ "columns = ['Price', 'Good Location', 'Big Size']\n\ndata = [[300000, 1, 1], \n [200000, 1, 0], \n [250000, 0, 1], \n [150000, 0, 0]]\n\nhouse = pd.DataFrame(columns=columns, data=data)\nhouse", "_____no_output_____" ] ], [ [ "### OLS Regression", "_____no_output_____" ] ], [ [ "house_X = 
house.drop(columns='Price')\nhouse_y = house['Price']\nlr = LinearRegression()\nlr.fit(house_X, house_y)\nprint('R^2', lr.score(house_X, house_y))\nprint('Intercept \\t', lr.intercept_)\ncoefficients = pd.Series(lr.coef_, house_X.columns)\nprint(coefficients.to_string())", "R^2 1.0\nIntercept \t 150000.0\nGood Location 50000.0\nBig Size 100000.0\n" ], [ "%matplotlib notebook\n%matplotlib notebook\n%matplotlib notebook\nimport matplotlib.pyplot as plt\nviz3D(lr, house, feature1='Good Location', feature2='Big Size', target='Price');", "_____no_output_____" ] ], [ [ "### Decision Tree", "_____no_output_____" ] ], [ [ "tree = DecisionTreeRegressor()\ntree.fit(house_X, house_y)\nprint('R^2', tree.score(house_X, house_y))", "R^2 1.0\n" ], [ "%matplotlib notebook\nimport matplotlib.pyplot as plt\nviz3D(tree, house, feature1='Good Location', feature2='Big Size', target='Price');", "_____no_output_____" ], [ "plt.figure()\ntable = house.pivot_table('Price', 'Good Location', 'Big Size')\nsns.heatmap(table, annot=True, fmt='d', cmap='viridis');", "_____no_output_____" ] ], [ [ "# Simple housing, with a twist (feature interactions, 2 features)", "_____no_output_____" ] ], [ [ "house.loc[0, 'Price'] = 400000\nhouse_X = house.drop(columns='Price')\nhouse_y = house['Price']\nhouse", "_____no_output_____" ] ], [ [ "### OLS Regression, without engineering an interaction term", "_____no_output_____" ] ], [ [ "lr = LinearRegression()\nlr.fit(house_X, house_y)\nprint('R^2', lr.score(house_X, house_y))\nprint('Intercept \\t', lr.intercept_)\ncoefficients = pd.Series(lr.coef_, house_X.columns)\nprint(coefficients.to_string())", "R^2 0.9285714285714286\nIntercept \t 125000.00000000003\nGood Location 100000.0\nBig Size 150000.0\n" ] ], [ [ "### Decision Tree, without engineering an interaction term", "_____no_output_____" ] ], [ [ "tree = DecisionTreeRegressor()\ntree.fit(house_X, house_y)\nprint('R^2', tree.score(house_X, house_y))", "R^2 1.0\n" ], [ "viztree(tree, 
feature_names=house_X.columns)", "_____no_output_____" ] ], [ [ "### OLS Regression, with engineered interaction term", "_____no_output_____" ] ], [ [ "house['Good Location * Big Size'] = house['Good Location'] * house['Big Size']\nhouse_X = house.drop(columns='Price')\nhouse_y = house['Price']\nhouse", "_____no_output_____" ], [ "lr = LinearRegression()\nlr.fit(house_X, house_y)\nprint('R^2', lr.score(house_X, house_y))\nprint('Intercept \\t', lr.intercept_)\ncoefficients = pd.Series(lr.coef_, house_X.columns)\nprint(coefficients.to_string())", "R^2 1.0\nIntercept \t 150000.0\nGood Location 50000.0\nBig Size 100000.0\nGood Location * Big Size 100000.0\n" ] ], [ [ "### Decision Tree, with engineered interaction term", "_____no_output_____" ] ], [ [ "tree = DecisionTreeRegressor()\ntree.fit(house_X, house_y)\nprint('R^2', tree.score(house_X, house_y))", "R^2 1.0\n" ], [ "viztree(tree, feature_names=house_X.columns)", "_____no_output_____" ] ], [ [ "# Titanic (classification, interactions, non-linear / non-monotonic)", "_____no_output_____" ] ], [ [ "titanic = sns.load_dataset('titanic')\ntitanic['sex'] = (titanic['sex'] == 'female').astype(int)", "_____no_output_____" ], [ "imputer = SimpleImputer()\ntitanic_X = imputer.fit_transform(titanic[['age', 'sex']])\ntitanic_y = titanic['survived']\ntree = DecisionTreeClassifier(max_depth=4)\ntree.fit(titanic_X, titanic_y)\nprint('Accuracy', tree.score(titanic_X, titanic_y))\n\n%matplotlib notebook\nimport matplotlib.pyplot as plt\nviz3D(tree, titanic, feature1='age', feature2='sex', target='survived');", "Accuracy 0.8047138047138047\n" ], [ "from sklearn.linear_model import LogisticRegression\nlogistic = LogisticRegression(solver='lbfgs')\nlogistic.fit(titanic_X, titanic_y)\nprint('Accuracy', tree.score(titanic_X, titanic_y))\n\n%matplotlib notebook\nimport matplotlib.pyplot as plt\nviz3D(logistic, titanic, feature1='age', feature2='sex', target='survived');", "Accuracy 0.8047138047138047\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb212fa624c6d67afe67e22f629ef04910ee9a9e
5,691
ipynb
Jupyter Notebook
weather_api.ipynb
robynmundle/predicting_flight_delays
86813a90de41cc17694848095fe6ea00fe1510b0
[ "MIT" ]
null
null
null
weather_api.ipynb
robynmundle/predicting_flight_delays
86813a90de41cc17694848095fe6ea00fe1510b0
[ "MIT" ]
null
null
null
weather_api.ipynb
robynmundle/predicting_flight_delays
86813a90de41cc17694848095fe6ea00fe1510b0
[ "MIT" ]
null
null
null
21.638783
133
0.517835
[ [ [ "# Visual Crossing API requests", "_____no_output_____" ] ], [ [ "# import packages\nimport pandas as pd\nimport requests\nimport json\nimport re\nfrom datetime import datetime, date, timedelta\nfrom IPython.display import JSON\n\nimport my_access\nfrom vc_api import *", "_____no_output_____" ], [ "# load airport lat-long data\napt_coords = pd.read_csv('airport_coordinates.csv').rename(columns={\"Unnamed: 0\": \"code\"}).set_index('code')\napt_coords = apt_coords[apt_coords.lat_long.notnull()]", "_____no_output_____" ], [ "# check that airport code is in apt_coords\napt_coords.loc[\"IAH\"]", "_____no_output_____" ], [ "# generate list of dates to \nstart_date = \"2018-01-01\" # string - format YYYY-MM-DD\nend_date = \"2019-12-31\" # string - format YYYY-MM-DD\n\ndatelist = generate_dates(start_date, end_date)\napt_code = \"IAH\" # airport 3-letter code\napi_key = my_access.visual_crossing2['api_key'] # api key goes here", "_____no_output_____" ] ], [ [ "DANGER: check details before running this. 
Commented out for safety.", "_____no_output_____" ] ], [ [ "# for date in datelist:\n# if get_save(apt_code, date, api_key) != 200:\n# print(f\"Request for {apt_code} on {date} failed.\")\n# break\n# else:\n# print(f\"Data for {apt_code} on {date} retrieved successfully.\")", "_____no_output_____" ] ], [ [ "### Get data for flights_test", "_____no_output_____" ] ], [ [ "# load list of airports needed\ndf = pd.read_csv('raw_flights_test.csv', index_col=0)", "_____no_output_____" ], [ "airports = [a for a in df.origin.unique() if a in apt_coords.index] + [a for a in df.dest.unique() if a in apt_coords.index]\nairports = set(airports)\ndates = [f\"2020-01-0{i}\" for i in range(1,8)]", "_____no_output_____" ], [ "api_key = my_access.visual_crossing2['api_key']", "_____no_output_____" ], [ "list_ = []\nfor date in dates:\n for airport in airports:\n list_.append((airport, date))", "_____no_output_____" ], [ "# for item in list_:\n# print(item)\n# print(get_save(item[0], item[1], api_key))", "_____no_output_____" ], [ "# for item in list_[995:]:\n# print(item)\n# print(get_save(item[0], item[1], api_key))", "_____no_output_____" ], [ "next_ = ('ROC', '2020-01-07')\nprint(list_.index(next_))", "2011\n" ], [ "len(list_)", "_____no_output_____" ], [ "# list_[2011:]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb213923f45370290d45005293457c4dbd772c0e
86,681
ipynb
Jupyter Notebook
ssd300_training.ipynb
mdsmith-cim/ssd_keras
05755de5e8f6265cdbbab1ba750e8aefc0b9808c
[ "Apache-2.0" ]
null
null
null
ssd300_training.ipynb
mdsmith-cim/ssd_keras
05755de5e8f6265cdbbab1ba750e8aefc0b9808c
[ "Apache-2.0" ]
null
null
null
ssd300_training.ipynb
mdsmith-cim/ssd_keras
05755de5e8f6265cdbbab1ba750e8aefc0b9808c
[ "Apache-2.0" ]
null
null
null
64.542815
1,505
0.622051
[ [ [ "# SSD300 Training Tutorial\n\nThis tutorial explains how to train an SSD300 on the Pascal VOC datasets. The preset parameters reproduce the training of the original SSD300 \"07+12\" model. Training SSD512 works simiarly, so there's no extra tutorial for that. The same goes for training on other datasets.\n\nYou can find a summary of a full training here to get an impression of what it should look like:\n[SSD300 \"07+12\" training summary](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md)", "_____no_output_____" ] ], [ [ "from keras.optimizers import Adam, SGD\nfrom keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger\nfrom keras import backend as K\nfrom keras.models import load_model\nfrom math import ceil\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom models.keras_ssd300 import ssd_300\nfrom keras_loss_function.keras_ssd_loss import SSDLoss\nfrom keras_layers.keras_layer_AnchorBoxes import AnchorBoxes\nfrom keras_layers.keras_layer_DecodeDetections import DecodeDetections\nfrom keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast\nfrom keras_layers.keras_layer_L2Normalization import L2Normalization\n\nfrom ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast\n\nfrom data_generator.object_detection_2d_data_generator import DataGenerator\nfrom data_generator.object_detection_2d_geometric_ops import Resize\nfrom data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels\nfrom data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation\nfrom data_generator.object_detection_2d_misc_utils import apply_inverse_transforms\n\n%matplotlib inline", "Using TensorFlow backend.\n" ] ], [ [ "## 0. 
Preliminary note\n\nAll places in the code where you need to make any changes are marked `TODO` and explained accordingly. All code cells that don't contain `TODO` markers just need to be executed.", "_____no_output_____" ], [ "## 1. Set the model configuration parameters\n\nThis section sets the configuration parameters for the model definition. The parameters set here are being used both by the `ssd_300()` function that builds the SSD300 model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to run the training. Most of these parameters are needed to define the anchor boxes.\n\nThe parameters as set below produce the original SSD300 architecture that was trained on the Pascal VOC datsets, i.e. they are all chosen to correspond exactly to their respective counterparts in the `.prototxt` file that defines the original Caffe implementation. Note that the anchor box scaling factors of the original SSD implementation vary depending on the datasets on which the models were trained. The scaling factors used for the MS COCO datasets are smaller than the scaling factors used for the Pascal VOC datasets. The reason why the list of scaling factors has 7 elements while there are only 6 predictor layers is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.\n\nAs mentioned above, the parameters set below are not only needed to build the model, but are also passed to the `SSDInputEncoder` constructor further down, which is responsible for matching and encoding ground truth boxes and anchor boxes during the training. 
In order to do that, it needs to know the anchor box parameters.", "_____no_output_____" ] ], [ [ "img_height = 300 # Height of the model input images\nimg_width = 300 # Width of the model input images\nimg_channels = 3 # Number of color channels of the model input images\nmean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. Do not change this value if you're using any of the pre-trained weights.\nswap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images.\nn_classes = 20 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO\nscales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets\nscales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets\nscales = scales_pascal\naspect_ratios = [[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters\ntwo_boxes_for_ar1 = True\nsteps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer.\noffsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer.\nclip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries\nvariances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation\nnormalize_coords = True", "_____no_output_____" ] ], [ [ "## 2. 
Build or load the model\n\nYou will want to execute either of the two code cells in the subsequent two sub-sections, not both.", "_____no_output_____" ], [ "### 2.1 Create a new model and load trained VGG-16 weights into it (or trained SSD weights)\n\nIf you want to create a new SSD300 model, this is the relevant section for you. If you want to load a previously saved SSD300 model, skip ahead to section 2.2.\n\nThe code cell below does the following things:\n1. It calls the function `ssd_300()` to build the model.\n2. It then loads the weights file that is found at `weights_path` into the model. You could load the trained VGG-16 weights or you could load the weights of a trained model. If you want to reproduce the original SSD training, load the pre-trained VGG-16 weights. In any case, you need to set the path to the weights file you want to load on your local machine. Download links to all the trained weights are provided in the [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository.\n3. Finally, it compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.\n\nNormally, the optimizer of choice would be Adam (commented out below), but since the original implementation uses plain SGD with momentum, we'll do the same in order to reproduce the original training. Adam is generally the superior optimizer, so if your goal is not to have everything exactly as in the original training, feel free to switch to Adam. You might need to adjust the learning rate scheduler below slightly in case you use Adam.\n\nNote that the learning rate that is being set here doesn't matter, because further below we'll pass a learning rate scheduler to the training function, which will overwrite any learning rate set here, i.e. 
what matters are the learning rates that are defined by the learning rate scheduler.\n\n`SSDLoss` is a custom Keras loss function that implements the multi-task that consists of a log loss for classification and a smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper.", "_____no_output_____" ] ], [ [ "# 1: Build the Keras model.\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = ssd_300(image_size=(img_height, img_width, img_channels),\n n_classes=n_classes,\n mode='training',\n l2_regularization=0.0005,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n normalize_coords=normalize_coords,\n subtract_mean=mean_color,\n swap_channels=swap_channels)\n\n# 2: Load some weights into the model.\n\nweights_path = '/usr/local/data/msmith/uncertainty/ssd_keras/weights/VGG_ILSVRC_16_layers_fc_reduced.h5'\n\nmodel.load_weights(weights_path, by_name=True)\n\n# 3: Instantiate an optimizer and the SSD loss function and compile the model.\n# If you want to follow the original Caffe implementation, use the preset SGD\n# optimizer, otherwise I'd recommend the commented-out Adam optimizer.\n\n#adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\nsgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)\n\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nmodel.compile(optimizer=sgd, loss=ssd_loss.compute_loss)", "WARNING: Logging before flag parsing goes to stderr.\nW1120 20:56:28.103250 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:95: The name tf.reset_default_graph is deprecated. 
Please use tf.compat.v1.reset_default_graph instead.\n\nW1120 20:56:28.105612 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:98: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nW1120 20:56:28.142223 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:102: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n\nW1120 20:56:28.144087 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:517: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nW1120 20:56:28.168423 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:4185: The name tf.truncated_normal is deprecated. Please use tf.random.truncated_normal instead.\n\nW1120 20:56:28.220454 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3976: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.\n\nW1120 20:56:28.422724 140000601630464 deprecation.py:506] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\nW1120 20:56:30.621887 140000601630464 deprecation_wrapper.py:119] From /home/vision/msmith/localDrive/msmith/anaconda3/lib/python3.7/site-packages/keras/optimizers.py:790: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n\nW1120 20:56:30.649017 140000601630464 deprecation.py:323] From /usr/local/data/msmith/uncertainty/ssd_keras/keras_loss_function/keras_ssd_loss.py:133: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.cast` instead.\nW1120 20:56:30.664265 140000601630464 deprecation.py:323] From /usr/local/data/msmith/uncertainty/ssd_keras/keras_loss_function/keras_ssd_loss.py:74: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nW1120 20:56:30.686405 140000601630464 deprecation.py:323] From /usr/local/data/msmith/uncertainty/ssd_keras/keras_loss_function/keras_ssd_loss.py:166: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.cast` instead.\n" ] ], [ [ "### 2.2 Load a previously created model\n\nIf you have previously created and saved a model and would now like to load it, execute the next code cell. The only thing you need to do here is to set the path to the saved model HDF5 file that you would like to load.\n\nThe SSD model contains custom objects: Neither the loss function nor the anchor box or L2-normalization layer types are contained in the Keras core library, so we need to provide them to the model loader.\n\nThis next code cell assumes that you want to load a model that was created in 'training' mode. 
If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below.", "_____no_output_____" ] ], [ [ "# TODO: Set the path to the `.h5` file of the model to be loaded.\nmodel_path = 'path/to/trained/model.h5'\n\n# We need to create an SSDLoss object in order to pass that to the model loader.\nssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)\n\nK.clear_session() # Clear previous models from memory.\n\nmodel = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,\n 'L2Normalization': L2Normalization,\n 'compute_loss': ssd_loss.compute_loss})", "_____no_output_____" ] ], [ [ "## 3. Set up the data generators for the training\n\nThe code cells below set up the data generators for the training and validation datasets to train the model. The settings below reproduce the original SSD training on Pascal VOC 2007 `trainval` plus 2012 `trainval` and validation on Pascal VOC 2007 `test`.\n\nThe only thing you need to change here are the filepaths to the datasets on your local machine. Note that parsing the labels from the XML annotations files can take a while.\n\nNote that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. 
As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images (around 9 GB total for Pascal VOC 2007 `trainval` plus 2012 `trainval` and another 2.6 GB for 2007 `test`). You can later load these HDF5 datasets directly in the constructor.\n\nThe original SSD implementation uses a batch size of 32 for the training. In case you run into GPU memory issues, reduce the batch size accordingly. You need at least 7 GB of free GPU memory to train an SSD300 with 20 object classes with a batch size of 32.\n\nThe `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.\n\nThe data augmentation settings defined further down reproduce the data augmentation pipeline of the original SSD training. 
The training generator receives an object `ssd_data_augmentation`, which is a transformation object that is itself composed of a whole chain of transformations that replicate the data augmentation procedure used to train the original Caffe implementation. The validation generator receives an object `resize`, which simply resizes the input images.\n\nAn `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs.\n\nIn order to train the model on a dataset other than Pascal VOC, either choose `DataGenerator`'s appropriate parser method that corresponds to your data format, or, if `DataGenerator` does not provide a suitable parser for your data format, you can write an additional parser and add it. Out of the box, `DataGenerator` can handle datasets that use the Pascal VOC format (use `parse_xml()`), the MS COCO format (use `parse_json()`) and a wide range of CSV formats (use `parse_csv()`).", "_____no_output_____" ] ], [ [ "# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.\n\n# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.\n\ntrain_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)\nval_dataset = DataGenerator(load_images_into_memory=True, hdf5_dataset_path=None)\n\n# 2: Parse the image and label lists for the training and validation datasets. 
This can take a while.\n\n# TODO: Set the paths to the datasets here.\n\n# The directories that contain the images.\nVOC_2007_images_dir = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/JPEGImages/'\nVOC_2012_images_dir = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2012/JPEGImages/'\n\n# The directories that contain the annotations.\nVOC_2007_annotations_dir = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/Annotations/'\nVOC_2012_annotations_dir = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2012/Annotations/'\n\n# The paths to the image sets.\nVOC_2007_train_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/ImageSets/Main/train.txt'\nVOC_2012_train_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2012/ImageSets/Main/train.txt'\nVOC_2007_val_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/ImageSets/Main/val.txt'\nVOC_2012_val_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2012/ImageSets/Main/val.txt'\nVOC_2007_trainval_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'\nVOC_2012_trainval_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2012/ImageSets/Main/trainval.txt'\nVOC_2007_test_image_set_filename = '/usr/local/data/msmith/APL/Datasets/PASCAL/VOCdevkit/VOC2007/ImageSets/Main/test.txt'\n\n# The XML parser needs to now what object class names to look for and in which order to map them to integers.\nclasses = ['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n\n# train_dataset.parse_xml(images_dirs=[VOC_2007_images_dir,\n# VOC_2012_images_dir],\n# image_set_filenames=[VOC_2007_trainval_image_set_filename,\n# 
VOC_2012_trainval_image_set_filename],\n# annotations_dirs=[VOC_2007_annotations_dir,\n# VOC_2012_annotations_dir],\n# classes=classes,\n# include_classes='all',\n# exclude_truncated=False,\n# exclude_difficult=False,\n# ret=False)\n\ntrain_dataset.parse_xml(images_dirs=[VOC_2007_images_dir, VOC_2012_images_dir],\n image_set_filenames=[VOC_2007_train_image_set_filename, VOC_2012_train_image_set_filename],\n annotations_dirs=[VOC_2007_annotations_dir, VOC_2012_annotations_dir],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False)\n\nval_dataset.parse_xml(images_dirs=[VOC_2012_images_dir],\n image_set_filenames=[VOC_2012_val_image_set_filename],\n annotations_dirs=[VOC_2012_annotations_dir],\n classes=classes,\n include_classes='all',\n exclude_truncated=False,\n exclude_difficult=True,\n ret=False)\n\n# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will\n# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`\n# option in the constructor, because in that cas the images are in memory already anyway. 
If you don't\n# want to create HDF5 datasets, comment out the subsequent two function calls.\n\n# train_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07+12_trainval.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)\n\n# val_dataset.create_hdf5_dataset(file_path='dataset_pascal_voc_07_test.h5',\n# resize=False,\n# variable_image_size=True,\n# verbose=True)", "Processing image set 'train.txt': 100%|██████████| 2501/2501 [00:21<00:00, 115.42it/s]\nProcessing image set 'train.txt': 100%|██████████| 5717/5717 [00:43<00:00, 131.68it/s]\nLoading images into memory: 100%|██████████| 8218/8218 [01:43<00:00, 79.54it/s] \nProcessing image set 'val.txt': 100%|██████████| 5823/5823 [00:41<00:00, 140.44it/s]\nLoading images into memory: 100%|██████████| 5823/5823 [01:12<00:00, 79.86it/s] \n" ], [ "# 3: Set the batch size.\n\nbatch_size = 32 # Change the batch size if you like, or if you run into GPU memory issues.\n\n# 4: Set the image transformations for pre-processing and data augmentation options.\n\n# For the training generator:\nssd_data_augmentation = SSDDataAugmentation(img_height=img_height,\n img_width=img_width,\n background=mean_color)\n\n# For the validation generator:\nconvert_to_3_channels = ConvertTo3Channels()\nresize = Resize(height=img_height, width=img_width)\n\n# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.\n\n# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.\npredictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3],\n model.get_layer('fc7_mbox_conf').output_shape[1:3],\n model.get_layer('conv6_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv7_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv8_2_mbox_conf').output_shape[1:3],\n model.get_layer('conv9_2_mbox_conf').output_shape[1:3]]\n\nssd_input_encoder = SSDInputEncoder(img_height=img_height,\n 
img_width=img_width,\n n_classes=n_classes,\n predictor_sizes=predictor_sizes,\n scales=scales,\n aspect_ratios_per_layer=aspect_ratios,\n two_boxes_for_ar1=two_boxes_for_ar1,\n steps=steps,\n offsets=offsets,\n clip_boxes=clip_boxes,\n variances=variances,\n matching_type='multi',\n pos_iou_threshold=0.5,\n neg_iou_limit=0.5,\n normalize_coords=normalize_coords)\n\n# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.\n\ntrain_generator = train_dataset.generate(batch_size=batch_size,\n shuffle=True,\n transformations=[ssd_data_augmentation],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\nval_generator = val_dataset.generate(batch_size=batch_size,\n shuffle=False,\n transformations=[convert_to_3_channels,\n resize],\n label_encoder=ssd_input_encoder,\n returns={'processed_images',\n 'encoded_labels'},\n keep_images_without_gt=False)\n\n# Get the number of samples in the training and validations datasets.\ntrain_dataset_size = train_dataset.get_dataset_size()\nval_dataset_size = val_dataset.get_dataset_size()\n\nprint(\"Number of images in the training dataset:\\t{:>6}\".format(train_dataset_size))\nprint(\"Number of images in the validation dataset:\\t{:>6}\".format(val_dataset_size))", "Number of images in the training dataset:\t 8218\nNumber of images in the validation dataset:\t 5823\n" ] ], [ [ "## 4. Set the remaining training parameters\n\nWe've already chosen an optimizer and set the batch size above, now let's set the remaining training parameters. I'll set one epoch to consist of 1,000 training steps. The next code cell defines a learning rate schedule that replicates the learning rate schedule of the original Caffe implementation for the training of the SSD300 Pascal VOC \"07+12\" model. 
That model was trained for 120,000 steps with a learning rate of 0.001 for the first 80,000 steps, 0.0001 for the next 20,000 steps, and 0.00001 for the last 20,000 steps. If you're training on a different dataset, define the learning rate schedule however you see fit.\n\nI'll set only a few essential Keras callbacks below, feel free to add more callbacks if you want TensorBoard summaries or whatever. We obviously need the learning rate scheduler and we want to save the best models during the training. It also makes sense to continuously stream our training history to a CSV log file after every epoch, because if we didn't do that, in case the training terminates with an exception at some point or if the kernel of this Jupyter notebook dies for some reason or anything like that happens, we would lose the entire history for the trained epochs. Finally, we'll also add a callback that makes sure that the training terminates if the loss becomes `NaN`. Depending on the optimizer you use, it can happen that the loss becomes `NaN` during the first iterations of the training. In later iterations it's less of a risk. 
For example, I've never seen a `NaN` loss when I trained SSD using an Adam optimizer, but I've seen a `NaN` loss a couple of times during the very first couple of hundred training steps of training a new model when I used an SGD optimizer.", "_____no_output_____" ] ], [ [ "# Define a learning rate schedule.\n\n# def lr_schedule(epoch):\n# if epoch < 80:\n# return 0.001\n# elif epoch < 100:\n# return 0.0001\n# else:\n# return 0.00001", "_____no_output_____" ], [ "# Define a learning rate schedule.\n\ndef lr_schedule(epoch):\n if epoch < 56:\n return 0.001\n elif epoch < 76:\n return 0.0001\n else:\n return 0.00001", "_____no_output_____" ], [ "# Define model callbacks.\n\n# TODO: Set the filepath under which you want to save the model.\nmodel_checkpoint = ModelCheckpoint(filepath='ssd300_dropout_PASCAL2012_train_+12_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n save_weights_only=False,\n mode='auto',\n period=1)\n#model_checkpoint.best = \n\ncsv_logger = CSVLogger(filename='ssd300_dropout_pascal_07+12_training_log.csv',\n separator=',',\n append=True)\n\nlearning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule,\n verbose=1)\n\nterminate_on_nan = TerminateOnNaN()\n\ncallbacks = [model_checkpoint,\n csv_logger,\n learning_rate_scheduler,\n terminate_on_nan]", "_____no_output_____" ] ], [ [ "## 5. Train", "_____no_output_____" ], [ "In order to reproduce the training of the \"07+12\" model mentioned above, at 1,000 training steps per epoch you'd have to train for 120 epochs. That is going to take really long though, so you might not want to do all 120 epochs in one go and instead train only for a few epochs at a time. 
You can find a summary of a full training [here](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md).\n\nIn order to only run a partial training and resume smoothly later on, there are a few things you should note:\n1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.\n2. In order for the learning rate scheduler callback above to work properly, `fit_generator()` needs to know which epoch we're in, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`.\n3. In order for the model checkpoint callback above to work correctly after a kernel restart, set `model_checkpoint.best` to the best validation loss from the previous training. 
If you don't do this and a new `ModelCheckpoint` object is created after a kernel restart, that object obviously won't know what the last best validation loss was, so it will always save the weights of the first epoch of your new training and record that loss as its new best loss. This isn't super-important, I just wanted to mention it.", "_____no_output_____" ] ], [ [ "# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.\ninitial_epoch = 0\nfinal_epoch = 120\nsteps_per_epoch = 1000\n\nhistory = model.fit_generator(generator=train_generator,\n steps_per_epoch=steps_per_epoch,\n epochs=final_epoch,\n callbacks=callbacks,\n validation_data=val_generator,\n validation_steps=ceil(val_dataset_size/batch_size),\n initial_epoch=initial_epoch)", "Epoch 1/120\n\nEpoch 00001: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 804s 804ms/step - loss: 10.4902 - val_loss: 9.2242\n\nEpoch 00001: val_loss improved from inf to 9.22418, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-01_loss-10.4904_val_loss-9.2242.h5\nEpoch 2/120\n\nEpoch 00002: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 784s 784ms/step - loss: 8.9313 - val_loss: 8.3443\n\nEpoch 00002: val_loss improved from 9.22418 to 8.34433, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-02_loss-8.9312_val_loss-8.3443.h5\nEpoch 3/120\n\nEpoch 00003: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 784s 784ms/step - loss: 8.3203 - val_loss: 7.8638\n\nEpoch 00003: val_loss improved from 8.34433 to 7.86378, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-03_loss-8.3205_val_loss-7.8638.h5\nEpoch 4/120\n\nEpoch 00004: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 786s 786ms/step - loss: 7.9173 - val_loss: 7.5816\n\nEpoch 00004: val_loss improved from 
7.86378 to 7.58159, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-04_loss-7.9173_val_loss-7.5816.h5\nEpoch 5/120\n\nEpoch 00005: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 788s 788ms/step - loss: 7.6130 - val_loss: 7.3883\n\nEpoch 00005: val_loss improved from 7.58159 to 7.38831, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-05_loss-7.6130_val_loss-7.3883.h5\nEpoch 6/120\n\nEpoch 00006: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 786s 786ms/step - loss: 7.3684 - val_loss: 7.2014\n\nEpoch 00006: val_loss improved from 7.38831 to 7.20139, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-06_loss-7.3684_val_loss-7.2014.h5\nEpoch 7/120\n\nEpoch 00007: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 790s 790ms/step - loss: 7.1900 - val_loss: 7.0621\n\nEpoch 00007: val_loss improved from 7.20139 to 7.06207, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-07_loss-7.1900_val_loss-7.0621.h5\nEpoch 8/120\n\nEpoch 00008: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 780s 780ms/step - loss: 7.0191 - val_loss: 6.9060\n\nEpoch 00008: val_loss improved from 7.06207 to 6.90597, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-08_loss-7.0190_val_loss-6.9060.h5\nEpoch 9/120\n\nEpoch 00009: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 797s 797ms/step - loss: 6.8653 - val_loss: 6.8018\n\nEpoch 00009: val_loss improved from 6.90597 to 6.80184, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-09_loss-6.8651_val_loss-6.8018.h5\nEpoch 10/120\n\nEpoch 00010: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 794s 794ms/step - loss: 6.7317 - val_loss: 6.7153\n\nEpoch 00010: val_loss improved from 6.80184 to 
6.71527, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-10_loss-6.7317_val_loss-6.7153.h5\nEpoch 11/120\n\nEpoch 00011: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 786s 786ms/step - loss: 6.5928 - val_loss: 6.6134\n\nEpoch 00011: val_loss improved from 6.71527 to 6.61343, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-11_loss-6.5928_val_loss-6.6134.h5\nEpoch 12/120\n\nEpoch 00012: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 792s 792ms/step - loss: 6.5233 - val_loss: 6.5321\n\nEpoch 00012: val_loss improved from 6.61343 to 6.53214, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-12_loss-6.5235_val_loss-6.5321.h5\nEpoch 13/120\n\nEpoch 00013: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 779s 779ms/step - loss: 6.3785 - val_loss: 6.4660\n\nEpoch 00013: val_loss improved from 6.53214 to 6.46605, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-13_loss-6.3785_val_loss-6.4660.h5\nEpoch 14/120\n\nEpoch 00014: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 782s 782ms/step - loss: 6.2876 - val_loss: 6.3903\n\nEpoch 00014: val_loss improved from 6.46605 to 6.39029, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-14_loss-6.2876_val_loss-6.3903.h5\nEpoch 15/120\n\nEpoch 00015: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 786s 786ms/step - loss: 6.1833 - val_loss: 6.3379\n\nEpoch 00015: val_loss improved from 6.39029 to 6.33792, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-15_loss-6.1832_val_loss-6.3379.h5\nEpoch 16/120\n\nEpoch 00016: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 788s 788ms/step - loss: 6.1140 - val_loss: 6.2744\n\nEpoch 00016: val_loss improved from 6.33792 to 6.27437, 
saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-16_loss-6.1140_val_loss-6.2744.h5\nEpoch 17/120\n\nEpoch 00017: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 777s 777ms/step - loss: 6.0181 - val_loss: 6.2608\n\nEpoch 00017: val_loss improved from 6.27437 to 6.26083, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-17_loss-6.0182_val_loss-6.2608.h5\nEpoch 18/120\n\nEpoch 00018: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 782s 782ms/step - loss: 5.9498 - val_loss: 6.1641\n\nEpoch 00018: val_loss improved from 6.26083 to 6.16415, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-18_loss-5.9497_val_loss-6.1641.h5\nEpoch 19/120\n\nEpoch 00019: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 781s 781ms/step - loss: 5.8630 - val_loss: 6.1405\n\nEpoch 00019: val_loss improved from 6.16415 to 6.14053, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-19_loss-5.8630_val_loss-6.1405.h5\nEpoch 20/120\n\nEpoch 00020: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 784s 784ms/step - loss: 5.7860 - val_loss: 6.1127\n\nEpoch 00020: val_loss improved from 6.14053 to 6.11266, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-20_loss-5.7859_val_loss-6.1127.h5\nEpoch 21/120\n\nEpoch 00021: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 788s 788ms/step - loss: 5.7082 - val_loss: 6.0719\n\nEpoch 00021: val_loss improved from 6.11266 to 6.07186, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-21_loss-5.7082_val_loss-6.0719.h5\nEpoch 22/120\n\nEpoch 00022: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 787s 787ms/step - loss: 5.6627 - val_loss: 6.0232\n\nEpoch 00022: val_loss improved from 6.07186 to 6.02317, saving model 
to ssd300_dropout_PASCAL2012_train_+12_epoch-22_loss-5.6625_val_loss-6.0232.h5\nEpoch 23/120\n\nEpoch 00023: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 788s 788ms/step - loss: 5.5693 - val_loss: 5.9671\n\nEpoch 00023: val_loss improved from 6.02317 to 5.96715, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-23_loss-5.5694_val_loss-5.9671.h5\nEpoch 24/120\n\nEpoch 00024: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 782s 782ms/step - loss: 5.5207 - val_loss: 5.9637\n\nEpoch 00024: val_loss improved from 5.96715 to 5.96368, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-24_loss-5.5207_val_loss-5.9637.h5\nEpoch 25/120\n\nEpoch 00025: LearningRateScheduler setting learning rate to 0.001.\n1000/1000 [==============================] - 788s 788ms/step - loss: 5.4582 - val_loss: 5.8855\n\nEpoch 00025: val_loss improved from 5.96368 to 5.88554, saving model to ssd300_dropout_PASCAL2012_train_+12_epoch-25_loss-5.4583_val_loss-5.8855.h5\nEpoch 26/120\n\nEpoch 00026: LearningRateScheduler setting learning rate to 0.001.\n" ] ], [ [ "## 6. Make predictions\n\nNow let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator that we've already set up above. 
Feel free to change the batch size.\n\nYou can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training.", "_____no_output_____" ] ], [ [ "# 1: Set the generator for the predictions.\n\npredict_generator = val_dataset.generate(batch_size=1,\n shuffle=False,\n transformations=[convert_to_3_channels,\n resize],\n label_encoder=None,\n returns={'processed_images',\n 'filenames',\n 'inverse_transform',\n 'original_images',\n 'original_labels'},\n keep_images_without_gt=False)", "_____no_output_____" ], [ "# 2: Generate samples.\n\nbatch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(predict_generator)\n\ni = 0 # Which batch item to look at\n\nprint(\"Image:\", batch_filenames[i])\nprint()\nprint(\"Ground truth boxes:\\n\")\nprint(np.array(batch_original_labels[i]))\nplt.imshow(batch_images[i])", "_____no_output_____" ], [ "# 3: Make predictions.\n# TODO: wrap in for loop for N iterations\n# Results of each should be different, i.e. should get a number of different predictions (e.g. take 1 = 2 detections, take 2 = 3)\n# Should be (but not always) a lot of overlap\n# Need to process them - partitioning detections into observations\n# Find high IOU areas\n\ny_pred = model.predict(batch_images)", "_____no_output_____" ] ], [ [ "Now let's decode the raw predictions in `y_pred`.\n\nHad we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. 
on the CPU instead of the GPU).\n\n`decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions.", "_____no_output_____" ] ], [ [ "print(y_pred)\nprint(y_pred.shape)", "_____no_output_____" ], [ "# 4: Decode the raw predictions in `y_pred`.\n\ny_pred_decoded = decode_detections(y_pred,\n confidence_thresh=0.5,\n iou_threshold=0.4,\n top_k=200,\n normalize_coords=normalize_coords,\n img_height=img_height,\n img_width=img_width)", "_____no_output_____" ] ], [ [ "We made the predictions on the resized images, but we'd like to visualize the outcome on the original input images, so we'll convert the coordinates accordingly. Don't worry about that opaque `apply_inverse_transforms()` function below, in this simple case it just aplies `(* original_image_size / resized_image_size)` to the box coordinates.", "_____no_output_____" ] ], [ [ "# 5: Convert the predictions for the original image.\n\ny_pred_decoded_inv = apply_inverse_transforms(y_pred_decoded, batch_inverse_transforms)\n\nnp.set_printoptions(precision=2, suppress=True, linewidth=90)\nprint(\"Predicted boxes:\\n\")\nprint(' class conf xmin ymin xmax ymax')\nprint(y_pred_decoded_inv[i])", "_____no_output_____" ] ], [ [ "Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. 
The ground truth boxes are also drawn onto the image in green for comparison.", "_____no_output_____" ] ], [ [ "# 5: Draw the predicted boxes onto the image\n\n# Set the colors for the bounding boxes\ncolors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()\nclasses = ['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n\nplt.figure(figsize=(20,12))\nplt.imshow(batch_original_images[i])\n\ncurrent_axis = plt.gca()\n\nfor box in batch_original_labels[i]:\n xmin = box[1]\n ymin = box[2]\n xmax = box[3]\n ymax = box[4]\n label = '{}'.format(classes[int(box[0])])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})\n\nfor box in y_pred_decoded_inv[i]:\n xmin = box[2]\n ymin = box[3]\n xmax = box[4]\n ymax = box[5]\n color = colors[int(box[0])]\n label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])\n current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2)) \n current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})", "_____no_output_____" ], [ "model.summary()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb2140ffaa99ca08fbfc55d1ff867cbda2cd1455
118,194
ipynb
Jupyter Notebook
Regression/Linear Models/RidgeRegression_Scale_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
53
2021-08-28T07:41:49.000Z
2022-03-09T02:20:17.000Z
Regression/Linear Models/RidgeRegression_Scale_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
142
2021-07-27T07:23:10.000Z
2021-08-25T14:57:24.000Z
Regression/Linear Models/RidgeRegression_Scale_PowerTransformer.ipynb
shreepad-nade/ds-seed
93ddd3b73541f436b6832b94ca09f50872dfaf10
[ "Apache-2.0" ]
38
2021-07-27T04:54:08.000Z
2021-08-23T02:27:20.000Z
163.477178
67,470
0.86413
[ [ [ "# RidgeRegression with Scale & Power Transformer", "_____no_output_____" ], [ "This Code template is for the regression analysis using simple Ridge Regression with Feature Rescaling technique Scale and Feature Transformation technique PowerTransformer in a pipeline. Ridge Regression is also known as Tikhonov regularization.", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "import warnings \nimport numpy as np \nimport pandas as pd \nimport seaborn as se \nimport matplotlib.pyplot as plt \nfrom sklearn.linear_model import Ridge\nfrom sklearn.pipeline import Pipeline,make_pipeline\nfrom sklearn.preprocessing import scale,PowerTransformer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error \nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\nFilepath of CSV file", "_____no_output_____" ] ], [ [ "#filepath\nfile_path= \"\"", "_____no_output_____" ] ], [ [ "List of features which are required for model training .", "_____no_output_____" ] ], [ [ "#x_values\nfeatures=[]", "_____no_output_____" ] ], [ [ "Target feature for prediction.", "_____no_output_____" ] ], [ [ "#y_value\ntarget=''", "_____no_output_____" ] ], [ [ "### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path)\ndf.head()", "_____no_output_____" ] ], [ [ "### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. 
Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.", "_____no_output_____" ] ], [ [ "X=df[features]\nY=df[target]", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)", "_____no_output_____" ] ], [ [ "Calling preprocessing functions on the feature and target set.\n", "_____no_output_____" ] ], [ [ "x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i])\nX=EncodeX(X)\nY=NullClearner(Y)\nX.head()", "_____no_output_____" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "f,ax = plt.subplots(figsize=(18, 18))\nmatrix = np.triu(X.corr())\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\nplt.show()", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. 
The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.", "_____no_output_____" ] ], [ [ "x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)", "_____no_output_____" ] ], [ [ "### Data Rescaling\n<Code>scale</Code> standardizes a dataset along any axis. It standardizes features by removing the mean and scaling to unit variance.\n\nscale is similar to <Code>StandardScaler</Code> in terms of feature transformation, but unlike StandardScaler, it lacks Transformer API i.e., it does not have <Code>fit_transform</Code>, <Code>transform</Code> and other related methods.", "_____no_output_____" ] ], [ [ "x_train =scale(x_train)\nx_test = scale(x_test)", "_____no_output_____" ] ], [ [ "\n\n### Feature Transformation\n\nPower transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired.\n\n##### For more information on PowerTransformer [ click here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)\n\n### Model\nRidge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients. The ridge coefficients minimize a penalized residual sum of squares:\n\n\\begin{equation*}\n\\min_{w} || X w - y||_2^2 + \\alpha ||w||_2^2\n\\end{equation*}\n\nThe complexity parameter controls the amount of shrinkage: the larger the value of , the greater the amount of shrinkage and thus the coefficients become more robust to collinearity.\n\nThis model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. 
This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape (n_samples, n_targets)).\n\n#### Model Tuning Parameters\n\n> **alpha** -> Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization.\n\n> **solver** -> Solver to use in the computational routines {‘auto’, ‘svd’, ‘cholesky’, ‘lsqr’, ‘sparse_cg’, ‘sag’, ‘saga’}", "_____no_output_____" ] ], [ [ "model=make_pipeline(PowerTransformer(), Ridge(random_state=123))\nmodel.fit(x_train,y_train)\n\n", "_____no_output_____" ] ], [ [ "#### Model Accuracy\n\nWe will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.\n\n> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.", "_____no_output_____" ] ], [ [ "y_pred=model.predict(x_test)\nprint(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))", "Accuracy score 44.60 %\n\n" ] ], [ [ "> **r2_score**: The **r2_score** function computes the percentage variablility explained by our model, either the fraction or the count of correct predictions. \n\n> **mae**: The **mean abosolute error** function calculates the amount of total error(absolute average distance between the real data and the predicted data) by our model. \n\n> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model. 
", "_____no_output_____" ] ], [ [ "print(\"R2 Score: {:.2f} %\".format(r2_score(y_test,y_pred)*100))\nprint(\"Mean Absolute Error {:.2f}\".format(mean_absolute_error(y_test,y_pred)))\nprint(\"Mean Squared Error {:.2f}\".format(mean_squared_error(y_test,y_pred)))", "R2 Score: 44.60 %\nMean Absolute Error 0.30\nMean Squared Error 0.13\n" ] ], [ [ "#### Prediction Plot\n\nFirst, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.\nFor the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(14,10))\nplt.plot(range(20),y_test[0:20], color = \"green\")\nplt.plot(range(20),model.predict(x_test[0:20]), color = \"red\")\nplt.legend([\"Actual\",\"prediction\"]) \nplt.title(\"Predicted vs True Value\")\nplt.xlabel(\"Record number\")\nplt.ylabel(target)\nplt.show()", "_____no_output_____" ] ], [ [ "#### Creator: Ganapathi Thota , Github: [Profile](https://github.com/Shikiz)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb214dd2f0e96035d31788be22628a6b740d45d0
185,229
ipynb
Jupyter Notebook
DimensionReduction/PrincipleComponentAnalysis/pca.ipynb
johnnychiuchiu/Machine-Learning
0fd6fd2c08025134cf7d20b245c39f82d5453e14
[ "MIT" ]
13
2018-03-19T18:16:03.000Z
2022-03-22T03:44:13.000Z
DimensionReduction/PrincipleComponentAnalysis/pca.ipynb
johnnychiuchiu/Machine-Learning
0fd6fd2c08025134cf7d20b245c39f82d5453e14
[ "MIT" ]
null
null
null
DimensionReduction/PrincipleComponentAnalysis/pca.ipynb
johnnychiuchiu/Machine-Learning
0fd6fd2c08025134cf7d20b245c39f82d5453e14
[ "MIT" ]
7
2018-01-11T04:03:11.000Z
2021-01-22T07:56:42.000Z
185.972892
45,164
0.880753
[ [ [ "# Understanding Principal Component Analysis", "_____no_output_____" ], [ "**Outline**\n\n* [Introduction](#intro)\n* [Assumption and derivation](#derive)\n* [PCA Example](#example)\n* [PCA Usage](#usage)", "_____no_output_____" ] ], [ [ "%load_ext watermark\n%matplotlib inline\n# %config InlineBackend.figure_format='retina'\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport math\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import accuracy_score\n\n%watermark -a 'Johnny' -d -t -v -p numpy,pandas,matplotlib,sklearn", "The watermark extension is already loaded. To reload it, use:\n %reload_ext watermark\nJohnny 2018-11-24 17:26:10 \n\nCPython 3.6.3\nIPython 6.1.0\n\nnumpy 1.13.3\npandas 0.20.3\nmatplotlib 2.1.0\nsklearn 0.19.1\n" ] ], [ [ "---", "_____no_output_____" ], [ "## <a id=\"intro\">Introduction</a>", "_____no_output_____" ], [ "When we have two features that are highly correlated with each other, we may not want to include both of them in our model. In [Lasso and Ridge regression](http://nbviewer.jupyter.org/github/johnnychiuchiu/Machine-Learning/blob/master/LinearRegression/linearRegressionModelBuilding.ipynb#ridge), what it does is fitting a model with all the predictors but put a penalized term, either L1 or L2 norm on the value of the regression coefficients, this will shrinks the coefficient estimates towards zero. In other words, it try to pick some predictors out of all the predictors in order to reduce the dimension of our column space.\n\nPrincipal Component Analysis(PCA) is another type of dimension reduction method. 
What PCA is all about is **Finding the directions of maximum variance in high-dimensional data and project it onto a smaller dimensional subspace while retaining most of the information.** The main idea and motivation is that each of the $n$ observations lives in $p$-dimensional space, but not all of these dimensions are equally interesting. PCA seeks a small number of dimensions that are as intersteing as possible. The concept of *interesting* is measured by the amount that the observations vary along each dimension.\n\nNote that PCA is just a linear transformation method. Compared to the original space, it can project our high-dimensional data into another dimension, of which each of the direction are with the maximum variance. In other words, the orthogonality of principal components implies that PCA finds the most uncorrelated components to explain as much variation in the data as possible. We can then pick the number of directions, i.e. components, we want to keep while containing most of the information of the original data. The direction of the highest variance is called the first principal component, the second highest is call the second principal component, and so on.\n\nIn PCA, we found out that the first principal component is obtained by doing eigendecomposition of the covariance matrix X, and the eigenvector with the largest eigenvalue is our first principal component in the sense that every vector in the span of this eigenvector will strech out by the largest amount, since eigenvalues are the factors by which the eigenvectors streckes or squishes during the transformation. Therefore, we can sort the top k component by the value of the eigenvalues that we found from doing eigendecomposition of the covariance matrix X.\n\n**Application of PCA**\n\n* We can use PCA as a tool for data visualization. 
For instance, if we can obtain a two-dimensional representation of the data that captures most of the information, then we can plot hte observations in this low-dimensional space.\n* We can use princial components as predictors in a regression model in place of the original larger set of variables.\n\n\n", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## <a id=\"derive\">Assumption and derivation</a>", "_____no_output_____" ], [ "**Assumption** for PCA before we derive the whole process are\n* Since we are only interested in variance, we assume that each of the variables in $X$ has been and should be centered to have mean zero, i.e. the column means of $X$ are zero.", "_____no_output_____" ], [ "**Method Derivation**\n\nAssume we have n observation, and a set of features $X1, X2, X3, \\dots, Xp$. In order words, we have\n\n\\begin{pmatrix}\n x_{1,1} & x_{1,2} & \\cdots & x_{1,p} \\\\\n x_{2,1} & x_{2,2} & \\cdots & x_{2,p} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n,1} & x_{n,2} & \\cdots & x_{n,p} \n\\end{pmatrix}\n\nwhere \n\n\\begin{equation*}\nX1 = \\begin{bmatrix}\n x_{1,1} \\\\\n x_{2,1} \\\\\n \\vdots \\\\\n x_{n,1} \n \\end{bmatrix}\n\\end{equation*}\n \n\nPCA will try to find a low dimensional representation of a dataset that contains as much as possible of the variance. The idea is that each of the n observations lives in p-dimensional space, but not all of these dimensions are equally interesting. PCA seeks a small number of dimensions that are as interesting as possible. Let see how these dimensions, or *principal component* are found.\n\nGiven $n \\times p$ data set $X$, how do we compute the first principal component? 
We look for the linear combination of the sample feature values of the form\n\n$$z_{i,1} = \\phi_{1,1}x_{i,1}+\\phi_{2,1}x_{i,2}+\\dots+\\phi_{p,1}x_{i,p}$$\nwhere\n0<i<n and $\\phi_1$ denotes the first principal component loading vector, which is\n\n\\begin{equation*}\n \\phi_1=\\begin{pmatrix}\n \\phi_{1,1} \\\\\n \\phi_{2,1} \\\\\n \\vdots \\\\\n \\phi_{p,1} \n \\end{pmatrix}\n\\end{equation*} \n\nWe'll have n values of $z_1$, and we want to look for the linear combination that has the largest sample variance. More formally,\n\n\\begin{equation*}\nZ_1\n=\n\\begin{pmatrix}\n z_{1,1} \\\\\n z_{2,1} \\\\\n \\vdots \\\\\n z_{n,1} \n \\end{pmatrix}\n= \n\\begin{pmatrix}\n \\phi_{1,1}x_{1,1} + \\phi_{2,1}x_{1,2} + \\cdots + \\phi_{p,1}x_{1,p} \\\\\n \\phi_{1,1}x_{2,1} + \\phi_{2,1}x_{2,2} + \\cdots + \\phi_{p,1}x_{2,p} \\\\\n \\vdots \\\\\n \\phi_{1,1}x_{n,1} + \\phi_{2,1}x_{n,2} + \\cdots + \\phi_{p,1}x_{n,p} \n\\end{pmatrix}\n=\n\\begin{pmatrix}\n \\phi_{1,1} \n \\phi_{2,1} \n \\dots \n \\phi_{p,1} \n \\end{pmatrix}\n\\begin{pmatrix}\n x_{1,1} & x_{1,2} & \\cdots & x_{1,p} \\\\\n x_{2,1} & x_{2,2} & \\cdots & x_{2,p} \\\\\n \\vdots & \\vdots & \\ddots & \\vdots \\\\\n x_{n,1} & x_{n,2} & \\cdots & x_{n,p} \n\\end{pmatrix}\n=\n\\phi_{1,1}X_{1}+\\phi_{2,1}X_{2}+\\dots+\\phi_{p,1}X_{p}\n=\n\\phi_1^T X\n\\end{equation*}\n\nWe assume that each of the variables in $X$ has been centered to have mean zero, i.e., the column means of $X$ are zero. Therefore, $E(X_i)=0$ for i in 1,...p. 
It's obvious to know that $E(Z_1)=E(\\phi_{1,1}X_{1}+\\phi_{2,1}X_{2}+\\dots+\\phi_{p,1}X_{p}) = 0$\n\nTherefore, the variance of $Z_1$ is \n\n$$Var(Z_1) = E\\Big[[Z_1-E(Z_1)][Z_1-E(Z_1)]^T\\Big] = E\\Big[Z_1 Z_1^T \\Big] = E\\Big[(\\phi_1^T X) (\\phi_1^T X)^T \\Big] = E\\Big[\\phi_1^T X X^T \\phi_1\\Big] = \\phi_1^T E[X X^T] \\phi_1$$\n\nWe also know that the [covariance matrix](https://en.wikipedia.org/wiki/Covariance_matrix) of X is\n\n$$C = Cov(X) = E\\Big[[X-E(X)][X-E(X)]^T\\Big] = E[X X^T]$$\n\nHence, the $Var(Z_1)= \\phi_1^T E[X X^T] \\phi_1 = \\phi_1^T C \\phi_1$\n\nApart from finding the largest sample variance, we also constrain the loadings so that their sum of squares is equal to one, since otherwise setting these elements to be arbitrarily large in absolute value could result in an arbitrarily large variance. More formally,\n\n$$\\sum_{j=1}^{p}\\phi_{j1}^2=1$$", "_____no_output_____" ], [ "In other words, the first principal component loading vector solves the optimization problem\n\n$$\\text{maximize}_\\phi \\quad \\phi^TC\\phi$$\n$$\\text{subject to} \\sum_{j=1}^{p}\\phi_{j1}^2 = \\phi_1^T \\phi_1 =1$$\n", "_____no_output_____" ], [ "This objective function can be solved by the Lagrange multiplier, minimizing the loss function:\n\n$$L = \\phi^T C\\phi - \\lambda(\\phi^T \\phi-1)$$\n\nNext, to solve for $\\phi$, we set the partial derivative of L with respect to $\\phi$ to 0.\n\n$$\\frac{\\partial L}{\\partial \\phi_1} = C\\phi - \\lambda \\phi_1 =0 $$\n$$ C\\phi_1 = \\lambda \\phi_1 $$", "_____no_output_____" ], [ "Surprisingly we see that it is actually a eigendecomposition problem. 
To refresh our mind a little bit, here is a very good [youtube video](https://www.youtube.com/watch?v=PFDu9oVAE-g&index=14&list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab) explaining what eigenvalue and eigenvector is in a very geometrical way.", "_____no_output_____" ], [ "Therefore, from the equation above, we pick $\\phi$ as the eigenvector associated with the largest eigenvalue.\n\nAlso, most data can’t be well-described by a single principal component. Typically, we compute multiple principal components by computing all eigenvectors of the covariance matrix of $X$ and ranking them by their eigenvalues. After sorting the eigenpairs, the next question is “how many principal components are we going to choose for our new feature subspace?” A useful measure is the so-called “explained variance,” which can be calculated from the eigenvalues. The explained variance tells us how much information (variance) can be attributed to each of the principal components.", "_____no_output_____" ], [ "To sum up, here are the **steps that we take to perform a PCA analysis**\n1. Standardize the data.\n2. Obtain the Eigenvectors and Eigenvalues from the covariance matrix (technically the correlation matrix after performing the standardization).\n3. Sort eigenvalues in descending order and choose the k eigenvectors that correspond to the k largest eigenvalues where k is the number of dimensions of the new feature subspace.\n4. Projection onto the new feature space. During this step we will take the top k eigenvectors and use it to transform the original dataset X to obtain a k-dimensional feature subspace X′.", "_____no_output_____" ], [ "---", "_____no_output_____" ], [ "## <a id=\"process\">PCA Analysis Example</a>", "_____no_output_____" ], [ "Let's use the classical IRIS data to illustrate the topics that we just covered, including\n* What are the explained variance of each component? 
How many component should we pick?\n* How will the scatter plot be if we plot in the dimension of first and second component?", "_____no_output_____" ] ], [ [ "# Read Data\ndf = pd.read_csv(\n filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',\n header=None,\n sep=',')\n\ndf.columns=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'class']\ndf.dropna(how=\"all\", inplace=True) # drops the empty line at file-end\n\ndf.tail()", "_____no_output_____" ], [ "# split data table into data X and class labels y\nX = df.iloc[:,0:4].values\ny = df.iloc[:,4].values", "_____no_output_____" ] ], [ [ "**EDA**\n\nTo get a feeling for how the 3 different flower classes are distributes along the 4 different features, let us visualize them via histograms.", "_____no_output_____" ] ], [ [ "def plot_iris():\n label_dict = {1: 'Iris-Setosa',\n 2: 'Iris-Versicolor',\n 3: 'Iris-Virgnica'}\n\n feature_dict = {0: 'sepal length [cm]',\n 1: 'sepal width [cm]',\n 2: 'petal length [cm]',\n 3: 'petal width [cm]'}\n\n with plt.style.context('seaborn-whitegrid'):\n plt.figure(figsize=(8, 6))\n for cnt in range(4):\n plt.subplot(2, 2, cnt+1)\n for lab in ('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'):\n plt.hist(X[y==lab, cnt],\n label=lab,\n bins=10,\n alpha=0.3,)\n plt.xlabel(feature_dict[cnt])\n plt.legend(loc='upper right', fancybox=True, fontsize=8)\n\n plt.tight_layout()\n plt.show()\n \nplot_iris() ", "_____no_output_____" ] ], [ [ "## Process", "_____no_output_____" ], [ "### 1. Standardize the data", "_____no_output_____" ] ], [ [ "# create a StandardScaler object\nscaler = StandardScaler()\n# fit and then transform to get the standardized dataset\nscaler.fit(X)", "_____no_output_____" ], [ "X_std = scaler.transform(X)", "_____no_output_____" ] ], [ [ "### 2. 
Do eigendecomposition and sort eigenvalues in descending order", "_____no_output_____" ] ], [ [ "# n_components: Number of components to keep\n# if n_components is not set all components are kept\nmy_pca = PCA(n_components=None) \nmy_pca.fit(X_std)", "_____no_output_____" ], [ "def plot_var_explained(var_exp, figsize=(6,4)):\n \"\"\"variance explained per component plot\"\"\"\n \n # get culmulative variance explained\n cum_var_exp = np.cumsum(var_exp)\n \n # plot\n with plt.style.context('seaborn-whitegrid'):\n plt.figure(figsize=figsize)\n\n plt.bar(range(len(var_exp)), var_exp, alpha=0.5, align='center',\n label='individual explained variance')\n plt.step(range(len(var_exp)), cum_var_exp, where='mid',\n label='cumulative explained variance')\n plt.ylabel('Explained variance ratio')\n plt.xlabel('Principal components')\n plt.legend(loc='best')\n plt.tight_layout()\n plt.show()", "_____no_output_____" ], [ "var_exp = my_pca.explained_variance_ratio_\nplot_var_explained(var_exp, figsize=(6,4))", "_____no_output_____" ], [ "# plot a simpler version of the bar chart\npd.DataFrame(my_pca.explained_variance_ratio_).plot.bar()", "_____no_output_____" ] ], [ [ "The plot above clearly shows that most of the variance (72.77% of the variance to be precise) can be explained by the first principal component alone. The second principal component still bears some information (23.03%) while the third and fourth principal components can safely be dropped without losing to much information. Together, the first two principal components contain 95.8% of the information.", "_____no_output_____" ], [ "### 3. 
Check the scores within each principal component", "_____no_output_____" ] ], [ [ "PC_df = pd.DataFrame(my_pca.components_,columns=df.iloc[:,0:4].columns).transpose()\nPC_df", "_____no_output_____" ], [ "import seaborn as sns\nplt.figure(figsize=None) #(4,4)\nsns.heatmap(PC_df,cmap=\"RdBu_r\",annot=PC_df.values, linewidths=1, center=0)", "_____no_output_____" ] ], [ [ "From the above heatmap & table, we can see that first component consist of all 4 features with a smaller weight on sepal_wid", "_____no_output_____" ], [ "### 4. Projection onto the new feature space\nDuring this step we will take the top k eigenvectors and use it to transform the original dataset X to obtain a k-dimensional feature subspace X′.", "_____no_output_____" ] ], [ [ "sklearn_pca = PCA(n_components=2)\nY_sklearn = sklearn_pca.fit_transform(X_std)", "_____no_output_____" ], [ "Y_sklearn[1:10]", "_____no_output_____" ] ], [ [ "Each of the list in the array above shows the projected value of each observation onto the first two principal components. If we want to fit model using the data projected on to their first 2 principal component, then `Y_sklearn` is the data we want to use.", "_____no_output_____" ], [ "## <a id=\"usage\">PCA Usage</a>", "_____no_output_____" ], [ "### Data Visualization", "_____no_output_____" ], [ "We can use PCA as a tool for data visualization. 
For instance, if we can obtain a two-dimensional representation of the data that captures most of the information, then we can plot hte observations in this low-dimensional space.\n\nLet's see how it will be like using IRIS data if we plot it out in the first two principal components.", "_____no_output_____" ] ], [ [ "with plt.style.context('seaborn-whitegrid'):\n plt.figure(figsize=(6, 4))\n for lab, col in zip(('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'),\n ('blue', 'red', 'green')):\n print(lab)\n print(col)\n plt.scatter(Y_sklearn[y==lab, 0],\n Y_sklearn[y==lab, 1],\n label=lab,\n c=col)\n plt.xlabel('Principal Component 1')\n plt.ylabel('Principal Component 2')\n plt.legend(loc='lower center')\n plt.tight_layout()\n plt.show()", "Iris-setosa\nblue\nIris-versicolor\nred\nIris-virginica\ngreen\n" ] ], [ [ "### Principal Component Regression\nWe can use princial components as predictors in a regression model in place of the original larger set of variables.\n\nLet's compare the result of logistic regression using all the features with the one using only the first two component", "_____no_output_____" ] ], [ [ "# the code is copied from Ethen's PCA blog post, which is listed in the reference.\n\n# split 30% of the iris data into a test set for evaluation\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size = 0.3, random_state = 1)\n\n# create the pipeline, where we'll\n# standardize the data, perform PCA and\n# fit the logistic regression\npipeline1 = Pipeline([\n ('standardize', StandardScaler()),\n ('pca', PCA(n_components = 2)),\n ('logistic', LogisticRegression(random_state = 1))\n])\npipeline1.fit(X_train, y_train)\ny_pred1 = pipeline1.predict(X_test)\n\n# pipeline without PCA\npipeline2 = Pipeline([\n ('standardize', StandardScaler()),\n ('logistic', LogisticRegression(random_state = 1))\n])\npipeline2.fit(X_train, y_train)\ny_pred2 = pipeline2.predict(X_test)\n\n# access the prediction accuracy\nprint('PCA Accuracy %.3f' % 
accuracy_score(y_test, y_pred1))\nprint('Accuracy %.3f' % accuracy_score(y_test, y_pred2))", "PCA Accuracy 0.800\nAccuracy 0.822\n" ] ], [ [ "We saw that by using only the first two component, the accuracy only drop by 0.022, which is about 2-3% from the original accuracy. Actually, by using the first three principal component, we can get the same accuracy as the original model with all the features.", "_____no_output_____" ], [ "### Reference\n\n* [PCA in 3 steps](http://sebastianraschka.com/Articles/2015_pca_in_3_steps.html)\n* [Everything you did and didn't know about PCA\n](http://alexhwilliams.info/itsneuronalblog/2016/03/27/pca/)\n* [Ethen: Principal Component Analysis (PCA) from scratch](http://nbviewer.jupyter.org/github/ethen8181/machine-learning/blob/master/dim_reduct/PCA.ipynb)\n* [Wiki: Matrix Multiplication](https://en.wikipedia.org/wiki/Matrix_multiplication)\n* [Sklearn: Pipelining: chaining a PCA and a logistic regression](http://scikit-learn.org/stable/auto_examples/plot_digits_pipe.html#sphx-glr-auto-examples-plot-digits-pipe-py)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb215ad6e229693a04314cbb2f1e0329d970a0e2
18,673
ipynb
Jupyter Notebook
lineage tests/Interacting Lineages via SBML.ipynb
albertanis/bioscrape
d595a6f3b37e527851e6f33215ed3f24d4f58add
[ "MIT" ]
16
2017-11-16T02:22:25.000Z
2020-06-15T20:36:44.000Z
lineage tests/Interacting Lineages via SBML.ipynb
albertanis/bioscrape
d595a6f3b37e527851e6f33215ed3f24d4f58add
[ "MIT" ]
90
2020-04-16T23:39:29.000Z
2021-09-18T18:37:06.000Z
lineage tests/Interacting Lineages via SBML.ipynb
albertanis/bioscrape
d595a6f3b37e527851e6f33215ed3f24d4f58add
[ "MIT" ]
15
2018-04-18T03:09:07.000Z
2020-06-26T18:02:23.000Z
95.758974
12,000
0.82788
[ [ [ "%matplotlib inline\n\nimport bioscrape as bs\nimport libsbml\nfrom bioscrape.lineage import py_SimulateInteractingCellLineage\nfrom bioscrape.lineage import py_SimulateSingleCell\n\nfrom bioscrape.lineage import LineageModel\nimport numpy as np\nimport pylab as plt\n\n#Number of cells\nN = 20\n\nsbml_file_path = \"C:\\\\Users\\\\wp_ix\\\\OneDrive\\\\Caltech\\\\Code\\\\bioscrape lineages\\\\sbml models\\\\\"\nf1 = \"cell_to_cell_comm_model_1.xml\"\nf2 = \"cell_to_cell_comm_model_2.xml\"\nf5 = \"cell_to_cell_comm_model_5.xml\"\nf20 = \"cell_to_cell_comm_model_10.xml\"\nf100 = \"cell_to_cell_comm_model_100.xml\"\n\nsbml_file1 = sbml_file_path+f1\nif N == 1:\n sbml_file2 = sbml_file_path+f1\nelif N == 2:\n sbml_file2 = sbml_file_path+f2\nelif N == 5:\n sbml_file2 = sbml_file_path+f5\nelif N == 20:\n sbml_file2 = sbml_file_path+f20\nelif N == 100:\n sbml_file2 = sbml_file_path+f100\nelse:\n raise ValueError(\"Invalid Value of N\")\n\nM1 = LineageModel(sbml_filename = sbml_file1)\nM2 = LineageModel(sbml_filename = sbml_file2)\n\nprint(M2.get_species())\n\ntimepoints = np.arange(0, 50, .001)\n\nresults2 = py_SimulateSingleCell(timepoints, Model = M2)\ndata_crn = results2\nprint(data_crn.shape)\n\nprint(timepoints[-1])\nglobal_sync_period = .01\nlineage = py_SimulateInteractingCellLineage(timepoints, global_sync_period, models = [M1], initial_cell_states = [N], global_species = [\"A\", \"B\"], global_species_method = 3)\ndata_approx = []\nprint(\"lineages returned:\", lineage.py_size())\nfor i in range(lineage.py_size()):\n sch = lineage.py_get_schnitz(i)\n data_approx.append(sch.py_get_dataframe(Model = M1))\n t = sch.py_get_time()\n print(\"i=\", i, \"t0=\", t[0], \"tf=\", t[1], \"len(t)=\", len(t))#, t.shape, t)\n #for j in range(6):\n #print(\"\\tj=\", j ,sch.py_get_data()[:, j])\nprint([data_approx[i].shape for i in range(N)])\n\n\n\n\n\n\n", "_____no_output_____" ], [ "plt.figure()\ncolor_list = [\"cyan\", 
\"blue\"]\nplt.subplot(121)\nplt.title(\"Approximate\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Count\")\nplt.ylim(0, 25)\nmeanA_approx = 0\nfor i in range(lineage.py_size()):\n plt.plot(timepoints, data_approx[i][\"A\"], color = color_list[0])\n plt.plot(timepoints, data_approx[i][\"B\"], color = color_list[1])\n\n\nplt.subplot(122)\nplt.title(\"Exact\")\nplt.plot(timepoints, data_crn[\"A_1_combined\"], color = color_list[0])\nplt.plot(timepoints, data_crn[\"B_1_combined\"], color = color_list[1])\nplt.xlabel(\"Time\")\nplt.ylim(0, 25)\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb216c8e1cb7e7a2b0ec1923fc3aaf86eaaa20d9
19,102
ipynb
Jupyter Notebook
ChatBot_With_Seq2Seq.ipynb
shubham0204/Google_Colab_Notebooks
7e6bab31b6fed10b4269bb5e0d20731d6da47524
[ "Apache-2.0" ]
1
2021-11-05T00:38:31.000Z
2021-11-05T00:38:31.000Z
ChatBot_With_Seq2Seq.ipynb
shubham0204/Google_Colab_Notebooks
7e6bab31b6fed10b4269bb5e0d20731d6da47524
[ "Apache-2.0" ]
2
2021-07-31T13:32:27.000Z
2021-10-16T14:20:06.000Z
ChatBot_With_Seq2Seq.ipynb
shubham0204/Google_Colab_Notebooks
7e6bab31b6fed10b4269bb5e0d20731d6da47524
[ "Apache-2.0" ]
3
2021-11-24T13:01:31.000Z
2022-02-11T11:20:18.000Z
37.308594
332
0.53052
[ [ [ "# Chatbot using Seq2Seq LSTM models\nIn this notebook, we will assemble a seq2seq LSTM model using Keras Functional API to create a working Chatbot which would answer questions asked to it.\n\nChatbots have become applications themselves. You can choose the field or stream and gather data regarding various questions. We can build a chatbot for an e-commerce webiste or a school website where parents could get information about the school.\n\n\nMessaging platforms like Allo have implemented chatbot services to engage users. The famous [Google Assistant](https://assistant.google.com/), [Siri](https://www.apple.com/in/siri/), [Cortana](https://www.microsoft.com/en-in/windows/cortana) and [Alexa](https://www.alexa.com/) may have been build using simialr models.\n\nSo, let's start building our Chatbot.\n", "_____no_output_____" ], [ "## 1) Importing the packages\n\nWe will import [TensorFlow](https://www.tensorflow.org) and our beloved [Keras](https://www.tensorflow.org/guide/keras). Also, we import other modules which help in defining model layers.\n\n\n\n\n", "_____no_output_____" ] ], [ [ "\nimport numpy as np\nimport tensorflow as tf\nimport pickle\nfrom tensorflow.keras import layers , activations , models , preprocessing\n", "_____no_output_____" ] ], [ [ "## 2) Preprocessing the data", "_____no_output_____" ], [ "### A) Download the data\n\nThe dataset hails from [chatterbot/english on Kaggle](https://www.kaggle.com/kausr25/chatterbotenglish).com by [kausr25](https://www.kaggle.com/kausr25). 
It contains pairs of questions and answers based on a number of subjects like food, history, AI etc.\n\nThe raw data could be found from this repo -> https://github.com/shubham0204/Dataset_Archives\n", "_____no_output_____" ] ], [ [ "\n!wget https://github.com/shubham0204/Dataset_Archives/blob/master/chatbot_nlp.zip?raw=true -O chatbot_nlp.zip\n!unzip chatbot_nlp.zip\n", "_____no_output_____" ] ], [ [ "### B) Reading the data from the files\n\nWe parse each of the `.yaml` files.\n\n* Concatenate two or more sentences if the answer has two or more of them.\n* Remove unwanted data types which are produced while parsing the data.\n* Append `<START>` and `<END>` to all the `answers`.\n* Create a `Tokenizer` and load the whole vocabulary ( `questions` + `answers` ) into it.\n\n\n\n", "_____no_output_____" ] ], [ [ "\nfrom tensorflow.keras import preprocessing , utils\nimport os\nimport yaml\n\ndir_path = 'chatbot_nlp/data'\nfiles_list = os.listdir(dir_path + os.sep)\n\nquestions = list()\nanswers = list()\n\nfor filepath in files_list:\n stream = open( dir_path + os.sep + filepath , 'rb')\n docs = yaml.safe_load(stream)\n conversations = docs['conversations']\n for con in conversations:\n if len( con ) > 2 :\n questions.append(con[0])\n replies = con[ 1 : ]\n ans = ''\n for rep in replies:\n ans += ' ' + rep\n answers.append( ans )\n elif len( con )> 1:\n questions.append(con[0])\n answers.append(con[1])\n\nanswers_with_tags = list()\nfor i in range( len( answers ) ):\n if type( answers[i] ) == str:\n answers_with_tags.append( answers[i] )\n else:\n questions.pop( i )\n\nanswers = list()\nfor i in range( len( answers_with_tags ) ) :\n answers.append( '<START> ' + answers_with_tags[i] + ' <END>' )\n\ntokenizer = preprocessing.text.Tokenizer()\ntokenizer.fit_on_texts( questions + answers )\nVOCAB_SIZE = len( tokenizer.word_index )+1\nprint( 'VOCAB SIZE : {}'.format( VOCAB_SIZE ))\n", "_____no_output_____" ] ], [ [ "\n### C) Preparing data for Seq2Seq model\n\nOur model 
requires three arrays namely `encoder_input_data`, `decoder_input_data` and `decoder_output_data`.\n\nFor `encoder_input_data` :\n* Tokenize the `questions`. Pad them to their maximum length.\n\nFor `decoder_input_data` :\n* Tokenize the `answers`. Pad them to their maximum length.\n\nFor `decoder_output_data` :\n\n* Tokenize the `answers`. Remove the first element from all the `tokenized_answers`. This is the `<START>` element which we added earlier.\n\n", "_____no_output_____" ] ], [ [ "\nfrom gensim.models import Word2Vec\nimport re\n\nvocab = []\nfor word in tokenizer.word_index:\n vocab.append( word )\n\ndef tokenize( sentences ):\n tokens_list = []\n vocabulary = []\n for sentence in sentences:\n sentence = sentence.lower()\n sentence = re.sub( '[^a-zA-Z]', ' ', sentence )\n tokens = sentence.split()\n vocabulary += tokens\n tokens_list.append( tokens )\n return tokens_list , vocabulary\n\n#p = tokenize( questions + answers )\n#model = Word2Vec( p[ 0 ] ) \n\n#embedding_matrix = np.zeros( ( VOCAB_SIZE , 100 ) )\n#for i in range( len( tokenizer.word_index ) ):\n #embedding_matrix[ i ] = model[ vocab[i] ]\n\n# encoder_input_data\ntokenized_questions = tokenizer.texts_to_sequences( questions )\nmaxlen_questions = max( [ len(x) for x in tokenized_questions ] )\npadded_questions = preprocessing.sequence.pad_sequences( tokenized_questions , maxlen=maxlen_questions , padding='post' )\nencoder_input_data = np.array( padded_questions )\nprint( encoder_input_data.shape , maxlen_questions )\n\n# decoder_input_data\ntokenized_answers = tokenizer.texts_to_sequences( answers )\nmaxlen_answers = max( [ len(x) for x in tokenized_answers ] )\npadded_answers = preprocessing.sequence.pad_sequences( tokenized_answers , maxlen=maxlen_answers , padding='post' )\ndecoder_input_data = np.array( padded_answers )\nprint( decoder_input_data.shape , maxlen_answers )\n\n# decoder_output_data\ntokenized_answers = tokenizer.texts_to_sequences( answers )\nfor i in 
range(len(tokenized_answers)) :\n tokenized_answers[i] = tokenized_answers[i][1:]\npadded_answers = preprocessing.sequence.pad_sequences( tokenized_answers , maxlen=maxlen_answers , padding='post' )\nonehot_answers = utils.to_categorical( padded_answers , VOCAB_SIZE )\ndecoder_output_data = np.array( onehot_answers )\nprint( decoder_output_data.shape )\n", "_____no_output_____" ] ], [ [ "## 3) Defining the Encoder-Decoder model\nThe model will have Embedding, LSTM and Dense layers. The basic configuration is as follows.\n\n\n* 2 Input Layers : One for `encoder_input_data` and another for `decoder_input_data`.\n* Embedding layer : For converting token vectors to fix sized dense vectors. **( Note : Don't forget the `mask_zero=True` argument here )**\n* LSTM layer : Provide access to Long-Short Term cells.\n\nWorking : \n\n1. The `encoder_input_data` comes in the Embedding layer ( `encoder_embedding` ). \n2. The output of the Embedding layer goes to the LSTM cell which produces 2 state vectors ( `h` and `c` which are `encoder_states` )\n3. These states are set in the LSTM cell of the decoder.\n4. The decoder_input_data comes in through the Embedding layer.\n5. 
The Embeddings goes in LSTM cell ( which had the states ) to produce seqeunces.\n\n\n\n<center><img style=\"float: center;\" src=\"https://cdn-images-1.medium.com/max/1600/1*bnRvZDDapHF8Gk8soACtCQ.gif\"></center>\n\n\nImage credits to [Hackernoon](https://hackernoon.com/tutorial-3-what-is-seq2seq-for-text-summarization-and-why-68ebaa644db0).\n\n\n\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "\nencoder_inputs = tf.keras.layers.Input(shape=( maxlen_questions , ))\nencoder_embedding = tf.keras.layers.Embedding( VOCAB_SIZE, 200 , mask_zero=True ) (encoder_inputs)\nencoder_outputs , state_h , state_c = tf.keras.layers.LSTM( 200 , return_state=True )( encoder_embedding )\nencoder_states = [ state_h , state_c ]\n\ndecoder_inputs = tf.keras.layers.Input(shape=( maxlen_answers , ))\ndecoder_embedding = tf.keras.layers.Embedding( VOCAB_SIZE, 200 , mask_zero=True) (decoder_inputs)\ndecoder_lstm = tf.keras.layers.LSTM( 200 , return_state=True , return_sequences=True )\ndecoder_outputs , _ , _ = decoder_lstm ( decoder_embedding , initial_state=encoder_states )\ndecoder_dense = tf.keras.layers.Dense( VOCAB_SIZE , activation=tf.keras.activations.softmax ) \noutput = decoder_dense ( decoder_outputs )\n\nmodel = tf.keras.models.Model([encoder_inputs, decoder_inputs], output )\nmodel.compile(optimizer=tf.keras.optimizers.RMSprop(), loss='categorical_crossentropy')\n\nmodel.summary()\n", "_____no_output_____" ] ], [ [ "## 4) Training the model\nWe train the model for a number of epochs with `RMSprop` optimizer and `categorical_crossentropy` loss function.", "_____no_output_____" ] ], [ [ "\nmodel.fit([encoder_input_data , decoder_input_data], decoder_output_data, batch_size=50, epochs=150 ) \nmodel.save( 'model.h5' ) \n", "_____no_output_____" ] ], [ [ "## 5) Defining inference models\nWe create inference models which help in predicting answers.\n\n**Encoder inference model** : Takes the question as input and outputs LSTM states ( `h` and `c` ).\n\n**Decoder inference model** : 
Takes in 2 inputs, one are the LSTM states ( Output of encoder model ), second are the answer input seqeunces ( ones not having the `<start>` tag ). It will output the answers for the question which we fed to the encoder model and its state values.", "_____no_output_____" ] ], [ [ "\ndef make_inference_models():\n \n encoder_model = tf.keras.models.Model(encoder_inputs, encoder_states)\n \n decoder_state_input_h = tf.keras.layers.Input(shape=( 200 ,))\n decoder_state_input_c = tf.keras.layers.Input(shape=( 200 ,))\n \n decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n \n decoder_outputs, state_h, state_c = decoder_lstm(\n decoder_embedding , initial_state=decoder_states_inputs)\n decoder_states = [state_h, state_c]\n decoder_outputs = decoder_dense(decoder_outputs)\n decoder_model = tf.keras.models.Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n \n return encoder_model , decoder_model\n", "_____no_output_____" ] ], [ [ "## 6) Talking with our Chatbot\n\nFirst, we define a method `str_to_tokens` which converts `str` questions to Integer tokens with padding.\n", "_____no_output_____" ] ], [ [ "\ndef str_to_tokens( sentence : str ):\n words = sentence.lower().split()\n tokens_list = list()\n for word in words:\n tokens_list.append( tokenizer.word_index[ word ] ) \n return preprocessing.sequence.pad_sequences( [tokens_list] , maxlen=maxlen_questions , padding='post')\n", "_____no_output_____" ] ], [ [ "\n\n\n1. First, we take a question as input and predict the state values using `enc_model`.\n2. We set the state values in the decoder's LSTM.\n3. Then, we generate a sequence which contains the `<start>` element.\n4. We input this sequence in the `dec_model`.\n5. We replace the `<start>` element with the element which was predicted by the `dec_model` and update the state values.\n6. 
We carry out the above steps iteratively till we hit the `<end>` tag or the maximum answer length.\n\n\n\n\n\n", "_____no_output_____" ] ], [ [ "\nenc_model , dec_model = make_inference_models()\n\nfor _ in range(10):\n states_values = enc_model.predict( str_to_tokens( input( 'Enter question : ' ) ) )\n empty_target_seq = np.zeros( ( 1 , 1 ) )\n empty_target_seq[0, 0] = tokenizer.word_index['start']\n stop_condition = False\n decoded_translation = ''\n while not stop_condition :\n dec_outputs , h , c = dec_model.predict([ empty_target_seq ] + states_values )\n sampled_word_index = np.argmax( dec_outputs[0, -1, :] )\n sampled_word = None\n for word , index in tokenizer.word_index.items() :\n if sampled_word_index == index :\n decoded_translation += ' {}'.format( word )\n sampled_word = word\n \n if sampled_word == 'end' or len(decoded_translation.split()) > maxlen_answers:\n stop_condition = True\n \n empty_target_seq = np.zeros( ( 1 , 1 ) ) \n empty_target_seq[ 0 , 0 ] = sampled_word_index\n states_values = [ h , c ] \n\n print( decoded_translation )\n", "_____no_output_____" ] ], [ [ "\n## 7) Conversion to TFLite ( Optional )\n\nWe can convert our seq2seq model to a TensorFlow Lite model so that we can use it on edge devices.\n", "_____no_output_____" ] ], [ [ "\n!pip install tf-nightly\n", "_____no_output_____" ], [ "\nconverter = tf.lite.TFLiteConverter.from_keras_model( enc_model )\nbuffer = converter.convert()\nopen( 'enc_model.tflite' , 'wb' ).write( buffer )\n\nconverter = tf.lite.TFLiteConverter.from_keras_model( dec_model )\nopen( 'dec_model.tflite' , 'wb' ).write( buffer )\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb21764a8634dfae52bd9f5c06372df9b5855379
11,046
ipynb
Jupyter Notebook
notebooks/FFlowTraining.ipynb
deltatrelabs/deltatre-microsoft-ai-soccer-action-recognition
acdc1e840ebc85bf6f4ac9785354b32235c386f9
[ "MIT" ]
14
2019-11-06T08:17:45.000Z
2022-01-21T17:14:42.000Z
notebooks/FFlowTraining.ipynb
deltatrelabs/deltatre-microsoft-ai-soccer-action-recognition
acdc1e840ebc85bf6f4ac9785354b32235c386f9
[ "MIT" ]
14
2020-03-24T17:48:14.000Z
2022-02-10T01:13:49.000Z
notebooks/FFlowTraining.ipynb
deltatrelabs/deltatre-microsoft-ai-soccer-action-recognition
acdc1e840ebc85bf6f4ac9785354b32235c386f9
[ "MIT" ]
4
2020-05-20T01:56:42.000Z
2022-03-11T19:25:18.000Z
31.291785
138
0.537208
[ [ [ "from config import *\n\nimport mPyPl as mp\nfrom mPyPl.utils.flowutils import *\nfrom mpyplx import *\nfrom pipe import Pipe\nfrom functools import partial\n\nimport numpy as np\nimport cv2\nimport itertools\nfrom moviepy.editor import *\nimport pickle\nimport functools ", "_____no_output_____" ], [ "from config import *\n\ntest_names = (\n from_json(os.path.join(source_dir,'matches.json'))\n | mp.where(lambda x: 'Test' in x.keys() and int(x['Test'])>0)\n | mp.apply(['Id','Half'],'pattern',lambda x: \"{}_{}_\".format(x[0],x[1]))\n | mp.select_field('pattern')\n | mp.as_list\n)", "_____no_output_____" ], [ "stream = (\n mp.get_datastream(data_dir, ext=\".fflow.pickle\", classes={'noshot' : 0, 'shots': 1})\n | datasplit_by_pattern(test_pattern=test_names)\n | stratify_sample_tt()\n | mp.apply(['class_id','split'],'descr',lambda x: \"{}-{}\".format(x[0],x[1]))\n | summarize('descr')\n | mp.as_list\n)\n\ntrain, test = (\n stream\n | mp.apply('filename', 'raw', lambda x: pickle.load(open(x, 'rb')), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('raw', 'gradients', calc_gradients, eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('gradients', 'polar', lambda x: to_polar(x), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('polar', 'channel1', lambda x: np.concatenate([y[0] for y in x]), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('polar', 'channel2', lambda x: np.concatenate([y[1] for y in x]), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.make_train_test_split()\n)", "_____no_output_____" ], [ "train = train | mp.as_list\n\nch1 = stream | mp.select_field('channel1') | mp.as_list\nch1_flatten = np.concatenate(ch1)\n\nch2 = stream | mp.select_field('channel2') | mp.as_list\nch2_flatten = np.concatenate(ch2)", "_____no_output_____" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nplt.hist(ch1_flatten, bins=100);", "_____no_output_____" ], [ "plt.hist(ch2_flatten, bins=100);", "_____no_output_____" ] 
], [ [ "## OpticalFlow Model Training", "_____no_output_____" ] ], [ [ "scene_changes = pickle.load(open('scene.changes.pkl', 'rb'))\nscene_changes = list(scene_changes[40].keys())\nscene_changes = [ fn.replace('.resized.mp4', '.fflow.pickle') for fn in scene_changes]", "_____no_output_____" ], [ "retinaflow_shape = (25, 50, 2)\n\nhist_params = [\n dict(\n bins=retinaflow_shape[1],\n lower=0,\n upper=150,\n maxv=150\n ),\n dict(\n bins=retinaflow_shape[1],\n lower=0,\n upper=6.29,\n maxv=6.29 \n ),\n]\n\nstream = (\n mp.get_datastream(data_dir, ext=\".fflow.pickle\", classes={'noshot' : 0, 'shots': 1})\n | mp.filter('filename', lambda x: not x in scene_changes) \n | datasplit_by_pattern(test_pattern=test_names)\n | stratify_sample_tt()\n | mp.apply(['class_id','split'],'descr',lambda x: \"{}-{}\".format(x[0],x[1]))\n | summarize('descr')\n | mp.as_list\n)\n\ntrain, test = (\n stream\n | mp.apply('filename', 'raw', lambda x: pickle.load(open(x, 'rb')), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('raw', 'gradients', calc_gradients, eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('gradients', 'polar', lambda x: to_polar(x), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('polar', 'histograms', lambda x: video_to_hist(x, hist_params), eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.apply('histograms', 'fflows', functools.partial(zero_pad,shape=retinaflow_shape), \n eval_strategy=mp.EvalStrategies.LazyMemoized)\n | mp.make_train_test_split()\n)\n", "_____no_output_____" ], [ "no_train = stream | mp.filter('split',lambda x: x==mp.SplitType.Train) | mp.count\nno_test = stream | mp.filter('split',lambda x: x==mp.SplitType.Test) | mp.count\n\n# training params\nLEARNING_RATE = 0.001\nV = \"v1\"\nMODEL_CHECKPOINT = \"models/unet_ch_\" + V + \".h5\"\nMODEL_PATH = MODEL_CHECKPOINT.replace(\"_ch_\", \"_model_\")\nHISTORY_PATH = MODEL_PATH.replace(\".h5\", \"_history.pkl\")\nBATCH_SIZE = 16\nEPOCHS = 50", "_____no_output_____" ], [ "from 
keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import EarlyStopping\n\ncallback_checkpoint = ModelCheckpoint(\n MODEL_CHECKPOINT, \n verbose=1, \n monitor='val_loss', \n save_best_only=True\n)\n\ncallback_stopping = EarlyStopping(\n monitor='val_loss', \n min_delta=0, \n patience=7, \n verbose=1, \n mode='auto', \n restore_best_weights=True\n)\n\n\nfrom keras.callbacks import ReduceLROnPlateau\nreduce_lr = ReduceLROnPlateau(monitor='val_loss', verbose=1, factor=0.5,\n patience=4, cooldown=4, min_lr=0.0001)", "_____no_output_____" ], [ "from keras.models import Sequential\nfrom keras.layers import *\nfrom keras.regularizers import l2\nfrom keras.optimizers import Adam\n\nretinaflow_shape = (25, 50, 2)\n\nmodel = Sequential()\nmodel.add(Conv2D(64, (5,3), input_shape=retinaflow_shape))\nmodel.add(Conv2D(32, (3,3), activation='relu', kernel_initializer='glorot_uniform'))\nmodel.add(MaxPooling2D(pool_size=(3, 3)))\nmodel.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(32, activation='relu', kernel_initializer='glorot_uniform'))\nmodel.add(Dense(1, activation='sigmoid', kernel_initializer='glorot_uniform'))\n\nmodel.compile(loss='binary_crossentropy',\n optimizer=Adam(lr=0.001),\n metrics=['acc'])\nmodel.summary()", "_____no_output_____" ], [ "history = model.fit_generator(\n train | mp.infshuffle | mp.as_batch('fflows', 'class_id', batchsize=BATCH_SIZE),\n steps_per_epoch = no_train // BATCH_SIZE,\n validation_data = test | mp.infshuffle | mp.as_batch('fflows', 'class_id', batchsize=BATCH_SIZE),\n validation_steps = no_test // BATCH_SIZE,\n epochs=EPOCHS, \n verbose=1,\n callbacks=[callback_checkpoint, callback_stopping, reduce_lr]\n )", "_____no_output_____" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\ndef plot_history(history):\n loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' not in s]\n val_loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' in s]\n acc_list = [s for s in 
history.history.keys() if 'acc' in s and 'val' not in s]\n val_acc_list = [s for s in history.history.keys() if 'acc' in s and 'val' in s]\n \n if len(loss_list) == 0:\n print('Loss is missing in history')\n return \n \n ## As loss always exists\n epochs = range(1,len(history.history[loss_list[0]]) + 1)\n \n ## Loss\n plt.figure(1)\n for l in loss_list:\n plt.plot(epochs, history.history[l], 'b', label='Training loss (' + str(str(format(history.history[l][-1],'.5f'))+')'))\n for l in val_loss_list:\n plt.plot(epochs, history.history[l], 'g', label='Validation loss (' + str(str(format(history.history[l][-1],'.5f'))+')'))\n \n plt.title('Loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n \n ## Accuracy\n plt.figure(2)\n for l in acc_list:\n plt.plot(epochs, history.history[l], 'b', label='Training accuracy (' + str(format(history.history[l][-1],'.5f'))+')')\n for l in val_acc_list: \n plt.plot(epochs, history.history[l], 'g', label='Validation accuracy (' + str(format(history.history[l][-1],'.5f'))+')')\n\n plt.title('Accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()", "_____no_output_____" ], [ "plot_history(history)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb21799b642a9d4e3885196ff8f7a506bc9c1b6d
47,653
ipynb
Jupyter Notebook
dev/01a_torch_core.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
null
null
null
dev/01a_torch_core.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
null
null
null
dev/01a_torch_core.ipynb
tianjianjiang/fastai_dev
cc8e2d64c330c1a93dd84c854b12e700c7d68a8b
[ "Apache-2.0" ]
1
2019-08-30T14:34:07.000Z
2019-08-30T14:34:07.000Z
36.348589
3,276
0.639729
[ [ [ "#default_exp torch_core", "_____no_output_____" ], [ "#export\nfrom local.test import *\nfrom local.imports import *\nfrom local.torch_imports import *\nfrom local.core import *\nfrom local.notebook.showdoc import show_doc", "_____no_output_____" ], [ "#export\nif torch.cuda.is_available(): torch.cuda.set_device(int(os.environ.get('DEFAULT_GPU') or 0))", "_____no_output_____" ] ], [ [ "# Torch Core\n\n> Basic pytorch functions used in the fastai library", "_____no_output_____" ], [ "## Basics", "_____no_output_____" ] ], [ [ "#export\n@patch\ndef __array_eq__(self:Tensor,b):\n return torch.equal(self,b) if self.dim() else self==b", "_____no_output_____" ], [ "#export\ndef tensor(x, *rest, **kwargs):\n \"Like `torch.as_tensor`, but handle lists too, and can pass multiple vector elements directly.\"\n if len(rest): x = (x,)+rest\n # Pytorch bug in dataloader using num_workers>0\n if isinstance(x, (tuple,list)) and len(x)==0: return tensor(0)\n res = (torch.tensor(x, **kwargs) if isinstance(x, (tuple,list))\n else as_tensor(x, **kwargs) if hasattr(x, '__array__')\n else as_tensor(x, **kwargs) if is_listy(x)\n else as_tensor(x, **kwargs) if is_iter(x)\n else None)\n if res is None:\n res = as_tensor(array(x), **kwargs)\n if res.dtype is torch.float64: return res.float()\n if res.dtype is torch.int32:\n warn('Tensor is int32: upgrading to int64; for better performance use int64 input')\n return res.long()\n return res", "_____no_output_____" ], [ "test_eq(tensor(array([1,2,3])), torch.tensor([1,2,3]))\ntest_eq(tensor(1,2,3), torch.tensor([1,2,3]))\ntest_eq_type(tensor(1.0), torch.tensor(1.0))", "_____no_output_____" ], [ "#export\ndef set_seed(s):\n \"Set random seed for `random`, `torch`, and `numpy` (where available)\"\n try: torch.manual_seed(s)\n except NameError: pass\n try: np.random.seed(s%(2**32-1))\n except NameError: pass\n random.seed(s)", "_____no_output_____" ], [ "set_seed(2*33)\na1 = np.random.random()\na2 = torch.rand(())\na3 = 
random.random()\nset_seed(2*33)\nb1 = np.random.random()\nb2 = torch.rand(())\nb3 = random.random()\ntest_eq(a1,b1)\ntest_eq(a2,b2)\ntest_eq(a3,b3)", "_____no_output_____" ], [ "#export\ndef _fa_rebuild_tensor (cls, *args, **kwargs): return cls(torch._utils._rebuild_tensor_v2(*args, **kwargs))\ndef _fa_rebuild_qtensor(cls, *args, **kwargs): return cls(torch._utils._rebuild_qtensor (*args, **kwargs))", "_____no_output_____" ], [ "#export\nclass TensorBase(Tensor, metaclass=BypassNewMeta):\n def _new_meta(self, *args, **kwargs): return tensor(self)\n\n def __reduce_ex__(self,proto):\n torch.utils.hooks.warn_if_has_hooks(self)\n args = (type(self), self.storage(), self.storage_offset(), tuple(self.size()), self.stride())\n if self.is_quantized: args = args + (self.q_scale(), self.q_zero_point())\n f = _fa_rebuild_qtensor if self.is_quantized else _fa_rebuild_tensor\n return (f, args + (self.requires_grad, OrderedDict()))", "_____no_output_____" ], [ "#export\ndef _patch_tb():\n def get_f(fn):\n def _f(self, *args, **kwargs):\n cls = self.__class__\n res = getattr(super(TensorBase, self), fn)(*args, **kwargs)\n return cls(res) if isinstance(res,Tensor) else res\n return _f\n\n t = tensor([1])\n skips = '__class__ __deepcopy__ __delattr__ __dir__ __doc__ __getattribute__ __hash__ __init__ \\\n __init_subclass__ __new__ __reduce__ __reduce_ex__ __module__ __setstate__'.split()\n\n for fn in dir(t):\n if fn in skips: continue\n f = getattr(t, fn)\n if isinstance(f, (MethodWrapperType, BuiltinFunctionType, BuiltinMethodType, MethodType, FunctionType)):\n setattr(TensorBase, fn, get_f(fn))\n\n_patch_tb()", "_____no_output_____" ], [ "class _T(TensorBase): pass\n\nt = _T(range(5))\ntest_eq_type(t[0], _T(0))\ntest_eq_type(t[:2], _T([0,1]))\ntest_eq_type(t+1, _T(range(1,6)))\n\ntest_eq(type(pickle.loads(pickle.dumps(t))), _T)", "_____no_output_____" ] ], [ [ "## L -", "_____no_output_____" ] ], [ [ "#export\n@patch\ndef tensored(self:L):\n \"`mapped(tensor)`\"\n return 
self.mapped(tensor)\n@patch\ndef stack(self:L, dim=0):\n \"Same as `torch.stack`\"\n return torch.stack(list(self.tensored()), dim=dim)\n@patch\ndef cat (self:L, dim=0):\n \"Same as `torch.cat`\"\n return torch.cat (list(self.tensored()), dim=dim)", "_____no_output_____" ], [ "show_doc(L.tensored)", "_____no_output_____" ] ], [ [ "There are shortcuts for `torch.stack` and `torch.cat` if your `L` contains tensors or something convertible. You can manually convert with `tensored`.", "_____no_output_____" ] ], [ [ "t = L(([1,2],[3,4]))\ntest_eq(t.tensored(), [tensor(1,2),tensor(3,4)])", "_____no_output_____" ], [ "show_doc(L.stack)", "_____no_output_____" ], [ "test_eq(t.stack(), tensor([[1,2],[3,4]]))", "_____no_output_____" ], [ "show_doc(L.cat)", "_____no_output_____" ], [ "test_eq(t.cat(), tensor([1,2,3,4]))", "_____no_output_____" ] ], [ [ "## Chunks", "_____no_output_____" ] ], [ [ "#export\ndef concat(*ls):\n \"Concatenate tensors, arrays, lists, or tuples\"\n if not len(ls): return []\n it = ls[0]\n if isinstance(it,torch.Tensor): res = torch.cat(ls)\n elif isinstance(it,ndarray): res = np.concatenate(ls)\n else:\n res = [o for x in ls for o in L(x)]\n if isinstance(it,(tuple,list)): res = type(it)(res)\n else: res = L(res)\n return retain_type(res, it)", "_____no_output_____" ], [ "a,b,c = [1],[1,2],[1,1,2]\ntest_eq(concat(a,b), c)\ntest_eq_type(concat(tuple (a),tuple (b)), tuple (c))\ntest_eq_type(concat(array (a),array (b)), array (c))\ntest_eq_type(concat(tensor(a),tensor(b)), tensor(c))\ntest_eq_type(concat(TensorBase(a),TensorBase(b)), TensorBase(c))\ntest_eq_type(concat([1,1],1), [1,1,1])\ntest_eq_type(concat(1,1,1), L(1,1,1))\ntest_eq_type(concat(L(1,2),1), L(1,2,1))", "_____no_output_____" ], [ "#export\nclass Chunks:\n \"Slice and int indexing into a list of lists\"\n def __init__(self, chunks, lens=None):\n self.chunks = chunks\n self.lens = L(map(len,self.chunks) if lens is None else lens)\n self.cumlens = np.cumsum(0+self.lens)\n self.totlen = 
self.cumlens[-1]\n\n def __getitem__(self,i):\n if isinstance(i,slice): return self.getslice(i)\n di,idx = self.doc_idx(i)\n return self.chunks[di][idx]\n\n def getslice(self, i):\n st_d,st_i = self.doc_idx(ifnone(i.start,0))\n en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))\n res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]\n for b in range(st_d+1,en_d): res.append(self.chunks[b])\n if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])\n return concat(*res)\n\n def doc_idx(self, i):\n if i<0: i=self.totlen+i # count from end\n docidx = np.searchsorted(self.cumlens, i+1)-1\n cl = self.cumlens[docidx]\n return docidx,i-cl", "_____no_output_____" ], [ "docs = L(list(string.ascii_lowercase[a:b]) for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))\n\nb = Chunks(docs)\ntest_eq([b[ o] for o in range(0,5)], ['a','b','c','d','e'])\ntest_eq([b[-o] for o in range(1,6)], ['z','y','x','w','v'])\ntest_eq(b[6:13], 'g,h,i,j,k,l,m'.split(','))\ntest_eq(b[20:77], 'u,v,w,x,y,z'.split(','))\ntest_eq(b[:5], 'a,b,c,d,e'.split(','))\ntest_eq(b[:2], 'a,b'.split(','))", "_____no_output_____" ], [ "t = torch.arange(26)\ndocs = L(t[a:b] for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))\nb = Chunks(docs)\ntest_eq([b[ o] for o in range(0,5)], range(0,5))\ntest_eq([b[-o] for o in range(1,6)], [25,24,23,22,21])\ntest_eq(b[6:13], torch.arange(6,13))\ntest_eq(b[20:77], torch.arange(20,26))\ntest_eq(b[:5], torch.arange(5))\ntest_eq(b[:2], torch.arange(2))", "_____no_output_____" ], [ "docs = L(TensorBase(t[a:b]) for a,b in ((0,3),(3,7),(7,8),(8,16),(16,24),(24,26)))\nb = Chunks(docs)\ntest_eq_type(b[:2], TensorBase(range(2)))\ntest_eq_type(b[:5], TensorBase(range(5)))\ntest_eq_type(b[9:13], TensorBase(range(9,13)))", "_____no_output_____" ] ], [ [ "## Other functions", "_____no_output_____" ] ], [ [ "#export\ndef apply(func, x, *args, **kwargs):\n \"Apply `func` recursively to `x`, passing on args\"\n if is_listy(x): return 
type(x)([apply(func, o, *args, **kwargs) for o in x])\n if isinstance(x,dict): return {k: apply(func, v, *args, **kwargs) for k,v in x.items()}\n res = func(x, *args, **kwargs)\n return res if x is None else retain_type(res, x)", "_____no_output_____" ], [ "#export\ndef to_detach(b, cpu=True):\n \"Recursively detach lists of tensors in `b `; put them on the CPU if `cpu=True`.\"\n def _inner(x, cpu=True):\n if not isinstance(x,Tensor): return x\n x = x.detach()\n return x.cpu() if cpu else x\n return apply(_inner, b, cpu=cpu)", "_____no_output_____" ], [ "#export\ndef to_half(b):\n \"Recursively map lists of tensors in `b ` to FP16.\"\n return apply(lambda x: x.half() if torch.is_floating_point(x) else x, b)", "_____no_output_____" ], [ "#export\ndef to_float(b):\n \"Recursively map lists of int tensors in `b ` to float.\"\n return apply(lambda x: x.float() if torch.is_floating_point(x) else x, b)", "_____no_output_____" ], [ "#export\n# None: True if available; True: error if not availabe; False: use CPU\ndefaults.use_cuda = None", "_____no_output_____" ], [ "#export\ndef default_device(use_cuda=-1):\n \"Return or set default device; `use_cuda`: None - CUDA if available; True - error if not availabe; False - CPU\"\n if use_cuda != -1: defaults.use_cuda=use_cuda\n use = defaults.use_cuda or (torch.cuda.is_available() and defaults.use_cuda is None)\n assert torch.cuda.is_available() or not use\n return torch.device(torch.cuda.current_device()) if use else torch.device('cpu')", "_____no_output_____" ], [ "#cuda\n_td = torch.device(torch.cuda.current_device())\ntest_eq(default_device(None), _td)\ntest_eq(default_device(True), _td)\ntest_eq(default_device(False), torch.device('cpu'))\ndefault_device(None);", "_____no_output_____" ], [ "#export\ndef to_device(b, device=None):\n \"Recursively put `b` on `device`.\"\n if device is None: device=default_device()\n def _inner(o): return o.to(device, non_blocking=True) if isinstance(o,Tensor) else o\n return apply(_inner, b)", 
"_____no_output_____" ], [ "t = to_device((3,(tensor(3),tensor(2))))\nt1,(t2,t3) = t\ntest_eq_type(t,(3,(tensor(3).cuda(),tensor(2).cuda())))\ntest_eq(t2.type(), \"torch.cuda.LongTensor\")\ntest_eq(t3.type(), \"torch.cuda.LongTensor\")", "_____no_output_____" ], [ "#export\ndef to_cpu(b):\n \"Recursively map lists of tensors in `b ` to the cpu.\"\n return to_device(b,'cpu')", "_____no_output_____" ], [ "t3 = to_cpu(t3)\ntest_eq(t3.type(), \"torch.LongTensor\")\ntest_eq(t3, 2)", "_____no_output_____" ], [ "# #export\n# def to_np(x):\n# \"Convert a tensor to a numpy array.\"\n# return apply(Self.detach().cpu().numpy(), x)", "_____no_output_____" ], [ "#export\ndef to_np(x):\n \"Convert a tensor to a numpy array.\"\n return apply(lambda o: o.data.cpu().numpy(), x)", "_____no_output_____" ], [ "t3 = to_np(t3)\ntest_eq(type(t3), np.ndarray)\ntest_eq(t3, 2)", "_____no_output_____" ], [ "#export\ndef item_find(x, idx=0):\n \"Recursively takes the `idx`-th element of `x`\"\n if is_listy(x): return item_find(x[idx])\n if isinstance(x,dict):\n key = list(x.keys())[idx] if isinstance(idx, int) else idx\n return item_find(x[key])\n return x", "_____no_output_____" ], [ "#export\ndef find_device(b):\n \"Recursively search the device of `b`.\"\n return item_find(b).device", "_____no_output_____" ], [ "dev = default_device()\ntest_eq(find_device(t2), dev)\ntest_eq(find_device([t2,t2]), dev)\ntest_eq(find_device({'a':t2,'b':t2}), dev)\ntest_eq(find_device({'a':[[t2],[t2]],'b':t2}), dev)", "_____no_output_____" ], [ "#export\ndef find_bs(b):\n \"Recursively search the batch size of `b`.\"\n return item_find(b).shape[0]", "_____no_output_____" ], [ "x = torch.randn(4,5)\ntest_eq(find_bs(x), 4)\ntest_eq(find_bs([x, x]), 4)\ntest_eq(find_bs({'a':x,'b':x}), 4)\ntest_eq(find_bs({'a':[[x],[x]],'b':x}), 4)", "_____no_output_____" ], [ "def np_func(f):\n \"Convert a function taking and returning numpy arrays to one taking and returning tensors\"\n def _inner(*args, **kwargs):\n nargs = 
[to_np(arg) if isinstance(arg,Tensor) else arg for arg in args]\n return tensor(f(*nargs, **kwargs))\n functools.update_wrapper(_inner, f)\n return _inner", "_____no_output_____" ] ], [ [ "This decorator is particularly useful for using numpy functions as fastai metrics, for instance:", "_____no_output_____" ] ], [ [ "from sklearn.metrics import f1_score\n\n@np_func\ndef f1(inp,targ): return f1_score(targ, inp)\n\na1,a2 = array([0,1,1]),array([1,0,1])\nt = f1(tensor(a1),tensor(a2))\ntest_eq(f1_score(a1,a2), t)\nassert isinstance(t,Tensor)", "_____no_output_____" ], [ "#export\nclass Module(nn.Module, metaclass=PrePostInitMeta):\n \"Same as `nn.Module`, but no need for subclasses to call `super().__init__`\"\n def __pre_init__(self): super().__init__()\n def __init__(self): pass", "_____no_output_____" ], [ "show_doc(Module, title_level=3)", "_____no_output_____" ], [ "class _T(Module):\n def __init__(self): self.f = nn.Linear(1,1)\n def forward(self,x): return self.f(x)\n\nt = _T()\nt(tensor([1.]))", "_____no_output_____" ], [ "# export\ndef one_hot(x, c):\n \"One-hot encode `x` with `c` classes.\"\n res = torch.zeros(c, dtype=torch.uint8)\n res[L(x)] = 1.\n return res", "_____no_output_____" ], [ "test_eq(one_hot([1,4], 5), tensor(0,1,0,0,1).byte())\ntest_eq(one_hot([], 5), tensor(0,0,0,0,0).byte())\ntest_eq(one_hot(2, 5), tensor(0,0,1,0,0).byte())", "_____no_output_____" ], [ "#export\ndef one_hot_decode(x, vocab=None):\n return L(vocab[i] if vocab else i for i,x_ in enumerate(x) if x_==1)", "_____no_output_____" ], [ "test_eq(one_hot_decode(tensor(0,1,0,0,1)), [1,4])\ntest_eq(one_hot_decode(tensor(0,0,0,0,0)), [ ])\ntest_eq(one_hot_decode(tensor(0,0,1,0,0)), [2 ])", "_____no_output_____" ], [ "#export\ndef trainable_params(m):\n \"Return all trainable parameters of `m`\"\n return [p for p in m.parameters() if p.requires_grad]", "_____no_output_____" ], [ "m = nn.Linear(4,5)\ntest_eq(trainable_params(m), [m.weight, 
m.bias])\nm.weight.requires_grad_(False)\ntest_eq(trainable_params(m), [m.bias])", "_____no_output_____" ], [ "#export\ndef bn_bias_params(m):\n \"Return all bias and BatchNorm parameters\"\n if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): return list(m.parameters())\n res = sum([bn_bias_params(c) for c in m.children()], [])\n if hasattr(m, 'bias'): res.append(m.bias)\n return res", "_____no_output_____" ], [ "model = nn.Sequential(nn.Linear(10,20), nn.BatchNorm1d(20), nn.Conv1d(3,4, 3))\ntest_eq(bn_bias_params(model), [model[0].bias, model[1].weight, model[1].bias, model[2].bias])\nmodel = nn.ModuleList([nn.Linear(10,20), nn.Sequential(nn.BatchNorm1d(20), nn.Conv1d(3,4, 3))])\ntest_eq(bn_bias_params(model), [model[0].bias, model[1][0].weight, model[1][0].bias, model[1][1].bias])", "_____no_output_____" ] ], [ [ "### Image helpers", "_____no_output_____" ] ], [ [ "#export\ndef make_cross_image(bw=True):\n \"Create a tensor containing a cross image, either `bw` (True) or color\"\n if bw:\n im = torch.zeros(5,5)\n im[2,:] = 1.\n im[:,2] = 1.\n else:\n im = torch.zeros(3,5,5)\n im[0,2,:] = 1.\n im[1,:,2] = 1.\n return im", "_____no_output_____" ], [ "plt.imshow(make_cross_image(), cmap=\"Greys\");", "_____no_output_____" ], [ "plt.imshow(make_cross_image(False).permute(1,2,0));", "_____no_output_____" ], [ "#export\ndef show_title(o, ax=None, ctx=None, label=None, **kwargs):\n \"Set title of `ax` to `o`, or print `o` if `ax` is `None`\"\n ax = ifnone(ax,ctx)\n if ax is None: print(o)\n elif hasattr(ax, 'set_title'): ax.set_title(o)\n elif isinstance(ax, pd.Series):\n while label in ax: label += '_'\n ax = ax.append(pd.Series({label: o}))\n return ax", "_____no_output_____" ], [ "test_stdout(lambda: show_title(\"title\"), \"title\")\n# ensure that col names are unique when showing to a pandas series\nassert show_title(\"title\", ctx=pd.Series(dict(a=1)), label='a').equals(pd.Series(dict(a=1,a_='title')))", "_____no_output_____" ], [ "#export\ndef 
show_image(im, ax=None, figsize=None, title=None, ctx=None, **kwargs):\n \"Show a PIL or PyTorch image on `ax`.\"\n ax = ifnone(ax,ctx)\n if ax is None: _,ax = plt.subplots(figsize=figsize)\n # Handle pytorch axis order\n if isinstance(im,Tensor):\n im = to_cpu(im)\n if im.shape[0]<5: im=im.permute(1,2,0)\n elif not isinstance(im,np.ndarray): im=array(im)\n # Handle 1-channel images\n if im.shape[-1]==1: im=im[...,0]\n ax.imshow(im, **kwargs)\n if title is not None: ax.set_title(title)\n ax.axis('off')\n return ax", "_____no_output_____" ] ], [ [ "`show_image` can show b&w images...", "_____no_output_____" ] ], [ [ "im = make_cross_image()\nax = show_image(im, cmap=\"Greys\", figsize=(2,2))", "_____no_output_____" ] ], [ [ "...and color images with standard `c*h*w` dim order...", "_____no_output_____" ] ], [ [ "im2 = make_cross_image(False)\nax = show_image(im2, figsize=(2,2))", "_____no_output_____" ] ], [ [ "...and color images with `h*w*c` dim order...", "_____no_output_____" ] ], [ [ "im3 = im2.permute(1,2,0)\nax = show_image(im3, figsize=(2,2))", "_____no_output_____" ], [ "ax = show_image(im, cmap=\"Greys\", figsize=(2,2))\nshow_title(\"Cross\", ax)", "_____no_output_____" ], [ "#export\ndef show_titled_image(o, **kwargs):\n \"Call `show_image` destructuring `o` to `(img,title)`\"\n show_image(o[0], title=str(o[1]), **kwargs)", "_____no_output_____" ], [ "#export\ndef show_image_batch(b, show=show_titled_image, items=9, cols=3, figsize=None, **kwargs):\n \"Display batch `b` in a grid of size `items` with `cols` width\"\n rows = (items+cols-1) // cols\n if figsize is None: figsize = (cols*3, rows*3)\n fig,axs = plt.subplots(rows, cols, figsize=figsize)\n for *o,ax in zip(*to_cpu(b), axs.flatten()): show(o, ax=ax, **kwargs)", "_____no_output_____" ], [ "show_image_batch(([im,im2,im3],['bw','chw','hwc']), items=3)", "_____no_output_____" ] ], [ [ "# Export -", "_____no_output_____" ] ], [ [ "#hide\nfrom local.notebook.export import 
notebook2script\nnotebook2script(all_fs=True)", "Converted 00_test.ipynb.\nConverted 01_core.ipynb.\nConverted 01a_torch_core.ipynb.\nConverted 01b_script.ipynb.\nConverted 01c_dataloader.ipynb.\nConverted 02_data_transforms.ipynb.\nConverted 03_data_pipeline.ipynb.\nConverted 05_data_core.ipynb.\nConverted 06_data_source.ipynb.\nConverted 07_vision_core.ipynb.\nConverted 08_pets_tutorial.ipynb.\nConverted 09_vision_augment.ipynb.\nConverted 11_layers.ipynb.\nConverted 11a_vision_models_xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 14_callback_schedule.ipynb.\nConverted 15_callback_hook.ipynb.\nConverted 16_callback_progress.ipynb.\nConverted 17_callback_tracker.ipynb.\nConverted 18_callback_fp16.ipynb.\nConverted 19_callback_mixup.ipynb.\nConverted 20_metrics.ipynb.\nConverted 21_tutorial_imagenette.ipynb.\nConverted 30_text_core.ipynb.\nConverted 31_text_data.ipynb.\nConverted 32_text_models_awdlstm.ipynb.\nConverted 33_test_models_core.ipynb.\nConverted 34_callback_rnn.ipynb.\nConverted 35_tutorial_wikitext.ipynb.\nConverted 36_text_models_qrnn.ipynb.\nConverted 40_tabular_core.ipynb.\nConverted 41_tabular_model.ipynb.\nConverted 50_data_block.ipynb.\nConverted 90_notebook_core.ipynb.\nConverted 91_notebook_export.ipynb.\nConverted 92_notebook_showdoc.ipynb.\nConverted 93_notebook_export2html.ipynb.\nConverted 94_index.ipynb.\nConverted 95_utils_test.ipynb.\nConverted 96_data_external.ipynb.\nConverted notebook2jekyll.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb217edae9c8e19639f10ce22151b4d41188d2cf
42,139
ipynb
Jupyter Notebook
Course - 4: Applied Text Mining in Python/Module+2+(Python+3).ipynb
Skharwa1/Applied-Data-Science-with-Python-Specialization
c1778f2f265b75613718f4339c1527f9b42c96b0
[ "MIT" ]
9
2021-10-03T08:17:06.000Z
2022-03-24T10:41:10.000Z
Course - 4: Applied Text Mining in Python/Module+2+(Python+3).ipynb
Skharwa1/Applied-Data-Science-with-Python-Specialization
c1778f2f265b75613718f4339c1527f9b42c96b0
[ "MIT" ]
null
null
null
Course - 4: Applied Text Mining in Python/Module+2+(Python+3).ipynb
Skharwa1/Applied-Data-Science-with-Python-Specialization
c1778f2f265b75613718f4339c1527f9b42c96b0
[ "MIT" ]
22
2021-02-10T06:01:00.000Z
2022-03-21T11:50:42.000Z
37.35727
1,695
0.510691
[ [ [ "# Module 2 (Python 3)", "_____no_output_____" ], [ "## Basic NLP Tasks with NLTK", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download()", "NLTK Downloader\n---------------------------------------------------------------------------\n d) Download l) List u) Update c) Config h) Help q) Quit\n---------------------------------------------------------------------------\nDownloader> d\n\nDownload which package (l=list; x=cancel)?\n Identifier> l\nPackages:\n [ ] abc................. Australian Broadcasting Commission 2006\n [ ] alpino.............. Alpino Dutch Treebank\n [ ] averaged_perceptron_tagger Averaged Perceptron Tagger\n [ ] averaged_perceptron_tagger_ru Averaged Perceptron Tagger (Russian)\n [ ] basque_grammars..... Grammars for Basque\n [ ] biocreative_ppi..... BioCreAtIvE (Critical Assessment of Information\n Extraction Systems in Biology)\n [ ] bllip_wsj_no_aux.... BLLIP Parser: WSJ Model\n [ ] book_grammars....... Grammars from NLTK Book\n [ ] brown............... Brown Corpus\n [ ] brown_tei........... Brown Corpus (TEI XML Version)\n [ ] cess_cat............ CESS-CAT Treebank\n [ ] cess_esp............ CESS-ESP Treebank\n [ ] chat80.............. Chat-80 Data Files\n [ ] city_database....... City Database\n [ ] cmudict............. The Carnegie Mellon Pronouncing Dictionary (0.6)\n [ ] comparative_sentences Comparative Sentence Dataset\n [ ] comtrans............ ComTrans Corpus Sample\n [ ] conll2000........... CONLL 2000 Chunking Corpus\n [ ] conll2002........... CONLL 2002 Named Entity Recognition Corpus\nHit Enter to continue: \n [ ] conll2007........... Dependency Treebanks from CoNLL 2007 (Catalan\n and Basque Subset)\n [ ] crubadan............ Crubadan Corpus\n [ ] dependency_treebank. Dependency Parsed Treebank\n [ ] dolch............... Dolch Word List\n [ ] europarl_raw........ Sample European Parliament Proceedings Parallel\n Corpus\n [ ] floresta............ Portuguese Treebank\n [ ] framenet_v15........ 
FrameNet 1.5\n [ ] framenet_v17........ FrameNet 1.7\n [ ] gazetteers.......... Gazeteer Lists\n [ ] ieer................ NIST IE-ER DATA SAMPLE\n [ ] indian.............. Indian Language POS-Tagged Corpus\n [ ] jeita............... JEITA Public Morphologically Tagged Corpus (in\n ChaSen format)\n [ ] kimmo............... PC-KIMMO Data Files\n [ ] knbc................ KNB Corpus (Annotated blog corpus)\n [ ] large_grammars...... Large context-free and feature-based grammars\n for parser comparison\n [ ] lin_thesaurus....... Lin's Dependency Thesaurus\n [ ] mac_morpho.......... MAC-MORPHO: Brazilian Portuguese news text with\n part-of-speech tags\nHit Enter to continue: \n [ ] machado............. Machado de Assis -- Obra Completa\n [ ] masc_tagged......... MASC Tagged Corpus\n [ ] maxent_ne_chunker... ACE Named Entity Chunker (Maximum entropy)\n [ ] maxent_treebank_pos_tagger Treebank Part of Speech Tagger (Maximum entropy)\n [ ] moses_sample........ Moses Sample Models\n [ ] movie_reviews....... Sentiment Polarity Dataset Version 2.0\n [ ] mte_teip5........... MULTEXT-East 1984 annotated corpus 4.0\n [ ] mwa_ppdb............ The monolingual word aligner (Sultan et al.\n 2015) subset of the Paraphrase Database.\n [ ] names............... Names Corpus, Version 1.3 (1994-03-29)\n [ ] nombank.1.0......... NomBank Corpus 1.0\n [ ] nonbreaking_prefixes Non-Breaking Prefixes (Moses Decoder)\n [ ] omw................. Open Multilingual Wordnet\n [ ] opinion_lexicon..... Opinion Lexicon\n [ ] panlex_swadesh...... PanLex Swadesh Corpora\n [ ] paradigms........... Paradigm Corpus\n [ ] pe08................ Cross-Framework and Cross-Domain Parser\n Evaluation Shared Task\n [ ] perluniprops........ perluniprops: Index of Unicode Version 7.0.0\n character properties in Perl\n [ ] pil................. The Patient Information Leaflet (PIL) Corpus\nHit Enter to continue: \n [ ] pl196x.............. Polish language of the XX century sixties\n [ ] porter_test......... 
Porter Stemmer Test Files\n [ ] ppattach............ Prepositional Phrase Attachment Corpus\n [ ] problem_reports..... Problem Report Corpus\n [ ] product_reviews_1... Product Reviews (5 Products)\n [ ] product_reviews_2... Product Reviews (9 Products)\n [ ] propbank............ Proposition Bank Corpus 1.0\n [ ] pros_cons........... Pros and Cons\n [ ] ptb................. Penn Treebank\n [ ] punkt............... Punkt Tokenizer Models\n [ ] qc.................. Experimental Data for Question Classification\n [ ] reuters............. The Reuters-21578 benchmark corpus, ApteMod\n version\n [ ] rslp................ RSLP Stemmer (Removedor de Sufixos da Lingua\n Portuguesa)\n [ ] rte................. PASCAL RTE Challenges 1, 2, and 3\n [ ] sample_grammars..... Sample Grammars\n [ ] semcor.............. SemCor 3.0\n [ ] senseval............ SENSEVAL 2 Corpus: Sense Tagged Text\n [ ] sentence_polarity... Sentence Polarity Dataset v1.0\n [ ] sentiwordnet........ SentiWordNet\nHit Enter to continue: \n [ ] shakespeare......... Shakespeare XML Corpus Sample\n [ ] sinica_treebank..... Sinica Treebank Corpus Sample\n [ ] smultron............ SMULTRON Corpus Sample\n [ ] snowball_data....... Snowball Data\n [ ] spanish_grammars.... Grammars for Spanish\n [ ] state_union......... C-Span State of the Union Address Corpus\n [ ] stopwords........... Stopwords Corpus\n [ ] subjectivity........ Subjectivity Dataset v1.0\n [ ] swadesh............. Swadesh Wordlists\n [ ] switchboard......... Switchboard Corpus Sample\n [ ] tagsets............. Help on Tagsets\n [ ] timit............... TIMIT Corpus Sample\n [ ] toolbox............. Toolbox Sample Files\n [ ] treebank............ Penn Treebank Sample\n [ ] twitter_samples..... Twitter Samples\n [ ] udhr2............... Universal Declaration of Human Rights Corpus\n (Unicode Version)\n [ ] udhr................ Universal Declaration of Human Rights Corpus\n [ ] unicode_samples..... Unicode Samples\n [ ] universal_tagset.... 
Mappings to the Universal Part-of-Speech Tagset\n [ ] universal_treebanks_v20 Universal Treebanks Version 2.0\nHit Enter to continue: \n [ ] vader_lexicon....... VADER Sentiment Lexicon\n [ ] verbnet3............ VerbNet Lexicon, Version 3.3\n [ ] verbnet............. VerbNet Lexicon, Version 2.1\n [ ] webtext............. Web Text Corpus\n [ ] wmt15_eval.......... Evaluation data from WMT15\n [ ] word2vec_sample..... Word2Vec Sample\n [ ] wordnet............. WordNet\n [ ] wordnet_ic.......... WordNet-InfoContent\n [ ] words............... Word Lists\n [ ] ycoe................ York-Toronto-Helsinki Parsed Corpus of Old\n English Prose\n\nCollections:\n [P] all-corpora......... All the corpora\n [P] all-nltk............ All packages available on nltk_data gh-pages\n branch\n [P] all................. All packages\n [P] book................ Everything used in the NLTK Book\n [P] popular............. Popular packages\n [ ] tests............... Packages for running tests\n [ ] third-party......... Third-party data packages\n\n([*] marks installed packages; [P] marks partially installed collections)\n\nDownload which package (l=list; x=cancel)?\n Identifier> l\nPackages:\n [ ] abc................. Australian Broadcasting Commission 2006\n [ ] alpino.............. Alpino Dutch Treebank\n [ ] averaged_perceptron_tagger Averaged Perceptron Tagger\n [ ] averaged_perceptron_tagger_ru Averaged Perceptron Tagger (Russian)\n [ ] basque_grammars..... Grammars for Basque\n [ ] biocreative_ppi..... BioCreAtIvE (Critical Assessment of Information\n Extraction Systems in Biology)\n [ ] bllip_wsj_no_aux.... BLLIP Parser: WSJ Model\n [ ] book_grammars....... Grammars from NLTK Book\n [ ] brown............... Brown Corpus\n [ ] brown_tei........... Brown Corpus (TEI XML Version)\n [ ] cess_cat............ CESS-CAT Treebank\n [ ] cess_esp............ CESS-ESP Treebank\n [ ] chat80.............. Chat-80 Data Files\n [ ] city_database....... City Database\n [ ] cmudict............. 
The Carnegie Mellon Pronouncing Dictionary (0.6)\n [ ] comparative_sentences Comparative Sentence Dataset\n [ ] comtrans............ ComTrans Corpus Sample\n [ ] conll2000........... CONLL 2000 Chunking Corpus\n [ ] conll2002........... CONLL 2002 Named Entity Recognition Corpus\n" ], [ "import nltk\nfrom nltk.book import *", "*** Introductory Examples for the NLTK Book ***\nLoading text1, ..., text9 and sent1, ..., sent9\nType the name of the text or sentence to view it.\nType: 'texts()' or 'sents()' to list the materials.\ntext1: Moby Dick by Herman Melville 1851\ntext2: Sense and Sensibility by Jane Austen 1811\ntext3: The Book of Genesis\ntext4: Inaugural Address Corpus\ntext5: Chat Corpus\n" ] ], [ [ "### Counting vocabulary of words", "_____no_output_____" ] ], [ [ "text7", "_____no_output_____" ], [ "sent7", "_____no_output_____" ], [ "len(sent7)", "_____no_output_____" ], [ "len(text7)", "_____no_output_____" ], [ "len(set(text7))", "_____no_output_____" ], [ "list(set(text7))[:10]", "_____no_output_____" ] ], [ [ "### Frequency of words", "_____no_output_____" ] ], [ [ "dist = FreqDist(text7)\nlen(dist)", "_____no_output_____" ], [ "vocab1 = dist.keys()\n#vocab1[:10] \n# In Python 3 dict.keys() returns an iterable view instead of a list\nlist(vocab1)[:10]", "_____no_output_____" ], [ "dist['four']", "_____no_output_____" ], [ "freqwords = [w for w in vocab1 if len(w) > 5 and dist[w] > 100]\nfreqwords", "_____no_output_____" ] ], [ [ "### Normalization and stemming", "_____no_output_____" ] ], [ [ "input1 = \"List listed lists listing listings\"\nwords1 = input1.lower().split(' ')\nwords1", "_____no_output_____" ], [ "porter = nltk.PorterStemmer()\n[porter.stem(t) for t in words1]", "_____no_output_____" ] ], [ [ "### Lemmatization", "_____no_output_____" ] ], [ [ "udhr = nltk.corpus.udhr.words('English-Latin1')\nudhr[:20]", "_____no_output_____" ], [ "[porter.stem(t) for t in udhr[:20]] # Still Lemmatization", "_____no_output_____" ], [ "WNlemma = 
nltk.WordNetLemmatizer()\n[WNlemma.lemmatize(t) for t in udhr[:20]]", "_____no_output_____" ] ], [ [ "### Tokenization", "_____no_output_____" ] ], [ [ "text11 = \"Children shouldn't drink a sugary drink before bed.\"\ntext11.split(' ')", "_____no_output_____" ], [ "nltk.word_tokenize(text11)", "_____no_output_____" ], [ "text12 = \"This is the first sentence. A gallon of milk in the U.S. costs $2.99. Is this the third sentence? Yes, it is!\"\nsentences = nltk.sent_tokenize(text12)\nlen(sentences)", "_____no_output_____" ], [ "sentences", "_____no_output_____" ] ], [ [ "## Advanced NLP Tasks with NLTK", "_____no_output_____" ], [ "### POS tagging", "_____no_output_____" ] ], [ [ "nltk.help.upenn_tagset('MD')", "MD: modal auxiliary\n can cannot could couldn't dare may might must need ought shall should\n shouldn't will would\n" ], [ "text13 = nltk.word_tokenize(text11)\nnltk.pos_tag(text13)", "_____no_output_____" ], [ "text14 = nltk.word_tokenize(\"Visiting aunts can be a nuisance\")\nnltk.pos_tag(text14)", "_____no_output_____" ], [ "# Parsing sentence structure\ntext15 = nltk.word_tokenize(\"Alice loves Bob\")\ngrammar = nltk.CFG.fromstring(\"\"\"\nS -> NP VP\nVP -> V NP\nNP -> 'Alice' | 'Bob'\nV -> 'loves'\n\"\"\")\n\nparser = nltk.ChartParser(grammar)\ntrees = parser.parse_all(text15)\nfor tree in trees:\n print(tree)", "(S (NP Alice) (VP (V loves) (NP Bob)))\n" ], [ "text16 = nltk.word_tokenize(\"I saw the man with a telescope\")\ngrammar1 = nltk.data.load('mygrammar.cfg')\ngrammar1", "_____no_output_____" ], [ "parser = nltk.ChartParser(grammar1)\ntrees = parser.parse_all(text16)\nfor tree in trees:\n print(tree)", "(S\n (NP I)\n (VP\n (VP (V saw) (NP (Det the) (N man)))\n (PP (P with) (NP (Det a) (N telescope)))))\n(S\n (NP I)\n (VP\n (V saw)\n (NP (Det the) (N man) (PP (P with) (NP (Det a) (N telescope))))))\n" ], [ "from nltk.corpus import treebank\ntext17 = treebank.parsed_sents('wsj_0001.mrg')[0]\nprint(text17)", "(S\n (NP-SBJ\n (NP (NNP Pierre) (NNP 
Vinken))\n (, ,)\n (ADJP (NP (CD 61) (NNS years)) (JJ old))\n (, ,))\n (VP\n (MD will)\n (VP\n (VB join)\n (NP (DT the) (NN board))\n (PP-CLR (IN as) (NP (DT a) (JJ nonexecutive) (NN director)))\n (NP-TMP (NNP Nov.) (CD 29))))\n (. .))\n" ] ], [ [ "### POS tagging and parsing ambiguity", "_____no_output_____" ] ], [ [ "text18 = nltk.word_tokenize(\"The old man the boat\")\nnltk.pos_tag(text18)", "_____no_output_____" ], [ "text19 = nltk.word_tokenize(\"Colorless green ideas sleep furiously\")\nnltk.pos_tag(text19)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb219184a6d63d1955c5329e17f47748b5ae2447
101,633
ipynb
Jupyter Notebook
Project-code/AlexNet_CIFAR.ipynb
bmbodj/COMP551_P4
e52ee58f6be3d6565bcaaad0a170bccbbea5e700
[ "BSD-2-Clause" ]
null
null
null
Project-code/AlexNet_CIFAR.ipynb
bmbodj/COMP551_P4
e52ee58f6be3d6565bcaaad0a170bccbbea5e700
[ "BSD-2-Clause" ]
null
null
null
Project-code/AlexNet_CIFAR.ipynb
bmbodj/COMP551_P4
e52ee58f6be3d6565bcaaad0a170bccbbea5e700
[ "BSD-2-Clause" ]
null
null
null
136.971698
38,742
0.807395
[ [ [ "# COMP551: Project 4", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport torch\nimport torchvision\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom torch.utils.data import DataLoader, Dataset, TensorDataset", "_____no_output_____" ], [ "# Load the Drive helper and mount\nfrom google.colab import drive\n\n# This will prompt for authorization.\ndrive.mount('/content/drive')", "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ], [ "transform = transforms.Compose([transforms.Resize(32,32),\n transforms.ToTensor(),\n #transforms.Lambda(lambda x: x.repeat(3,1,1)),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ntraining_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\nvalidation_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n\ntrainloader = torch.utils.data.DataLoader(training_dataset, batch_size=100, shuffle=True,num_workers=2)\nvalidloader = torch.utils.data.DataLoader(validation_dataset, batch_size = 100, shuffle=False,num_workers=2)\n\n", "Files already downloaded and verified\nFiles already downloaded and verified\n" ], [ "", "_____no_output_____" ], [ "#*********************************************************************\n# model part\nimport torchvision.models as models\n# use pretrained model:\nmodel = models.alexnet(pretrained = True)\n#import OrderedDicted to corectly align the network layers\n", "_____no_output_____" ], [ "print(model)", "AlexNet(\n (features): Sequential(\n (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n (1): ReLU(inplace)\n (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n (4): ReLU(inplace)\n (5): MaxPool2d(kernel_size=3, stride=2, padding=0, 
dilation=1, ceil_mode=False)\n (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (7): ReLU(inplace)\n (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (9): ReLU(inplace)\n (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (11): ReLU(inplace)\n (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))\n (classifier): Sequential(\n (0): Dropout(p=0.5)\n (1): Linear(in_features=9216, out_features=4096, bias=True)\n (2): ReLU(inplace)\n (3): Dropout(p=0.5)\n (4): Linear(in_features=4096, out_features=4096, bias=True)\n (5): ReLU(inplace)\n (6): Linear(in_features=4096, out_features=1000, bias=True)\n )\n)\n" ], [ "#import nn to modify features\nfrom collections import OrderedDict\nfrom torch import nn\n# cgange features to deal with image reduction small output size issue\n\nfeatures = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n # nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\nmodel.features= features\n#create classifier which fit our num of outputs\n\nclassifier = nn.Sequential(\n nn.Dropout(p=0.5),\n nn.Linear(in_features=9216, out_features=4096, bias=True),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Linear(in_features=4096, out_features=4096, bias=True),\n nn.ReLU(),\n nn.Linear(in_features=4096, out_features=10, bias=True)\n)\n#replace the model's classifier with this new classifier \nmodel.classifier = classifier\n", "_____no_output_____" ], [ "print(model)", "AlexNet(\n (features): 
Sequential(\n (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))\n (1): ReLU(inplace)\n (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n (4): ReLU(inplace)\n (5): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (6): ReLU(inplace)\n (7): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (8): ReLU(inplace)\n (9): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (10): ReLU(inplace)\n (11): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n (avgpool): AdaptiveAvgPool2d(output_size=(6, 6))\n (classifier): Sequential(\n (0): Dropout(p=0.5)\n (1): Linear(in_features=9216, out_features=4096, bias=True)\n (2): ReLU()\n (3): Dropout(p=0.5)\n (4): Linear(in_features=4096, out_features=4096, bias=True)\n (5): ReLU()\n (6): Linear(in_features=4096, out_features=10, bias=True)\n )\n)\n" ], [ "#import optimizer:\nfrom torch import optim\n#define criteria and optimizer\n# Note that other losses or optimizers can also be tried\ncriteria = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr = 0.0003, momentum=0.9)", "_____no_output_____" ], [ "#train model\n#define training function\ndef train (model, loader, criterion, gpu):\n model.train()\n current_loss = 0\n current_correct = 0\n for train, y_train in iter(loader):\n if gpu:\n train, y_train = train.to('cuda'), y_train.to('cuda')\n optimizer.zero_grad()\n output = model.forward(train)\n _, preds = torch.max(output,1)\n loss = criterion(output, y_train)\n loss.backward()\n optimizer.step()\n current_loss += loss.item()*train.size(0)\n current_correct += torch.sum(preds == y_train.data)\n #check if the training is correct: print(preds,y_train,current_correct,current_loss)\n epoch_loss = current_loss / len(loader)\n # devide 4 because we read 4 data everytime\n epoch_acc = current_correct.double() / 
len(loader)/100\n \n return epoch_loss, epoch_acc", "_____no_output_____" ], [ "#define validation function\ndef validation (model, loader, criterion, gpu):\n model.eval()\n valid_loss = 0\n valid_correct = 0\n #I added this\n pred=torch.zeros(len(loader))\n for valid, y_valid in iter(loader):\n if gpu:\n valid, y_valid = valid.to('cuda'), y_valid.to('cuda')\n output = model.forward(valid)\n _, preds = torch.max(output,1)\n valid_loss += criterion(output, y_valid).item()*valid.size(0)\n valid_correct += torch.sum(preds == y_valid.data)\n \n epoch_loss = valid_loss / len(loader)\n epoch_acc = valid_correct.double() / len(loader)/100\n \n return epoch_loss, epoch_acc", "_____no_output_____" ], [ "#define test function\ndef test (model, loader, criterion, gpu):\n model.eval()\n valid_loss = 0\n valid_correct = 0\n i=0\n pred=torch.zeros(len(loader))\n for test, y_train in iter(loader):\n if gpu:\n test = test.to('cuda')\n output = model.forward(test)\n _, preds = torch.max(output,1)\n pred[i]=preds\n i=i+1 \n return pred", "_____no_output_____" ], [ "# training\n#send model to gpu. 
If not send it to GPU, delete next line.\nmodel.to('cuda')\ntrain_losses =[]\ntrain_acc =[]\nvalid_losses=[]\nvalid_acc =[]\n#Initialize training params \n#freeze gradient parameters in pretrained model\nfor param in model.parameters():\n param.require_grad = False\n# define number of epochs\nepochs = 16 \nepoch = 0\n\nfor e in range(epochs):\n epoch +=1\n print(epoch)\n#train: \n with torch.set_grad_enabled(True):\n epoch_train_loss, epoch_train_acc = train(model,trainloader, criteria, 1)\n train_losses.append(epoch_train_loss)\n train_acc.append(epoch_train_acc)\n print(\"Epoch: {} Train Loss : {:.4f} Train Accuracy: {:.4f}\".format(epoch,epoch_train_loss,epoch_train_acc))\n \n#Valid, Activate next code when validation result is needed:\n with torch.no_grad():\n epoch_val_loss, epoch_val_acc = validation(model, validloader, criteria, 1)\n valid_losses.append(epoch_val_loss)\n valid_acc.append(epoch_val_acc)\n print(\"Epoch: {} Validation Loss : {:.4f} Validation Accuracy {:.4f}\".format(epoch,epoch_val_loss,epoch_val_acc))", "1\nEpoch: 1 Train Loss : 230.2622 Train Accuracy: 0.1006\nEpoch: 1 Validation Loss : 230.2592 Validation Accuracy 0.1031\n2\nEpoch: 2 Train Loss : 230.2603 Train Accuracy: 0.0990\nEpoch: 2 Validation Loss : 230.2557 Validation Accuracy 0.1059\n3\nEpoch: 3 Train Loss : 230.2570 Train Accuracy: 0.0998\nEpoch: 3 Validation Loss : 230.2523 Validation Accuracy 0.1382\n4\nEpoch: 4 Train Loss : 230.2540 Train Accuracy: 0.1019\nEpoch: 4 Validation Loss : 230.2490 Validation Accuracy 0.1294\n5\nEpoch: 5 Train Loss : 230.2486 Train Accuracy: 0.1038\nEpoch: 5 Validation Loss : 230.2458 Validation Accuracy 0.1417\n6\nEpoch: 6 Train Loss : 230.2487 Train Accuracy: 0.1037\nEpoch: 6 Validation Loss : 230.2425 Validation Accuracy 0.1260\n7\nEpoch: 7 Train Loss : 230.2466 Train Accuracy: 0.1071\nEpoch: 7 Validation Loss : 230.2391 Validation Accuracy 0.1734\n8\nEpoch: 8 Train Loss : 230.2438 Train Accuracy: 0.1044\nEpoch: 8 Validation Loss : 230.2353 
Validation Accuracy 0.1117\n9\nEpoch: 9 Train Loss : 230.2390 Train Accuracy: 0.1081\nEpoch: 9 Validation Loss : 230.2313 Validation Accuracy 0.1310\n10\nEpoch: 10 Train Loss : 230.2365 Train Accuracy: 0.1115\nEpoch: 10 Validation Loss : 230.2267 Validation Accuracy 0.1407\n11\nEpoch: 11 Train Loss : 230.2304 Train Accuracy: 0.1164\nEpoch: 11 Validation Loss : 230.2216 Validation Accuracy 0.1213\n12\nEpoch: 12 Train Loss : 230.2274 Train Accuracy: 0.1129\nEpoch: 12 Validation Loss : 230.2158 Validation Accuracy 0.1151\n13\nEpoch: 13 Train Loss : 230.2237 Train Accuracy: 0.1149\nEpoch: 13 Validation Loss : 230.2090 Validation Accuracy 0.1306\n14\nEpoch: 14 Train Loss : 230.2149 Train Accuracy: 0.1203\nEpoch: 14 Validation Loss : 230.2010 Validation Accuracy 0.1108\n15\nEpoch: 15 Train Loss : 230.2092 Train Accuracy: 0.1143\nEpoch: 15 Validation Loss : 230.1915 Validation Accuracy 0.1353\n16\nEpoch: 16 Train Loss : 230.2004 Train Accuracy: 0.1184\nEpoch: 16 Validation Loss : 230.1802 Validation Accuracy 0.1280\n" ], [ "#Plot training and validation losses\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.plot(train_losses, label='Training loss')\nplt.plot(valid_losses, label='Validation loss')\nplt.legend()", "_____no_output_____" ], [ "#Plot training and validation accuracy\nplt.plot(train_acc, label='Training accuracy')\nplt.plot(valid_acc, label='Validation accuracy')\nplt.legend()", "_____no_output_____" ], [ "# for variety, lets use altair to do the plot\nimport altair as alt\n\n# create a pandas dataframe for the loss\ndf = pd.DataFrame({\n 'epoch': range(1, len(train_losses) + 1),\n 'train': train_losses,\n 'valid': valid_losses\n})\n\n# unpivot to have cols [epoch, dataset, loss]\ndf = df.melt(id_vars=['epoch'],\n value_vars=['train', 'valid'],\n value_name='loss',\n var_name='Dataset')\n\n# line plot with altair\nalt.Chart(df).mark_line(point=True)\\\n .encode(x='epoch', y='loss', color='Dataset')\\\n .interactive()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb219beba81bad3961b28ea98f983e191de08694
2,672
ipynb
Jupyter Notebook
notebook/Untitled.ipynb
YichaoOU/self_driving_car
7f1615c21ac2d9e77b41f66c65abc529ec9e0717
[ "MIT" ]
null
null
null
notebook/Untitled.ipynb
YichaoOU/self_driving_car
7f1615c21ac2d9e77b41f66c65abc529ec9e0717
[ "MIT" ]
1
2019-11-26T06:58:59.000Z
2019-11-26T06:58:59.000Z
notebook/Untitled.ipynb
YichaoOU/self_driving_car
7f1615c21ac2d9e77b41f66c65abc529ec9e0717
[ "MIT" ]
null
null
null
22.644068
129
0.533308
[ [ [ "import numba\nfrom numba import vectorize\nimport numpy as np\nimport math # Note that for the CUDA target, we need to use the scalar functions from the math module, not NumPy\n\nSQRT_2PI = np.float32((2*math.pi)**0.5) # Precompute this constant as a float32. Numba will inline it at compile time.\n\n@vectorize(['float32(float32, float32, float32)'], target='cuda')\ndef gaussian_pdf(x, mean, sigma):\n '''Compute the value of a Gaussian probability density function at x with given mean and sigma.'''\n return math.exp(-0.5 * ((x - mean) / sigma)**2) / (sigma * SQRT_2PI)", "_____no_output_____" ], [ "# Evaluate the Gaussian a million times!\nx = np.random.uniform(-3, 3, size=1000000).astype(np.float32)\nmean = np.float32(0.0)\nsigma = np.float32(1.0)\n\n# Quick test\ngaussian_pdf(x[0], 0.0, 1.0)\n", "_____no_output_____" ], [ "import scipy.stats # for definition of gaussian distribution\nnorm_pdf = scipy.stats.norm\n%timeit norm_pdf.pdf(x, loc=mean, scale=sigma)", "10 loops, best of 3: 77.9 ms per loop\n" ], [ "%timeit gaussian_pdf(x, mean, sigma)", "100 loops, best of 3: 5.4 ms per loop\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb21a535cbca3e9deaafa83a1f8e6943989c16e4
59,784
ipynb
Jupyter Notebook
tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb
vasudev-sharma/course-content
46fb9be49da52acb5df252dda43f11b6d1fe827f
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb
vasudev-sharma/course-content
46fb9be49da52acb5df252dda43f11b6d1fe827f
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb
vasudev-sharma/course-content
46fb9be49da52acb5df252dda43f11b6d1fe827f
[ "CC-BY-4.0", "BSD-3-Clause" ]
null
null
null
36.188862
535
0.566456
[ [ [ "<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D4_DynamicNetworks/W2D4_Tutorial1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Tutorial 1: Neural Rate Models\n**Week 2, Day 4: Dynamic Networks**\n\n**By Neuromatch Academy**\n\n__Content creators:__ Qinglong Gu, Songtin Li, Arvind Kumar, John Murray, Julijana Gjorgjieva \n\n__Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Spiros Chavlis, Michael Waskom\n", "_____no_output_____" ], [ "---\n# Tutorial Objectives\n\nThe brain is a complex system, not because it is composed of a large number of diverse types of neurons, but mainly because of how neurons are connected to each other. The brain is indeed a network of highly specialized neuronal networks. \n\nThe activity of a neural network constantly evolves in time. For this reason, neurons can be modeled as dynamical systems. The dynamical system approach is only one of the many modeling approaches that computational neuroscientists have developed (other points of view include information processing, statistical models, etc.). \n\nHow the dynamics of neuronal networks affect the representation and processing of information in the brain is an open question. However, signatures of altered brain dynamics present in many brain diseases (e.g., in epilepsy or Parkinson's disease) tell us that it is crucial to study network activity dynamics if we want to understand the brain.\n\nIn this tutorial, we will simulate and study one of the simplest models of biological neuronal networks. 
Instead of modeling and simulating individual excitatory neurons (e.g., LIF models that you implemented yesterday), we will treat them as a single homogeneous population and approximate their dynamics using a single one-dimensional equation describing the evolution of their average spiking rate in time.\n\nIn this tutorial, we will learn how to build a firing rate model of a single population of excitatory neurons. \n\n**Steps:**\n- Write the equation for the firing rate dynamics of a 1D excitatory population.\n- Visualize the response of the population as a function of parameters such as threshold level and gain, using the frequency-current (F-I) curve.\n- Numerically simulate the dynamics of the excitatory population and find the fixed points of the system. \n- Investigate the stability of the fixed points by linearizing the dynamics around them.\n \n", "_____no_output_____" ], [ "---\n# Setup", "_____no_output_____" ] ], [ [ "# Imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.optimize as opt # root-finding algorithm", "_____no_output_____" ], [ "# @title Figure Settings\nimport ipywidgets as widgets # interactive display\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle\")", "_____no_output_____" ], [ "# @title Helper functions\n\n\ndef plot_fI(x, f):\n plt.figure(figsize=(6, 4)) # plot the figure\n plt.plot(x, f, 'k')\n plt.xlabel('x (a.u.)', fontsize=14)\n plt.ylabel('F(x)', fontsize=14)\n plt.show()\n\n\ndef plot_dr_r(r, drdt, x_fps=None):\n plt.figure()\n plt.plot(r, drdt, 'k')\n plt.plot(r, 0. 
* r, 'k--')\n if x_fps is not None:\n plt.plot(x_fps, np.zeros_like(x_fps), \"ko\", ms=12)\n plt.xlabel(r'$r$')\n plt.ylabel(r'$\\frac{dr}{dt}$', fontsize=20)\n plt.ylim(-0.1, 0.1)\n\n\ndef plot_dFdt(x, dFdt):\n plt.figure()\n plt.plot(x, dFdt, 'r')\n plt.xlabel('x (a.u.)', fontsize=14)\n plt.ylabel('dF(x)', fontsize=14)\n plt.show()", "_____no_output_____" ] ], [ [ "---\n# Section 1: Neuronal network dynamics", "_____no_output_____" ] ], [ [ "# @title Video 1: Dynamic networks\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"p848349hPyw\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "## Section 1.1: Dynamics of a single excitatory population\n\nIndividual neurons respond by spiking. When we average the spikes of neurons in a population, we can define the average firing activity of the population. In this model, we are interested in how the population-averaged firing varies as a function of time and network parameters. 
Mathematically, we can describe the firing rate dynamic as:\n\n\\begin{align}\n\\tau \\frac{dr}{dt} &= -r + F(w\\cdot r + I_{\\text{ext}}) \\quad\\qquad (1)\n\\end{align}\n\n$r(t)$ represents the average firing rate of the excitatory population at time $t$, $\\tau$ controls the timescale of the evolution of the average firing rate, $w$ denotes the strength (synaptic weight) of the recurrent input to the population, $I_{\\text{ext}}$ represents the external input, and the transfer function $F(\\cdot)$ (which can be related to f-I curve of individual neurons described in the next sections) represents the population activation function in response to all received inputs.\n\nTo start building the model, please execute the cell below to initialize the simulation parameters.", "_____no_output_____" ] ], [ [ "# @markdown *Execute this cell to set default parameters for a single excitatory population model*\n\n\ndef default_pars_single(**kwargs):\n pars = {}\n\n # Excitatory parameters\n pars['tau'] = 1. # Timescale of the E population [ms]\n pars['a'] = 1.2 # Gain of the E population\n pars['theta'] = 2.8 # Threshold of the E population\n\n # Connection strength\n pars['w'] = 0. # E to E, we first set it to 0\n\n # External input\n pars['I_ext'] = 0.\n\n # simulation parameters\n pars['T'] = 20. # Total duration of simulation [ms]\n pars['dt'] = .1 # Simulation time step [ms]\n pars['r_init'] = 0.2 # Initial value of E\n\n # External parameters if any\n pars.update(kwargs)\n\n # Vector of discretized time points [ms]\n pars['range_t'] = np.arange(0, pars['T'], pars['dt'])\n\n return pars", "_____no_output_____" ] ], [ [ "You can now use:\n- `pars = default_pars_single()` to get all the parameters, and then you can execute `print(pars)` to check these parameters. 
\n- `pars = default_pars_single(T=T_sim, dt=time_step)` to set new simulation time and time step\n- To update an existing parameter dictionary, use `pars['New_para'] = value`\n\nBecause `pars` is a dictionary, it can be passed to a function that requires individual parameters as arguments using `my_func(**pars)` syntax.", "_____no_output_____" ], [ "## Section 1.2: F-I curves\nIn electrophysiology, a neuron is often characterized by its spike rate output in response to input currents. This is often called the **F-I** curve, denoting the output spike frequency (**F**) in response to different injected currents (**I**). We estimated this for an LIF neuron in yesterday's tutorial.\n\nThe transfer function $F(\\cdot)$ in Equation $1$ represents the gain of the population as a function of the total input. The gain is often modeled as a sigmoidal function, i.e., more input drive leads to a nonlinear increase in the population firing rate. The output firing rate will eventually saturate for high input values. \n\nA sigmoidal $F(\\cdot)$ is parameterized by its gain $a$ and threshold $\\theta$.\n\n$$ F(x;a,\\theta) = \\frac{1}{1+\\text{e}^{-a(x-\\theta)}} - \\frac{1}{1+\\text{e}^{a\\theta}} \\quad(2)$$\n\nThe argument $x$ represents the input to the population. Note that the second term is chosen so that $F(0;a,\\theta)=0$.\n\nMany other transfer functions (generally monotonic) can be also used. Examples are the rectified linear function $ReLU(x)$ or the hyperbolic tangent $tanh(x)$.", "_____no_output_____" ], [ "### Exercise 1: Implement F-I curve \n\nLet's first investigate the activation functions before simulating the dynamics of the entire population. 
\n\nIn this exercise, you will implement a sigmoidal **F-I** curve or transfer function $F(x)$, with gain $a$ and threshold level $\\theta$ as parameters.", "_____no_output_____" ] ], [ [ "def F(x, a, theta):\n \"\"\"\n Population activation function.\n\n Args:\n x (float): the population input\n a (float): the gain of the function\n theta (float): the threshold of the function\n\n Returns:\n float: the population activation response F(x) for input x\n \"\"\"\n #################################################\n ## TODO for students: compute f = F(x) ##\n # Fill out function and remove\n raise NotImplementedError(\"Student excercise: implement the f-I function\")\n #################################################\n\n # Define the sigmoidal transfer function f = F(x)\n f = ...\n\n return f\n\n\npars = default_pars_single() # get default parameters\nx = np.arange(0, 10, .1) # set the range of input\n\n# Uncomment below to test your function\n# f = F(x, pars['a'], pars['theta'])\n# plot_fI(x, f)", "_____no_output_____" ], [ "# to_remove solution\ndef F(x, a, theta):\n \"\"\"\n Population activation function.\n\n Args:\n x (float): the population input\n a (float): the gain of the function\n theta (float): the threshold of the function\n\n Returns:\n float: the population activation response F(x) for input x\n \"\"\"\n\n # Define the sigmoidal transfer function f = F(x)\n f = (1 + np.exp(-a * (x - theta)))**-1 - (1 + np.exp(a * theta))**-1\n\n return f\n\n\npars = default_pars_single() # get default parameters\nx = np.arange(0, 10, .1) # set the range of input\n\n# Uncomment below to test your function\nf = F(x, pars['a'], pars['theta'])\n\nwith plt.xkcd():\n plot_fI(x, f)", "_____no_output_____" ] ], [ [ "### Interactive Demo: Parameter exploration of F-I curve\nHere's an interactive demo that shows how the F-I curve changes for different values of the gain and threshold parameters. 
How do the gain and threshold parameters affect the F-I curve?", "_____no_output_____" ] ], [ [ "# @title\n\n# @markdown Make sure you execute this cell to enable the widget!\n\n\ndef interactive_plot_FI(a, theta):\n \"\"\"\n Population activation function.\n\n Expecxts:\n a : the gain of the function\n theta : the threshold of the function\n\n Returns:\n plot the F-I curve with give parameters\n \"\"\"\n\n # set the range of input\n x = np.arange(0, 10, .1)\n plt.figure()\n plt.plot(x, F(x, a, theta), 'k')\n plt.xlabel('x (a.u.)', fontsize=14)\n plt.ylabel('F(x)', fontsize=14)\n plt.show()\n\n\n_ = widgets.interact(interactive_plot_FI, a=(0.3, 3, 0.3), theta=(2, 4, 0.2))", "_____no_output_____" ], [ "# to_remove explanation\n\n\"\"\"\nDiscussion:\n\nFor the function we have chosen to model the F-I curve (eq 2),\n- a determines the slope (gain) of the rising phase of the F-I curve\n- theta determines the input at which the function F(x) reaches its mid-value (0.5).\nThat is, theta shifts the F-I curve along the horizontal axis.\n\nFor our neurons we are using in this tutorial:\n- a controls the gain of the neuron population\n- theta controls the threshold at which the neuron population starts to respond\n\"\"\";", "_____no_output_____" ] ], [ [ "## Section 1.3: Simulation scheme of E dynamics\n\nBecause $F(\\cdot)$ is a nonlinear function, the exact solution of Equation $1$ can not be determined via analytical methods. Therefore, numerical methods must be used to find the solution. In practice, the derivative on the left-hand side of Equation $1$ can be approximated using the Euler method on a time-grid of stepsize $\\Delta t$:\n\n\\begin{align}\n&\\frac{dr}{dt} \\approx \\frac{r[k+1]-r[k]}{\\Delta t} \n\\end{align}\nwhere $r[k] = r(k\\Delta t)$. 
\n\nThus,\n\n$$\\Delta r[k] = \\frac{\\Delta t}{\\tau}[-r[k] + F(w\\cdot r[k] + I_{\\text{ext}}[k];a,\\theta)]$$\n\n\nHence, Equation (1) is updated at each time step by:\n\n$$r[k+1] = r[k] + \\Delta r[k]$$\n", "_____no_output_____" ] ], [ [ "# @markdown *Execute this cell to enable the single population rate model simulator: `simulate_single`*\n\n\ndef simulate_single(pars):\n \"\"\"\n Simulate an excitatory population of neurons\n\n Args:\n pars : Parameter dictionary\n\n Returns:\n rE : Activity of excitatory population (array)\n\n Example:\n pars = default_pars_single()\n r = simulate_single(pars)\n \"\"\"\n\n # Set parameters\n tau, a, theta = pars['tau'], pars['a'], pars['theta']\n w = pars['w']\n I_ext = pars['I_ext']\n r_init = pars['r_init']\n dt, range_t = pars['dt'], pars['range_t']\n Lt = range_t.size\n\n # Initialize activity\n r = np.zeros(Lt)\n r[0] = r_init\n I_ext = I_ext * np.ones(Lt)\n\n # Update the E activity\n for k in range(Lt - 1):\n dr = dt / tau * (-r[k] + F(w * r[k] + I_ext[k], a, theta))\n r[k+1] = r[k] + dr\n\n return r\n\nhelp(simulate_single)", "_____no_output_____" ] ], [ [ "### Interactive Demo: Parameter Exploration of single population dynamics\n\nNote that $w=0$, as in the default setting, means no recurrent input to the neuron population in Equation (1). Hence, the dynamics are entirely determined by the external input $I_{\\text{ext}}$. Explore these dynamics in this interactive demo.\n\nHow does $r_{\\text{sim}}(t)$ change with different $I_{\\text{ext}}$ values? How does it change with different $\\tau$ values? Investigate the relationship between $F(I_{\\text{ext}}; a, \\theta)$ and the steady value of $r(t)$. 
\n\nNote that, $r_{\\rm ana}(t)$ denotes the analytical solution - you will learn how this is computed in the next section.", "_____no_output_____" ] ], [ [ "# @title\n\n# @markdown Make sure you execute this cell to enable the widget!\n\n# get default parameters\npars = default_pars_single(T=20.)\n\n\ndef Myplot_E_diffI_difftau(I_ext, tau):\n # set external input and time constant\n pars['I_ext'] = I_ext\n pars['tau'] = tau\n\n # simulation\n r = simulate_single(pars)\n\n # Analytical Solution\n r_ana = (pars['r_init']\n + (F(I_ext, pars['a'], pars['theta'])\n - pars['r_init']) * (1. - np.exp(-pars['range_t'] / pars['tau'])))\n\n # plot\n plt.figure()\n plt.plot(pars['range_t'], r, 'b', label=r'$r_{\\mathrm{sim}}$(t)', alpha=0.5,\n zorder=1)\n plt.plot(pars['range_t'], r_ana, 'b--', lw=5, dashes=(2, 2),\n label=r'$r_{\\mathrm{ana}}$(t)', zorder=2)\n plt.plot(pars['range_t'],\n F(I_ext, pars['a'], pars['theta']) * np.ones(pars['range_t'].size),\n 'k--', label=r'$F(I_{\\mathrm{ext}})$')\n plt.xlabel('t (ms)', fontsize=16.)\n plt.ylabel('Activity r(t)', fontsize=16.)\n plt.legend(loc='best', fontsize=14.)\n plt.show()\n\n\n_ = widgets.interact(Myplot_E_diffI_difftau, I_ext=(0.0, 10., 1.),\n tau=(1., 5., 0.2))", "_____no_output_____" ], [ "# to_remove explanation\n\n\"\"\"\nDiscussion:\n\nGiven the choice of F-I curve (eq 2) and dynamics of the neuron population (eq. 1)\nthe neurons have two fixed points or steady-state responses irrespective of the input.\n- Weak inputs to the neurons eventually result in the activity converging to zero\n- Strong inputs to the neurons eventually result in the activity converging to max value\n\nThe time constant tau, does not affect the steady-state response but it determines\nthe time the neurons take to reach to their fixed point.\n\"\"\";", "_____no_output_____" ] ], [ [ "## Think!\nAbove, we have numerically solved a system driven by a positive input. 
Yet, $r_E(t)$ either decays to zero or reaches a fixed non-zero value.\n- Why doesn't the solution of the system \"explode\" in a finite time? In other words, what guarantees that $r_E$(t) stays finite? \n- Which parameter would you change in order to increase the maximum value of the response? ", "_____no_output_____" ] ], [ [ "# to_remove explanation\n\n\"\"\"\nDiscussion:\n\n1) As the F-I curve is bounded between zero and one, the system doesn't explode.\nThe f-curve guarantees this property\n\n2) One way to increase the maximum response is to change the f-I curve. For\nexample, the ReLU is an unbounded function, and thus will increase the overall maximal\nresponse of the network.\n\"\"\";", "_____no_output_____" ] ], [ [ "---\n# Section 2: Fixed points of the single population system\n", "_____no_output_____" ] ], [ [ "# @title Video 2: Fixed point\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"Ox3ELd1UFyo\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "As you varied the two parameters in the last Interactive Demo, you noticed that, while at first the system output quickly changes, with time, it reaches its maximum/minimum value and does not change anymore. The value eventually reached by the system is called the **steady state** of the system, or the **fixed point**. Essentially, in the steady states the derivative with respect to time of the activity ($r$) is zero, i.e. $\\displaystyle \\frac{dr}{dt}=0$. \n\nWe can find that the steady state of the Equation. (1) by setting $\\displaystyle{\\frac{dr}{dt}=0}$ and solve for $r$:\n\n$$-r_{\\text{steady}} + F(w\\cdot r_{\\text{steady}} + I_{\\text{ext}};a,\\theta) = 0, \\qquad (3)$$\n\nWhen it exists, the solution of Equation. (3) defines a **fixed point** of the dynamical system in Equation (1). 
Note that if $F(x)$ is nonlinear, it is not always possible to find an analytical solution, but the solution can be found via numerical simulations, as we will do later.\n\nFrom the Interactive Demo, one could also notice that the value of $\\tau$ influences how quickly the activity will converge to the steady state from its initial value. \n\nIn the specific case of $w=0$, we can also analytically compute the solution of Equation (1) (i.e., the thick blue dashed line) and deduce the role of $\\tau$ in determining the convergence to the fixed point: \n\n$$\\displaystyle{r(t) = \\big{[}F(I_{\\text{ext}};a,\\theta) -r(t=0)\\big{]} (1-\\text{e}^{-\\frac{t}{\\tau}})} + r(t=0)$$ \\\\\n\nWe can now numerically calculate the fixed point with a root finding algorithm.", "_____no_output_____" ], [ "## Exercise 2: Visualization of the fixed points\n\nWhen it is not possible to find the solution for Equation (3) analytically, a graphical approach can be taken. To that end, it is useful to plot $\\displaystyle{\\frac{dr}{dt}}$ as a function of $r$. The values of $r$ for which the plotted function crosses zero on the y axis correspond to fixed points. \n\nHere, let us, for example, set $w=5.0$ and $I^{\\text{ext}}=0.5$. From Equation (1), you can obtain\n\n$$\\frac{dr}{dt} = [-r + F(w\\cdot r + I^{\\text{ext}})]\\,/\\,\\tau $$\n\nThen, plot the $dr/dt$ as a function of $r$, and check for the presence of fixed points. 
", "_____no_output_____" ] ], [ [ "def compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):\n \"\"\"Given parameters, compute dr/dt as a function of r.\n\n Args:\n r (1D array) : Average firing rate of the excitatory population\n I_ext, w, a, theta, tau (numbers): Simulation parameters to use\n other_pars : Other simulation parameters are unused by this function\n\n Returns\n drdt function for each value of r\n \"\"\"\n #########################################################################\n # TODO compute drdt and disable the error\n raise NotImplementedError(\"Finish the compute_drdt function\")\n #########################################################################\n\n # Calculate drdt\n drdt = ...\n\n return drdt\n\n\n# Define a vector of r values and the simulation parameters\nr = np.linspace(0, 1, 1000)\npars = default_pars_single(I_ext=0.5, w=5)\n\n# Uncomment to test your function\n# drdt = compute_drdt(r, **pars)\n# plot_dr_r(r, drdt)", "_____no_output_____" ], [ "# to_remove solution\ndef compute_drdt(r, I_ext, w, a, theta, tau, **other_pars):\n \"\"\"Given parameters, compute dr/dt as a function of r.\n\n Args:\n r (1D array) : Average firing rate of the excitatory population\n I_ext, w, a, theta, tau (numbers): Simulation parameters to use\n other_pars : Other simulation parameters are unused by this function\n\n Returns\n drdt function for each value of r\n \"\"\"\n # Calculate drdt\n drdt = (-r + F(w * r + I_ext, a, theta)) / tau\n\n return drdt\n\n\n# Define a vector of r values and the simulation parameters\nr = np.linspace(0, 1, 1000)\npars = default_pars_single(I_ext=0.5, w=5)\n\ndrdt = compute_drdt(r, **pars)\nwith plt.xkcd():\n plot_dr_r(r, drdt)", "_____no_output_____" ] ], [ [ "## Exercise 3: Fixed point calculation\n\nWe will now find the fixed points numerically. To do so, we need to specif initial values ($r_{\\text{guess}}$) for the root-finding algorithm to start from. 
From the line $\\displaystyle{\\frac{dr}{dt}}$ plotted above in Exercise 2, initial values can be chosen as a set of values close to where the line crosses zero on the y axis (real fixed point).\n\nThe next cell defines three helper functions that we will use:\n\n- `my_fp_single(r_guess, **pars)` uses a root-finding algorithm to locate a fixed point near a given initial value\n- `check_fp_single(x_fp, **pars)`, verifies that the values of $r_{\\rm fp}$ for which $\\displaystyle{\\frac{dr}{dt}} = 0$ are the true fixed points\n- `my_fp_finder(r_guess_vector, **pars)` accepts an array of initial values and finds the same number of fixed points, using the above two functions", "_____no_output_____" ] ], [ [ "# @markdown *Execute this cell to enable the fixed point functions*\n\ndef my_fp_single(r_guess, a, theta, w, I_ext, **other_pars):\n \"\"\"\n Calculate the fixed point through drE/dt=0\n\n Args:\n r_guess : Initial value used for scipy.optimize function\n a, theta, w, I_ext : simulation parameters\n\n Returns:\n x_fp : value of fixed point\n \"\"\"\n # define the right hand of E dynamics\n def my_WCr(x):\n r = x\n drdt = (-r + F(w * r + I_ext, a, theta))\n y = np.array(drdt)\n\n return y\n\n x0 = np.array(r_guess)\n x_fp = opt.root(my_WCr, x0).x.item()\n\n return x_fp\n\n\ndef check_fp_single(x_fp, a, theta, w, I_ext, mytol=1e-4, **other_pars):\n \"\"\"\n Verify |dr/dt| < mytol\n\n Args:\n fp : value of fixed point\n a, theta, w, I_ext: simulation parameters\n mytol : tolerance, default as 10^{-4}\n\n Returns :\n Whether it is a correct fixed point: True/False\n \"\"\"\n # calculate Equation(3)\n y = x_fp - F(w * x_fp + I_ext, a, theta)\n\n # Here we set tolerance as 10^{-4}\n return np.abs(y) < mytol\n\n\ndef my_fp_finder(pars, r_guess_vector, mytol=1e-4):\n \"\"\"\n Calculate the fixed point(s) through drE/dt=0\n\n Args:\n pars : Parameter dictionary\n r_guess_vector : Initial values used for scipy.optimize function\n mytol : tolerance for checking fixed point, 
default as 10^{-4}\n\n Returns:\n x_fps : values of fixed points\n\n \"\"\"\n x_fps = []\n correct_fps = []\n for r_guess in r_guess_vector:\n x_fp = my_fp_single(r_guess, **pars)\n if check_fp_single(x_fp, **pars, mytol=mytol):\n x_fps.append(x_fp)\n\n return x_fps\n\nhelp(my_fp_finder)", "_____no_output_____" ], [ "r = np.linspace(0, 1, 1000)\npars = default_pars_single(I_ext=0.5, w=5)\ndrdt = compute_drdt(r, **pars)\n\n#############################################################################\n# TODO for students:\n# Define initial values close to the intersections of drdt and y=0\n# (How many initial values? Hint: How many times do the two lines intersect?)\n# Calculate the fixed point with these initial values and plot them\n#############################################################################\nr_guess_vector = [...]\n\n# Uncomment to test your values\n# x_fps = my_fp_finder(pars, r_guess_vector)\n# plot_dr_r(r, drdt, x_fps)", "_____no_output_____" ], [ "# to_remove solution\nr = np.linspace(0, 1, 1000)\npars = default_pars_single(I_ext=0.5, w=5)\ndrdt = compute_drdt(r, **pars)\n\nr_guess_vector = [0, .4, .9]\n\nx_fps = my_fp_finder(pars, r_guess_vector)\nwith plt.xkcd():\n plot_dr_r(r, drdt, x_fps)", "_____no_output_____" ] ], [ [ "## Interactive Demo: fixed points as a function of recurrent and external inputs.\n\nYou can now explore how the previous plot changes when the recurrent coupling $w$ and the external input $I_{\\text{ext}}$ take different values. 
How does the number of fixed points change?", "_____no_output_____" ] ], [ [ "# @title\n\n# @markdown Make sure you execute this cell to enable the widget!\n\n\ndef plot_intersection_single(w, I_ext):\n # set your parameters\n pars = default_pars_single(w=w, I_ext=I_ext)\n\n # find fixed points\n r_init_vector = [0, .4, .9]\n x_fps = my_fp_finder(pars, r_init_vector)\n\n # plot\n r = np.linspace(0, 1., 1000)\n drdt = (-r + F(w * r + I_ext, pars['a'], pars['theta'])) / pars['tau']\n\n plot_dr_r(r, drdt, x_fps)\n\n_ = widgets.interact(plot_intersection_single, w=(1, 7, 0.2),\n I_ext=(0, 3, 0.1))", "_____no_output_____" ], [ "# to_remove explanation\n\n\"\"\"\nDiscussion:\n\nThe fixed points of the single excitatory neuron population are determined by both\nrecurrent connections w and external input I_ext. In a previous interactive demo\nwe saw how the system showed two different steady-states when w = 0. But when w\ndoe not equal 0, for some range of w the system shows three fixed points (the middle\none being unstable) and the steady state depends on the initial conditions (i.e.\nr at time zero.).\n\nMore on this will be explained in the next section.\n\"\"\";", "_____no_output_____" ] ], [ [ "---\n# Summary\n\nIn this tutorial, we have investigated the dynamics of a rate-based single population of neurons.\n\nWe learned about:\n- The effect of the input parameters and the time constant of the network on the dynamics of the population.\n- How to find the fixed point(s) of the system.\n\nNext, we have two Bonus, but important concepts in dynamical system analysis and simulation. If you have time left, watch the next video and proceed to solve the exercises. 
You will learn:\n\n- How to determine the stability of a fixed point by linearizing the system.\n- How to add realistic inputs to our model.", "_____no_output_____" ], [ "---\n# Bonus 1: Stability of a fixed point", "_____no_output_____" ] ], [ [ "# @title Video 3: Stability of fixed points\nfrom IPython.display import YouTubeVideo\nvideo = YouTubeVideo(id=\"KKMlWWU83Jg\", width=854, height=480, fs=1)\nprint(\"Video available at https://youtube.com/watch?v=\" + video.id)\nvideo", "_____no_output_____" ] ], [ [ "#### Initial values and trajectories\n\nHere, let us first set $w=5.0$ and $I_{\\text{ext}}=0.5$, and investigate the dynamics of $r(t)$ starting with different initial values $r(0) \\equiv r_{\\text{init}}$. We will plot the trajectories of $r(t)$ with $r_{\\text{init}} = 0.0, 0.1, 0.2,..., 0.9$.", "_____no_output_____" ] ], [ [ "# @markdown Execute this cell to see the trajectories!\n\npars = default_pars_single()\npars['w'] = 5.0\npars['I_ext'] = 0.5\n\nplt.figure(figsize=(8, 5))\nfor ie in range(10):\n pars['r_init'] = 0.1 * ie # set the initial value\n r = simulate_single(pars) # run the simulation\n\n # plot the activity with given initial\n plt.plot(pars['range_t'], r, 'b', alpha=0.1 + 0.1 * ie,\n label=r'r$_{\\mathrm{init}}$=%.1f' % (0.1 * ie))\n\nplt.xlabel('t (ms)')\nplt.title('Two steady states?')\nplt.ylabel(r'$r$(t)')\nplt.legend(loc=[1.01, -0.06], fontsize=14)\nplt.show()", "_____no_output_____" ] ], [ [ "## Interactive Demo: dynamics as a function of the initial value\n\nLet's now set $r_{\\rm init}$ to a value of your choice in this demo. How does the solution change? 
What do you observe?", "_____no_output_____" ] ], [ [ "# @title\n\n# @markdown Make sure you execute this cell to enable the widget!\n\npars = default_pars_single(w=5.0, I_ext=0.5)\n\ndef plot_single_diffEinit(r_init):\n pars['r_init'] = r_init\n r = simulate_single(pars)\n\n plt.figure()\n plt.plot(pars['range_t'], r, 'b', zorder=1)\n plt.plot(0, r[0], 'bo', alpha=0.7, zorder=2)\n plt.xlabel('t (ms)', fontsize=16)\n plt.ylabel(r'$r(t)$', fontsize=16)\n plt.ylim(0, 1.0)\n plt.show()\n\n\n_ = widgets.interact(plot_single_diffEinit, r_init=(0, 1, 0.02))", "_____no_output_____" ], [ "# to_remove explanation\n\"\"\"\nDiscussion:\n\nTo better appreciate what is happening here, you should go back to the previous\ninteractive demo. Set the w = 5 and I_ext = 0.5.\n\nYou will find that there are three fixed points of the system for these values of\nw and I_ext. Now, choose the initial value in this demo and see in which direction\nthe system output moves. When r_init is in the vicinity of the leftmost fixed points\nit moves towards the left most fixed point. When r_init is in the vicinity of the\nrightmost fixed points it moves towards the rightmost fixed point.\n\"\"\";", "_____no_output_____" ] ], [ [ "### Stability analysis via linearization of the dynamics\n\nJust like Equation $1$ in the case ($w=0$) discussed above, a generic linear system \n$$\\frac{dx}{dt} = \\lambda (x - b),$$ \nhas a fixed point for $x=b$. The analytical solution of such a system can be found to be:\n$$x(t) = b + \\big{(} x(0) - b \\big{)} \\text{e}^{\\lambda t}.$$ \nNow consider a small perturbation of the activity around the fixed point: $x(0) = b+ \\epsilon$, where $|\\epsilon| \\ll 1$. Will the perturbation $\\epsilon(t)$ grow with time or will it decay to the fixed point? 
The evolution of the perturbation with time can be written, using the analytical solution for $x(t)$, as:\n $$\\epsilon (t) = x(t) - b = \\epsilon \\text{e}^{\\lambda t}$$\n\n- if $\\lambda < 0$, $\\epsilon(t)$ decays to zero, $x(t)$ will still converge to $b$ and the fixed point is \"**stable**\".\n\n- if $\\lambda > 0$, $\\epsilon(t)$ grows with time, $x(t)$ will leave the fixed point $b$ exponentially, and the fixed point is, therefore, \"**unstable**\" .", "_____no_output_____" ], [ "### Compute the stability of Equation $1$\n\nSimilar to what we did in the linear system above, in order to determine the stability of a fixed point $r^{*}$ of the excitatory population dynamics, we perturb Equation (1) around $r^{*}$ by $\\epsilon$, i.e. $r = r^{*} + \\epsilon$. We can plug in Equation (1) and obtain the equation determining the time evolution of the perturbation $\\epsilon(t)$:\n\n\\begin{align}\n\\tau \\frac{d\\epsilon}{dt} \\approx -\\epsilon + w F'(w\\cdot r^{*} + I_{\\text{ext}};a,\\theta) \\epsilon \n\\end{align}\n\nwhere $F'(\\cdot)$ is the derivative of the transfer function $F(\\cdot)$. We can rewrite the above equation as:\n\n\\begin{align}\n\\frac{d\\epsilon}{dt} \\approx \\frac{\\epsilon}{\\tau }[-1 + w F'(w\\cdot r^* + I_{\\text{ext}};a,\\theta)] \n\\end{align}\n\nThat is, as in the linear system above, the value of\n\n$$\\lambda = [-1+ wF'(w\\cdot r^* + I_{\\text{ext}};a,\\theta)]/\\tau \\qquad (4)$$\n\ndetermines whether the perturbation will grow or decay to zero, i.e., $\\lambda$ defines the stability of the fixed point. This value is called the **eigenvalue** of the dynamical system.", "_____no_output_____" ], [ "## Exercise 4: Compute $dF$\n\nThe derivative of the sigmoid transfer function is:\n\\begin{align} \n\\frac{dF}{dx} & = \\frac{d}{dx} (1+\\exp\\{-a(x-\\theta)\\})^{-1} \\\\\n& = a\\exp\\{-a(x-\\theta)\\} (1+\\exp\\{-a(x-\\theta)\\})^{-2}. 
\\qquad (5)\n\\end{align}\n\nLet's now find the expression for the derivative $\\displaystyle{\\frac{dF}{dx}}$ in the following cell and plot it.", "_____no_output_____" ] ], [ [ "def dF(x, a, theta):\n \"\"\"\n Population activation function.\n\n Args:\n x : the population input\n a : the gain of the function\n theta : the threshold of the function\n\n Returns:\n dFdx : the population activation response F(x) for input x\n \"\"\"\n\n ###########################################################################\n # TODO for students: compute dFdx ##\n raise NotImplementedError(\"Student excercise: compute the deravitive of F\")\n ###########################################################################\n\n # Calculate the population activation\n dFdx = ...\n\n return dFdx\n\n\npars = default_pars_single() # get default parameters\nx = np.arange(0, 10, .1) # set the range of input\n\n# Uncomment below to test your function\n# df = dF(x, pars['a'], pars['theta'])\n# plot_dFdt(x, df)", "_____no_output_____" ], [ "# to_remove solution\ndef dF(x, a, theta):\n \"\"\"\n Population activation function.\n\n Args:\n x : the population input\n a : the gain of the function\n theta : the threshold of the function\n\n Returns:\n dFdx : the population activation response F(x) for input x\n \"\"\"\n\n # Calculate the population activation\n dFdx = a * np.exp(-a * (x - theta)) * (1 + np.exp(-a * (x - theta)))**-2\n\n return dFdx\n\n\npars = default_pars_single() # get default parameters\nx = np.arange(0, 10, .1) # set the range of input\n\ndf = dF(x, pars['a'], pars['theta'])\n\nwith plt.xkcd():\n plot_dFdt(x, df)", "_____no_output_____" ] ], [ [ "## Exercise 5: Compute eigenvalues\n\nAs discussed above, for the case with $w=5.0$ and $I_{\\text{ext}}=0.5$, the system displays **three** fixed points. However, when we simulated the dynamics and varied the initial conditions $r_{\\rm init}$, we could only obtain **two** steady states. 
In this exercise, we will now check the stability of each of the three fixed points by calculating the corresponding eigenvalues with the function `eig_single`. Check the sign of each eigenvalue (i.e., stability of each fixed point). How many of the fixed points are stable?\n\nNote that the expression of the eigenvalue at fixed point $r^*$\n$$\\lambda = [-1+ wF'(w\\cdot r^* + I_{\\text{ext}};a,\\theta)]/\\tau$$", "_____no_output_____" ] ], [ [ "def eig_single(fp, tau, a, theta, w, I_ext, **other_pars):\n \"\"\"\n Args:\n fp : fixed point r_fp\n tau, a, theta, w, I_ext : Simulation parameters\n\n Returns:\n eig : eigevalue of the linearized system\n \"\"\"\n #####################################################################\n ## TODO for students: compute eigenvalue and disable the error\n raise NotImplementedError(\"Student excercise: compute the eigenvalue\")\n ######################################################################\n # Compute the eigenvalue\n eig = ...\n\n return eig\n\n\n# Find the eigenvalues for all fixed points of Exercise 2\npars = default_pars_single(w=5, I_ext=.5)\nr_guess_vector = [0, .4, .9]\nx_fp = my_fp_finder(pars, r_guess_vector)\n\n# Uncomment below lines after completing the eig_single function.\n\n# for fp in x_fp:\n# eig_fp = eig_single(fp, **pars)\n# print(f'Fixed point1 at {fp:.3f} with Eigenvalue={eig_fp:.3f}')", "_____no_output_____" ] ], [ [ "**SAMPLE OUTPUT**\n\n```\nFixed point1 at 0.042 with Eigenvalue=-0.583\nFixed point2 at 0.447 with Eigenvalue=0.498\nFixed point3 at 0.900 with Eigenvalue=-0.626\n```", "_____no_output_____" ] ], [ [ "# to_remove solution\ndef eig_single(fp, tau, a, theta, w, I_ext, **other_pars):\n \"\"\"\n Args:\n fp : fixed point r_fp\n tau, a, theta, w, I_ext : Simulation parameters\n\n Returns:\n eig : eigevalue of the linearized system\n \"\"\"\n # Compute the eigenvalue\n eig = (-1. 
+ w * dF(w * fp + I_ext, a, theta)) / tau\n\n return eig\n\n\n# Find the eigenvalues for all fixed points of Exercise 2\npars = default_pars_single(w=5, I_ext=.5)\nr_guess_vector = [0, .4, .9]\nx_fp = my_fp_finder(pars, r_guess_vector)\n\nfor fp in x_fp:\n eig_fp = eig_single(fp, **pars)\n print(f'Fixed point1 at {fp:.3f} with Eigenvalue={eig_fp:.3f}')", "_____no_output_____" ] ], [ [ "## Think! \nThroughout the tutorial, we have assumed $w> 0 $, i.e., we considered a single population of **excitatory** neurons. What do you think will be the behavior of a population of inhibitory neurons, i.e., where $w> 0$ is replaced by $w< 0$? ", "_____no_output_____" ] ], [ [ "# to_remove explanation\n\n\"\"\"\nDiscussion:\n\nYou can check this by going back the second last interactive demo and set the\nweight to w<0. You will notice that the system has only one fixed point and that\nis at zero value. For this particular dynamics, the system will eventually converge\nto zero. But try it out.\n\"\"\";", "_____no_output_____" ] ], [ [ "---\n# Bonus 2: Noisy input drives the transition between two stable states\n\n", "_____no_output_____" ], [ "## Ornstein-Uhlenbeck (OU) process\n\nAs discussed in several previous tutorials, the OU process is usually used to generate a noisy input into the neuron. The OU input $\\eta(t)$ follows: \n\n$$\\tau_\\eta \\frac{d}{dt}\\eta(t) = -\\eta (t) + \\sigma_\\eta\\sqrt{2\\tau_\\eta}\\xi(t)$$\n\nExecute the following function `my_OU(pars, sig, myseed=False)` to generate an OU process.", "_____no_output_____" ] ], [ [ "# @title OU process `my_OU(pars, sig, myseed=False)`\n\n# @markdown Make sure you execute this cell to visualize the noise!\n\n\ndef my_OU(pars, sig, myseed=False):\n \"\"\"\n A functions that generates Ornstein-Uhlenback process\n\n Args:\n pars : parameter dictionary\n sig : noise amplitute\n myseed : random seed. 
int or boolean\n\n Returns:\n I : Ornstein-Uhlenbeck input current\n \"\"\"\n\n # Retrieve simulation parameters\n dt, range_t = pars['dt'], pars['range_t']\n Lt = range_t.size\n tau_ou = pars['tau_ou'] # [ms]\n\n # set random seed\n if myseed:\n np.random.seed(seed=myseed)\n else:\n np.random.seed()\n\n # Initialize\n noise = np.random.randn(Lt)\n I_ou = np.zeros(Lt)\n I_ou[0] = noise[0] * sig\n\n # generate OU\n for it in range(Lt - 1):\n I_ou[it + 1] = (I_ou[it]\n + dt / tau_ou * (0. - I_ou[it])\n + np.sqrt(2 * dt / tau_ou) * sig * noise[it + 1])\n\n return I_ou\n\n\npars = default_pars_single(T=100)\npars['tau_ou'] = 1. # [ms]\nsig_ou = 0.1\nI_ou = my_OU(pars, sig=sig_ou, myseed=2020)\nplt.figure(figsize=(10, 4))\nplt.plot(pars['range_t'], I_ou, 'r')\nplt.xlabel('t (ms)')\nplt.ylabel(r'$I_{\\mathrm{OU}}$')\nplt.show()", "_____no_output_____" ] ], [ [ "## Example: Up-Down transition\n\nIn the presence of two or more fixed points, noisy inputs can drive a transition between the fixed points! Here, we stimulate an E population for 1,000 ms applying OU inputs.", "_____no_output_____" ] ], [ [ "# @title Simulation of an E population with OU inputs\n\n# @markdown Make sure you execute this cell to spot the Up-Down states!\n\npars = default_pars_single(T=1000)\npars['w'] = 5.0\nsig_ou = 0.7\npars['tau_ou'] = 1. # [ms]\npars['I_ext'] = 0.56 + my_OU(pars, sig=sig_ou, myseed=2020)\n\nr = simulate_single(pars)\n\nplt.figure(figsize=(10, 4))\nplt.plot(pars['range_t'], r, 'b', alpha=0.8)\nplt.xlabel('t (ms)')\nplt.ylabel(r'$r(t)$')\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb21b7a2c48b08fba58a7c5fc19ab65c10dd0024
7,409
ipynb
Jupyter Notebook
notebooks/find_tifs_with_no_matching_jpg.ipynb
jeremydmoore/coding4ch
c17bccb6b75b134c6c9a5c304b5793e8c2236350
[ "Apache-2.0" ]
14
2020-03-12T22:28:19.000Z
2020-11-18T18:56:11.000Z
notebooks/find_tifs_with_no_matching_jpg.ipynb
jeremydmoore/coding4ch
c17bccb6b75b134c6c9a5c304b5793e8c2236350
[ "Apache-2.0" ]
6
2020-04-07T16:11:03.000Z
2020-09-24T16:13:54.000Z
notebooks/find_tifs_with_no_matching_jpg.ipynb
jeremydmoore/coding4ch
c17bccb6b75b134c6c9a5c304b5793e8c2236350
[ "Apache-2.0" ]
2
2020-03-12T22:28:20.000Z
2020-06-15T18:29:30.000Z
31.798283
192
0.561479
[ [ [ "# Find \\*.tifs with no matching \\*.jpg\n\n#### Created on Cinco de Mayo in 2020 by Jeremy Moore and David Armstrong to identify \\*.tif images that don't have a matching \\*.jpg image for the Asian Art Museum of San Francisco\n\n1. Manually set root_dir_path to the full path of the directory containing your *all_jpgs* and *all_tifs* directories\n1. Programatically create a *no_match* directory inside of *all_tifs*\n1. Get list of all \\*.tifs in *all_tifs* directory\n1. Get the identier, or stem, of each \\*.tif\n1. Check if this identifier exists as a \\*.jpg in the *all_jpgs* directory first as a test\n1. Run again and if there is no matching \\*.jpg, move the \\*.tif into the *no_match* directory\n\n***Update root_dir_path location and verify names of *.jpg and *.tif directories below BEFORE running any cells!***", "_____no_output_____" ] ], [ [ "# imports from standard library\nfrom pathlib import Path", "_____no_output_____" ], [ "# set root directory path that contains the directories with our tifs and jpgs\nroot_dir_path = Path('/Users/dlisla/Pictures/test_directory')\n\nprint(f'root_dir_path: {root_dir_path}')\nprint(f'root_dir_path.name: {root_dir_path.name}')", "root_dir_path: /Users/dlisla/Pictures/test_directory\nroot_dir_path.name: test_directory\n" ], [ "# set path to directory with our all_jpgs and all_tifs\nbad_jpg_dir_path = root_dir_path.joinpath('all_jpgs')\nall_tifs_dir_path = root_dir_path.joinpath('all_tifs')\n\n# create a directory inside of all_tifs directory named no_match to move \nno_match_dir_path = all_tifs_dir_path.joinpath('no_match')\nno_match_dir_path.mkdir() # will raise a FileExistsError if the no_match directory already exists\n\n# verify existence of no_match directory, if False, then do not continue\nprint(f'Does the no_match directory exist? {no_match_dir_path.is_dir()}')", "Does the no_match directory exist? 
True\n" ], [ "# get sorted list of all *.tifs in all_tifs directory\n# NOTE: this is NOT recursive and will not look inside of all_tifs subdirectories\n# NOTE: this may also find non-image hidden files that start with a '.' and end with .tif\ntif_path_list = sorted(all_tifs_dir_path.glob('*.tif'))\n\nprint(f'Total number of *.tif: {len(tif_path_list)}\\n')\nprint(f'First *.tif paths: {tif_path_list[0]}')\nprint(f'Last *.tif paths: {tif_path_list[-1]}')", "Total number of *.tif: 4\n\nFirst *.tif paths: /Users/dlisla/Pictures/test_directory/all_tifs/01.tif\nLast *.tif paths: /Users/dlisla/Pictures/test_directory/all_tifs/04.tif\n" ], [ "# for loop to test our code test what will happen\nfor tif_path in tif_path_list:\n \n # get image's identifier to match against the JPEG filenames\n identifier = tif_path.stem # stem is the Python name for identifier\n \n \n # set jpg filename and path\n jpg_filename = f'{identifier}.jpg'\n jpg_path = bad_jpg_dir_path.joinpath(jpg_filename)\n \n \n # does jpg exist?\n if jpg_path.is_file(): # there's a match\n\n # print(f'{jpg_path.name} has a match!\\n') # commented out to silently skip matched images\n pass\n \n else: # we need to move it into our no_match directory\n \n print(f'{tif_path.name} has no matching *.jpg')\n \n # set new tif path inside of the no_match directory\n new_tif_path = no_match_dir_path.joinpath(tif_path.name)\n \n print(f'Moving to {new_tif_path} . . . (not really, this is a test)\\n')", "03.tif has no matching *.jpg\nMoving to /Users/dlisla/Pictures/test_directory/all_tifs/no_match/03.tif . . . 
(not really, this is a test)\n\n" ], [ "# warning, will move files!\nfor tif_path in tif_path_list:\n \n # get image's identifier to match against the JPEG filenames\n identifier = tif_path.stem # stem is the Python name for identifier\n \n \n # set jpg filename and path\n jpg_filename = f'{identifier}.jpg'\n jpg_path = bad_jpg_dir_path.joinpath(jpg_filename)\n \n \n # does jpg exist?\n if jpg_path.is_file(): # there's a match\n\n # print(f'{jpg_path.name} has a match!\\n') # commented out to silently skip matched images\n pass\n \n else: # we need to move it into our no_match directory\n \n print(f'{tif_path.name} has no JPEG')\n \n # set new tif path inside of the no_match directory\n new_tif_path = no_match_dir_path.joinpath(tif_path.name)\n \n print(f'Moving to {new_tif_path} . . .')\n \n # move our file\n tif_path.rename(new_tif_path)\n \n if new_tif_path.is_file():\n print('Success!\\n')\n else:\n print('Something broke with moving:{tif_path.name} to {tif_path}!!\\n')", "03.tif has no JPEG\nMoving to /Users/dlisla/Pictures/test_directory/all_tifs/no_match/03.tif . . .\nSuccess!\n\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb21b84651e4c606d3c15971467400ef0198b0c7
42,427
ipynb
Jupyter Notebook
1_Preliminaries.ipynb
shrey-mishra16/AutoImageCaptioning
0869da674db831401c938f84627cfe048c8ffcce
[ "MIT" ]
3
2020-04-14T14:40:59.000Z
2020-04-14T14:41:21.000Z
1_Preliminaries.ipynb
shrey-mishra16/AutoImageCaptioning
0869da674db831401c938f84627cfe048c8ffcce
[ "MIT" ]
null
null
null
1_Preliminaries.ipynb
shrey-mishra16/AutoImageCaptioning
0869da674db831401c938f84627cfe048c8ffcce
[ "MIT" ]
null
null
null
43.029412
705
0.597002
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. 
For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. 
Be patient - it may take a couple of minutes to run!", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\n# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False)", "Requirement already satisfied: nltk in /opt/conda/lib/python3.6/site-packages\nRequirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from nltk)\n\u001b[33mYou are using pip version 9.0.1, however version 18.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\nloading annotations into memory...\nDone (t=0.88s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\nDone (t=0.95s)\ncreating index...\n" ] ], [ [ "When you ran the code cell above, the data loader was stored in the variable `data_loader`. 
\n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. 
The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.", "_____no_output_____" ] ], [ [ "sample_caption = 'A person doing a trick on a rail while riding a skateboard.'", "_____no_output_____" ] ], [ [ "In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. 
Run the next code cell to visualize the effect on `sample_caption`.", "_____no_output_____" ] ], [ [ "import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)", "['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n" ] ], [ [ "In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.", "_____no_output_____" ] ], [ [ "sample_caption = []\n\nstart_word = data_loader.dataset.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader.dataset.vocab(start_word))\nprint(sample_caption)", "Special start word: <start>\n[0]\n" ] ], [ [ "In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.", "_____no_output_____" ] ], [ [ "sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])\nprint(sample_caption)", "[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18]\n" ] ], [ [ "In **`line 5`**, we append a final integer to mark the end of the caption. \n\nIdentical to the case of the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). 
You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.", "_____no_output_____" ] ], [ [ "end_word = data_loader.dataset.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader.dataset.vocab(end_word))\nprint(sample_caption)", "Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]\n" ] ], [ [ "Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).", "_____no_output_____" ] ], [ [ "import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)", "tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1009,\n 207, 139, 3, 753, 18, 1])\n" ] ], [ [ "And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. \n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. 
\n\n```python\ndef __call__(self, word):\n if not word in self.word2idx:\n return self.word2idx[self.unk_word]\n return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.", "_____no_output_____" ] ], [ [ "# Preview the word2idx dictionary.\ndict(list(data_loader.dataset.vocab.word2idx.items())[:10])", "_____no_output_____" ] ], [ [ "We also print the total number of keys.", "_____no_output_____" ] ], [ [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 8855\n" ] ], [ [ "As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. 
", "_____no_output_____" ] ], [ [ "# Modify the minimum word count threshold.\nvocab_threshold = 4\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False)", "loading annotations into memory...\nDone (t=0.88s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\n" ], [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 9955\n" ] ], [ [ "There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.", "_____no_output_____" ] ], [ [ "unk_word = data_loader.dataset.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))", "Special unknown word: <unk>\nAll unknown words are mapped to this integer: 2\n" ] ], [ [ "Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions. ", "_____no_output_____" ] ], [ [ "print(data_loader.dataset.vocab('jfkafejw'))\nprint(data_loader.dataset.vocab('ieowoqjf'))", "2\n2\n" ] ], [ [ "The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. 
To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. \n\nBut once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.", "_____no_output_____" ] ], [ [ "# Obtain the data loader (from file). Note that it runs much faster than before!\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_from_file=True)", "Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\n" ] ], [ [ "In the next section, you will learn how to use the data loader to obtain batches of training data.", "_____no_output_____" ], [ "<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. 
As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. ", "_____no_output_____" ] ], [ [ "from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader.dataset.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))", "value: 10 --- count: 86334\nvalue: 11 --- count: 79948\nvalue: 9 --- count: 71934\nvalue: 12 --- count: 57637\nvalue: 13 --- count: 37645\nvalue: 14 --- count: 22335\nvalue: 8 --- count: 20771\nvalue: 15 --- count: 12841\nvalue: 16 --- count: 7729\nvalue: 17 --- count: 4842\nvalue: 18 --- count: 3104\nvalue: 19 --- count: 2014\nvalue: 7 --- count: 1597\nvalue: 20 --- count: 1451\nvalue: 21 --- count: 999\nvalue: 22 --- count: 683\nvalue: 23 --- count: 534\nvalue: 24 --- count: 383\nvalue: 25 --- count: 277\nvalue: 26 --- count: 215\nvalue: 27 --- count: 159\nvalue: 28 --- count: 115\nvalue: 29 --- count: 86\nvalue: 30 --- count: 58\nvalue: 31 --- count: 49\nvalue: 32 --- count: 44\nvalue: 34 --- count: 39\nvalue: 37 --- count: 32\nvalue: 33 --- count: 31\nvalue: 35 --- count: 31\nvalue: 36 --- count: 26\nvalue: 38 --- count: 18\nvalue: 39 --- count: 18\nvalue: 43 --- count: 16\nvalue: 44 --- count: 16\nvalue: 48 --- count: 12\nvalue: 45 --- count: 11\nvalue: 42 --- count: 10\nvalue: 40 --- count: 9\nvalue: 49 --- count: 9\nvalue: 46 --- count: 9\nvalue: 47 --- count: 7\nvalue: 50 --- count: 6\nvalue: 51 --- count: 6\nvalue: 41 --- count: 6\nvalue: 52 --- count: 5\nvalue: 54 --- count: 3\nvalue: 56 --- count: 2\nvalue: 6 --- count: 2\nvalue: 53 --- count: 2\nvalue: 55 --- count: 2\nvalue: 57 --- count: 1\n" ] ], [ [ "To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that 
length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which then is used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader.dataset.get_train_indices()\nprint('sampled indices:', indices)\n\n# Create and assign a batch sampler to retrieve a batch with the sampled indices.\nnew_sampler = data.sampler.SubsetRandomSampler(indices=indices)\ndata_loader.batch_sampler.sampler = new_sampler\n \n# Obtain the batch.\nimages, captions = next(iter(data_loader))\n \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.\n# print('images:', images)\n# print('captions:', captions)", "sampled indices: [218755, 211575, 274482, 18930, 119189, 307903, 116144, 19399, 382457, 188791]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 15])\n" ] ], [ [ "Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. 
Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.", "_____no_output_____" ], [ "<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ", "_____no_output_____" ] ], [ [ "# Watch for any changes in model.py, and re-load it automatically.\n% load_ext autoreload\n% autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nfrom model import EncoderCNN, DecoderRNN", "_____no_output_____" ] ], [ [ "In the next code cell we define a `device` that you will use move PyTorch tensors to GPU (if CUDA is available). Run this code cell before continuing.", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ] ], [ [ "Run the code cell below to instantiate the CNN encoder in `encoder`. 
\n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.", "_____no_output_____" ] ], [ [ "# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. (Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n \n# Move last batch of images (from Step 2) to GPU if CUDA is available. \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! :D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"", "Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /root/.torch/models/resnet50-19c8e357.pth\n100%|██████████| 102502400/102502400 [00:04<00:00, 23112603.41it/s]\n" ] ], [ [ "The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n![Encoder](images/encoder.png)\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). 
\n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.", "_____no_output_____" ], [ "<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. 
\n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. \n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n![Decoder](images/decoder.png)\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.", "_____no_output_____" ] ], [ [ "# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 1) to GPU if CUDA is available \ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! 
:D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"", "type(outputs): <class 'torch.Tensor'>\noutputs.shape: torch.Size([10, 15, 9955])\n" ] ], [ [ "When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb21b99f4aebd4bf6bd3892200b8ee2347a29044
19,199
ipynb
Jupyter Notebook
python-sdk/tutorials/automl-with-azureml/continuous-retraining/auto-ml-continuous-retraining.ipynb
rejuvyesh/azureml-examples
a3f22ffd0b4329fed9853f9a7a54d34c5ea830ea
[ "MIT" ]
null
null
null
python-sdk/tutorials/automl-with-azureml/continuous-retraining/auto-ml-continuous-retraining.ipynb
rejuvyesh/azureml-examples
a3f22ffd0b4329fed9853f9a7a54d34c5ea830ea
[ "MIT" ]
null
null
null
python-sdk/tutorials/automl-with-azureml/continuous-retraining/auto-ml-continuous-retraining.ipynb
rejuvyesh/azureml-examples
a3f22ffd0b4329fed9853f9a7a54d34c5ea830ea
[ "MIT" ]
null
null
null
32.762799
486
0.613001
[ [ [ "# Automated Machine Learning \n**Continuous retraining using Pipelines and Time-Series TabularDataset**\n## Contents\n1. [Introduction](#Introduction)\n2. [Setup](#Setup)\n3. [Compute](#Compute)\n4. [Run Configuration](#Run-Configuration)\n5. [Data Ingestion Pipeline](#Data-Ingestion-Pipeline)\n6. [Training Pipeline](#Training-Pipeline)\n7. [Publish Retraining Pipeline and Schedule](#Publish-Retraining-Pipeline-and-Schedule)\n8. [Test Retraining](#Test-Retraining)", "_____no_output_____" ], [ "## Introduction\nIn this example we use AutoML and Pipelines to enable contious retraining of a model based on updates to the training dataset. We will create two pipelines, the first one to demonstrate a training dataset that gets updated over time. We leverage time-series capabilities of `TabularDataset` to achieve this. The second pipeline utilizes pipeline `Schedule` to trigger continuous retraining. \nMake sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.\nIn this notebook you will learn how to:\n* Create an Experiment in an existing Workspace.\n* Configure AutoML using AutoMLConfig.\n* Create data ingestion pipeline to update a time-series based TabularDataset\n* Create training pipeline to prepare data, run AutoML, register the model and setup pipeline triggers.\n\n## Setup\nAs part of the setup you have already created an Azure ML `Workspace` object. 
For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.", "_____no_output_____" ] ], [ [ "import logging\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn import datasets\n\nimport azureml.core\nfrom azureml.core.experiment import Experiment\nfrom azureml.core.workspace import Workspace\nfrom azureml.train.automl import AutoMLConfig", "_____no_output_____" ] ], [ [ "This sample notebook may use features that are not available in previous versions of the Azure ML SDK.", "_____no_output_____" ], [ "Accessing the Azure ML workspace requires authentication with Azure.\n\nThe default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.\n\nIf you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:\n```\nfrom azureml.core.authentication import InteractiveLoginAuthentication\nauth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')\nws = Workspace.from_config(auth = auth)\n```\nIf you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:\n```\nfrom azureml.core.authentication import ServicePrincipalAuthentication\nauth = auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')\nws = Workspace.from_config(auth = auth)\n```\nFor more details, see aka.ms/aml-notebook-auth", "_____no_output_____" ] ], [ [ "ws = Workspace.from_config()\ndstor = ws.get_default_datastore()\n\n# Choose a name for the run history container in the workspace.\nexperiment_name = \"retrain-noaaweather\"\nexperiment = Experiment(ws, experiment_name)\n\noutput = 
{}\noutput[\"Subscription ID\"] = ws.subscription_id\noutput[\"Workspace\"] = ws.name\noutput[\"Resource Group\"] = ws.resource_group\noutput[\"Location\"] = ws.location\noutput[\"Run History Name\"] = experiment_name\npd.set_option(\"display.max_colwidth\", -1)\noutputDf = pd.DataFrame(data=output, index=[\"\"])\noutputDf.T", "_____no_output_____" ] ], [ [ "## Compute \n\n#### Create or Attach existing AmlCompute\n\nYou will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.\n\n> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.\n\n#### Creation of AmlCompute takes approximately 5 minutes. \nIf the AmlCompute with that name is already in your workspace this code will skip the creation process.\nAs with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. 
Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.", "_____no_output_____" ] ], [ [ "from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n# Choose a name for your CPU cluster\namlcompute_cluster_name = \"cont-cluster\"\n\n# Verify that cluster does not exist already\ntry:\n compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)\n print(\"Found existing cluster, use it.\")\nexcept ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=\"STANDARD_DS12_V2\", max_nodes=4\n )\n compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)\ncompute_target.wait_for_completion(show_output=True)", "_____no_output_____" ] ], [ [ "## Run Configuration", "_____no_output_____" ] ], [ [ "from azureml.core.runconfig import CondaDependencies, RunConfiguration\n\n# create a new RunConfig object\nconda_run_config = RunConfiguration(framework=\"python\")\n\n# Set compute target to AmlCompute\nconda_run_config.target = compute_target\n\nconda_run_config.environment.docker.enabled = True\n\ncd = CondaDependencies.create(\n pip_packages=[\n \"azureml-sdk[automl]\",\n \"applicationinsights\",\n \"azureml-opendatasets\",\n \"azureml-defaults\",\n ],\n conda_packages=[\"numpy==1.16.2\"],\n pin_sdk_version=False,\n)\nconda_run_config.environment.python.conda_dependencies = cd\n\nprint(\"run config is ready\")", "_____no_output_____" ] ], [ [ "## Data Ingestion Pipeline \nFor this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). 
You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.\n", "_____no_output_____" ] ], [ [ "# The name and target column of the Dataset to create\ndataset = \"NOAA-Weather-DS4\"\ntarget_column_name = \"temperature\"", "_____no_output_____" ] ], [ [ "\n### Upload Data Step\nThe data ingestion pipeline has a single step with a script to query the latest weather data and upload it to the blob store. During the first run, the script will create and register a time-series based `TabularDataset` with the past one week of weather data. For each subsequent run, the script will create a partition in the blob store by querying NOAA for new weather data since the last modified time of the dataset (`dataset.data_changed_time`) and creating a data.csv file.", "_____no_output_____" ] ], [ [ "from azureml.pipeline.core import Pipeline, PipelineParameter\nfrom azureml.pipeline.steps import PythonScriptStep\n\nds_name = PipelineParameter(name=\"ds_name\", default_value=dataset)\nupload_data_step = PythonScriptStep(\n script_name=\"upload_weather_data.py\",\n allow_reuse=False,\n name=\"upload_weather_data\",\n arguments=[\"--ds_name\", ds_name],\n compute_target=compute_target,\n runconfig=conda_run_config,\n)", "_____no_output_____" ] ], [ [ "### Submit Pipeline Run", "_____no_output_____" ] ], [ [ "data_pipeline = Pipeline(\n description=\"pipeline_with_uploaddata\", workspace=ws, steps=[upload_data_step]\n)\ndata_pipeline_run = experiment.submit(\n data_pipeline, pipeline_parameters={\"ds_name\": dataset}\n)", "_____no_output_____" ], [ "data_pipeline_run.wait_for_completion(show_output=False)", "_____no_output_____" ] ], [ [ "## Training Pipeline\n### Prepare Training Data Step\n\nScript to check if new data is available since the model was last trained. If no new data is available, we cancel the remaining pipeline steps. 
We need to set allow_reuse flag to False to allow the pipeline to run even when inputs don't change. We also need the name of the model to check the time the model was last trained.", "_____no_output_____" ] ], [ [ "from azureml.pipeline.core import PipelineData\n\n# The model name with which to register the trained model in the workspace.\nmodel_name = PipelineParameter(\"model_name\", default_value=\"noaaweatherds\")", "_____no_output_____" ], [ "data_prep_step = PythonScriptStep(\n script_name=\"check_data.py\",\n allow_reuse=False,\n name=\"check_data\",\n arguments=[\"--ds_name\", ds_name, \"--model_name\", model_name],\n compute_target=compute_target,\n runconfig=conda_run_config,\n)", "_____no_output_____" ], [ "from azureml.core import Dataset\n\ntrain_ds = Dataset.get_by_name(ws, dataset)\ntrain_ds = train_ds.drop_columns([\"partition_date\"])", "_____no_output_____" ] ], [ [ "### AutoMLStep\nCreate an AutoMLConfig and a training step.", "_____no_output_____" ] ], [ [ "from azureml.train.automl import AutoMLConfig\nfrom azureml.pipeline.steps import AutoMLStep\n\nautoml_settings = {\n \"iteration_timeout_minutes\": 10,\n \"experiment_timeout_hours\": 0.25,\n \"n_cross_validations\": 3,\n \"primary_metric\": \"r2_score\",\n \"max_concurrent_iterations\": 3,\n \"max_cores_per_iteration\": -1,\n \"verbosity\": logging.INFO,\n \"enable_early_stopping\": True,\n}\n\nautoml_config = AutoMLConfig(\n task=\"regression\",\n debug_log=\"automl_errors.log\",\n path=\".\",\n compute_target=compute_target,\n training_data=train_ds,\n label_column_name=target_column_name,\n **automl_settings,\n)", "_____no_output_____" ], [ "from azureml.pipeline.core import PipelineData, TrainingOutput\n\nmetrics_output_name = \"metrics_output\"\nbest_model_output_name = \"best_model_output\"\n\nmetrics_data = PipelineData(\n name=\"metrics_data\",\n datastore=dstor,\n pipeline_output_name=metrics_output_name,\n training_output=TrainingOutput(type=\"Metrics\"),\n)\nmodel_data = 
PipelineData(\n name=\"model_data\",\n datastore=dstor,\n pipeline_output_name=best_model_output_name,\n training_output=TrainingOutput(type=\"Model\"),\n)", "_____no_output_____" ], [ "automl_step = AutoMLStep(\n name=\"automl_module\",\n automl_config=automl_config,\n outputs=[metrics_data, model_data],\n allow_reuse=False,\n)", "_____no_output_____" ] ], [ [ "### Register Model Step\nScript to register the model to the workspace. ", "_____no_output_____" ] ], [ [ "register_model_step = PythonScriptStep(\n script_name=\"register_model.py\",\n name=\"register_model\",\n allow_reuse=False,\n arguments=[\n \"--model_name\",\n model_name,\n \"--model_path\",\n model_data,\n \"--ds_name\",\n ds_name,\n ],\n inputs=[model_data],\n compute_target=compute_target,\n runconfig=conda_run_config,\n)", "_____no_output_____" ] ], [ [ "### Submit Pipeline Run", "_____no_output_____" ] ], [ [ "training_pipeline = Pipeline(\n description=\"training_pipeline\",\n workspace=ws,\n steps=[data_prep_step, automl_step, register_model_step],\n)", "_____no_output_____" ], [ "training_pipeline_run = experiment.submit(\n training_pipeline,\n pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n)", "_____no_output_____" ], [ "training_pipeline_run.wait_for_completion(show_output=False)", "_____no_output_____" ] ], [ [ "### Publish Retraining Pipeline and Schedule\nOnce we are happy with the pipeline, we can publish the training pipeline to the workspace and create a schedule to trigger on blob change. The schedule polls the blob store where the data is being uploaded and runs the retraining pipeline if there is a data change. 
A new version of the model will be registered to the workspace once the run is complete.", "_____no_output_____" ] ], [ [ "pipeline_name = \"Retraining-Pipeline-NOAAWeather\"\n\npublished_pipeline = training_pipeline.publish(\n name=pipeline_name, description=\"Pipeline that retrains AutoML model\"\n)\n\npublished_pipeline", "_____no_output_____" ], [ "from azureml.pipeline.core import Schedule\n\nschedule = Schedule.create(\n workspace=ws,\n name=\"RetrainingSchedule\",\n pipeline_parameters={\"ds_name\": dataset, \"model_name\": \"noaaweatherds\"},\n pipeline_id=published_pipeline.id,\n experiment_name=experiment_name,\n datastore=dstor,\n wait_for_provisioning=True,\n polling_interval=1440,\n)", "_____no_output_____" ] ], [ [ "## Test Retraining\nHere we setup the data ingestion pipeline to run on a schedule, to verify that the retraining pipeline runs as expected. \n\nNote: \n* Azure NOAA Weather data is updated daily and retraining will not trigger if there is no new data available. \n* Depending on the polling interval set in the schedule, the retraining may take some time trigger after data ingestion pipeline completes.", "_____no_output_____" ] ], [ [ "pipeline_name = \"DataIngestion-Pipeline-NOAAWeather\"\n\npublished_pipeline = training_pipeline.publish(\n name=pipeline_name, description=\"Pipeline that updates NOAAWeather Dataset\"\n)\n\npublished_pipeline", "_____no_output_____" ], [ "from azureml.pipeline.core import Schedule\n\nschedule = Schedule.create(\n workspace=ws,\n name=\"RetrainingSchedule-DataIngestion\",\n pipeline_parameters={\"ds_name\": dataset},\n pipeline_id=published_pipeline.id,\n experiment_name=experiment_name,\n datastore=dstor,\n wait_for_provisioning=True,\n polling_interval=1440,\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb21c53021edffa52920a194e27637df0dc7be91
4,063
ipynb
Jupyter Notebook
prediction_classical.ipynb
mherlich/wireless-data-set
4440c7efbb17b2d91c90b712f09381e1933c60e9
[ "Apache-2.0" ]
2
2022-02-04T20:43:12.000Z
2022-03-04T14:23:08.000Z
prediction_classical.ipynb
mherlich/wireless-data-set
4440c7efbb17b2d91c90b712f09381e1933c60e9
[ "Apache-2.0" ]
null
null
null
prediction_classical.ipynb
mherlich/wireless-data-set
4440c7efbb17b2d91c90b712f09381e1933c60e9
[ "Apache-2.0" ]
2
2022-03-06T14:01:46.000Z
2022-03-09T02:59:29.000Z
25.080247
158
0.505292
[ [ [ "import sys\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport contextily as ctx\nimport matplotlib.pyplot as plt\nimport sklearn.linear_model\nimport geopy.distance\n\nimport utils", "_____no_output_____" ], [ "df = pd.read_feather('SRFG-v1.fth')", "_____no_output_____" ] ], [ [ "# Constant predictions", "_____no_output_____" ] ], [ [ "p = pd.DataFrame(index=df['datarate'].index)\np['const(mean)'] = np.repeat(df['datarate'].mean(), len(df))\np['const(median)'] = np.repeat(df['datarate'].median(), len(df))", "_____no_output_____" ] ], [ [ "# Basic lookups", "_____no_output_____" ] ], [ [ "for v in [\"sinr\", \"signal\", \"rsrq\", \"rsrp\", \"rssi\"]:\n lookup = df.groupby(v, dropna=False)['datarate'].median()\n p[v+'-Lookup'] = list(lookup[df[v]])\n print(v, df[v].isna().sum())\n lookup.plot()\n plt.show()", "_____no_output_____" ], [ "for i in range(2, 6):\n print(geopy.distance.geodesic((df[\"lat\"].mean(), df[\"long\"].mean()), (df[\"lat\"].mean(), df[\"long\"].mean()+10**-i)).meters)\n lookup = df.groupby(round(df.long, i), dropna=False)['datarate'].median()\n p['long-Lookup'+str(i)] = list(lookup[round(df[\"long\"], i)])\n lookup.plot()\n plt.show()", "_____no_output_____" ] ], [ [ "# Estimation from other data rates", "_____no_output_____" ] ], [ [ "p['prevDR'] = (df[\"datarate\"].shift(1) * df[\"predecessor\"]).fillna(df[\"datarate\"].shift(1) * df[\"predecessor2\"]).fillna(df[\"datarate\"].mean())", "_____no_output_____" ], [ "tp = (df[\"datarate\"].shift(1) * df[\"predecessor\"]).fillna(df[\"datarate\"].shift(1) * df[\"predecessor2\"])\nts = (df[\"datarate\"].shift(-1) * df[\"predecessor\"].shift(-1)).fillna(df[\"datarate\"].shift(-1) * df[\"predecessor2\"].shift(-1))\np['meanDR'] = ((tp+ts)/2).fillna(tp).fillna(ts).fillna(df[\"datarate\"].mean())", "_____no_output_____" ] ], [ [ "# Evaluation", "_____no_output_____" ] ], [ [ "r = utils.evaluate(p, df['datarate'])\nr[\"Strategy\"] = \"\\texttt{\" + 
r[\"Strategy\"] + \"}\"\nr[\"MAE [\\\\si{\\\\mega\\\\bit\\\\per\\\\second}]\"] = r[\"MAE\"]/1e6\nr[\"$R^2$\"] = r[\"R2\"]\nprint(r[[\"Strategy\", \"MAE [\\\\si{\\\\mega\\\\bit\\\\per\\\\second}]\", \"$R^2$\"]].to_latex(index=False, float_format=\"%.2f\", escape=False))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb21c7e3cb762cf5b32ec4ab79a94f8b3b0df78d
7,197
ipynb
Jupyter Notebook
pandas_speed_simple.ipynb
tagler/Data_Science_Notebook_Tutorials_Python
60ec52a7159f6947ff1397e7def404c959d056fb
[ "MIT" ]
1
2021-03-28T03:14:03.000Z
2021-03-28T03:14:03.000Z
pandas_speed_simple.ipynb
tagler/Data_Science_Notebook_Tutorials_Python
60ec52a7159f6947ff1397e7def404c959d056fb
[ "MIT" ]
null
null
null
pandas_speed_simple.ipynb
tagler/Data_Science_Notebook_Tutorials_Python
60ec52a7159f6947ff1397e7def404c959d056fb
[ "MIT" ]
null
null
null
20.504274
137
0.484646
[ [ [ "# Pandas Speed Compairison - Simple Function ", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "column_size = 100_000\ndf = pd.DataFrame(\n {\n \"A\" : np.random.random(column_size),\n \"B\" : np.random.random(column_size),\n \"C\" : np.random.random(column_size),\n \"D\" : np.random.random(column_size),\n \"E\" : np.random.random(column_size),\n }\n)", "_____no_output_____" ], [ "def simple_function(a, b, c, d, e):\n return a + b - c * d / e", "_____no_output_____" ] ], [ [ "## Iterrows", "_____no_output_____" ] ], [ [ "%%timeit\nresult = []\nfor each_index, each_row in df.iterrows():\n each_result = simple_function(each_row.A, each_row.B, each_row.C, each_row.D, each_row.E)\n result.append(each_result)\ndf[\"RESULT\"] = result", "8.72 s ± 384 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "## Apply", "_____no_output_____" ] ], [ [ "%%timeit\ndf[\"RESULT\"] = df.apply(lambda x : simple_function(x.A, x.B, x.C, x.D, x.E), axis='columns')", "4.26 s ± 518 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "## Pandarallel", "_____no_output_____" ] ], [ [ "from pandarallel import pandarallel", "_____no_output_____" ], [ "pandarallel.initialize()", "INFO: Pandarallel will run on 4 workers.\nINFO: Pandarallel will use standard multiprocessing data transfer (pipe) to transfer data between the main process and workers.\n" ], [ "%%timeit\ndf[\"RESULT\"] = df.parallel_apply(lambda x : simple_function(x.A, x.B, x.C, x.D, x.E), axis='columns')", "2.39 s ± 219 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] ], [ [ "## Itertuples ", "_____no_output_____" ] ], [ [ "%%timeit\nresult = []\nfor each_row in df.itertuples():\n each_result = simple_function(each_row.A, each_row.B, each_row.C, each_row.D, each_row.E)\n result.append(each_result)\ndf[\"RESULT\"] = result", "164 ms ± 14.5 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n" ] ], [ [ "## Swifter", "_____no_output_____" ] ], [ [ "import swifter", "_____no_output_____" ], [ "%%timeit\ndf[\"RESULT\"] = df.swifter.progress_bar(False).apply(lambda x : simple_function(x.A, x.B, x.C, x.D, x.E), axis='columns')", "4.49 ms ± 523 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] ], [ [ "## Pandas Vectorize", "_____no_output_____" ] ], [ [ "%%timeit\ndf[\"RESULT\"] = simple_function(df[\"A\"], df[\"B\"], df[\"C\"], df[\"D\"], df[\"E\"])", "1.86 ms ± 251 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] ], [ [ "## Numpy Vectorize", "_____no_output_____" ] ], [ [ "%%timeit\ndf['RESULT'] = simple_function(df[\"A\"].values, df[\"B\"].values, df[\"C\"].values, df[\"D\"].values, df[\"E\"].values)", "1.31 ms ± 175 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n" ] ], [ [ "## Numba", "_____no_output_____" ] ], [ [ "import numba", "_____no_output_____" ], [ "@numba.njit()\ndef simple_function_numba(a, b, c, d, e):\n return a + b - c * d / e", "_____no_output_____" ], [ "%%timeit\ndf['RESULT'] = simple_function_numba(df[\"A\"].values, df[\"B\"].values, df[\"C\"].values, df[\"D\"].values, df[\"E\"].values)", "911 µs ± 153 µs per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ], [ "# Note: use modin and dask for big datasets (overwrites pandas api, faster read_csv)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb21cdd59e3fd61a99560863fbd22eea68672328
202,256
ipynb
Jupyter Notebook
DataFrames 2.ipynb
radzak/PandasTutorial
5b8c2b4bff3a205759560cbd1715b0bb7214adf4
[ "MIT" ]
null
null
null
DataFrames 2.ipynb
radzak/PandasTutorial
5b8c2b4bff3a205759560cbd1715b0bb7214adf4
[ "MIT" ]
null
null
null
DataFrames 2.ipynb
radzak/PandasTutorial
5b8c2b4bff3a205759560cbd1715b0bb7214adf4
[ "MIT" ]
null
null
null
35.45241
114
0.346042
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df = pd.read_csv('datasets/employees.csv')\ndf.head(3)", "_____no_output_____" ], [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 8 columns):\nFirst Name 933 non-null object\nGender 855 non-null object\nStart Date 1000 non-null object\nLast Login Time 1000 non-null object\nSalary 1000 non-null int64\nBonus % 1000 non-null float64\nSenior Management 933 non-null object\nTeam 957 non-null object\ndtypes: float64(1), int64(1), object(6)\nmemory usage: 62.6+ KB\n" ], [ "df['Start Date'].head(3)", "_____no_output_____" ], [ "df['Start Date'] = pd.to_datetime(df['Start Date'])", "_____no_output_____" ], [ "df['Last Login Time'] = pd.to_datetime(df['Last Login Time'])", "_____no_output_____" ], [ "df['Senior Management'] = df['Senior Management'].astype('bool')", "_____no_output_____" ], [ "df['Gender'] = df['Gender'].astype('category')", "_____no_output_____" ], [ "# reduced memory usage\ndf.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 8 columns):\nFirst Name 933 non-null object\nGender 855 non-null category\nStart Date 1000 non-null datetime64[ns]\nLast Login Time 1000 non-null datetime64[ns]\nSalary 1000 non-null int64\nBonus % 1000 non-null float64\nSenior Management 1000 non-null bool\nTeam 957 non-null object\ndtypes: bool(1), category(1), datetime64[ns](2), float64(1), int64(1), object(2)\nmemory usage: 49.0+ KB\n" ] ], [ [ "# Filter A `DataFrame` Based On A Condition", "_____no_output_____" ] ], [ [ "df = pd.read_csv('datasets/employees.csv', parse_dates=['Start Date', 'Last Login Time'])\n# df['Start Date'] = pd.to_datetime(df['Start Date'])\n# df['Last Login Time'] = pd.to_datetime(df['Last Login Time'])\n\ndf['Senior Management'] = df['Senior Management'].astype('bool')\ndf['Gender'] = df['Gender'].astype('category')\ndf.head(3)", "_____no_output_____" ], [ "df[[True, False, True] + 
[False for x in range(997)]] # you can pass a list of booleans to get specific rows", "_____no_output_____" ], [ "df[df['Gender'] == 'Male']", "_____no_output_____" ], [ "mask = df['Team'] == 'Finance' # mask/extract/condition - common names\ndf[mask]", "_____no_output_____" ], [ "df[~df['Senior Management']].head() # since Senior Management Column consists of boolean values", "_____no_output_____" ], [ "mask = df['Team'] != 'Marketing'\ndf[mask].head()", "_____no_output_____" ], [ "df[df['Salary'] > 110000].head()\n\ndf[df['Bonus %'] < 1.5].head()", "_____no_output_____" ], [ "mask = df['Start Date'] <= '1985-01-01'\ndf[mask].head()", "_____no_output_____" ] ], [ [ "# Filter with More than One Condition (AND - &)", "_____no_output_____" ] ], [ [ "df = pd.read_csv('datasets/employees.csv', parse_dates=['Start Date', 'Last Login Time'])\ndf['Senior Management'] = df['Senior Management'].astype('bool')\ndf['Gender'] = df['Gender'].astype('category')\ndf.head(3)", "_____no_output_____" ], [ "pd.Series([True, False]) & pd.Series([True, True])", "_____no_output_____" ], [ "mask1 = df['Gender'] == 'Male'\nmask2 = df['Team'] == 'Marketing'\n\ndf[mask1 & mask2].head()", "_____no_output_____" ] ], [ [ "# Filter with More than One Condition (OR - |)", "_____no_output_____" ] ], [ [ "mask1 = df['Senior Management']\nmask2 = df['Start Date'] < '1990-01-01'\n\ndf[mask1 | mask2]", "_____no_output_____" ], [ "mask1 = df['First Name'] == 'Robert'\nmask2 = df['Team'] == 'Client Services'\nmask3 = df['Start Date'] > '2016-06-01'\n\ndf[(mask1 & mask2) | mask3]", "_____no_output_____" ] ], [ [ "# The `.isin()` Method", "_____no_output_____" ], [ "### bad way", "_____no_output_____" ] ], [ [ "mask1 = df['Team'] == 'Legal'\nmask2 = df['Team'] == 'Sales'\nmask3 = df['Team'] == 'Product'\n\ndf[mask1 | mask2 | mask3]", "_____no_output_____" ] ], [ [ "### good way - `.isin()` method", "_____no_output_____" ] ], [ [ "import numpy as np\nmask = df['Team'].isin(['Legal', 'Product', 
'Marketing', np.nan]) # you can also pass a Pandas Series\ndf[mask]", "_____no_output_____" ] ], [ [ "# The `.isnull()` and `.notnull()` Methods", "_____no_output_____" ] ], [ [ "df['Team'].isnull()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb21d16c0256f75b4868d21f4b8906a62c1cec66
90,287
ipynb
Jupyter Notebook
h3Copy_of_LS_DS_223_assignment.ipynb
pingao2019/DS-Unit-2-Kaggle-Challenge
370fee51d8f4d89373828c06ef708bc2c19b1c72
[ "MIT" ]
null
null
null
h3Copy_of_LS_DS_223_assignment.ipynb
pingao2019/DS-Unit-2-Kaggle-Challenge
370fee51d8f4d89373828c06ef708bc2c19b1c72
[ "MIT" ]
null
null
null
h3Copy_of_LS_DS_223_assignment.ipynb
pingao2019/DS-Unit-2-Kaggle-Challenge
370fee51d8f4d89373828c06ef708bc2c19b1c72
[ "MIT" ]
null
null
null
53.11
2,949
0.439011
[ [ [ "<a href=\"https://colab.research.google.com/github/pingao2019/DS-Unit-2-Kaggle-Challenge/blob/master/h3Copy_of_LS_DS_223_assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Lambda School Data Science\n\n*Unit 2, Sprint 2, Module 3*\n\n---", "_____no_output_____" ], [ "# Cross-Validation\n\n\n## Assignment\n- [ ] [Review requirements for your portfolio project](https://lambdaschool.github.io/ds/unit2), then submit your dataset.\n- [ ] Continue to participate in our Kaggle challenge. \n- [ ] Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.\n- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)\n- [ ] Commit your notebook to your fork of the GitHub repo.\n\n\nYou won't be able to just copy from the lesson notebook to this assignment.\n\n- Because the lesson was ***regression***, but the assignment is ***classification.***\n- Because the lesson used [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html), which doesn't work as-is for _multi-class_ classification.\n\nSo you will have to adapt the example, which is good real-world practice.\n\n1. Use a model for classification, such as [RandomForestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)\n2. Use hyperparameters that match the classifier, such as `randomforestclassifier__ ...`\n3. Use a metric for classification, such as [`scoring='accuracy'`](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values)\n4. 
If you’re doing a multi-class classification problem — such as whether a waterpump is functional, functional needs repair, or nonfunctional — then use a categorical encoding that works for multi-class classification, such as [OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html) (not [TargetEncoder](https://contrib.scikit-learn.org/categorical-encoding/targetencoder.html))\n\n\n\n## Stretch Goals\n\n### Reading\n- Jake VanderPlas, [Python Data Science Handbook, Chapter 5.3](https://jakevdp.github.io/PythonDataScienceHandbook/05.03-hyperparameters-and-model-validation.html), Hyperparameters and Model Validation\n- Jake VanderPlas, [Statistics for Hackers](https://speakerdeck.com/jakevdp/statistics-for-hackers?slide=107)\n- Ron Zacharski, [A Programmer's Guide to Data Mining, Chapter 5](http://guidetodatamining.com/chapter5/), 10-fold cross validation\n- Sebastian Raschka, [A Basic Pipeline and Grid Search Setup](https://github.com/rasbt/python-machine-learning-book/blob/master/code/bonus/svm_iris_pipeline_and_gridsearch.ipynb)\n- Peter Worcester, [A Comparison of Grid Search and Randomized Search Using Scikit Learn](https://blog.usejournal.com/a-comparison-of-grid-search-and-randomized-search-using-scikit-learn-29823179bc85)\n\n### Doing\n- Add your own stretch goals!\n- Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/). See the previous assignment notebook for details.\n- In additon to `RandomizedSearchCV`, scikit-learn has [`GridSearchCV`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html). Another library called scikit-optimize has [`BayesSearchCV`](https://scikit-optimize.github.io/notebooks/sklearn-gridsearchcv-replacement.html). 
Experiment with these alternatives.\n- _[Introduction to Machine Learning with Python](http://shop.oreilly.com/product/0636920030515.do)_ discusses options for \"Grid-Searching Which Model To Use\" in Chapter 6:\n\n> You can even go further in combining GridSearchCV and Pipeline: it is also possible to search over the actual steps being performed in the pipeline (say whether to use StandardScaler or MinMaxScaler). This leads to an even bigger search space and should be considered carefully. Trying all possible solutions is usually not a viable machine learning strategy. However, here is an example comparing a RandomForestClassifier and an SVC ...\n\nThe example is shown in [the accompanying notebook](https://github.com/amueller/introduction_to_ml_with_python/blob/master/06-algorithm-chains-and-pipelines.ipynb), code cells 35-37. Could you apply this concept to your own pipelines?\n", "_____no_output_____" ], [ "### BONUS: Stacking!\n\nHere's some code you can use to \"stack\" multiple submissions, which is another form of ensembling:\n\n```python\nimport pandas as pd\n\n# Filenames of your submissions you want to ensemble\nfiles = ['submission-01.csv', 'submission-02.csv', 'submission-03.csv']\n\ntarget = 'status_group'\nsubmissions = (pd.read_csv(file)[[target]] for file in files)\nensemble = pd.concat(submissions, axis='columns')\nmajority_vote = ensemble.mode(axis='columns')[0]\n\nsample_submission = pd.read_csv('sample_submission.csv')\nsubmission = sample_submission.copy()\nsubmission[target] = majority_vote\nsubmission.to_csv('my-ultimate-ensemble-submission.csv', index=False)\n```", "_____no_output_____" ] ], [ [ "%%capture\nimport sys\n\n# If you're on Colab:\nif 'google.colab' in sys.modules:\n DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'\n !pip install category_encoders==2.*\n\n# If you're working locally:\nelse:\n DATA_PATH = '../data/'", "_____no_output_____" ], [ "import pandas as pd\n\n# Merge 
train_features.csv & train_labels.csv\ntrain = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'), \n pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))\n\n# Read test_features.csv & sample_submission.csv\ntest = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')\nsample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Split train into train & val\ntrain, val = train_test_split(train, train_size=0.80, test_size=0.20, \n stratify=train['status_group'], random_state=42)\n\n\ndef wrangle(X):\n \"\"\"Wrangle train, validate, and test sets in the same way\"\"\"\n \n # Prevent SettingWithCopyWarning\n X = X.copy()\n \n # About 3% of the time, latitude has small values near zero,\n # outside Tanzania, so we'll treat these values like zero.\n X['latitude'] = X['latitude'].replace(-2e-08, 0)\n \n # When columns have zeros and shouldn't, they are like null values.\n # So we will replace the zeros with nulls, and impute missing values later.\n # Also create a \"missing indicator\" column, because the fact that\n # values are missing may be a predictive signal.\n cols_with_zeros = ['longitude', 'latitude', 'construction_year', \n 'gps_height', 'population']\n for col in cols_with_zeros:\n X[col] = X[col].replace(0, np.nan)\n X[col+'_MISSING'] = X[col].isnull()\n \n # Drop duplicate columns\n duplicates = ['quantity_group', 'payment_type']\n X = X.drop(columns=duplicates)\n \n # Drop recorded_by (never varies) and id (always varies, random)\n unusable_variance = ['recorded_by', 'id']\n X = X.drop(columns=unusable_variance)\n \n # Convert date_recorded to datetime\n X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)\n \n # Extract components from date_recorded, then drop the original column\n X['year_recorded'] = X['date_recorded'].dt.year\n X['month_recorded'] = 
X['date_recorded'].dt.month\n X['day_recorded'] = X['date_recorded'].dt.day\n X = X.drop(columns='date_recorded')\n \n # Engineer feature: how many years from construction_year to date_recorded\n X['years'] = X['year_recorded'] - X['construction_year']\n X['years_MISSING'] = X['years'].isnull()\n \n # return the wrangled dataframe\n return X\n\ntrain = wrangle(train)\nval = wrangle(val)\ntest = wrangle(test)", "_____no_output_____" ], [ "# The status_group column is the target\ntarget = 'status_group'\n\n# Get a dataframe with all train columns except the target\ntrain_features = train.drop(columns=[target])\n\n# Get a list of the numeric features\nnumeric_features = train_features.select_dtypes(include='number').columns.tolist()\n\n# Get a series with the cardinality of the nonnumeric features\ncardinality = train_features.select_dtypes(exclude='number').nunique()\n\n# Get a list of all categorical features with cardinality <= 50\ncategorical_features = cardinality[cardinality <= 50].index.tolist()\n\n# Combine the lists \nfeatures = numeric_features + categorical_features", "_____no_output_____" ], [ "# Arrange data into X features matrix and y target vector \nX_train = train[features]\ny_train = train[target]\nX_val = val[features]\ny_val = val[target]\nX_test = test[features]", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "Use scikit-learn for hyperparameter optimization with RandomizedSearchCV.", "_____no_output_____" ] ], [ [ "import category_encoders as ce\nimport numpy as np\n\nfrom sklearn.feature_selection import f_regression, SelectKBest\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV", "_____no_output_____" ], [ 
"encoder= ce.OneHotEncoder(use_cat_names=True)\nx_train_encoded = encoder.fit_transform(X_train)", "_____no_output_____" ], [ "x_train_encoded.sample(10)", "_____no_output_____" ], [ "y_train.value_counts()", "_____no_output_____" ], [ "y_train_encoded = y_train.replace({'functional': 1, 'non functional': 2, 'functional needs repair':3})", "_____no_output_____" ], [ "y_train_encoded.value_counts()", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "\npipeline = make_pipeline(\n StandardScaler(),\n \n SimpleImputer(), \n RandomForestClassifier()\n)\n\nparam_distributions = {\n 'simpleimputer__strategy': ['mean', 'median'], \n 'randomforestClassifier__max_depth': [10, 20, 30, 40], \n 'randomforestClassifier__min_samples_leaf': [1,3,5]\n}\n\n\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions= param_distributions, \n n_iter=30, \n cv=3, \n scoring='neg_mean_absolute_error', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n \n)\n\nsearch.fit(x_train_encoded, y_train_encoded);", "Fitting 3 folds for each of 24 candidates, totalling 72 fits\n" ], [ "", "_____no_output_____" ], [ "from sklearn.impute import SimpleImputer\npipeline = make_pipeline(\n StandardScaler(),\n ce.OneHotEncoder(use_cat_names=True),\n SimpleImputer(), \n RandomForestClassifier()\n)\n\nparam_distributions = {\n 'simpleimputer__strategy': ['mean', 'median'], \n 'randomforestClassifier__max_depth': [10, 20, 30, 40], \n 'randomforestClassifier__min_samples_leaf': [1,3,5]\n}\n\n\nsearch = RandomizedSearchCV(\n pipeline, \n param_distributions=param_distributions, \n n_iter=30, \n cv=3, \n scoring='neg_mean_absolute_error', \n verbose=10, \n return_train_score=True, \n n_jobs=-1\n)\n\nsearch.fit(X_train, y_train);", "Fitting 3 folds for each of 24 candidates, totalling 72 fits\n" ], [ "from sklearn.metrics import classification_report, accuracy_score, precision_score, confusion_matrix", "_____no_output_____" ], [ "cv_result = cross_val_score(search,X_train,y_train, scoring 
= \"accuracy\")", "_____no_output_____" ], [ "search.fit(X_train, y_train)\nprint('Train Accuracy', search.score(X_train, y_train))\nprint('Validation Accuracy', search.score(X_val, y_val))", "_____no_output_____" ], [ "submission = test[['id']].copy()\nsubmission['status_group'] = y_pred", "_____no_output_____" ], [ "submission.to_csv('waterpumps-submission.csv', index=False)", "_____no_output_____" ], [ "!head 'waterpumps-submission.csv'", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb21d57346c2fab03fce7af017c6c21938bec44b
583,863
ipynb
Jupyter Notebook
day_1/Day_1_.ipynb
M7hesh/-10daysofMLChallenge
31c335004bfb5223a9677608eeca8e4ebba42e54
[ "Apache-2.0" ]
1
2020-03-23T05:31:29.000Z
2020-03-23T05:31:29.000Z
day_1/Day_1_.ipynb
M7hesh/-10daysofMLChallenge
31c335004bfb5223a9677608eeca8e4ebba42e54
[ "Apache-2.0" ]
1
2020-10-01T11:49:02.000Z
2020-10-16T06:47:33.000Z
day_1/Day_1_.ipynb
M7hesh/-10daysofMLChallenge
31c335004bfb5223a9677608eeca8e4ebba42e54
[ "Apache-2.0" ]
1
2020-10-01T11:32:45.000Z
2020-10-01T11:32:45.000Z
135.153472
132,148
0.802022
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport operator", "_____no_output_____" ], [ "df_Confirmed=pd.read_csv('/content/drive/My Drive/datasets/Tensorflow community challenge /Datasets /time_series_2019-ncov-Confirmed (1).csv')\ndf_Confirmed.head()", "_____no_output_____" ], [ "draft=df_Confirmed.copy()\ndf_Confirmed.keys()", "_____no_output_____" ], [ "df_Confirmed.describe()", "_____no_output_____" ], [ "key=df_Confirmed.describe().keys()\nkey", "_____no_output_____" ], [ "df_Confirmed=df_Confirmed.drop(['1/22/20', '1/23/20', '1/24/20', '1/25/20', '1/26/20',\n '1/27/20', '1/28/20', '1/29/20', '1/30/20', '1/31/20', '2/1/20',\n '2/2/20', '2/3/20', '2/4/20', '2/5/20', '2/6/20', '2/7/20', '2/8/20',\n '2/9/20', '2/10/20', '2/11/20', '2/12/20', '2/13/20', '2/14/20',\n '2/15/20', '2/16/20', '2/17/20', '2/18/20', '2/19/20', '2/20/20',\n '2/21/20', '2/22/20', '2/23/20', '2/24/20', '2/25/20', '2/26/20',\n '2/27/20', '2/28/20', '2/29/20', '3/1/20'],axis=1)\ndf_Confirmed.describe()", "_____no_output_____" ], [ "#df_Confirmed.head()", "_____no_output_____" ], [ "lastest_confirmed=df_Confirmed['3/22/20']", "_____no_output_____" ] ], [ [ "# **confirmed cases throughout the world reported**", "_____no_output_____" ] ], [ [ "df_contoury_wise=df_Confirmed.sort_values(by=['Country/Region'])\nunique_country_list=list(df_contoury_wise['Country/Region'].unique())", "_____no_output_____" ], [ "confirmed_country_list=[]\nno_cases=[]\nfor i in unique_country_list:\n cases = lastest_confirmed[df_Confirmed['Country/Region']==i].sum()\n if cases>0:\n confirmed_country_list.append(cases)\n else:\n no_cases.append(i)\n\nfor i in no_cases:\n unique_country_list.remove(i)\n\nunique_countries = [k for k, v in sorted(zip(unique_country_list, confirmed_country_list), key=operator.itemgetter(1), reverse=True)]\nfor i in range(len(unique_countries)):\n confirmed_country_list[i] = 
lastest_confirmed[df_Confirmed['Country/Region']==unique_countries[i]].sum()", "_____no_output_____" ], [ "for i in range(len(unique_countries)):\n print(f'{unique_countries[i]}: {confirmed_country_list[i]} cases')", "China: 81397 cases\nItaly: 59138 cases\nUS: 33272 cases\nSpain: 28768 cases\nGermany: 24873 cases\nIran: 21638 cases\nFrance: 16176 cases\nKorea, South: 8897 cases\nSwitzerland: 7245 cases\nUnited Kingdom: 5741 cases\nNetherlands: 4216 cases\nBelgium: 3401 cases\nAustria: 3244 cases\nNorway: 2383 cases\nSweden: 1934 cases\nPortugal: 1600 cases\nBrazil: 1593 cases\nDenmark: 1514 cases\nCanada: 1465 cases\nAustralia: 1314 cases\nMalaysia: 1306 cases\nTurkey: 1236 cases\nCzechia: 1120 cases\nJapan: 1086 cases\nIsrael: 1071 cases\nIreland: 906 cases\nLuxembourg: 798 cases\nEcuador: 789 cases\nPakistan: 776 cases\nCruise Ship: 712 cases\nPoland: 634 cases\nChile: 632 cases\nFinland: 626 cases\nGreece: 624 cases\nThailand: 599 cases\nIceland: 568 cases\nIndonesia: 514 cases\nSaudi Arabia: 511 cases\nQatar: 494 cases\nSingapore: 455 cases\nRomania: 433 cases\nSlovenia: 414 cases\nIndia: 396 cases\nPhilippines: 380 cases\nRussia: 367 cases\nPeru: 363 cases\nBahrain: 332 cases\nEgypt: 327 cases\nEstonia: 326 cases\nSouth Africa: 274 cases\nCroatia: 254 cases\nMexico: 251 cases\nLebanon: 248 cases\nPanama: 245 cases\nIraq: 233 cases\nColombia: 231 cases\nArgentina: 225 cases\nSerbia: 222 cases\nDominican Republic: 202 cases\nAlgeria: 201 cases\nArmenia: 194 cases\nKuwait: 188 cases\nBulgaria: 187 cases\nSlovakia: 185 cases\nTaiwan*: 169 cases\nSan Marino: 160 cases\nUnited Arab Emirates: 153 cases\nLatvia: 139 cases\nUruguay: 135 cases\nCosta Rica: 134 cases\nHungary: 131 cases\nLithuania: 131 cases\nBosnia and Herzegovina: 126 cases\nMorocco: 115 cases\nNorth Macedonia: 114 cases\nAndorra: 113 cases\nVietnam: 113 cases\nJordan: 112 cases\nCyprus: 95 cases\nMoldova: 94 cases\nMalta: 90 cases\nAlbania: 89 cases\nBrunei: 88 cases\nCambodia: 84 cases\nSri Lanka: 
82 cases\nBelarus: 76 cases\nBurkina Faso: 75 cases\nTunisia: 75 cases\nUkraine: 73 cases\nVenezuela: 70 cases\nSenegal: 67 cases\nNew Zealand: 66 cases\nAzerbaijan: 65 cases\nKazakhstan: 60 cases\nOman: 55 cases\nGeorgia: 54 cases\nTrinidad and Tobago: 50 cases\nUzbekistan: 43 cases\nAfghanistan: 40 cases\nCameroon: 40 cases\nLiechtenstein: 37 cases\nMartinique: 37 cases\nCuba: 35 cases\nCongo (Kinshasa): 30 cases\nNigeria: 30 cases\nBangladesh: 27 cases\nHonduras: 26 cases\nBolivia: 24 cases\nGhana: 24 cases\nMonaco: 23 cases\nParaguay: 22 cases\nMontenegro: 21 cases\nGuatemala: 19 cases\nRwanda: 19 cases\nMauritius: 18 cases\nJamaica: 16 cases\nTogo: 16 cases\nKenya: 15 cases\nBarbados: 14 cases\nCote d'Ivoire: 14 cases\nKyrgyzstan: 14 cases\nMaldives: 13 cases\nTanzania: 12 cases\nEthiopia: 11 cases\nMongolia: 10 cases\nGuyana: 7 cases\nSeychelles: 7 cases\nEquatorial Guinea: 6 cases\nGabon: 5 cases\nSuriname: 5 cases\nBahamas, The: 4 cases\nEswatini: 4 cases\nCabo Verde: 3 cases\nCentral African Republic: 3 cases\nCongo (Brazzaville): 3 cases\nEl Salvador: 3 cases\nLiberia: 3 cases\nMadagascar: 3 cases\nNamibia: 3 cases\nZambia: 3 cases\nZimbabwe: 3 cases\nAngola: 2 cases\nBenin: 2 cases\nBhutan: 2 cases\nFiji: 2 cases\nGuinea: 2 cases\nHaiti: 2 cases\nKosovo: 2 cases\nMauritania: 2 cases\nNepal: 2 cases\nNicaragua: 2 cases\nNiger: 2 cases\nSaint Lucia: 2 cases\nSudan: 2 cases\nAntigua and Barbuda: 1 cases\nCape Verde: 1 cases\nChad: 1 cases\nDjibouti: 1 cases\nDominica: 1 cases\nEast Timor: 1 cases\nEritrea: 1 cases\nGambia, The: 1 cases\nGrenada: 1 cases\nHoly See: 1 cases\nMozambique: 1 cases\nPapua New Guinea: 1 cases\nSaint Vincent and the Grenadines: 1 cases\nSomalia: 1 cases\nSyria: 1 cases\nTimor-Leste: 1 cases\nUganda: 1 cases\n" ], [ "unique_provinces = list(df_Confirmed['Province/State'].unique())\n\noutliers = ['United Kingdom', 'Denmark', 'France']\nfor i in outliers:\n unique_provinces.remove(i)", "_____no_output_____" ], [ 
"province_confirmed_cases = []\nno_cases = [] \nfor i in unique_provinces:\n cases = lastest_confirmed[df_Confirmed['Province/State']==i].sum()\n if cases > 0:\n province_confirmed_cases.append(cases)\n else:\n no_cases.append(i)\n\nfor i in no_cases:\n unique_provinces.remove(i)\n \nunique_provinces = [k for k, v in sorted(zip(unique_provinces, province_confirmed_cases), key=operator.itemgetter(1), reverse=True)]\nfor i in range(len(unique_provinces)):\n province_confirmed_cases[i] = lastest_confirmed[df_Confirmed['Province/State']==unique_provinces[i]].sum()", "_____no_output_____" ], [ "#unique_provinces", "_____no_output_____" ], [ "print('Confirmed Cases by Province/States (US, China, Australia, Canada):')\nfor i in range(len(unique_provinces)):\n print(f'{unique_provinces[i]}: {province_confirmed_cases[i]} cases')", "Confirmed Cases by Province/States (US, China, Australia, Canada):\nHubei: 67800 cases\nNew York: 15793 cases\nNetherlands: 4204 cases\nWashington: 1996 cases\nNew Jersey: 1914 cases\nCalifornia: 1642 cases\nGuangdong: 1407 cases\nHenan: 1273 cases\nZhejiang: 1237 cases\nIllinois: 1049 cases\nMichigan: 1037 cases\nHunan: 1018 cases\nAnhui: 990 cases\nJiangxi: 936 cases\nLouisiana: 837 cases\nFlorida: 830 cases\nShandong: 766 cases\nDiamond Princess: 761 cases\nMassachusetts: 646 cases\nJiangsu: 633 cases\nTexas: 627 cases\nGeorgia: 600 cases\nChongqing: 577 cases\nSichuan: 543 cases\nNew South Wales: 533 cases\nBeijing: 514 cases\nPennsylvania: 509 cases\nTennessee: 505 cases\nHeilongjiang: 484 cases\nColorado: 476 cases\nOntario: 425 cases\nBritish Columbia: 424 cases\nShanghai: 394 cases\nWisconsin: 381 cases\nOhio: 355 cases\nHebei: 319 cases\nHong Kong: 317 cases\nFujian: 307 cases\nNorth Carolina: 302 cases\nVictoria: 296 cases\nAlberta: 259 cases\nGuangxi: 254 cases\nShaanxi: 248 cases\nMaryland: 244 cases\nConnecticut: 223 cases\nQueensland: 221 cases\nVirginia: 221 cases\nQuebec: 219 cases\nMississippi: 207 cases\nIndiana: 201 
cases\nSouth Carolina: 196 cases\nNevada: 190 cases\nUtah: 181 cases\nYunnan: 176 cases\nMinnesota: 169 cases\nHainan: 168 cases\nArkansas: 165 cases\nOregon: 161 cases\nArizona: 152 cases\nGuizhou: 146 cases\nAlabama: 138 cases\nTianjin: 137 cases\nGansu: 136 cases\nShanxi: 133 cases\nLiaoning: 126 cases\nWestern Australia: 120 cases\nFaroe Islands: 115 cases\nDistrict of Columbia: 102 cases\nSouth Australia: 100 cases\nMissouri: 100 cases\nKentucky: 99 cases\nJilin: 93 cases\nIowa: 90 cases\nMaine: 89 cases\nRhode Island: 83 cases\nXinjiang: 76 cases\nInner Mongolia: 75 cases\nNingxia: 75 cases\nOklahoma: 67 cases\nNew Hampshire: 65 cases\nKansas: 64 cases\nNew Mexico: 57 cases\nGuadeloupe: 56 cases\nVermont: 52 cases\nSaskatchewan: 52 cases\nNebraska: 51 cases\nHawaii: 48 cases\nDelaware: 47 cases\nReunion: 47 cases\nGrand Princess: 43 cases\nIdaho: 42 cases\nChannel Islands: 32 cases\nMontana: 31 cases\nNorth Dakota: 28 cases\nNova Scotia: 28 cases\nGuam: 27 cases\nWyoming: 24 cases\nPuerto Rico: 23 cases\nTasmania: 22 cases\nAlaska: 21 cases\nSouth Dakota: 21 cases\nMacau: 20 cases\nAustralian Capital Territory: 19 cases\nManitoba: 19 cases\nQinghai: 18 cases\nFrench Guiana: 18 cases\nNew Brunswick: 17 cases\nGibraltar: 15 cases\nFrench Polynesia: 15 cases\nWest Virginia: 12 cases\nMayotte: 11 cases\nAruba: 8 cases\nNewfoundland and Labrador: 6 cases\nIsle of Man: 5 cases\nSt Martin: 4 cases\nGreenland: 4 cases\nNew Caledonia: 4 cases\nNorthern Territory: 3 cases\nSaint Barthelemy: 3 cases\nCuracao: 3 cases\nVirgin Islands: 3 cases\nCayman Islands: 3 cases\nPrince Edward Island: 2 cases\nBermuda: 2 cases\nTibet: 1 cases\nMontserrat: 1 cases\nSint Maarten: 1 cases\nNorthwest Territories: 1 cases\n" ], [ "nan_indices = [] \n\nfor i in range(len(unique_provinces)):\n if type(unique_provinces[i]) == float:\n nan_indices.append(i)\n\nunique_provinces = list(unique_provinces)\nprovince_confirmed_cases = list(province_confirmed_cases)\n\nfor i in nan_indices:\n 
unique_provinces.pop(i)\n province_confirmed_cases.pop(i)", "_____no_output_____" ], [ "import random\nimport matplotlib.colors as mcolors", "_____no_output_____" ], [ "'''c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries))\nplt.figure(figsize=(15,15))\nplt.pie(confirmed_country_list, colors=c)\nplt.legend(unique_countries, loc='best', bbox_to_anchor=(0.7, 0., 1, 1))\nplt.title('confirmed cases which are repored from country till the last date',size=32)\nplt.show()'''", "_____no_output_____" ] ], [ [ "# **Date wise plot**", "_____no_output_____" ] ], [ [ "dates=df_Confirmed.keys()\ndates=dates[4:]\ndates=list(dates)", "_____no_output_____" ], [ "df_Confirmed.head()", "_____no_output_____" ], [ "cases_on_dates=[]\nfor i in dates:\n sum_of_cases=df_Confirmed[i].sum()\n cases_on_dates.append(sum_of_cases)\n print(f'{i}: {sum_of_cases}')", "3/2/20: 90306\n3/3/20: 92840\n3/4/20: 95120\n3/5/20: 97882\n3/6/20: 101784\n3/7/20: 105821\n3/8/20: 109795\n3/9/20: 113561\n3/10/20: 118592\n3/11/20: 125865\n3/12/20: 128343\n3/13/20: 145193\n3/14/20: 156094\n3/15/20: 167446\n3/16/20: 181527\n3/17/20: 197142\n3/18/20: 214910\n3/19/20: 242708\n3/20/20: 272166\n3/21/20: 304524\n3/22/20: 335955\n" ], [ "c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries))", "_____no_output_____" ], [ "c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates))\nplt.figure(figsize=(15,15))\nplt.pie(cases_on_dates,colors=c)\nplt.title('confirmed cases on basis of dates',size=30)\nplt.legend(dates, loc='best')\nplt.show()", "_____no_output_____" ] ], [ [ "# **continent plot**", "_____no_output_____" ], [ "# top 10 country in which most of cases reporeted", "_____no_output_____" ], [ "\n\n```\n# confirmed_country_list,unique_countries\n```\n\n", "_____no_output_____" ] ], [ [ "visual_unique_countries = [] \nvisual_confirmed_cases = []\nothers = np.sum(confirmed_country_list[10:])\nfor i in range(len(confirmed_country_list[:10])):\n 
visual_unique_countries.append(unique_countries[i])\n visual_confirmed_cases.append(confirmed_country_list[i])\n\nvisual_unique_countries.append('Others')\nvisual_confirmed_cases.append(others)", "_____no_output_____" ], [ "c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates))\nplt.figure(figsize=(10,10))\nplt.pie(visual_confirmed_cases,colors=c)\nplt.legend(visual_unique_countries,loc='best')\nplt.title('Top 10 country in which highest no of cases has been reported',size=30)\nplt.show()", "_____no_output_____" ], [ "for i,j in enumerate(unique_countries):\n print(i,j)", "0 China\n1 Italy\n2 US\n3 Spain\n4 Germany\n5 Iran\n6 France\n7 Korea, South\n8 Switzerland\n9 United Kingdom\n10 Netherlands\n11 Belgium\n12 Austria\n13 Norway\n14 Sweden\n15 Portugal\n16 Brazil\n17 Denmark\n18 Canada\n19 Australia\n20 Malaysia\n21 Turkey\n22 Czechia\n23 Japan\n24 Israel\n25 Ireland\n26 Luxembourg\n27 Ecuador\n28 Pakistan\n29 Cruise Ship\n30 Poland\n31 Chile\n32 Finland\n33 Greece\n34 Thailand\n35 Iceland\n36 Indonesia\n37 Saudi Arabia\n38 Qatar\n39 Singapore\n40 Romania\n41 Slovenia\n42 India\n43 Philippines\n44 Russia\n45 Peru\n46 Bahrain\n47 Egypt\n48 Estonia\n49 South Africa\n50 Croatia\n51 Mexico\n52 Lebanon\n53 Panama\n54 Iraq\n55 Colombia\n56 Argentina\n57 Serbia\n58 Dominican Republic\n59 Algeria\n60 Armenia\n61 Kuwait\n62 Bulgaria\n63 Slovakia\n64 Taiwan*\n65 San Marino\n66 United Arab Emirates\n67 Latvia\n68 Uruguay\n69 Costa Rica\n70 Hungary\n71 Lithuania\n72 Bosnia and Herzegovina\n73 Morocco\n74 North Macedonia\n75 Andorra\n76 Vietnam\n77 Jordan\n78 Cyprus\n79 Moldova\n80 Malta\n81 Albania\n82 Brunei\n83 Cambodia\n84 Sri Lanka\n85 Belarus\n86 Burkina Faso\n87 Tunisia\n88 Ukraine\n89 Venezuela\n90 Senegal\n91 New Zealand\n92 Azerbaijan\n93 Kazakhstan\n94 Oman\n95 Georgia\n96 Trinidad and Tobago\n97 Uzbekistan\n98 Afghanistan\n99 Cameroon\n100 Liechtenstein\n101 Martinique\n102 Cuba\n103 Congo (Kinshasa)\n104 Nigeria\n105 Bangladesh\n106 Honduras\n107 
Bolivia\n108 Ghana\n109 Monaco\n110 Paraguay\n111 Montenegro\n112 Guatemala\n113 Rwanda\n114 Mauritius\n115 Jamaica\n116 Togo\n117 Kenya\n118 Barbados\n119 Cote d'Ivoire\n120 Kyrgyzstan\n121 Maldives\n122 Tanzania\n123 Ethiopia\n124 Mongolia\n125 Guyana\n126 Seychelles\n127 Equatorial Guinea\n128 Gabon\n129 Suriname\n130 Bahamas, The\n131 Eswatini\n132 Cabo Verde\n133 Central African Republic\n134 Congo (Brazzaville)\n135 El Salvador\n136 Liberia\n137 Madagascar\n138 Namibia\n139 Zambia\n140 Zimbabwe\n141 Angola\n142 Benin\n143 Bhutan\n144 Fiji\n145 Guinea\n146 Haiti\n147 Kosovo\n148 Mauritania\n149 Nepal\n150 Nicaragua\n151 Niger\n152 Saint Lucia\n153 Sudan\n154 Antigua and Barbuda\n155 Cape Verde\n156 Chad\n157 Djibouti\n158 Dominica\n159 East Timor\n160 Eritrea\n161 Gambia, The\n162 Grenada\n163 Holy See\n164 Mozambique\n165 Papua New Guinea\n166 Saint Vincent and the Grenadines\n167 Somalia\n168 Syria\n169 Timor-Leste\n170 Uganda\n" ], [ "for i in range(len(unique_countries)):\n print(f'{unique_countries[i]}: {confirmed_country_list[i]} ')", "China: 81397 \nItaly: 59138 \nUS: 33272 \nSpain: 28768 \nGermany: 24873 \nIran: 21638 \nFrance: 16176 \nKorea, South: 8897 \nSwitzerland: 7245 \nUnited Kingdom: 5741 \nNetherlands: 4216 \nBelgium: 3401 \nAustria: 3244 \nNorway: 2383 \nSweden: 1934 \nPortugal: 1600 \nBrazil: 1593 \nDenmark: 1514 \nCanada: 1465 \nAustralia: 1314 \nMalaysia: 1306 \nTurkey: 1236 \nCzechia: 1120 \nJapan: 1086 \nIsrael: 1071 \nIreland: 906 \nLuxembourg: 798 \nEcuador: 789 \nPakistan: 776 \nCruise Ship: 712 \nPoland: 634 \nChile: 632 \nFinland: 626 \nGreece: 624 \nThailand: 599 \nIceland: 568 \nIndonesia: 514 \nSaudi Arabia: 511 \nQatar: 494 \nSingapore: 455 \nRomania: 433 \nSlovenia: 414 \nIndia: 396 \nPhilippines: 380 \nRussia: 367 \nPeru: 363 \nBahrain: 332 \nEgypt: 327 \nEstonia: 326 \nSouth Africa: 274 \nCroatia: 254 \nMexico: 251 \nLebanon: 248 \nPanama: 245 \nIraq: 233 \nColombia: 231 \nArgentina: 225 \nSerbia: 222 \nDominican Republic: 
202 \nAlgeria: 201 \nArmenia: 194 \nKuwait: 188 \nBulgaria: 187 \nSlovakia: 185 \nTaiwan*: 169 \nSan Marino: 160 \nUnited Arab Emirates: 153 \nLatvia: 139 \nUruguay: 135 \nCosta Rica: 134 \nHungary: 131 \nLithuania: 131 \nBosnia and Herzegovina: 126 \nMorocco: 115 \nNorth Macedonia: 114 \nAndorra: 113 \nVietnam: 113 \nJordan: 112 \nCyprus: 95 \nMoldova: 94 \nMalta: 90 \nAlbania: 89 \nBrunei: 88 \nCambodia: 84 \nSri Lanka: 82 \nBelarus: 76 \nBurkina Faso: 75 \nTunisia: 75 \nUkraine: 73 \nVenezuela: 70 \nSenegal: 67 \nNew Zealand: 66 \nAzerbaijan: 65 \nKazakhstan: 60 \nOman: 55 \nGeorgia: 54 \nTrinidad and Tobago: 50 \nUzbekistan: 43 \nAfghanistan: 40 \nCameroon: 40 \nLiechtenstein: 37 \nMartinique: 37 \nCuba: 35 \nCongo (Kinshasa): 30 \nNigeria: 30 \nBangladesh: 27 \nHonduras: 26 \nBolivia: 24 \nGhana: 24 \nMonaco: 23 \nParaguay: 22 \nMontenegro: 21 \nGuatemala: 19 \nRwanda: 19 \nMauritius: 18 \nJamaica: 16 \nTogo: 16 \nKenya: 15 \nBarbados: 14 \nCote d'Ivoire: 14 \nKyrgyzstan: 14 \nMaldives: 13 \nTanzania: 12 \nEthiopia: 11 \nMongolia: 10 \nGuyana: 7 \nSeychelles: 7 \nEquatorial Guinea: 6 \nGabon: 5 \nSuriname: 5 \nBahamas, The: 4 \nEswatini: 4 \nCabo Verde: 3 \nCentral African Republic: 3 \nCongo (Brazzaville): 3 \nEl Salvador: 3 \nLiberia: 3 \nMadagascar: 3 \nNamibia: 3 \nZambia: 3 \nZimbabwe: 3 \nAngola: 2 \nBenin: 2 \nBhutan: 2 \nFiji: 2 \nGuinea: 2 \nHaiti: 2 \nKosovo: 2 \nMauritania: 2 \nNepal: 2 \nNicaragua: 2 \nNiger: 2 \nSaint Lucia: 2 \nSudan: 2 \nAntigua and Barbuda: 1 \nCape Verde: 1 \nChad: 1 \nDjibouti: 1 \nDominica: 1 \nEast Timor: 1 \nEritrea: 1 \nGambia, The: 1 \nGrenada: 1 \nHoly See: 1 \nMozambique: 1 \nPapua New Guinea: 1 \nSaint Vincent and the Grenadines: 1 \nSomalia: 1 \nSyria: 1 \nTimor-Leste: 1 \nUganda: 1 \n" ] ], [ [ "102", "_____no_output_____" ] ], [ [ "visual_unique_countries_all = [] \nvisual_confirmed_cases_all = []\nothers = np.sum(confirmed_country_list[20:])\nfor i in range(len(confirmed_country_list[:20])):\n 
visual_unique_countries_all.append(unique_countries[i])\n visual_confirmed_cases_all.append(confirmed_country_list[i])\n\nvisual_unique_countries_all.append('Others')\nvisual_confirmed_cases_all.append(others)", "_____no_output_____" ], [ "c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates))\nplt.figure(figsize=(10,10))\nplt.pie(visual_confirmed_cases_all,colors=c)\nplt.legend(visual_unique_countries_all,loc='best',bbox_to_anchor=(0.7, 0., 1, 1))\nplt.title('All country in which cases has been reported',size=30)\nplt.show()", "_____no_output_____" ], [ "visual_unique_provinces = [] \nvisual_confirmed_cases2 = []\nothers = np.sum(province_confirmed_cases[10:])\nfor i in range(len(province_confirmed_cases[:10])):\n visual_unique_provinces.append(unique_provinces[i])\n visual_confirmed_cases2.append(province_confirmed_cases[i])\n\nvisual_unique_provinces.append('Others')\nvisual_confirmed_cases2.append(others)", "_____no_output_____" ], [ "c = random.choices(list(mcolors.CSS4_COLORS.values()),k = len(unique_countries))\nplt.figure(figsize=(10,10))\nplt.title(' Confirmed Cases per Province',size=32)\nplt.pie(visual_confirmed_cases2, colors=c)\nplt.legend(visual_unique_provinces, loc='best')\nplt.show()", "_____no_output_____" ] ], [ [ "# so we are going to plot these top 10 countrys", "_____no_output_____" ] ], [ [ "c=random.choices(list(mcolors.CSS4_COLORS.values()),k=len(dates))\nplt.figure(figsize=(10,10))\nplt.pie(confirmed_country_list[:10],colors=c)\nplt.legend(unique_countries[:10],loc='best')\nplt.title('Top 10 country in which highest no of cases has been reported',size=30)\nplt.show()", "_____no_output_____" ], [ "by_Continent=['Europe','Asia','Africa','North america','South america','Australia']", "_____no_output_____" ], [ "#s=list(map(str,input().split(\" \")))", "_____no_output_____" ], [ "#Australia=s", "_____no_output_____" ], [ "'''Australia.append('Marshall Islands')\nAustralia.append('Solomon Islands')\nAustralia.append('New 
Zealand')\nAustralia.append('Papua New Guinea')'''", "_____no_output_____" ], [ "'''South_America=s\nSouth_America'''", "_____no_output_____" ], [ "'''North_America=[]\nNorth_America=s\nNorth_America'''", "_____no_output_____" ], [ "'''North_America.append('Antigua and Barbuda')\nNorth_America.append('El Salvador')\nNorth_America.append('Dominican Republic')\nNorth_America.append('Saint Kitts and Nevis')\nNorth_America.append('Saint Lucia')\nNorth_America.append('Saint Vincent and the Grenadines')\nNorth_America.append('United States of America')\nNorth_America.append('Trinidad and Tobago')'''", "_____no_output_____" ], [ "#North_America", "_____no_output_____" ], [ "'''Africa.append('Burkina Faso')'''", "_____no_output_____" ], [ "'''Africa.append('Sierra Leone')\nAfrica.append('South Africa')\nAfrica.append(\"Cote d'Ivoire\")\nAfrica.append('Central African Republic')\nAfrica.append('Equatorial Guinea')'''", "_____no_output_____" ], [ "'''Africa'''", "_____no_output_____" ], [ "'''Asia.append('United Arab Emirates')\nAsia'''", "_____no_output_____" ], [ "'''europe_provigences=['Albania',\n'Andorra',\n'Armenia',\n'Austria',\n'Azerbaijan','Belarus',\n'Belgium',\n'Bosnia and Herzegovina',\n'Bulgaria',\n'Croatia',\n'Cyprus',\n'Czechia',\n'Denmark',\n'Estonia',\n'Finland',\n'France',\n'Georgia',\n'Germany',\n'Greece',\n'Hungary',\n'Iceland',\n'Ireland',\n'Italy',\n'Kazakhstan',\n'Kosovo',\n'Latvia',\n'Liechtenstein',\n'Lithuania',\n'Luxembourg',\n'Malta',\n'Moldova,'\n'Monaco',\n'Montenegro',\n'Netherlands',\n'North Macedonia',\n'Norway',\n'Poland',\n'Portugal',\n'Romania',\n'Russia',\n'San Marino',\n'Serbia',\n'Slovakia',\n'Slovenia',\n'Spain',\n'Sweden',\n'Switzerland',\n'Turkey',\n'Ukraine',\n'United Kingdom',\n'Vatican City' ,\n'UK']'''", "_____no_output_____" ], [ "#continent_df=pd.DataFrame()", "_____no_output_____" ], [ "#continent_df['europe_provigences']=europe_provigences", "_____no_output_____" ], [ 
"#continent_df['Asia']=pd.Series(Asia)\n#continent_df['Africa']=pd.Series(Africa)\n#continent_df['North_America']=pd.Series(North_America)", "_____no_output_____" ], [ "#continent_df['South_America']=pd.Series(South_America)", "_____no_output_____" ], [ "#continent_df['Australia']=pd.Series(Australia)", "_____no_output_____" ], [ "#continent_df.to_csv('/content/continent_df.csv')", "_____no_output_____" ], [ "continent_df=pd.read_csv('/content/drive/My Drive/datasets/Tensorflow community challenge /Datasets /continent_df.csv')\ncontinent_df.pop('Unnamed: 0')", "_____no_output_____" ], [ "europe=continent_df['europe_provigences'].values\nAsia=continent_df['Asia'].values\nAfrica=continent_df['Africa'].values\nNorth_America=continent_df['North_America'].values\nSouth_America=continent_df['South_America'].values\nAustralia=continent_df['Australia'].values", "_____no_output_____" ], [ "South_America", "_____no_output_____" ], [ "confirmed_country_list,unique_countries", "_____no_output_____" ], [ "europe_total_cases=[]\nSouth_America_total_cases=[]\nAsia_total_cases=[]\nAfrica_total_cases=[]\nNorth_America_total_cases=[]\nAustralia_total_cases=[]\nfor i in range(len(unique_countries)):\n if unique_countries[i] in europe:\n europe_total_cases.append(confirmed_country_list[i])\n if unique_countries[i] in Asia:\n Asia_total_cases.append(confirmed_country_list[i])\n if unique_countries[i] in Africa:\n Africa_total_cases.append(confirmed_country_list[i])\n if unique_countries[i] in North_America:\n North_America_total_cases.append(confirmed_country_list[i])\n if unique_countries[i] in Australia:\n Australia_total_cases.append(confirmed_country_list[i])\n if unique_countries[i] in South_America:\n South_America_total_cases.append(confirmed_country_list[i])", "_____no_output_____" ], [ "sum(europe_total_cases)", "_____no_output_____" ], [ "South_America_total_cases", "_____no_output_____" ], [ 
"sum(Asia_total_cases),sum(Africa_total_cases),sum(North_America_total_cases),sum(Australia_total_cases),sum(South_America_total_cases)", "_____no_output_____" ], [ "total_continent_through_World=[]\ntotal_continent_through_World.append(sum(europe_total_cases))\ntotal_continent_through_World.append(sum(Asia_total_cases))\ntotal_continent_through_World.append(sum(Africa_total_cases))\ntotal_continent_through_World.append(sum(North_America_total_cases))\ntotal_continent_through_World.append(sum(Australia_total_cases))\ntotal_continent_through_World.append(sum(South_America_total_cases))", "_____no_output_____" ], [ "total_continent_through_World_unique=[]\ntotal_continent_through_World_unique.append('Europe')\ntotal_continent_through_World_unique.append('Asia')\ntotal_continent_through_World_unique.append('Africa')\ntotal_continent_through_World_unique.append('North_America')\ntotal_continent_through_World_unique.append('Australia')\ntotal_continent_through_World_unique.append('South_America')", "_____no_output_____" ], [ "total_continent_through_World", "_____no_output_____" ], [ "y_pos = np.arange(len(total_continent_through_World_unique))", "_____no_output_____" ], [ "plt.figure(figsize=(10,10))\nplt.bar(y_pos,total_continent_through_World)\nplt.title('continent wise plot for date 3/22/20')\nplt.xlabel('continent')\nplt.ylabel('no of peoples')\nplt.ylim(0,180000)\nplt.xticks(y_pos, total_continent_through_World_unique)\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb21de61b2da975e272e15fc74c9642a39575dfb
24,623
ipynb
Jupyter Notebook
HarmonicOscillator.ipynb
shumway/srt_bootcamp
c0b1cdc5f4fd57ac4f120e975842ea6bab2fa64b
[ "MIT" ]
2
2015-07-12T23:21:25.000Z
2015-07-15T04:51:37.000Z
HarmonicOscillator.ipynb
shumway/srt_bootcamp
c0b1cdc5f4fd57ac4f120e975842ea6bab2fa64b
[ "MIT" ]
null
null
null
HarmonicOscillator.ipynb
shumway/srt_bootcamp
c0b1cdc5f4fd57ac4f120e975842ea6bab2fa64b
[ "MIT" ]
null
null
null
42.748264
5,414
0.691873
[ [ [ "# Introduction\n\nA mass on a spring experiences a force described by Hookes law.\nFor a displacment $x$, the force is\n$$F=-kx,$$\nwhere $k$ is the spring constant with units of N/m.\n\nThe equation of motion is\n$$ F = ma $$\nor \n$$ -k x = m a .$$\n\nBecause acceleration is the second derivative of displacment, this is\na differential equation,\n$$ \\frac{d^2}{dt^2} = -\\frac{k}{m} x.$$\n\nThe solution to this equation is harmonic motion, for example\n$$ x(t) = A\\sin\\omega t,$$\nwhere $A$ is some amplitude and $\\omega = \\sqrt{k/m}$.\nThis can be verified by plugging the solution into the differential equation.\n\nThe angular frequency $\\omega$ is related to the frequency $f$ and the period $T$ by\n$$f = \\omega/2\\pi$$ and $$T=2\\pi/\\omega$$\n\nWe can illustrate this rather trivial case with an interacive plot.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nfrom IPython.html import widgets\n\ndef make_plot(t):\n fig, ax = plt.subplots()\n x,y = 0,0\n plt.plot(x, y, 'k.')\n plt.plot(x + 0.3 * t, y, 'bo')\n plt.xlim(-1,1)\n plt.ylim(-1,1)\n\nwidgets.interact(make_plot, t=(-1,1,0.1))\n", "_____no_output_____" ] ], [ [ "We want to generalize this result to several massess connected by several springs.", "_____no_output_____" ], [ "# The spring constant as a second derivative of potential\n\nThe force related to poential energy by\n$$ F = -\\frac{d}{dx}V(x).$$\nThs equation comes directly from the definition that work is force times distance.\n\nIntegrating this, we find the potential energy of a mass on a spring,\n$$ V(x) = \\frac{1}{2}kx^2. 
$$\nIn fact, the spring contant can be defined to be the second derivative of the potential,\n$$ k = \\frac{d^2}{dx^2} V(x).$$ We take the value of the second derivative at the minimum\nof the potential, which assumes that the oscillations are not very far from equilibrium.\n\nWe see that Hooke's law is simply\n$$F = -\\frac{d^2 V(x)}{dx^2} x, $$\nwhere the second derivative is evaluated at the minimum of the potential.\n\nFor a general potential, we can write the equation of motion as\n$$ \\frac{d^2}{dt^2} x = -\\frac{1}{m}\\frac{d^2V(x)}{dx^2} x.$$\n\nThe expression on the right hand side is known as the dynamical matrix, \nthough this is a trivial 1x1 matrix.", "_____no_output_____" ], [ "# Two masses connected by a spring\n\nNow the potential depends on two corrdinates,\n$$ V(x_1, x_2) = \\frac{1}{2} k (x_1 - x_2 - d),$$\nwhere $d$ is the equilibrium separation of the particles.\n\nNow the force on each particle depends on the positions of both of the particles,\n$$\n\\begin{pmatrix}F_1 \\\\ F_2\\end{pmatrix}\n= - \n\\begin{pmatrix}\n\\frac{\\partial^2 V}{\\partial x_1^2} &\n\\frac{\\partial^2 V}{\\partial x_1\\partial x_2} \\\\\n\\frac{\\partial^2 V}{\\partial x_1\\partial x_2} &\n\\frac{\\partial^2 V}{\\partial x_2^2} \\\\\n\\end{pmatrix}\n\\begin{pmatrix}x_1 \\\\ x_2\\end{pmatrix}\n$$\n\nFor performing the derivatives, we find\n$$\n\\begin{pmatrix}F_1 \\\\ F_2\\end{pmatrix}\n= - \n\\begin{pmatrix}\nk & -k \\\\\n-k & k \\\\\n\\end{pmatrix}\n\\begin{pmatrix}x_1 \\\\ x_2\\end{pmatrix}\n$$\n\nThe equations of motion are coupled,\n$$\n\\begin{pmatrix}\n\\frac{d^2x_1}{dt^2} \\\\\n\\frac{d^2x_2}{dt^2} \\\\\n\\end{pmatrix}\n= - \n\\begin{pmatrix}\nk/m & -k/m \\\\\n-k/m & k/m \\\\\n\\end{pmatrix}\n\\begin{pmatrix}x_1 \\\\ x_2\\end{pmatrix}\n$$\n\nTo decouple the equations, we find the eigenvalues and eigenvectors.", "_____no_output_____" ] ], [ [ "import numpy as np\na = np.array([[1, -1], [-1, 1]])\nfreq, vectors = np.linalg.eig(a)\nvectors = vectors.transpose()", 
"_____no_output_____" ] ], [ [ "The frequencies of the two modes of vibration are (in multiples of $\\sqrt{k/m}$)", "_____no_output_____" ] ], [ [ "freq", "_____no_output_____" ] ], [ [ "The first mode is a vibrational mode were the masses vibrate against each other (moving in opposite directions). This can be seen from the eigenvector.", "_____no_output_____" ] ], [ [ "vectors[0]", "_____no_output_____" ] ], [ [ "The second mode is a translation mode with zero frequency—both masses move in the same direction.", "_____no_output_____" ] ], [ [ "vectors[1]", "_____no_output_____" ] ], [ [ "We can interactively illustrate the vibrational mode.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\nfrom IPython.html import widgets\n\ndef make_plot(t):\n fig, ax = plt.subplots()\n x,y = np.array([-1,1]), np.array([0,0])\n plt.plot(x, y, 'k.')\n plt.plot(x + 0.3 * vectors[0] * t, y, 'bo')\n plt.xlim(-1.5,1.5)\n plt.ylim(-1.5,1.5)\n\nwidgets.interact(make_plot, t=(-1,1,0.1))", "_____no_output_____" ] ], [ [ "# Finding the dynamical matrix with numerical derivatives\nWe start from a function $V(x)$. 
If we want to calculate a derivative, \nwe just use the difference formula but don't take the delta too small.\nUsing $\\delta x = 10^{-6}$ is safe.\n$$\nF = -\\frac{dV(x)}{dx} \\approx\n\\frac{V(x+\\Delta x) - V(x-\\Delta x)}{2\\Delta x}\n$$\nNote that it is more accurate to do this symmetric difference formula\nthan it would be to use the usual forward derivative from calculus class.\n\nIt's easy to see this formula is just calculating the slope of the function using points near $x$.", "_____no_output_____" ] ], [ [ "def V(x):\n return 0.5 * x**2\n\ndeltax = 1e-6\n\ndef F_approx(x):\n return ( V(x + deltax) - V(x - deltax) ) / (2 * deltax)", "_____no_output_____" ], [ "[(x, F_approx(x)) for x in np.linspace(-2,2,9)]", "_____no_output_____" ] ], [ [ "Next, we can find the second derivative by using the difference formula twice.\nWe find the nice expression,\n$$\n\\frac{d^2V}{dx^2} \\approx \\frac{V(x+\\Delta x) - 2V(x) + V(x-\\Delta x)}{(\\Delta x)^2}.\n$$\n\nThis formula has the nice interpretation of comparing the value of $V(x)$ to\nthe average of points on either side. If it is equal to the average, the line\nis straight and the second derivative is zero.\nIf average of the outer values is larger than $V(x)$, then the ends curve upward,\nand the second derivative is positive.\nLikewise, if the average of the outer values is less than $V(x)$, then the ends curve downward,\nand the second derivative is negative.", "_____no_output_____" ] ], [ [ "def dV2dx2_approx(x):\n return ( V(x + deltax) - 2 * V(x) + V(x - deltax) ) / deltax**2", "_____no_output_____" ], [ "[(x, dV2dx2_approx(x)) for x in np.linspace(-2,2,9)]", "_____no_output_____" ] ], [ [ "Now we can use these derivative formulas to calcuate the dynamical matrix \nfor the two masses on one spring. 
Well use $k=1$ and $m=1$ for simplicity.", "_____no_output_____" ] ], [ [ "def V2(x1, x2):\n return 0.5 * (x1 - x2)**2", "_____no_output_____" ], [ "x1, x2 = -1, 1\nmat = np.array(\n[[(V2(x1+deltax, x2) - 2 * V2(x1,x2) + V2(x1-deltax, x2)) / deltax**2 ,\n (V2(x1+deltax, x2+deltax) - V2(x1-deltax, x2+deltax) \n - V2(x1+deltax, x2-deltax) + V2(x1+deltax, x2+deltax)) / (2*deltax)**2],\n [(V2(x1+deltax, x2+deltax) - V2(x1-deltax, x2+deltax) \n - V2(x1+deltax, x2-deltax) + V2(x1+deltax, x2+deltax)) / (2*deltax)**2,\n (V2(x1, x2+deltax) - 2 * V2(x1,x2) + V2(x1, x2-deltax)) / deltax**2 ]]\n)", "_____no_output_____" ], [ "mat", "_____no_output_____" ], [ "freq, vectors = np.linalg.eig(mat)\nvectors = vectors.transpose()", "_____no_output_____" ], [ "for f,v in zip(freq, vectors):\n print(\"freqency\", f, \", eigenvector\", v)", "freqency 1.99995575656 , eigenvector [ 0.70710678 -0.70710678]\nfreqency 0.000222044604925 , eigenvector [ 0.70710678 0.70710678]\n" ] ], [ [ "For practical calcuations, we have to automate this matrix construction for an arbitrary potential.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb21f062872ceca8b72d1d0f004cf9e13a981fdd
53,129
ipynb
Jupyter Notebook
lab4/yxu29.ipynb
topseer/APL_Great
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
[ "MIT" ]
null
null
null
lab4/yxu29.ipynb
topseer/APL_Great
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
[ "MIT" ]
null
null
null
lab4/yxu29.ipynb
topseer/APL_Great
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
[ "MIT" ]
null
null
null
38.085305
653
0.364565
[ [ [ "# Module 5 Lab - Data", "_____no_output_____" ] ], [ [ "% matplotlib inline", "_____no_output_____" ] ], [ [ "The special command above will make all the `matplotlib` images appear in the notebook.", "_____no_output_____" ], [ "## Directions\n\n**Failure to follow the directions will result in a \"0\"**\n\nThe due dates for each are indicated in the Syllabus and the course calendar. If anything is unclear, please email [email protected] the official email for the course or ask questions in the Lab discussion area on Blackboard.\n\nThe Labs also present technical material that augments the lectures and \"book\". You should read through the entire lab at the start of each module.\n\n### General Instructions\n\n1. You will be submitting your assignment to Blackboard. If there are no accompanying files, you should submit *only* your notebook and it should be named using *only* your JHED id: fsmith79.ipynb for example if your JHED id were \"fsmith79\". If the assignment requires additional files, you should name the *folder/directory* your JHED id and put all items in that folder/directory, ZIP it up (only ZIP...no other compression), and submit it to Blackboard.\n \n * do **not** use absolute paths in your notebooks. All resources should appear in the same directory as the rest of your assignments.\n * the directory **must** be named your JHED id and **only** your JHED id.\n \nThis assignment has accompanying files. You should include in your zip file:\n\n 1. [jhed_id].ipynb\n 2. hurricanes.py - your preprocessing file.\n 3. hurricanes.html - the local copy of the Wikipedia page.\n 4. hurricanes.db - the SQLite database you create.\n \n2. Data Science is as much about what you write (communicating) as the code you execute (researching). In many places, you will be required to execute code and discuss both the purpose and the result. Additionally, Data Science is about reproducibility and transparency. 
This includes good communication with your team and possibly with yourself. Therefore, you must show **all** work.\n\n3. Avail yourself of the Markdown/Codecell nature of the notebook. If you don't know about Markdown, look it up. Your notebooks should not look like ransom notes. Don't make everything bold. Clearly indicate what question you are answering.\n\n4. Submit a cleanly executed notebook. The first code cell should say `In [1]` and each successive code cell should increase by 1 throughout the notebook.", "_____no_output_____" ], [ "## Individual Submission\n\n## Getting and Storing Data\n\nThis Lab is about acquiring, cleaning and storing data as well as doing a little bit of analysis.\n\n### Basic Outline\n\n1. Using `curl` or `wget` obtain a local copy of the following web page: Atlantic Hurricane Season ( https://en.wikipedia.org/wiki/Atlantic_hurricane_season ). **include this in your submission as `hurricanes.html`**. This is important. In Spring 2016, the page was edited during the module and different people got different answers at different times. You only need to be correct with respect to your `hurricanes.html` file.\n2. Using Beautiful Soup 4 and Python, parse the HTML file into a useable dataset. **your parsing code should be in a file `hurricanes.py` and included in your submission**.\n3. Write this data set to a SQLite3 database called `hurricanes.db` **include this in your submission**.\n4. Run the requested queries against the data set. **see below** The results should be **nicely formatted**.\n\nAlthough Wikipedia has an API, I do not what you to use it for this assignment.\n\n### Details\n\nThe data is contained in many separate HTML tables. The challenge is to write a general table parsing function and then locate each table and apply the function to it. You only need to get the data from the tables starting at 1850s. Not all years have the same data. You only need to save the following columns. 
The name is parentheses is the name the column should have in the database table.\n\n- Year (`year`)\n- Number of tropical storms (`tropical_storms`)\n- Number of hurricanes (`hurricanes`)\n- Number of Major Hurricanes (`major_hurricanes`)\n- Deaths (`deaths`)\n- Damage (`damage`)\n- Notes (`notes`)\n\nNote that \"Damage\" doesn't start until 1900s and \"Notes\" was added in 1880s. \"Strongest Storm\" should be added to the Notes column (even in years that didn't have Notes) as should \"Retired Storms\". The name of the database table should be atlantic_hurricanes. The name of the table file (SQLite3 uses a file) should be hurricanes.db (who knows...you might need to add Pacific storms someday).\n\nThere are a number of parsing problems which will most likely require regular expressions. First, the Deaths column has numbers that include commas and entries that are not numbers (Unknown and None). How should you code Unknown and None so that answers are not misleading but queries are still fairly straightforward to write?\n\nSimilarly, Damages has numbers with commas, currency signs and different amount words (millions, billions). How will you normalize all of these so that a query can compare them? You may need regular expressions.\n\nAdditionally, the way that Tropical Storms are accounted for seems to change mysteriously. Looking over the data, it's not immediately apparent when the interpretation should change. 1850s, 1860s definitely but 1870s? Not sure. It could just be a coincidence that there were never more hurricanes than tropical storms which seems to be the norm but see, for example, 1975. Welcome to Data Science!\n\nYou should put your parsing code in `hurricanes.py`. None of it should be in this file. 
Imagine this file is going to be the report you give to your boss.", "_____no_output_____" ], [ "## Documentation\n\nAny time you run into a problem where you have to decide what to do--how to solve the problem or treat the values--document it here.", "_____no_output_____" ], [ "## Hurricanes.db\n\nWhat is the *function* of `hurricanes.db` in this assignment?", "_____no_output_____" ], [ "### Queries\n\nWhen you are done, you must write and execute the following queries against your database. Those queries should be run from this notebook. Find the documentation for using SQLite3 from Python (the library is already included). You should never output raw Python data structures instead, you need to output well-formatted tables. You may need to look around for a library to help you or write your own formatting code. `Pandas` is one possibility. However, I want you to use raw SQL for your queries so if you use `Pandas` use it only for the formatting of query results (don't load the data into Pandas and use Pandas/Python to query the data).\n\n**Write the most general query possible. Never assume that you are going to get only one result back (i.e, don't assume there won't be ties).** The query result should be in a nicely formatted table; don't show raw data structures to your boss or manager. \n\nAdditionally, don't just run the query. Having gotten an answer, add a textual description of the result to the question. Data Science is not about running code; code is a means to an end. The end is the communication of results. 
We never just run code in this class.", "_____no_output_____" ] ], [ [ "\"\"# imports\nimport pandas as pd\nimport sqlite3\n\nconn = sqlite3.connect(\"hurricanes.db\")\ndf = pd.read_sql_query(\"select * from Hurricanes;\", conn)\n\n##change strings to numbers \nvalues = {'tropical_storms': 0, 'hurricanes': 0, 'C': 2, 'major_hurricanes': 0}\ndf.fillna(value=values)\n\ndf[['tropical_storms','hurricanes','major_hurricanes']] = df[['tropical_storms','hurricanes','major_hurricanes']].apply(pd.to_numeric)\n\nvalues = {'tropical_storms': 0, 'hurricanes': 0, 'C': 2, 'major_hurricanes': 0}\ndf.fillna(value=values)\n\ndf.deaths = df.deaths.str.replace('+','') \ndf.deaths = df.deaths.str.replace(',','') \ndf.deaths = df.deaths.str.replace('~','') \n\ndf.damage = df.damage.str.replace(\">=\",\"\")\n\ndf.damage = df.damage.str.replace(\">\",\"\")\n\ndf.damage = df.damage.str.replace(\"$\",\"\")\n\n##ignore \"Not Know\"....\ndf.deaths = df[['deaths']].apply(pd.to_numeric, errors = \"coerce\")\n\n\n##remove invalid data\ndf = df.loc[df.Year!=\"\"]\n\ndf", "_____no_output_____" ] ], [ [ "1\\. For the 1920s, list the years by number of tropical storms, then hurricanes.", "_____no_output_____" ] ], [ [ "df.loc[df.Year.str.startswith(\"192\")][[\"Year\",\"tropical_storms\",\"hurricanes\"]]", "_____no_output_____" ] ], [ [ "2\\. What year had the most tropical storms?", "_____no_output_____" ] ], [ [ "df.sort_values('tropical_storms', ascending=0).iloc[0,]", "_____no_output_____" ] ], [ [ "3\\. What year had the most major hurricanes?", "_____no_output_____" ] ], [ [ "df.sort_values('major_hurricanes', ascending=0).iloc[0,]", "_____no_output_____" ] ], [ [ "4\\. What year had the most deaths?", "_____no_output_____" ] ], [ [ "df.sort_values('deaths', ascending=0).iloc[0,]", "_____no_output_____" ] ], [ [ "5\\. 
What year had the most damage (not inflation adjusted)?", "_____no_output_____" ] ], [ [ "#I could not find a good way to transfer billions and millions to numbers\n#My method seems cubersome, but it works \ndf.loc[df.damage.str.contains( \"282.16\")]\n\nmax_damage = 0\nmax_damage_1 = \"\"\nfor damage in df.damage:\n damage_1 = damage.replace(',','')\n damage_1 = damage_1.replace('+','')\n if \" billion\" in damage_1:\n damage_2 = float(damage_1.replace(\" billion\",\"\"))\n if damage_2>max_damage:\n max_damage = damage_2\n max_damage_1 = damage\n\nprint(\"converted value:\", max_damage, \", raw value:\",max_damage_1)\n\ndf.loc[df.damage == max_damage_1]", "converted value: 282.16 , raw value: 282.16 billion\n" ] ], [ [ "6\\. What year had the highest proportion of tropical storms turn into major hurricanes?", "_____no_output_____" ] ], [ [ "df[\"majorRatio\"] = df.major_hurricanes/df.hurricanes\n\ndf.loc[df.majorRatio>0].sort_values('majorRatio', ascending=0).iloc[0,]\n", "_____no_output_____" ] ], [ [ "## Things to think about\n\n1. What is the granularity of this data? (Are the rows the most specific observation possible?)\n2. What if this data were contained in worksheets in an Excel file. Find a Python library or libraries that work with Excel spreadsheets.\n3. Each section links to details about each hurrican season. Review each Season's page and discuss strategies for extracting the information for every hurricane.\n4. Hurricane tracking maps were recently added. How would you get and store those images?\n5. Damages are not inflation adjusted. How would you go about *enriching* your data with inflation adjusted dollars? Where should this additional data be stored and how would it be used?", "_____no_output_____" ], [ "*notes here*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb21f12849dc84336ea1cc4610374fe32d55fa88
3,647
ipynb
Jupyter Notebook
notebooks/Test new loss.ipynb
arimorcos/stn_motion_correct
ffb344baaa4afc765953d4c29692659b65dc1d41
[ "MIT" ]
null
null
null
notebooks/Test new loss.ipynb
arimorcos/stn_motion_correct
ffb344baaa4afc765953d4c29692659b65dc1d41
[ "MIT" ]
null
null
null
notebooks/Test new loss.ipynb
arimorcos/stn_motion_correct
ffb344baaa4afc765953d4c29692659b65dc1d41
[ "MIT" ]
null
null
null
18.054455
70
0.527831
[ [ [ "import sys\nsys.path.append('..')\nimport numpy as np\n\nimport networks", "_____no_output_____" ], [ "network = networks.stn()", "_____no_output_____" ], [ "test_ref = np.random.randn(32, 512, 512).astype('float32')\ntest_input = np.random.randn(32, 2, 512, 512).astype('float32')", "_____no_output_____" ], [ "network.get_shape_1(test_ref)", "_____no_output_____" ], [ "network.get_shape_2(test_ref)", "_____no_output_____" ], [ "max_shape, min_shape = network.get_max_min_shape(test_ref)\nprint max_shape\nprint min_shape", "_____no_output_____" ], [ "network.get_norm_shape(test_ref)", "_____no_output_____" ], [ "ref_norm = network.get_ref_norm(test_ref)\nprint ref_norm", "_____no_output_____" ], [ "np.allclose(ref_norm.sum(axis=1), norm_sum)", "_____no_output_____" ], [ "norm_max, norm_min = network.get_norm_max_min(test_ref)\nprint np.unique(norm_max)\nprint np.unique(norm_min)", "_____no_output_____" ], [ "norm_sum, part_sum = network.new_sum(test_ref)\nprint norm_sum\nprint part_sum", "_____no_output_____" ], [ "test_ref_reshape = np.reshape(test_ref, (32, -1))\ntest_ref_reshape.shape", "_____no_output_____" ], [ "test_ref_reshape.max(axis=1).shape", "_____no_output_____" ], [ "network.get_cost(test_input, test_ref)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb21f13143b8408218acbba49cd57691517bccd3
130,045
ipynb
Jupyter Notebook
notebooks/Z1. Appendix 1 Plotting code snippets .ipynb
nborwankar/opendatasci
f093015bb97463fbe4964bc5e3af7a4e5f435211
[ "BSD-2-Clause" ]
21
2015-02-16T18:14:20.000Z
2021-04-15T19:27:39.000Z
notebooks/Z1. Appendix 1 Plotting code snippets .ipynb
orsushiva/opendatasci
f093015bb97463fbe4964bc5e3af7a4e5f435211
[ "BSD-2-Clause" ]
null
null
null
notebooks/Z1. Appendix 1 Plotting code snippets .ipynb
orsushiva/opendatasci
f093015bb97463fbe4964bc5e3af7a4e5f435211
[ "BSD-2-Clause" ]
18
2015-01-17T00:42:33.000Z
2020-12-11T01:10:22.000Z
440.830508
29,422
0.927733
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb21f4605628f5e841d851ab5796788f352c36f0
113,866
ipynb
Jupyter Notebook
DQN_variants/DQN/DQN_cartpole.ipynb
CAVED123/DPPO
666d54fb95ce6219771a3747bcae29eb88dd8e4b
[ "MIT" ]
1
2020-12-01T13:23:47.000Z
2020-12-01T13:23:47.000Z
DQN_variants/DQN/DQN_cartpole.ipynb
CAVED123/DPPO
666d54fb95ce6219771a3747bcae29eb88dd8e4b
[ "MIT" ]
null
null
null
DQN_variants/DQN/DQN_cartpole.ipynb
CAVED123/DPPO
666d54fb95ce6219771a3747bcae29eb88dd8e4b
[ "MIT" ]
null
null
null
201.890071
45,782
0.852379
[ [ [ "A **Deep Q Network** implementation in tensorflow with target network & random\nexperience replay. The code is tested with Gym's discrete action space\nenvironment, CartPole-v0 on Colab.\n\n---\n\n## Notations:\n\nModel network = $Q_{\\theta}$\n\nModel parameter = $\\theta$\n\nModel network Q value = $Q_{\\theta}$ (s, a)\n\nTarget network = $Q_{\\phi}$\n\nTarget parameter = $\\phi$\n\nTarget network Q value = $Q_{\\phi}$ ($s^{'}$, $a^{'}$)\n\n---\n\n## Equations:\n\nTD target = r (s, a) $+$ $\\gamma$ $max_{a}$ $Q_{\\phi}$ $s^{'}$, $a^{'}$)\n\nTD error = (TD target) $-$ (Model network Q value)\n= [r (s, a) $+$ $\\gamma$ $max_{a^{'}}$ $Q_{\\phi}$ ($s^{'}$, $a^{'}$)] $-$ $Q_{\\theta}$ (s, a)\n\n---\n\n## Key implementation details:\n\nUpdate target parameter $\\phi$ with model parameter $\\theta$.\nCopy $\\theta$ to $\\phi$ with *either* soft or hard parameter update.\n\nHard parameter update:\n\n```\nwith tf.variable_scope('hard_replace'):\n self.target_replace_hard = [t.assign(m) for t, m in zip(self.target_net_params, self.model_net_params)] \n```\n\n```\n# hard params replacement\nif self.learn_step % self.tau_step == 0:\n self.sess.run(self.target_replace_hard) \nself.learn_step += 1\n```\n\nSoft parameter update: polyak $\\cdot$ $\\theta$ + (1 $-$ polyak) $\\cdot$ $\\phi$\n\n```\nwith tf.variable_scope('soft_replace'): \n self.target_replace_soft = [t.assign(self.polyak * m + (1 - self.polyak) * t)\n for t, m in zip(self.target_net_params, self.model_net_params)] \n```\n\nStop TD target from contributing to gradient computation:\n\n```\n# exclude td_target in gradient computation\ntd_target = tf.stop_gradient(td_target)\n```\n\n---\n\n## References:\n\n[Human-level control through deep reinforcement learning\n(Mnih et al., 2015)](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf)\n\n---\n\n<br>\n", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport gym\nimport numpy as np\nfrom matplotlib import pyplot as plt", 
"_____no_output_____" ], [ "# random sampling for learning from experience replay\nclass Exp():\n def __init__(self, obs_size, max_size):\n self.obs_size = obs_size\n self.num_obs = 0\n self.max_size = max_size\n self.mem_full = False\n \n # memory structure that stores samples from observations\n self.mem = {'s' : np.zeros(self.max_size * self.obs_size, dtype=np.float32).reshape(self.max_size,self.obs_size),\n 'a' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1),\n 'r' : np.zeros(self.max_size * 1).reshape(self.max_size,1),\n 'done' : np.zeros(self.max_size * 1, dtype=np.int32).reshape(self.max_size,1)}\n\n # stores sample obervation at each time step in experience memory\n def store(self, s, a, r, done):\n i = self.num_obs % self.max_size\n \n self.mem['s'][i,:] = s\n self.mem['a'][i,:] = a\n self.mem['r'][i,:] = r\n self.mem['done'][i,:] = done\n \n self.num_obs += 1\n \n if self.num_obs == self.max_size:\n self.num_obs = 0 # reset number of observation\n self.mem_full = True\n\n # returns a minibatch of experience\n def minibatch(self, minibatch_size):\n if self.mem_full == False:\n max_i = min(self.num_obs, self.max_size) - 1\n else:\n max_i = self.max_size - 1\n \n # randomly sample a minibatch of indexes\n sampled_i = np.random.randint(max_i, size=minibatch_size) \n \n s = self.mem['s'][sampled_i,:].reshape(minibatch_size, self.obs_size)\n a = self.mem['a'][sampled_i].reshape(minibatch_size)\n r = self.mem['r'][sampled_i].reshape((minibatch_size,1))\n s_next = self.mem['s'][sampled_i + 1,:].reshape(minibatch_size, self.obs_size)\n done = self.mem['done'][sampled_i].reshape((minibatch_size,1))\n \n return (s, a, r, s_next, done)", "_____no_output_____" ], [ "# Evaluates behavior policy while improving target policy\nclass DQN_agent():\n def __init__(self, num_actions, obs_size, nhidden,\n epoch, \n epsilon, gamma, learning_rate, \n replace, polyak, tau_step,\n mem_size, minibatch_size):\n super(DQN_agent, self).__init__()\n \n 
self.actions = range(num_actions)\n self.num_actions = num_actions\n self.obs_size = obs_size # number of features\n self.nhidden = nhidden # hidden nodes\n \n self.epoch = epoch # for epsilon decay & to decide when to start training\n\n self.epsilon = epsilon # for eploration\n self.gamma = gamma # discount factor\n self.learning_rate = learning_rate # learning rate alpha\n \n # for params replacement\n self.replace = replace # type of replacement\n self.polyak = polyak # for soft replacement\n self.tau_step = tau_step # for hard replacement\n self.learn_step = 0 # steps after learning\n \n # for Experience replay\n self.mem = Exp(self.obs_size, mem_size) # memory that holds experiences\n self.minibatch_size = minibatch_size\n \n self.step = 0 # each step in a episode\n \n # for tensorflow ops\n self.built_graph() \n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.sess.run(self.target_replace_hard)\n \n self.cum_loss_per_episode = 0 # for charting display\n \n # decay epsilon after each epoch \n def epsilon_decay(self):\n if self.step % self.epoch == 0:\n self.epsilon = max(.01, self.epsilon * .95)\n \n # epsilon-greedy behaviour policy for action selection \n def act(self, state):\n if np.random.random() < self.epsilon:\n i = np.random.randint(0,len(self.actions))\n else: \n # get Q(s,a) from model network\n Q_val = self.sess.run(self.model_Q_val, feed_dict={self.s: np.reshape(state, (1,state.shape[0]))})\n # get index of largest Q(s,a)\n i = np.argmax(Q_val)\n \n action = self.actions[i] \n \n self.step += 1 \n self.epsilon_decay()\n \n return action \n \n def learn(self, s, a, r, done):\n # stores observation in memory as experience at each time step \n self.mem.store(s, a, r, done)\n # starts training a minibatch from experience after 1st epoch\n if self.step > self.epoch:\n self.replay() # start training with experience replay\n \n def td_target(self, r, done, target_Q_val):\n # select max Q values from target network 
(greedy policy)\n max_target_Q_val = tf.reduce_max(target_Q_val, axis=1, keepdims=True)\n \n # if state = done, td_target = r\n td_target = (1.0 - tf.cast(done, tf.float32)) * tf.math.multiply(self.gamma, max_target_Q_val) + r\n \n # exclude td_target in gradient computation\n td_target = tf.stop_gradient(td_target)\n \n return td_target\n \n # select Q(s,a) from actions using e-greedy as behaviour policy from model network\n def predicted_Q_val(self, a, model_Q_val): \n # create 1D tensor of length = number of rows in a\n arr = tf.range(tf.shape(a)[0], dtype=tf.int32)\n \n # stack by column to create indices for Q(s,a) selections based on a\n indices = tf.stack([arr, a], axis=1)\n \n # select Q(s,a) using indice from model_Q_val\n Q_val = tf.gather_nd(model_Q_val, indices)\n Q_val = tf.reshape(Q_val, (self.minibatch_size, 1))\n \n return Q_val\n \n # contruct neural network\n def built_net(self, var_scope, w_init, b_init, features, num_hidden, num_output): \n with tf.variable_scope(var_scope):\n feature_layer = tf.contrib.layers.fully_connected(features, num_hidden, \n activation_fn = tf.nn.relu,\n weights_initializer = w_init,\n biases_initializer = b_init)\n Q_val = tf.contrib.layers.fully_connected(feature_layer, num_output, \n activation_fn = None,\n weights_initializer = w_init,\n biases_initializer = b_init) \n return Q_val\n \n # contruct tensorflow graph\n def built_graph(self): \n tf.reset_default_graph()\n \n self.s = tf.placeholder(tf.float32, [None,self.obs_size], name='s')\n self.a = tf.placeholder(tf.int32, [None,], name='a')\n self.r = tf.placeholder(tf.float32, [None,1], name='r')\n self.s_next = tf.placeholder(tf.float32, [None,self.obs_size], name='s_next')\n self.done = tf.placeholder(tf.int32, [None,1], name='done') \n \n # weight, bias initialization\n w_init = tf.initializers.lecun_uniform()\n b_init = tf.initializers.he_uniform(1e-4)\n \n self.model_Q_val = self.built_net('model_net', w_init, b_init, self.s, self.nhidden, self.num_actions)\n 
self.target_Q_val = self.built_net('target_net', w_init, b_init, self.s_next, self.nhidden, self.num_actions)\n\n with tf.variable_scope('td_target'):\n td_target = self.td_target(self.r, self.done, self.target_Q_val)\n with tf.variable_scope('predicted_Q_val'):\n predicted_Q_val = self.predicted_Q_val(self.a, self.model_Q_val)\n with tf.variable_scope('loss'):\n self.loss = tf.losses.huber_loss(td_target, predicted_Q_val)\n with tf.variable_scope('optimizer'):\n self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)\n \n # get network params \n with tf.variable_scope('params'):\n self.target_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')\n self.model_net_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model_net') \n \n # replace target net params with model net params\n with tf.variable_scope('hard_replace'):\n self.target_replace_hard = [t.assign(m) for t, m in zip(self.target_net_params, self.model_net_params)] \n with tf.variable_scope('soft_replace'): \n self.target_replace_soft = [t.assign(self.polyak * m + (1 - self.polyak) * t) \n for t, m in zip(self.target_net_params, self.model_net_params)] \n \n # decide soft or hard params replacement \n def replace_params(self):\n if self.replace == 'soft':\n # soft params replacement \n self.sess.run(self.target_replace_soft) \n else:\n # hard params replacement\n if self.learn_step % self.tau_step == 0:\n self.sess.run(self.target_replace_hard) \n self.learn_step += 1\n \n def replay(self): \n # select minibatch of experiences from memory for training\n (s, a, r, s_next, done) = self.mem.minibatch(self.minibatch_size)\n \n # training\n _, loss = self.sess.run([self.optimizer, self.loss], feed_dict = {self.s: s,\n self.a: a,\n self.r: r,\n self.s_next: s_next,\n self.done: done}) \n self.cum_loss_per_episode += loss\n self.replace_params() ", "_____no_output_____" ], [ "# compute stats\ndef stats(r_per_episode, R, cum_R, 
cum_R_episodes, \n cum_loss_per_episode, cum_loss, cum_loss_episodes):\n r_per_episode = np.append(r_per_episode, R) # store reward per episode\n cum_R_episodes += R\n cum_R = np.append(cum_R, cum_R_episodes) # store cumulative reward of all episodes\n\n cum_loss_episodes += cum_loss_per_episode\n cum_loss = np.append(cum_loss, cum_loss_episodes) # store cumulative loss of all episodes\n \n return (r_per_episode, cum_R_episodes, cum_R, cum_loss_episodes, cum_loss)\n\n# plot performance\ndef plot_charts(values, y_label):\n fig = plt.figure(figsize=(10,5))\n plt.title(\"DQN performance\")\n plt.xlabel(\"Episode\")\n plt.ylabel(y_label)\n plt.plot(values)\n plt.show(fig)\n \ndef display(r_per_episode, cum_R, cum_loss):\n plot_charts(r_per_episode, \"Reward\")\n plot_charts(cum_R, \"cumulative_reward\")\n plot_charts(cum_loss, \"cumulative_loss\")\n \n avg_r = np.sum(r_per_episode) / max_episodes\n print(\"avg_r\", avg_r) \n \n avg_loss = np.sum(cum_loss) / max_episodes\n print(\"avg_loss\", avg_loss) ", "_____no_output_____" ], [ "def run_episodes(env, agent, max_episodes):\n r_per_episode = np.array([0])\n cum_R = np.array([0])\n cum_loss = np.array([0])\n cum_R_episodes = 0\n cum_loss_episodes = 0\n \n # repeat each episode\n for episode_number in range(max_episodes):\n s = env.reset() # reset new episode\n done = False \n R = 0 \n \n # repeat each step\n while not done:\n # select action using behaviour policy(epsilon-greedy) from model network\n a = agent.act(s)\n # take action in environment\n next_s, r, done, _ = env.step(a)\n # agent learns\n agent.learn(s, a, r, done)\n s = next_s\n \n R += r \n\n (r_per_episode, cum_R_episodes, cum_R, cum_loss_episodes, cum_loss) = stats(r_per_episode, R, cum_R, cum_R_episodes, \n agent.cum_loss_per_episode, cum_loss, cum_loss_episodes)\n \n display(r_per_episode, cum_R, cum_loss)\n\n env.close()", "_____no_output_____" ], [ "env = gym.make('CartPole-v0') # openai gym environment\n#env = gym.make('Pong-v0') # openai gym 
environment\n\nmax_episodes = 500\nepoch = 100\n\nnum_actions = env.action_space.n # number of possible actions\nobs_size = env.observation_space.shape[0] # dimension of state space\nnhidden = 128 # number of hidden nodes\n\nepsilon = .9\ngamma = .9\nlearning_rate = .3\n\nreplace = 'soft' # params replacement type, 'soft' for soft replacement or empty string '' for hard replacement\npolyak = .001 \ntau_step = 300 \n\nmem_size = 30000\nminibatch_size = 64\n\n%matplotlib inline", "_____no_output_____" ], [ "agent = DQN_agent(num_actions, obs_size, nhidden,\n epoch, \n epsilon, gamma, learning_rate, \n replace, polyak, tau_step,\n mem_size, minibatch_size)\nrun_episodes(env, agent, max_episodes)", "\nWARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\nFor more information, please see:\n * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n * https://github.com/tensorflow/addons\nIf you depend on functionality not listed there, please file an issue.\n\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/losses/losses_impl.py:448: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.cast instead.\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb21fcaee0d41fd214165fdab76955f432f359ab
130,165
ipynb
Jupyter Notebook
tests/test.ipynb
cianoc/abjad-ext-ipython
b9a915e6eea045c4975d3f711f9f7eccccc584ed
[ "MIT" ]
3
2021-05-09T15:50:32.000Z
2021-12-19T21:38:07.000Z
tests/test.ipynb
cianoc/abjad-ext-ipython
b9a915e6eea045c4975d3f711f9f7eccccc584ed
[ "MIT" ]
9
2018-05-26T01:03:52.000Z
2021-07-22T17:59:47.000Z
tests/test.ipynb
cianoc/abjad-ext-ipython
b9a915e6eea045c4975d3f711f9f7eccccc584ed
[ "MIT" ]
3
2020-05-09T07:30:58.000Z
2021-09-25T10:39:18.000Z
570.899123
117,796
0.93678
[ [ [ "import abjad", "NOTE: The Pärt demo requires abjad-ext-tonality\n" ], [ "%load_ext abjadext.ipython", "_____no_output_____" ], [ "staff = abjad.Staff(\"c'4 d'4 e'4 f'4\")", "_____no_output_____" ], [ "abjad.show(staff)", "_____no_output_____" ], [ "abjad.graph(staff)", "_____no_output_____" ], [ "abjad.play(staff)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb21fe1073bfa0e86586469fa3082503a5df6cf7
6,778
ipynb
Jupyter Notebook
ps_gpio/kria_kv260_example/ps_gpio_kv260.ipynb
cathalmccabe/PYNQ_tutorials
d7116e2f6c8b6a75fb0ac754ba982e41ccd1ffb3
[ "BSD-3-Clause" ]
6
2019-01-01T14:52:29.000Z
2022-03-26T03:51:46.000Z
ps_gpio/kria_kv260_example/ps_gpio_kv260.ipynb
cathalmccabe/PYNQ_tutorials
d7116e2f6c8b6a75fb0ac754ba982e41ccd1ffb3
[ "BSD-3-Clause" ]
1
2021-11-01T12:50:04.000Z
2021-11-01T12:50:04.000Z
ps_gpio/kria_kv260_example/ps_gpio_kv260.ipynb
cathalmccabe/PYNQ_tutorials
d7116e2f6c8b6a75fb0ac754ba982e41ccd1ffb3
[ "BSD-3-Clause" ]
null
null
null
21.115265
210
0.519327
[ [ [ "# Using PS GPIO with PYNQ\n\n## Goal\n\nThe aim of this notebook is to show how to use the Zynq PS GPIO from PYNQ. The PS GPIO are simple wires from the PS, and don't need a controller in the programmable logic. \n\nUp to 96 input, output and tri-state PS GPIO are available via the EMIO in the Zynq Ultrascale+. They can be used to connect simple control and data signals to IP or external Inputs/Outputs in the PL. \n\n\n## Hardware \n\nThis example uses a bitstream that connects PS GPIO to the PMod on the KV260.\n\n![PS GPIO Design](./ps_gpio/ps_gpio_kv260_bd.png \"PS GPIO Design\")", "_____no_output_____" ], [ "### External Peripherals\nAn LED, a Slider switch and a Buzzer are connected via the Pmod connector and a Grove adapter. These will be used to demonstrate the PS GPIO are working.\n\n![](./images/external_peripherals.jpg)", "_____no_output_____" ], [ "### Download the tutorial overlay\n\nThe `ps_gpio_kv260.bit` and `ps_gpio_kv260.hwh` files are in the `ps_gpio` directory local to this folder. \nThe bitstream can be downloaded using the PYNQ `Overlay` class. ", "_____no_output_____" ] ], [ [ "from pynq import Overlay\nps_gpio_design = Overlay(\"./ps_gpio/ps_gpio_kv260.bit\")", "_____no_output_____" ] ], [ [ "## PYNQ GPIO class\n\nThe PYNQ GPIO class will be used to access the PS GPIO. 
", "_____no_output_____" ] ], [ [ "from pynq import GPIO", "_____no_output_____" ] ], [ [ "### GPIO help", "_____no_output_____" ], [ "### Create Python GPIO objects for the led, slider and buzzer and set the direction:", "_____no_output_____" ] ], [ [ "led = GPIO(GPIO.get_gpio_pin(6), 'out')\n\nbuzzer = GPIO(GPIO.get_gpio_pin(0), 'out')\n\nslider = GPIO(GPIO.get_gpio_pin(1), 'in')\nslider_led = GPIO(GPIO.get_gpio_pin(5), 'out')", "_____no_output_____" ], [ "# = GPIO(GPIO.get_gpio_pin(2), 'out')\n# = GPIO(GPIO.get_gpio_pin(3), 'out')\n# = GPIO(GPIO.get_gpio_pin(4), 'out')\n# = GPIO(GPIO.get_gpio_pin(7), 'out')", "_____no_output_____" ] ], [ [ "### led.write() help", "_____no_output_____" ], [ "## Test LED\n", "_____no_output_____" ], [ "Turn on the LED", "_____no_output_____" ], [ "Turn off the LED", "_____no_output_____" ], [ " ## Blinky ", "_____no_output_____" ] ], [ [ "from time import sleep\nDELAY = 0.1\n\nfor i in range(20):\n led.write(0)\n sleep(DELAY)\n led.write(1)\n sleep(DELAY)", "_____no_output_____" ] ], [ [ "### Slider\nRead from Slider", "_____no_output_____" ] ], [ [ "for i in range(50):\n sliver_value = slider.read()\n slider_led.write(sliver_value)\n led.write(sliver_value)\n sleep(DELAY)", "_____no_output_____" ] ], [ [ "### Buzzer", "_____no_output_____" ] ], [ [ "buzzer.write(1)", "_____no_output_____" ], [ "buzzer.write(0)", "_____no_output_____" ], [ "def play_sound(frequency, duration=100):\n period = 1/frequency\n timeHigh = period/2\n for i in range(0, int(duration)): #, int(timeHigh*1000)):\n buzzer.write(1)\n sleep(timeHigh)\n buzzer.write(0)\n sleep(timeHigh)", "_____no_output_____" ] ], [ [ "Alarm clock", "_____no_output_____" ] ], [ [ "for i in range(10):\n play_sound(5000)\n sleep(.1)", "_____no_output_____" ] ], [ [ "### Use an IPython Widget to control the buzzer\n\nThe following example uses an IPython *Integer Slider* to call the `play_sound()` method defined above", "_____no_output_____" ] ], [ [ "from ipywidgets import 
interact\nimport ipywidgets as widgets\n\ninteract(play_sound, frequency=widgets.IntSlider(min=500, max=10000, step=500, value=500), duration =100);", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb22013d28c655512850cc319e032fa3043c8b6c
79,792
ipynb
Jupyter Notebook
tutorial_deep_learning_basics/dnn_tf_tryout.ipynb
vimalanandans/mit-deep-learning
6cee70bb6473533dae8b59098620afb3feaf1b12
[ "MIT" ]
null
null
null
tutorial_deep_learning_basics/dnn_tf_tryout.ipynb
vimalanandans/mit-deep-learning
6cee70bb6473533dae8b59098620afb3feaf1b12
[ "MIT" ]
null
null
null
tutorial_deep_learning_basics/dnn_tf_tryout.ipynb
vimalanandans/mit-deep-learning
6cee70bb6473533dae8b59098620afb3feaf1b12
[ "MIT" ]
null
null
null
60.817073
13,072
0.6393
[ [ [ "### Basics of Tensorflow", "_____no_output_____" ] ], [ [ "import numpy as np\n\nfrom tensorflow.python.layers import base\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\n\n\n# To support both python 2 and python 3\nfrom __future__ import division, print_function, unicode_literals\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\ndef reset_graph(seed=42):\n tf.reset_default_graph()\n tf.set_random_seed(seed)\n np.random.seed(seed)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams['axes.labelsize'] = 14\nplt.rcParams['xtick.labelsize'] = 12\nplt.rcParams['ytick.labelsize'] = 12\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"tensorflow\"\n\ndef save_fig(fig_id, tight_layout=True):\n path = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID, fig_id + \".png\")\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()\n plt.savefig(path, format='png', dpi=300)", "_____no_output_____" ] ], [ [ "### Model summary for tensorflow", "_____no_output_____" ] ], [ [ "# model summary for tensorflow\nx = np.zeros((1,4,4,3))\nx_tf = tf.convert_to_tensor(x, np.float32)\nz_tf = tf.layers.conv2d(x_tf, filters=32, kernel_size=(3,3))\n\ndef model_summary():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\n\nmodel_summary()", "---------\nVariables: name (type shape) [size]\n---------\nconv2d/kernel:0 (float32_ref 3x3x3x32) [864, bytes: 3456]\nconv2d/bias:0 (float32_ref 32) [32, bytes: 128]\nconv2d_1/kernel:0 (float32_ref 3x3x3x32) [864, bytes: 3456]\nconv2d_1/bias:0 (float32_ref 32) [32, bytes: 128]\nTotal size of variables: 1792\nTotal bytes of variables: 7168\n" ] ], [ [ "### Inspired from https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/1_Introduction/basic_operations.ipynb", "_____no_output_____" ] ], [ [ "\n# 
Basic constant operations\n# The value returned by the constructor represents the output\n# of the Constant op.\na = tf.constant(2)\nb = tf.constant(3)\n\n# Launch the default graph.\nwith tf.Session() as sess:\n print (\"a: %i\" % sess.run(a), \"b: %i\" % sess.run(b))\n print (\"Addition with constants: %i\" % sess.run(a+b))\n print (\"Multiplication with constants: %i\" % sess.run(a*b))", "a: 2 b: 3\nAddition with constants: 5\nMultiplication with constants: 6\n" ] ], [ [ "https://github.com/vahidk/EffectiveTensorflow\n### Numpy version of below\nimport numpy as np\n\nx = np.random.normal(size=[10, 10])\n\ny = np.random.normal(size=[10, 10])\n\nz = np.dot(x, y)\n\nprint(z)", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nx = tf.random_normal([10, 10])\ny = tf.random_normal([10, 10])\nz = tf.matmul(x, y)\n\nsess = tf.Session()\nz_val = sess.run(z)\n\nprint(z_val)", "[[ 0.34614965 -7.3237243 0.9355222 0.7125121 4.2473693 -3.2875426\n 2.6320853 0.60445595 -1.8011832 -1.4778548 ]\n [-8.892176 2.2700188 0.84772235 0.44044608 1.9714642 0.20464115\n -1.4039882 1.1470723 0.18000975 0.34565252]\n [ 1.6059456 -4.0441747 2.264099 -2.958685 0.69468784 0.38571823\n 5.6918163 1.4194463 1.7618747 -4.2023764 ]\n [-1.2256149 2.5026336 -0.5831862 1.1683558 0.60250676 0.34096497\n -2.0679157 1.0514991 -2.714299 0.22064567]\n [-5.664401 -4.645505 -1.6683139 2.680467 0.61823153 1.5860802\n -4.2265906 2.8202052 -0.52584696 -3.7972417 ]\n [-5.008916 -3.762639 -0.66483474 3.618266 3.3554142 -5.250147\n -0.41241854 0.91815126 -4.481329 -2.478259 ]\n [ 4.498228 2.0109146 0.91723716 1.3889239 0.18919843 -7.4312754\n 3.381404 0.9045108 -6.4896193 2.0509806 ]\n [ 1.5027142 1.0791501 -0.530404 -0.5126542 1.0960672 2.2281358\n 0.20963605 0.53031576 1.7040433 2.4191246 ]\n [-5.358087 -1.2983735 -2.6722093 3.1247656 1.0554792 1.8205503\n -3.9763834 0.13570663 1.3080064 -0.42651823]\n [-8.480544 -5.7079782 3.4556909 2.3919075 3.5993748 -8.836415\n -0.9253386 2.1694067 -7.6993933 
-1.2274268 ]]\n" ], [ "tf.reset_default_graph()", "_____no_output_____" ] ], [ [ "##### Note the below doesn't explain the basics. hence skip the below for a while\nTo understand how powerful symbolic computation can be let's have a look at another example. Assume that we have samples from a curve (say f(x) = 5x^2 + 3) and we want to estimate f(x) based on these samples. We define a parametric function g(x, w) = w0 x^2 + w1 x + w2, which is a function of the input x and latent parameters w, our goal is then to find the latent parameters such that g(x, w) ≈ f(x). This can be done by minimizing the following loss function: L(w) = ∑ (f(x) - g(x, w))^2. Although there's a closed form solution for this simple problem, we opt to use a more general approach that can be applied to any arbitrary differentiable function, and that is using stochastic gradient descent. We simply compute the average gradient of L(w) with respect to w over a set of sample points and move in the opposite direction.\n\nHere's how it can be done in TensorFlow:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf\n\ntf.reset_default_graph()\n\n# Placeholders are used to feed values from python to TensorFlow ops. We define\n# two placeholders, one for input feature x, and one for output y.\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\n\n# Assuming we know that the desired function is a polynomial of 2nd degree, we\n# allocate a vector of size 3 to hold the coefficients. The variable will be\n# automatically initialized with random noise.\nw = tf.get_variable(\"w\", shape=[3, 1])\n\n# We define yhat to be our estimate of y.\nf = tf.stack([tf.square(x), x, tf.ones_like(x)], 1)\nyhat = tf.squeeze(tf.matmul(f, w), 1)\n\n# The loss is defined to be the l2 distance between our estimate of y and its\n# true value. 
We also added a shrinkage term, to ensure the resulting weights\n# would be small.\nloss = tf.nn.l2_loss(yhat - y) + 0.1 * tf.nn.l2_loss(w)\n\n# We use the Adam optimizer with learning rate set to 0.1 to minimize the loss.\ntrain_op = tf.train.AdamOptimizer(0.1).minimize(loss)\n\ndef generate_data():\n x_val = np.random.uniform(-10.0, 10.0, size=100)\n y_val = 5 * np.square(x_val) + 3\n return x_val, y_val\n\nsess = tf.Session()\n# Since we are using variables we first need to initialize them.\nsess.run(tf.global_variables_initializer())\nfor _ in range(1000):\n x_val, y_val = generate_data()\n _, loss_val = sess.run([train_op, loss], {x: x_val, y: y_val})\n #print(loss_val)\nprint(sess.run([w]))\n", "[array([[ 4.9953189e+00],\n [-5.3842890e-05],\n [ 3.2785773e+00]], dtype=float32)]\n" ] ], [ [ "# \nNormal Equation (θ = XT · X)–1 · XT · y for house market prediction", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom sklearn.datasets import fetch_california_housing\n\ntf.reset_default_graph()\nhousing = fetch_california_housing()\nm, n = housing.data.shape\nhousing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data]\nX = tf.constant(housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\nXT = tf.transpose(X)\ntheta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y)\nwith tf.Session() as sess: \n theta_value = theta.eval()\n print(theta_value)", "[[-3.7185181e+01]\n [ 4.3633747e-01]\n [ 9.3952334e-03]\n [-1.0711310e-01]\n [ 6.4479220e-01]\n [-4.0338000e-06]\n [-3.7813708e-03]\n [-4.2348403e-01]\n [-4.3721911e-01]]\n" ], [ "# Pure numpy version\nX = housing_data_plus_bias\ny = housing.target.reshape(-1, 1)\ntheta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)\n\nprint(theta_numpy)", "[[-3.69419202e+01]\n [ 4.36693293e-01]\n [ 9.43577803e-03]\n [-1.07322041e-01]\n [ 6.45065694e-01]\n [-3.97638942e-06]\n [-3.78654265e-03]\n [-4.21314378e-01]\n [-4.34513755e-01]]\n" 
] ], [ [ "## Using Batch Gradient Descent", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaled_housing_data = scaler.fit_transform(housing.data)\nscaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]\n\nprint(scaled_housing_data_plus_bias.mean(axis=0))\nprint(scaled_housing_data_plus_bias.mean(axis=1))\nprint(scaled_housing_data_plus_bias.mean())\nprint(scaled_housing_data_plus_bias.shape)", "[ 1.00000000e+00 6.60969987e-17 5.50808322e-18 6.60969987e-17\n -1.06030602e-16 -1.10161664e-17 3.44255201e-18 -1.07958431e-15\n -8.52651283e-15]\n[ 0.38915536 0.36424355 0.5116157 ... -0.06612179 -0.06360587\n 0.01359031]\n0.11111111111111005\n(20640, 9)\n" ], [ "#Gradient Descent\nreset_graph()\n\nn_epochs = 1000\nlearning_rate = 0.01\n\nX = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\ntheta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\ny_pred = tf.matmul(X, theta, name=\"predictions\")\nerror = y_pred - y\nmse = tf.reduce_mean(tf.square(error), name=\"mse\")\ngradients = 2/m * tf.matmul(tf.transpose(X), error)\ntraining_op = tf.assign(theta, theta - learning_rate * gradients)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n sess.run(training_op)\n \n best_theta = theta.eval()\nprint(best_theta)", "Epoch 0 MSE = 9.161543\nEpoch 100 MSE = 0.71450067\nEpoch 200 MSE = 0.5667049\nEpoch 300 MSE = 0.5555719\nEpoch 400 MSE = 0.5488112\nEpoch 500 MSE = 0.5436362\nEpoch 600 MSE = 0.5396294\nEpoch 700 MSE = 0.53650916\nEpoch 800 MSE = 0.5340678\nEpoch 900 MSE = 0.5321474\n[[ 2.0685523 ]\n [ 0.8874027 ]\n [ 0.14401656]\n [-0.34770885]\n [ 0.36178368]\n [ 0.00393811]\n [-0.04269556]\n [-0.66145283]\n [-0.6375278 
]]\n" ], [ "# Using autodiff\nreset_graph()\n\nn_epochs = 1000\nlearning_rate = 0.01\n\nX = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name=\"X\")\ny = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name=\"y\")\ntheta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name=\"theta\")\ny_pred = tf.matmul(X, theta, name=\"predictions\")\nerror = y_pred - y\nmse = tf.reduce_mean(tf.square(error), name=\"mse\")\ngradients = tf.gradients(mse, [theta])[0]\n\ntraining_op = tf.assign(theta, theta - learning_rate * gradients)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for epoch in range(n_epochs):\n if epoch % 100 == 0:\n print(\"Epoch\", epoch, \"MSE =\", mse.eval())\n sess.run(training_op)\n \n best_theta = theta.eval()\n\nprint(\"Best theta:\")\nprint(best_theta)", "Epoch 0 MSE = 9.161543\nEpoch 100 MSE = 0.7145006\nEpoch 200 MSE = 0.566705\nEpoch 300 MSE = 0.5555719\nEpoch 400 MSE = 0.5488112\nEpoch 500 MSE = 0.5436362\nEpoch 600 MSE = 0.5396294\nEpoch 700 MSE = 0.5365092\nEpoch 800 MSE = 0.5340678\nEpoch 900 MSE = 0.5321474\nBest theta:\n[[ 2.0685525 ]\n [ 0.8874027 ]\n [ 0.14401658]\n [-0.34770882]\n [ 0.36178368]\n [ 0.00393811]\n [-0.04269556]\n [-0.6614528 ]\n [-0.6375277 ]]\n" ] ], [ [ "## Linear Regression Example\nhttps://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/linear_regression.ipynb", "_____no_output_____" ] ], [ [ "rng = np.random\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = 1000\ndisplay_step = 50\n\n# Training Data\ntrain_X = np.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,\n 7.042,10.791,5.313,7.997,5.654,9.27,3.1])\ntrain_Y = np.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,\n 2.827,3.465,1.65,2.904,2.42,2.94,1.3])\nn_samples = train_X.shape[0]\n\n# tf Graph Input\nX = tf.placeholder(\"float\")\nY = tf.placeholder(\"float\")\n\n# Set model weights and bias random\nW = 
tf.Variable(rng.randn(), name=\"weight\")\nb = tf.Variable(rng.randn(), name=\"bias\")\n\n# Construct a linear model. y = XW + b\npred = tf.add(tf.multiply(X, W), b)\n\n# Mean squared error\ncost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)\n\n# Gradient descent\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n\n# Initialize the variables (i.e. assign their default value)\ninit = tf.global_variables_initializer()\n\n\n# Start training\nwith tf.Session() as sess:\n sess.run(init)\n\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n\n #Display logs per epoch step\n if (epoch+1) % display_step == 0:\n c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})\n print (\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(c), \\\n \"W = \", sess.run(W), \"b = \", sess.run(b))\n\n print (\"Optimization Finished!\")\n training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})\n print (\"Training cost=\", training_cost, \"W = \", sess.run(W), \"b = \", sess.run(b), '\\n')\n\n #Graphic display\n plt.plot(train_X, train_Y, 'ro', label='Original data')\n plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\n plt.legend()\n plt.show()\n \n ", "Epoch: 0050 cost= 0.126894742 W= 0.3745814 b= -0.097686656\nEpoch: 0100 cost= 0.121124744 W= 0.36714777 b= -0.044209745\nEpoch: 0150 cost= 0.116021343 W= 0.36015627 b= 0.00608666\nEpoch: 0200 cost= 0.111507520 W= 0.35358056 b= 0.05339157\nEpoch: 0250 cost= 0.107515201 W= 0.3473959 b= 0.09788313\nEpoch: 0300 cost= 0.103984140 W= 0.3415792 b= 0.1397285\nEpoch: 0350 cost= 0.100861095 W= 0.33610845 b= 0.1790852\nEpoch: 0400 cost= 0.098098941 W= 0.33096302 b= 0.21610112\nEpoch: 0450 cost= 0.095655993 W= 0.3261235 b= 0.25091574\nEpoch: 0500 cost= 0.093495443 W= 0.3215719 b= 0.28365952\nEpoch: 0550 cost= 0.091584556 W= 0.31729096 b= 0.31445622\nEpoch: 0600 cost= 0.089894608 W= 
0.31326464 b= 0.343421\nEpoch: 0650 cost= 0.088400029 W= 0.3094778 b= 0.3706633\nEpoch: 0700 cost= 0.087078258 W= 0.3059162 b= 0.39628544\nEpoch: 0750 cost= 0.085909322 W= 0.30256656 b= 0.42038321\nEpoch: 0800 cost= 0.084875606 W= 0.29941604 b= 0.44304764\nEpoch: 0850 cost= 0.083961442 W= 0.29645273 b= 0.46436492\nEpoch: 0900 cost= 0.083152987 W= 0.29366568 b= 0.48441482\nEpoch: 0950 cost= 0.082438134 W= 0.29104447 b= 0.50327134\nEpoch: 1000 cost= 0.081805959 W= 0.28857931 b= 0.5210061\nOptimization Finished!\nTraining cost= 0.08180596 W= 0.28857931 b= 0.5210061 \n\n" ], [ "# Logistic Regression Example\n\n# Import MINST data\n#from tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n", "Extracting /tmp/data/train-images-idx3-ubyte.gz\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\n" ], [ "# Parameters\nlearning_rate = 0.01\ntraining_epochs = 25\nbatch_size = 100\ndisplay_step = 1\n\n# tf Graph Input\nx = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784\ny = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes\n\n# Set model weights\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n# Construct model\npred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax\n\n# Minimize error using cross entropy\ncost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))\n# Gradient Descent\noptimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n# Initialize the variables (i.e. 
assign their default value)\ninit = tf.global_variables_initializer()", "_____no_output_____" ], [ "# Start training\nwith tf.Session() as sess:\n sess.run(init)\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(mnist.train.num_examples/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n # Fit training using batch data\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,\n y: batch_ys})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if (epoch+1) % display_step == 0:\n print (\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost))\n print (\"W = \", sess.run(W), \"b = \", sess.run(b))\n\n print (\"Optimization Finished!\")\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n # Calculate accuracy for 3000 examples\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print (\"Accuracy:\", accuracy.eval({x: mnist.test.images[:3000], y: mnist.test.labels[:3000]}))\n \n ", "Epoch: 0001 cost= 1.184351654\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.03730078 0.079611 -0.01896858 -0.02118796 0.01909295 0.0290922\n -0.0093307 0.03109496 -0.06625962 -0.00584353]\nEpoch: 0002 cost= 0.665294986\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.05097382 0.11064371 -0.02714274 -0.03299111 0.03137657 0.06248994\n -0.01303456 0.0533782 -0.11813473 -0.01561154]\nEpoch: 0003 cost= 0.552845062\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 
0.]] b = [-0.06005226 0.13130666 -0.03169165 -0.04252985 0.03687295 0.09206253\n -0.01484207 0.07162529 -0.161586 -0.02116575]\nEpoch: 0004 cost= 0.498638791\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.06812466 0.14778477 -0.03708484 -0.04939237 0.04321228 0.12082824\n -0.01635786 0.087356 -0.19968395 -0.02853803]\nEpoch: 0005 cost= 0.465521762\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.07547318 0.1604302 -0.03652826 -0.05572912 0.04746671 0.14966229\n -0.01817798 0.09991929 -0.2383257 -0.03324459]\nEpoch: 0006 cost= 0.442640048\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.08153969 0.17013672 -0.03679978 -0.06331212 0.04944078 0.17483146\n -0.01861076 0.1140615 -0.2693314 -0.03887695]\nEpoch: 0007 cost= 0.425481379\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.08848141 0.18146156 -0.03798386 -0.06761739 0.05332766 0.19949351\n -0.01861517 0.12466203 -0.30236405 -0.04388312]\nEpoch: 0008 cost= 0.412171161\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.09313428 0.18949196 -0.03838684 -0.07316037 0.05374273 0.22340766\n -0.02087186 0.13762471 -0.33184364 -0.04686971]\nEpoch: 0009 cost= 0.401392119\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 
0.]] b = [-0.0990689 0.1960882 -0.03592963 -0.07843751 0.05531807 0.24680783\n -0.02088237 0.14924437 -0.3616927 -0.05144653]\nEpoch: 0010 cost= 0.392403340\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.10445043 0.20348659 -0.03892315 -0.0832253 0.05772278 0.27106145\n -0.02252211 0.1597161 -0.38710326 -0.05576197]\nEpoch: 0011 cost= 0.384773500\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.10876292 0.21040373 -0.0357703 -0.08684084 0.05734215 0.29051816\n -0.02293032 0.16954266 -0.4133052 -0.06019624]\nEpoch: 0012 cost= 0.378178392\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.11365984 0.21648863 -0.03518485 -0.0926086 0.0592064 0.31241697\n -0.02448673 0.17998059 -0.44008526 -0.06206581]\nEpoch: 0013 cost= 0.372386288\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.11877178 0.22191867 -0.03602133 -0.09552856 0.06074506 0.3333259\n -0.02509693 0.18969345 -0.463182 -0.06708133]\nEpoch: 0014 cost= 0.367272738\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.12416838 0.2257404 -0.03433709 -0.09767388 0.06083799 0.3527775\n -0.02654182 0.199941 -0.48622563 -0.07034939]\nEpoch: 0015 cost= 0.362750185\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 
0.]] b = [-0.12856363 0.23138134 -0.03092325 -0.10336305 0.06109109 0.37260556\n -0.02755004 0.20756975 -0.50907403 -0.07317296]\nEpoch: 0016 cost= 0.358613667\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.1326521 0.23466517 -0.0313612 -0.1057642 0.06152226 0.3914546\n -0.0270358 0.21647635 -0.53087926 -0.07642572]\nEpoch: 0017 cost= 0.354890988\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.13678822 0.2400118 -0.02834421 -0.10973241 0.06161172 0.41064554\n -0.02878498 0.22524315 -0.5525351 -0.08132744]\nEpoch: 0018 cost= 0.351455844\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.14169511 0.24368851 -0.02746959 -0.11422423 0.06051907 0.4298534\n -0.02932757 0.2341022 -0.57317317 -0.08227421]\nEpoch: 0019 cost= 0.348301004\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.14530456 0.24754585 -0.0264463 -0.11690253 0.06098108 0.4471063\n -0.02999718 0.24277438 -0.59326774 -0.08648942]\nEpoch: 0020 cost= 0.345450270\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.14967677 0.25064653 -0.02487657 -0.11899381 0.06016582 0.4659972\n -0.03192828 0.25152162 -0.61397874 -0.08887698]\nEpoch: 0021 cost= 0.342746153\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 
0.]] b = [-0.15434207 0.25267965 -0.02194185 -0.12138518 0.06083791 0.48278758\n -0.0336969 0.25966427 -0.6317692 -0.09283436]\nEpoch: 0022 cost= 0.340233801\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.15849629 0.2573678 -0.02097109 -0.12418069 0.06164705 0.50061244\n -0.03446113 0.26553428 -0.65165555 -0.09539565]\nEpoch: 0023 cost= 0.337906543\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.161714 0.2599396 -0.01815308 -0.1288951 0.05983865 0.51536673\n -0.03468929 0.27460033 -0.66929084 -0.09700102]\nEpoch: 0024 cost= 0.335751848\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.16501151 0.2627594 -0.01875818 -0.1301892 0.06000581 0.532264\n -0.03506881 0.28079903 -0.68712956 -0.09966965]\nEpoch: 0025 cost= 0.333729791\nW = [[0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n ...\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]\n [0. 0. 0. ... 0. 0. 0.]] b = [-0.17010799 0.26588017 -0.01508724 -0.13369185 0.06029621 0.5489422\n -0.03660642 0.28865454 -0.70499253 -0.10328635]\nOptimization Finished!\nAccuracy: 0.889\n" ] ], [ [ "## Simple neural network\nA 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)\nimplementation with TensorFlow. 
This example is using the MNIST database\nof handwritten digits (http://yann.lecun.com/exdb/mnist/).\n", "_____no_output_____" ] ], [ [ "def simple_nn (layer1_n,layer2_n) : \n print(\"Starting simple Neural network \" + str(layer1_n) +\" : \" + str(layer2_n) )\n reset_graph()\n # Parameters\n learning_rate = 0.1\n num_steps = 500\n batch_size = 128\n display_step = 100\n\n # Network Parameters\n n_hidden_1 = layer1_n # 1st layer number of neurons\n n_hidden_2 = layer2_n # 2nd layer number of neurons\n num_input = 784 # MNIST data input (img shape: 28*28)\n num_classes = 10 # MNIST total classes (0-9 digits)\n\n # tf Graph input\n X = tf.placeholder(\"float\", [None, num_input])\n Y = tf.placeholder(\"float\", [None, num_classes])\n\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([num_classes]))\n }\n\n\n # Create model\n def neural_net(x):\n # Hidden fully connected layer with layer 1 neurons\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Hidden fully connected layer with layer 2 neurons\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Output fully connected layer with a neuron for each class\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n return out_layer\n\n # Construct model\n logits = neural_net(X)\n prediction = tf.nn.softmax(logits)\n\n # Define loss and optimizer\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=logits, labels=Y))\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss_op)\n\n # Evaluate model\n correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 
1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n # Initialize the variables (i.e. assign their default value)\n init = tf.global_variables_initializer()\n\n # display all the parameters\n vars = 0\n for v in tf.all_variables():\n vars += np.prod(v.get_shape().as_list())\n print(vars)\n \n # Start training\n with tf.Session() as sess:\n\n # Run the initializer\n sess.run(init)\n\n for step in range(1, num_steps+1):\n batch_x, batch_y = mnist.train.next_batch(batch_size)\n # Run optimization op (backprop)\n sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})\n if step % display_step == 0 or step == 1:\n # Calculate batch loss and accuracy\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,\n Y: batch_y})\n print(\"Step \" + str(step) + \", Minibatch Loss= \" + \\\n \"{:.4f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.3f}\".format(acc))\n\n print(\"Optimization Finished!\")\n\n # Calculate accuracy for MNIST test images\n print(\"Testing Accuracy:\", \\\n sess.run(accuracy, feed_dict={X: mnist.test.images,\n Y: mnist.test.labels}))\n \n", "_____no_output_____" ], [ "simple_nn(2048,2048)\nsimple_nn(1024,1024)\nsimple_nn(512,512)\nsimple_nn(256,256)\nsimple_nn(128,128)\nsimple_nn(24,24)\n# 256 : 256 > 807968.0\n# 512 : 512 > 2009120.0", "Starting simple Neural network 2048 : 2048\n17473568.0\nStep 1, Minibatch Loss= 474724.2500, Training Accuracy= 0.352\nStep 100, Minibatch Loss= 12443.1035, Training Accuracy= 0.867\nStep 200, Minibatch Loss= 6874.4072, Training Accuracy= 0.852\nStep 300, Minibatch Loss= 1211.8154, Training Accuracy= 0.914\nStep 400, Minibatch Loss= 1998.8522, Training Accuracy= 0.875\nStep 500, Minibatch Loss= 1319.4133, Training Accuracy= 0.852\nOptimization Finished!\nTesting Accuracy: 0.8519\nStarting simple Neural network 1024 : 1024\n5591072.0\nStep 1, Minibatch Loss= 103726.3438, Training Accuracy= 0.484\nStep 100, Minibatch Loss= 6831.8525, Training Accuracy= 0.852\nStep 200, Minibatch Loss= 
2858.8306, Training Accuracy= 0.836\nStep 300, Minibatch Loss= 1054.2766, Training Accuracy= 0.914\nStep 400, Minibatch Loss= 358.0754, Training Accuracy= 0.883\nStep 500, Minibatch Loss= 553.5654, Training Accuracy= 0.867\nOptimization Finished!\nTesting Accuracy: 0.8651\nStarting simple Neural network 512 : 512\n2009120.0\nStep 1, Minibatch Loss= 35562.5625, Training Accuracy= 0.289\nStep 100, Minibatch Loss= 704.7876, Training Accuracy= 0.930\nStep 200, Minibatch Loss= 479.4126, Training Accuracy= 0.875\nStep 300, Minibatch Loss= 245.0352, Training Accuracy= 0.844\nStep 400, Minibatch Loss= 143.5513, Training Accuracy= 0.883\nStep 500, Minibatch Loss= 82.5241, Training Accuracy= 0.891\nOptimization Finished!\nTesting Accuracy: 0.8675\nStarting simple Neural network 256 : 256\n807968.0\nStep 1, Minibatch Loss= 15379.7881, Training Accuracy= 0.219\nStep 100, Minibatch Loss= 328.2344, Training Accuracy= 0.844\nStep 200, Minibatch Loss= 158.4039, Training Accuracy= 0.875\nStep 300, Minibatch Loss= 148.0388, Training Accuracy= 0.828\nStep 400, Minibatch Loss= 40.8494, Training Accuracy= 0.891\nStep 500, Minibatch Loss= 19.9304, Training Accuracy= 0.922\nOptimization Finished!\nTesting Accuracy: 0.8473\nStarting simple Neural network 128 : 128\n354848.0\nStep 1, Minibatch Loss= 3644.5850, Training Accuracy= 0.359\nStep 100, Minibatch Loss= 69.0138, Training Accuracy= 0.883\nStep 200, Minibatch Loss= 46.2350, Training Accuracy= 0.906\nStep 300, Minibatch Loss= 13.0044, Training Accuracy= 0.898\nStep 400, Minibatch Loss= 17.5829, Training Accuracy= 0.875\nStep 500, Minibatch Loss= 14.1382, Training Accuracy= 0.836\nOptimization Finished!\nTesting Accuracy: 0.8328\nStarting simple Neural network 24 : 24\n59072.0\nStep 1, Minibatch Loss= 438.1121, Training Accuracy= 0.164\nStep 100, Minibatch Loss= 5.8225, Training Accuracy= 0.859\nStep 200, Minibatch Loss= 2.8557, Training Accuracy= 0.906\nStep 300, Minibatch Loss= 2.7573, Training Accuracy= 0.828\nStep 400, Minibatch 
Loss= 1.4487, Training Accuracy= 0.828\nStep 500, Minibatch Loss= 0.5442, Training Accuracy= 0.875\nOptimization Finished!\nTesting Accuracy: 0.8594\n" ], [ "simple_nn(512,128)\nsimple_nn(12,12)", "Starting simple Neural network 512 : 128\n1406624.0\nStep 1, Minibatch Loss= 15699.5000, Training Accuracy= 0.258\nStep 100, Minibatch Loss= 399.3960, Training Accuracy= 0.844\nStep 200, Minibatch Loss= 127.8773, Training Accuracy= 0.906\nStep 300, Minibatch Loss= 94.4084, Training Accuracy= 0.828\nStep 400, Minibatch Loss= 23.0377, Training Accuracy= 0.930\nStep 500, Minibatch Loss= 21.7889, Training Accuracy= 0.906\nOptimization Finished!\nTesting Accuracy: 0.842\nStarting simple Neural network 12 : 12\n29120.0\nStep 1, Minibatch Loss= 173.4673, Training Accuracy= 0.117\nStep 100, Minibatch Loss= 1.9999, Training Accuracy= 0.773\nStep 200, Minibatch Loss= 0.7199, Training Accuracy= 0.836\nStep 300, Minibatch Loss= 0.4309, Training Accuracy= 0.875\nStep 400, Minibatch Loss= 0.5109, Training Accuracy= 0.867\nStep 500, Minibatch Loss= 0.7298, Training Accuracy= 0.852\nOptimization Finished!\nTesting Accuracy: 0.861\n" ] ], [ [ "## Simple Neural Network (tf.layers/estimator api) \nA 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron)\nimplementation with TensorFlow. This example is using the MNIST database\nof handwritten digits (http://yann.lecun.com/exdb/mnist/).\n\nThis example is using TensorFlow layers, see 'neural_network_raw' example for\na raw implementation with variables.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cb2204cfc23fecace2ff127350a118c47e492f84
577,193
ipynb
Jupyter Notebook
examples/example-1d-Spin-bath-model-ohmic-fitting.ipynb
bitcoffe/bofin
1a8969f399ff0ddeb817fc272afa2c8400956556
[ "BSD-3-Clause" ]
7
2020-10-22T12:25:54.000Z
2021-11-23T12:51:56.000Z
examples/example-1d-Spin-bath-model-ohmic-fitting.ipynb
yanghf263/bofin
a2ba7b33050e71ffc3d95bb43e7907a3d6e1167f
[ "BSD-3-Clause" ]
5
2020-10-20T07:12:10.000Z
2021-07-06T22:28:29.000Z
examples/example-1d-Spin-bath-model-ohmic-fitting.ipynb
yanghf263/bofin
a2ba7b33050e71ffc3d95bb43e7907a3d6e1167f
[ "BSD-3-Clause" ]
10
2020-10-26T19:40:29.000Z
2022-03-29T19:45:25.000Z
305.231623
109,764
0.915131
[ [ [ "# Example 1d: Spin-Bath model, fitting of spectrum and correlation functions\n\n### Introduction", "_____no_output_____" ], [ "The HEOM method solves the dynamics and steady state of a system and its environment, the latter of which is encoded in a set of auxiliary density matrices.\n\nIn this example we show the evolution of a single two-level system in contact with a single Bosonic environment. The properties of the system are encoded in Hamiltonian, and a coupling operator which describes how it is coupled to the environment.\n\nThe Bosonic environment is implicitly assumed to obey a particular Hamiltonian (see paper), the parameters of which are encoded in the spectral density, and subsequently the free-bath correlation functions.\n\nIn the example below we show how model an Ohmic environment with exponential cut-off in two ways. First we fit the spectrum with a set of underdamped brownian oscillator functions. Second, we evaluate the correlation functions, and fit those with a certain choice of exponential functions. 
\n\n", "_____no_output_____" ] ], [ [ "%pylab inline\n", "The history saving thread hit an unexpected error (DatabaseError('database disk image is malformed')).History will not be written to the database.\nPopulating the interactive namespace from numpy and matplotlib\n" ], [ "from qutip import *", "_____no_output_____" ], [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "\nfrom bofin.heom import BosonicHEOMSolver", "_____no_output_____" ], [ "\ndef cot(x):\n return 1./np.tan(x)\n", "_____no_output_____" ], [ "# Defining the system Hamiltonian\neps = .0 # Energy of the 2-level system.\nDel = .2 # Tunnelling term\nHsys = 0.5 * eps * sigmaz() + 0.5 * Del* sigmax()", "_____no_output_____" ], [ "# Initial state of the system.\nrho0 = basis(2,0) * basis(2,0).dag() ", "_____no_output_____" ], [ "#Import mpmath functions for evaluation of correlation functions\n\nfrom mpmath import mp\nfrom mpmath import zeta\nfrom mpmath import gamma\n\nmp.dps = 15; mp.pretty = True", "_____no_output_____" ], [ "\nQ = sigmaz()\n\nalpha = 3.25\nT = 0.5\n\nwc = 1\nbeta = 1/T \ns = 1\n\n\ntlist = np.linspace(0, 10, 5000)\ntlist3 = linspace(0,15,50000)\n\n\n#note: the arguments to zeta should be in as high precision as possible, might need some adjustment\n# see http://mpmath.org/doc/current/basics.html#providing-correct-input\nct = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + \n zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t in tlist]\n\n\n#also check long timescales\nctlong = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + \n zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t in tlist3]\n\n\ncorrRana = real(ctlong)\ncorrIana = imag(ctlong)\n\n\npref = 1.\n\n", "_____no_output_____" ], [ "\n#lets try fitting the spectrurum\n#use underdamped case with meier tannor form \n\n\nwlist = np.linspace(0, 25, 20000)\n\nfrom scipy.optimize import curve_fit\n\n#seperate functions for plotting 
later:\n\n\n\ndef fit_func_nocost(x, a, b, c, N):\n tot = 0\n for i in range(N):\n \n tot+= 2 * a[i] * b[i] * (x)/(((x+c[i])**2 + (b[i]**2))*((x-c[i])**2 + (b[i]**2)))\n cost = 0.\n \n return tot \n\ndef wrapper_fit_func_nocost(x, N, *args):\n a, b, c = list(args[0][:N]), list(args[0][N:2*N]),list(args[0][2*N:3*N])\n # print(\"debug\")\n return fit_func_nocost(x, a, b, c, N)\n\n\n# function that evaluates values with fitted params at\n# given inputs\ndef checker(tlist, vals, N):\n y = []\n for i in tlist:\n # print(i)\n \n y.append(wrapper_fit_func_nocost(i, N, vals))\n return y\n\n\n#######\n#Real part \n\ndef wrapper_fit_func(x, N, *args):\n a, b, c = list(args[0][:N]), list(args[0][N:2*N]),list(args[0][2*N:3*N])\n # print(\"debug\")\n return fit_func(x, a, b, c, N)\n\n\n\ndef fit_func(x, a, b, c, N):\n tot = 0\n for i in range(N):\n \n tot+= 2 * a[i] * b[i] * (x)/(((x+c[i])**2 + (b[i]**2))*((x-c[i])**2 + (b[i]**2)))\n cost = 0.\n #for i in range(N):\n #print(i)\n # cost += ((corrRana[0]-a[i]*np.cos(d[i])))\n \n \n tot+=0.0*cost\n \n return tot \n\ndef fitterR(ans, tlist, k):\n # the actual computing of fit\n popt = []\n pcov = [] \n # tries to fit for k exponents\n for i in range(k):\n #params_0 = [0]*(2*(i+1))\n params_0 = [0.]*(3*(i+1))\n upper_a = 100*abs(max(ans, key = abs))\n #sets initial guess\n guess = []\n #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1)\n aguess = [abs(max(ans, key = abs))]*(i+1)\n bguess = [1*wc]*(i+1)\n cguess = [1*wc]*(i+1)\n \n guess.extend(aguess)\n guess.extend(bguess)\n guess.extend(cguess)\n \n # sets bounds\n # a's = anything , b's negative\n # sets lower bound\n b_lower = []\n alower = [-upper_a]*(i+1)\n blower = [0.1*wc]*(i+1)\n clower = [0.1*wc]*(i+1)\n \n b_lower.extend(alower)\n b_lower.extend(blower)\n b_lower.extend(clower)\n \n # sets higher bound\n b_higher = []\n ahigher = [upper_a]*(i+1)\n #bhigher = [np.inf]*(i+1)\n bhigher = [100*wc]*(i+1)\n chigher = [100*wc]*(i+1)\n \n b_higher.extend(ahigher)\n 
b_higher.extend(bhigher)\n b_higher.extend(chigher)\n \n param_bounds = (b_lower, b_higher)\n \n p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func(x, i+1, \\\n params_0), tlist, ans, p0=guess, bounds = param_bounds,sigma=[0.0001 for w in wlist], maxfev = 1000000000)\n popt.append(p1)\n pcov.append(p2)\n print(i+1)\n return popt\n# print(popt)\n\n\nJ = [w * alpha * e**(-w/wc) for w in wlist]\nk = 4\npopt1 = fitterR(J, wlist, k)\nfor i in range(k):\n y = checker(wlist, popt1[i],i+1)\n print(popt1[i])\n plt.plot(wlist, J, wlist, y)\n \n plt.show()\n \n", "1\n2\n3\n4\n[6.14746382 1.77939431 0.1 ]\n" ], [ "\n\nlam = list(popt1[k-1])[:k]\n\ngamma = list(popt1[k-1])[k:2*k] #damping terms\n\nw0 = list(popt1[k-1])[2*k:3*k] #w0 termss\n\n\n\n\nprint(lam)\nprint(gamma)\nprint(w0)", "[0.60083768747418, 7.9159271481798354, -4.407893509547376, 0.010585173501683682]\n[1.0024683988395526, 2.296190188285376, 4.299081656029848, 0.30736352564766894]\n[0.10000000000000002, 0.10000000000032039, 3.981685947699812, 0.10000000000000002]\n" ], [ "\nlamT = []\n\nprint(lam)\nprint(gamma)\nprint(w0)\n\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(8,8))\naxes.plot(wlist, J, 'r--', linewidth=2, label=\"original\")\nfor kk,ll in enumerate(lam):\n #axes.plot(wlist, [lam[kk] * gamma[kk] * (w)/(((w**2-w0[kk]**2)**2 + (gamma[kk]**2*w**2))) for w in wlist],linewidth=2)\n axes.plot(wlist, [2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + (gamma[kk]**2))) for w in wlist],linewidth=2, label=\"fit\")\n\n\n\n\n\naxes.set_xlabel(r'$w$', fontsize=28)\naxes.set_ylabel(r'J', fontsize=28)\n\naxes.legend()\nfig.savefig('noisepower.eps')\nwlist2 = np.linspace(-10,10 , 50000)\n\n\n\ns1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2]\ns2 = [sum([(2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + (gamma[kk]**2)))) * ((1/(e**(w/T)-1))+1) for kk,lamkk in enumerate(lam)]) for w in wlist2]\n\n\nfig, axes = plt.subplots(1, 
1, sharex=True, figsize=(8,8))\naxes.plot(wlist2, s1, 'r', linewidth=2,label=\"original\")\naxes.plot(wlist2, s2, 'b', linewidth=2,label=\"fit\")\n\naxes.set_xlabel(r'$w$', fontsize=28)\naxes.set_ylabel(r'S(w)', fontsize=28)\n\n#axes.axvline(x=Del)\nprint(min(s2))\naxes.legend()\n\n#fig.savefig('powerspectrum.eps')\n#J(w>0) * (n(w>w)+1)\n", "[0.60083768747418, 7.9159271481798354, -4.407893509547376, 0.010585173501683682]\n[1.0024683988395526, 2.296190188285376, 4.299081656029848, 0.30736352564766894]\n[0.10000000000000002, 0.10000000000032039, 3.981685947699812, 0.10000000000000002]\n" ], [ "def cot(x):\n return 1./np.tan(x)\n\n\ndef coth(x):\n \"\"\"\n Calculates the coth function.\n \n Parameters\n ----------\n x: np.ndarray\n Any numpy array or list like input.\n \n Returns\n -------\n cothx: ndarray\n The coth function applied to the input.\n \"\"\"\n return 1/np.tanh(x)", "_____no_output_____" ], [ "\n#underdamped meier tannior version with terminator\n\n\nTermMax = 1000\nTermOps = 0.*spre(sigmaz())\nNk = 1 # number of exponentials in approximation of the Matsubara approximation\n\npref = 1\n\nckAR = []\nvkAR = []\nckAI = []\nvkAI = []\nfor kk, ll in enumerate(lam):\n #print(kk)\n lamt = lam[kk]\n Om = w0[kk]\n Gamma = gamma[kk]\n print(T)\n print(coth(beta*(Om+1.0j*Gamma)/2))\n ckAR_temp = [(lamt/(4*Om))*coth(beta*(Om+1.0j*Gamma)/2),(lamt/(4*Om))*coth(beta*(Om-1.0j*Gamma)/2)]\n for k in range(1,Nk+1):\n #print(k)\n ek = 2*pi*k/beta\n ckAR_temp.append((-2*lamt*2*Gamma/beta)*ek/(((Om+1.0j*Gamma)**2+ek**2)*((Om-1.0j*Gamma)**2+ek**2)))\n \n \n \n term = 0\n for k in range(Nk+1,TermMax):\n #print(k)\n ek = 2*pi*k/beta\n ck = ((-2*lamt*2*Gamma/beta)*ek/(((Om+1.0j*Gamma)**2+ek**2)*((Om-1.0j*Gamma)**2+ek**2)))\n term += ck/ek\n ckAR.extend(ckAR_temp)\n \n vkAR_temp = [-1.0j*Om+Gamma,1.0j*Om+Gamma]\n vkAR_temp.extend([2 * np.pi * k * T + 0.j for k in range(1,Nk+1)])\n \n vkAR.extend(vkAR_temp)\n factor=1./4.\n 
ckAI.extend([-factor*lamt*1.0j/(Om),factor*lamt*1.0j/(Om)])\n\n vkAI.extend( [-(-1.0j*(Om) - Gamma),-(1.0j*(Om) - Gamma)])\n \n TermOps += term * (2*spre(Q)*spost(Q.dag()) - spre(Q.dag()*Q) - spost(Q.dag()*Q))\n\nprint(ckAR)\nprint(vkAR)\n\nQ2 = []\n\nNR = len(ckAR)\nNI = len(ckAI)\n\nQ2.extend([ sigmaz() for kk in range(NR)])\nQ2.extend([ sigmaz() for kk in range(NI)])\noptions = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14)", "0.5\n(0.13974897556002197-0.6297171398194138j)\n0.5\n(0.17664345475450763+0.8710462271395351j)\n0.5\n(0.999528560660008-0.0005117331721781953j)\n0.5\n(0.9911396415614544-2.839161761980097j)\n[(0.20991612825592318-0.9458944751298783j), (0.20991612825592318+0.9458944751298783j), (-0.04802667599727985+0j), (3.4957417975875082+17.237846191778434j), (3.4957417975875082-17.237846191778434j), (-5.3276721492789765+0j), (-0.2766300201103056+0.00014162752649838175j), (-0.2766300201103056-0.00014162752649838175j), (0.09723738681206452+0j), (0.026228462675811418-0.07513254962476318j), (0.026228462675811418+0.07513254962476318j), (-0.0002134910414080296+0j)]\n[(1.0024683988395526-0.10000000000000002j), (1.0024683988395526+0.10000000000000002j), (3.141592653589793+0j), (2.296190188285376-0.10000000000032039j), (2.296190188285376+0.10000000000032039j), (3.141592653589793+0j), (4.299081656029848-3.981685947699812j), (4.299081656029848+3.981685947699812j), (3.141592653589793+0j), (0.30736352564766894-0.10000000000000002j), (0.30736352564766894+0.10000000000000002j), (3.141592653589793+0j)]\n" ], [ "#corrRana = real(ct)\n#corrIana = imag(ct)\ncorrRana = real(ctlong)\ncorrIana = imag(ctlong)\n\ndef checker2(tlisttemp):\n y = []\n for i in tlisttemp:\n # print(i)\n \n temp = []\n for kkk,ck in enumerate(ckAR):\n \n temp.append(ck*exp(-vkAR[kkk]*i))\n \n y.append(sum(temp))\n return y\n\n\nyR = checker2(tlist3)\n\n\n\n# function that evaluates values with fitted params at\n# given inputs\ndef checker2(tlisttemp):\n y = []\n for i in 
tlisttemp:\n # print(i)\n \n temp = []\n for kkk,ck in enumerate(ckAI):\n if i==0: \n print(vkAI[kkk])\n temp.append(ck*exp(-vkAI[kkk]*i))\n \n y.append(sum(temp))\n return y\n\n\n\nyI = checker2(tlist3)\n\n", "(1.0024683988395526+0.10000000000000002j)\n(1.0024683988395526-0.10000000000000002j)\n(2.296190188285376+0.10000000000032039j)\n(2.296190188285376-0.10000000000032039j)\n(4.299081656029848+3.981685947699812j)\n(4.299081656029848-3.981685947699812j)\n(0.30736352564766894+0.10000000000000002j)\n(0.30736352564766894-0.10000000000000002j)\n" ], [ "matplotlib.rcParams['figure.figsize'] = (7, 5)\nmatplotlib.rcParams['axes.titlesize'] = 25\nmatplotlib.rcParams['axes.labelsize'] = 30\nmatplotlib.rcParams['xtick.labelsize'] = 28\nmatplotlib.rcParams['ytick.labelsize'] = 28\nmatplotlib.rcParams['legend.fontsize'] = 20\nmatplotlib.rcParams['axes.grid'] = False\nmatplotlib.rcParams['savefig.bbox'] = 'tight'\nmatplotlib.rcParams['lines.markersize'] = 5\nmatplotlib.rcParams['font.family'] = 'STIXgeneral' \nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams[\"font.serif\"] = \"STIX\"\nmatplotlib.rcParams['text.usetex'] = False", "_____no_output_____" ], [ "tlist2 = tlist3\nfrom cycler import cycler\n\nwlist2 = np.linspace(-2*pi*4,2 * pi *4 , 50000)\nwlist2 = np.linspace(-7,7 , 50000)\n\n\n\n\nfig = plt.figure(figsize=(12,10))\ngrid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3)\n\ndefault_cycler = (cycler(color=['r', 'g', 'b', 'y','c','m','k']) +\n cycler(linestyle=['-', '--', ':', '-.',(0, (1, 10)), (0, (5, 10)),(0, (3, 10, 1, 10))]))\nplt.rc('axes',prop_cycle=default_cycler )\n\n\naxes1 = fig.add_subplot(grid[0,0])\naxes1.set_yticks([0.,1.])\naxes1.set_yticklabels([0,1]) \naxes1.plot(tlist2, corrRana,\"r\",linewidth=3,label=\"Original\")\naxes1.plot(tlist2, 
yR,\"g\",dashes=[3,3],linewidth=2,label=\"Reconstructed\")\naxes1.legend(loc=0)\n\naxes1.set_ylabel(r'$C_R(t)$',fontsize=28)\n\naxes1.set_xlabel(r'$t\\;\\omega_c$',fontsize=28)\naxes1.locator_params(axis='y', nbins=4)\naxes1.locator_params(axis='x', nbins=4)\naxes1.text(2.,1.5,\"(a)\",fontsize=28)\n\n\naxes2 = fig.add_subplot(grid[0,1])\naxes2.set_yticks([0.,-0.4])\naxes2.set_yticklabels([0,-0.4])\n\naxes2.plot(tlist2, corrIana,\"r\",linewidth=3,label=\"Original\")\naxes2.plot(tlist2, yI,\"g\",dashes=[3,3], linewidth=2,label=\"Reconstructed\")\naxes2.legend(loc=0)\n\naxes2.set_ylabel(r'$C_I(t)$',fontsize=28)\n\naxes2.set_xlabel(r'$t\\;\\omega_c$',fontsize=28)\naxes2.locator_params(axis='y', nbins=4)\naxes2.locator_params(axis='x', nbins=4)\n\n\naxes2.text(12.5,-0.2,\"(b)\",fontsize=28)\n\n\naxes3 = fig.add_subplot(grid[1,0])\n\n\naxes3.set_yticks([0.,.5,1])\naxes3.set_yticklabels([0,0.5,1])\n\naxes3.plot(wlist, J, \"r\",linewidth=3,label=\"$J(\\omega)$ original\")\ny = checker(wlist, popt1[3],4)\naxes3.plot(wlist, y, \"g\", dashes=[3,3], linewidth=2, label=\"$J(\\omega)$ Fit $k_J = 4$\")\n\naxes3.set_ylabel(r'$J(\\omega)$',fontsize=28)\n\naxes3.set_xlabel(r'$\\omega/\\omega_c$',fontsize=28)\naxes3.locator_params(axis='y', nbins=4)\naxes3.locator_params(axis='x', nbins=4)\naxes3.legend(loc=0)\naxes3.text(3,1.1,\"(c)\",fontsize=28)\n\n\ns1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2]\ns2 = [sum([(2* lam[kk] * gamma[kk] * (w)/(((w+w0[kk])**2 + (gamma[kk]**2))*((w-w0[kk])**2 + (gamma[kk]**2)))) * ((1/(e**(w/T)-1))+1) for kk,lamkk in enumerate(lam)]) for w in wlist2]\n\n\naxes4 = fig.add_subplot(grid[1,1])\n\n\n\naxes4.set_yticks([0.,1])\naxes4.set_yticklabels([0,1])\naxes4.plot(wlist2, s1,\"r\",linewidth=3,label=\"Original\")\naxes4.plot(wlist2, s2, \"g\", dashes=[3,3], linewidth=2,label=\"Reconstructed\")\n\naxes4.set_xlabel(r'$\\omega/\\omega_c$', fontsize=28)\naxes4.set_ylabel(r'$S(\\omega)$', fontsize=28)\naxes4.locator_params(axis='y', 
nbins=4)\naxes4.locator_params(axis='x', nbins=4)\naxes4.legend()\naxes4.text(4.,1.2,\"(d)\",fontsize=28)\n\nfig.savefig(\"figures/figFiJspec.pdf\")\n", "/home/neill/anaconda3/lib/python3.7/site-packages/numpy/core/numeric.py:501: ComplexWarning: Casting complex values to real discards the imaginary part\n return array(a, dtype, copy=False, order=order)\n" ], [ "\n\n\nNC = 11\n\n\nNR = len(ckAR)\nNI = len(ckAI)\nprint(NR)\nprint(NI)\nQ2 = []\nQ2.extend([ sigmaz() for kk in range(NR)])\nQ2.extend([ sigmaz() for kk in range(NI)])\n\n", "12\n8\n" ], [ "\n#Q2 = [Q for kk in range(NR+NI)]\n#print(Q2)\n\noptions = Options(nsteps=1500, store_states=True, rtol=1e-12, atol=1e-12, method=\"bdf\") \nimport time \n\nstart = time.time()\nprint(\"start\")\n\n\nLtot = liouvillian(Hsys) + TermOps\n\n\nHEOMFit = BosonicHEOMSolver(Ltot, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\n\nprint(\"end\")\nend = time.time()\nprint(end - start)", "start\n" ], [ "#tlist4 = np.linspace(0, 50, 1000)\n\ntlist4 = np.linspace(0, 4*pi/Del, 600)\ntlist4 = np.linspace(0, 30*pi/Del, 600)\n\nrho0 = basis(2,0) * basis(2,0).dag() \n\n\nimport time\nstart = time.time()\nresultFit = HEOMFit.run(rho0, tlist4)\n\nend = time.time()\nprint(end - start)", "_____no_output_____" ], [ "# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11exp11K4NK1TL = expect(resultFit.states, P11p)\nP22exp11K4NK1TL = expect(resultFit.states, P22p)\nP12exp11K4NK1TL = expect(resultFit.states, P12p)", "_____no_output_____" ], [ "\ntlist3 = linspace(0,15,50000)\n\n\n#also check long timescales\nctlong = [complex((1/pi)*alpha * wc**(1-s) * beta**(-(s+1)) * (zeta(s+1,(1+beta*wc-1.0j*wc*t)/(beta*wc)) + \n zeta(s+1,(1+1.0j*wc*t)/(beta*wc)))) for t 
in tlist3]\n\n\ncorrRana = real(ctlong)\ncorrIana = imag(ctlong)", "_____no_output_____" ], [ "\ntlist2 = tlist3\nfrom scipy.optimize import curve_fit\n\n#seperate functions for plotting later:\n\n\n\ndef fit_func_nocost(x, a, b, c, N):\n tot = 0\n for i in range(N):\n # print(i)\n tot += a[i]*np.exp(b[i]*x)*np.cos(c[i]*x)\n cost = 0.\n \n return tot \n\ndef wrapper_fit_func_nocost(x, N, *args):\n a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N])\n # print(\"debug\")\n return fit_func_nocost(x, a, b, c, N)\n\n\n# function that evaluates values with fitted params at\n# given inputs\ndef checker(tlist_local, vals, N):\n y = []\n for i in tlist_local:\n # print(i)\n \n y.append(wrapper_fit_func_nocost(i, N, vals))\n return y\n\n\n#######\n#Real part \n\ndef wrapper_fit_func(x, N, *args):\n a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N])\n # print(\"debug\")\n return fit_func(x, a, b, c, N)\n\n\n\ndef fit_func(x, a, b, c, N):\n tot = 0\n for i in range(N):\n # print(i)\n tot += a[i]*np.exp(b[i]*x)*np.cos(c[i]*x )\n cost = 0.\n for i in range(N):\n #print(i)\n cost += ((corrRana[0]-a[i]))\n \n \n tot+=0.0*cost\n \n return tot \n\ndef fitterR(ans, tlist_local, k):\n # the actual computing of fit\n popt = []\n pcov = [] \n # tries to fit for k exponents\n for i in range(k):\n #params_0 = [0]*(2*(i+1))\n params_0 = [0.]*(3*(i+1))\n upper_a = 20*abs(max(ans, key = abs))\n #sets initial guess\n guess = []\n #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1)\n aguess = [abs(max(ans, key = abs))]*(i+1)\n bguess = [-wc]*(i+1)\n cguess = [wc]*(i+1)\n \n guess.extend(aguess)\n guess.extend(bguess)\n guess.extend(cguess) #c \n \n # sets bounds\n # a's = anything , b's negative\n # sets lower bound\n b_lower = []\n alower = [-upper_a]*(i+1)\n blower = [-np.inf]*(i+1)\n clower = [0]*(i+1)\n \n b_lower.extend(alower)\n b_lower.extend(blower)\n b_lower.extend(clower)\n \n # sets higher bound\n b_higher = []\n ahigher = [upper_a]*(i+1)\n 
#bhigher = [np.inf]*(i+1)\n bhigher = [0.1]*(i+1)\n chigher = [np.inf]*(i+1)\n \n b_higher.extend(ahigher)\n b_higher.extend(bhigher)\n b_higher.extend(chigher)\n \n param_bounds = (b_lower, b_higher)\n \n p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func(x, i+1, \\\n params_0), tlist_local, ans, p0=guess, sigma=[0.1 for t in tlist_local], bounds = param_bounds, maxfev = 100000000)\n popt.append(p1)\n pcov.append(p2)\n print(i+1)\n return popt\n# print(popt)\n\n\n\nk = 3\npopt1 = fitterR(corrRana, tlist2, k)\nfor i in range(k):\n y = checker(tlist2, popt1[i],i+1)\n plt.plot(tlist2, corrRana, tlist2, y)\n \n plt.show()\n \n\n\n#y = checker(tlist3, popt1[k-1],k)\n#plt.plot(tlist3, real(ctlong), tlist3, y)\n\n#plt.show()\n\n\n#######\n#Imag part \n\n\n\ndef fit_func2(x, a, b, c, N):\n tot = 0\n for i in range(N):\n # print(i)\n tot += a[i]*np.exp(b[i]*x)*np.sin(c[i]*x)\n cost = 0.\n for i in range(N):\n # print(i)\n cost += (corrIana[0]-a[i])\n \n \n tot+=0*cost\n \n return tot \n# actual fitting function\n\n\ndef wrapper_fit_func2(x, N, *args):\n a, b, c = list(args[0][:N]), list(args[0][N:2*N]), list(args[0][2*N:3*N])\n # print(\"debug\")\n return fit_func2(x, a, b, c, N)\n\n# function that evaluates values with fitted params at\n# given inputs\ndef checker2(tlist_local, vals, N):\n y = []\n for i in tlist_local:\n # print(i)\n \n y.append(wrapper_fit_func2(i, N, vals))\n return y\n\n \n \ndef fitterI(ans, tlist_local, k):\n # the actual computing of fit\n popt = []\n pcov = [] \n # tries to fit for k exponents\n for i in range(k):\n #params_0 = [0]*(2*(i+1))\n params_0 = [0.]*(3*(i+1))\n upper_a = abs(max(ans, key = abs))*5\n #sets initial guess\n guess = []\n #aguess = [ans[0]]*(i+1)#[max(ans)]*(i+1)\n aguess = [-abs(max(ans, key = abs))]*(i+1)\n bguess = [-2]*(i+1)\n cguess = [1]*(i+1)\n \n guess.extend(aguess)\n guess.extend(bguess)\n guess.extend(cguess) #c \n \n # sets bounds\n # a's = anything , b's negative\n # sets lower bound\n b_lower = []\n alower 
= [-upper_a]*(i+1)\n blower = [-100]*(i+1)\n clower = [0]*(i+1)\n \n b_lower.extend(alower)\n b_lower.extend(blower)\n b_lower.extend(clower)\n \n # sets higher bound\n b_higher = []\n ahigher = [upper_a]*(i+1)\n bhigher = [0.01]*(i+1) \n chigher = [100]*(i+1)\n\n b_higher.extend(ahigher)\n b_higher.extend(bhigher)\n b_higher.extend(chigher)\n \n param_bounds = (b_lower, b_higher)\n \n p1, p2 = curve_fit(lambda x, *params_0: wrapper_fit_func2(x, i+1, \\\n params_0), tlist_local, ans, p0=guess, sigma=[0.0001 for t in tlist_local], bounds = param_bounds, maxfev = 100000000)\n popt.append(p1)\n pcov.append(p2)\n print(i+1)\n return popt\n# print(popt)\n\nk1 = 3\npopt2 = fitterI(corrIana, tlist2, k1)\nfor i in range(k1):\n y = checker2(tlist2, popt2[i], i+1)\n plt.plot(tlist2, corrIana, tlist2, y)\n plt.show() \n \n#tlist3 = linspace(0,1,1000)\n\n#y = checker(tlist3, popt2[k-1],k)\n#plt.plot(tlist3, imag(ctlong), tlist3, y)\n\n#plt.show()", "1\n2\n3\n" ], [ "#ckAR1 = list(popt1[k-1])[:len(list(popt1[k-1]))//2]\nckAR1 = list(popt1[k-1])[:k]\n#0.5 from cosine\nckAR = [0.5*x+0j for x in ckAR1]\n\n#dress with exp(id)\n\n#for kk in range(k):\n# ckAR[kk] = ckAR[kk]*exp(1.0j*list(popt1[k-1])[3*k+kk])\n\nckAR.extend(conjugate(ckAR)) #just directly double\n\n\n# vkAR, vkAI\nvkAR1 = list(popt1[k-1])[k:2*k] #damping terms\nwkAR1 = list(popt1[k-1])[2*k:3*k] #oscillating term\nvkAR = [-x-1.0j*wkAR1[kk] for kk, x in enumerate(vkAR1)] #combine\nvkAR.extend([-x+1.0j*wkAR1[kk] for kk, x in enumerate(vkAR1)]) #double\n\n\nprint(ckAR)\nprint(vkAR)\n\n", "[(0.11144410593349063+0j), (1.1220870984713296+0j), (-0.46905439824964296+0j), (0.11144410593349063-0j), (1.1220870984713296-0j), (-0.46905439824964296-0j)]\n[(0.34246550478406607-1.3300052634275397e-20j), (2.217530091540586-2.464176361865918e-14j), (4.925330410074036-3.8813583110031855j), (0.34246550478406607+1.3300052634275397e-20j), (2.217530091540586+2.464176361865918e-14j), (4.925330410074036+3.8813583110031855j)]\n" ], [ "#ckAR1 = 
list(popt1[k-1])[:len(list(popt1[k-1]))//2]\nckAI1 = list(popt2[k1-1])[:k1]\n#0.5 from cosine\nckAI = [-1.0j*0.5*x for x in ckAI1]\n\n#dress with exp(id)\n\n#for kk in range(k1):\n# ckAI[kk] = ckAI[kk]*exp(1.0j*list(popt2[k1-1])[3*k1+kk])\n\nckAI.extend(conjugate(ckAI)) #just directly double\n\n\n# vkAR, vkAI\nvkAI1 = list(popt2[k1-1])[k1:2*k1] #damping terms\nwkAI1 = list(popt2[k1-1])[2*k1:3*k1] #oscillating term\nvkAI = [-x-1.0j*wkAI1[kk] for kk, x in enumerate(vkAI1)] #combine\nvkAI.extend([-x+1.0j*wkAI1[kk] for kk, x in enumerate(vkAI1)]) #double\n\n\n\nprint(ckAI)\nprint(vkAI)", "[0.42951890967132456j, 1.6563481683820898j, 0.15377742510150993j, -0.42951890967132456j, -1.6563481683820898j, -0.15377742510150993j]\n[(1.0930279223636246-1.3691370731372476j), (0.9959034288214084-0.16655390928549837j), (1.186330164110597-2.696687419426089j), (1.0930279223636246+1.3691370731372476j), (0.9959034288214084+0.16655390928549837j), (1.186330164110597+2.696687419426089j)]\n" ], [ "#check the spectrum of the fit\n\ndef spectrum_matsubara_approx(w, ck, vk):\n \"\"\"\n Calculates the approximate Matsubara correlation spectrum\n from ck and vk.\n\n Parameters\n ==========\n\n w: np.ndarray\n A 1D numpy array of frequencies.\n\n ck: float\n The coefficient of the exponential function.\n\n vk: float\n The frequency of the exponential function.\n \"\"\"\n return ck*2*(vk)/(w**2 + vk**2)\n\ndef spectrum_approx(w, ck,vk):\n \"\"\"\n Calculates the approximate non Matsubara correlation spectrum\n from the bath parameters.\n\n Parameters\n ==========\n w: np.ndarray\n A 1D numpy array of frequencies.\n\n coup_strength: float\n The coupling strength parameter.\n\n bath_broad: float\n A parameter characterizing the FWHM of the spectral density, i.e.,\n the bath broadening.\n\n bath_freq: float\n The bath frequency.\n \"\"\"\n sw = []\n for kk,ckk in enumerate(ck):\n \n #sw.append((ckk*(real(vk[kk]))/((w-imag(vk[kk]))**2+(real(vk[kk])**2))))\n 
sw.append((ckk*(real(vk[kk]))/((w-imag(vk[kk]))**2+(real(vk[kk])**2))))\n return sw\n\n", "_____no_output_____" ], [ "\nfrom cycler import cycler\n\n\nwlist2 = np.linspace(-7,7 , 50000)\n\n\n\ns1 = [w * alpha * e**(-abs(w)/wc) * ((1/(e**(w/T)-1))+1) for w in wlist2]\ns2 = spectrum_approx(wlist2,ckAR,vkAR)\ns2.extend(spectrum_approx(wlist2,[1.0j*ckk for ckk in ckAI],vkAI))\n\n#s2 = spectrum_approx(wlist2,ckAI,vkAI)\nprint(len(s2))\ns2sum = [0. for w in wlist2]\nfor s22 in s2:\n for kk,ww in enumerate(wlist2):\n s2sum[kk] += s22[kk]\n\n\nfig = plt.figure(figsize=(12,10))\ngrid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3)\n\ndefault_cycler = (cycler(color=['r', 'g', 'b', 'y','c','m','k']) +\n cycler(linestyle=['-', '--', ':', '-.',(0, (1, 10)), (0, (5, 10)),(0, (3, 10, 1, 10))]))\nplt.rc('axes',prop_cycle=default_cycler )\n\naxes1 = fig.add_subplot(grid[0,0])\naxes1.set_yticks([0.,1.])\naxes1.set_yticklabels([0,1]) \n\ny = checker(tlist2, popt1[2], 3)\naxes1.plot(tlist2, corrRana,'r',linewidth=3,label=\"Original\")\naxes1.plot(tlist2, y,'g',dashes=[3,3],linewidth=3,label=\"Fit $k_R = 3$\")\naxes1.legend(loc=0)\n\naxes1.set_ylabel(r'$C_R(t)$',fontsize=28)\n\naxes1.set_xlabel(r'$t\\;\\omega_c$',fontsize=28)\naxes1.locator_params(axis='y', nbins=3)\naxes1.locator_params(axis='x', nbins=3)\naxes1.text(2.5,0.5,\"(a)\",fontsize=28)\n\naxes2 = fig.add_subplot(grid[0,1])\ny = checker2(tlist2, popt2[2], 3)\naxes2.plot(tlist2, corrIana,'r',linewidth=3,label=\"Original\")\naxes2.plot(tlist2, y,'g',dashes=[3,3],linewidth=3,label=\"Fit $k_I = 3$\")\naxes2.legend(loc=0)\naxes2.set_yticks([0.,-0.4])\naxes2.set_yticklabels([0,-0.4]) \n\naxes2.set_ylabel(r'$C_I(t)$',fontsize=28)\n\naxes2.set_xlabel(r'$t\\;\\omega_c$',fontsize=28)\naxes2.locator_params(axis='y', nbins=3)\naxes2.locator_params(axis='x', nbins=3)\naxes2.text(12.5,-0.1,\"(b)\",fontsize=28)\n\n\naxes3 = fig.add_subplot(grid[1,0:])\naxes3.plot(wlist2, s1, 'r',linewidth=3,label=\"$S(\\omega)$ original\")\naxes3.plot(wlist2, 
real(s2sum), 'g',dashes=[3,3],linewidth=3, label=\"$S(\\omega)$ reconstruction\")\n\naxes3.set_yticks([0.,1.])\naxes3.set_yticklabels([0,1]) \n\naxes3.set_xlim(-5,5)\n\naxes3.set_ylabel(r'$S(\\omega)$',fontsize=28)\n\naxes3.set_xlabel(r'$\\omega/\\omega_c$',fontsize=28)\naxes3.locator_params(axis='y', nbins=3)\naxes3.locator_params(axis='x', nbins=3)\naxes3.legend(loc=1)\naxes3.text(-4,1.5,\"(c)\",fontsize=28)\n\nfig.savefig(\"figures/figFitCspec.pdf\")\n", "12\n" ], [ "\nQ2 = []\n\nNR = len(ckAR)\nNI = len(ckAI)\n\nQ2.extend([ sigmaz() for kk in range(NR)])\nQ2.extend([ sigmaz() for kk in range(NI)])\noptions = Options(nsteps=15000, store_states=True, rtol=1e-14, atol=1e-14)", "_____no_output_____" ], [ "\n\nNC = 11\n\n#Q2 = [Q for kk in range(NR+NI)]\n#print(Q2)\noptions = Options(nsteps=1500, store_states=True, rtol=1e-12, atol=1e-12, method=\"bdf\") \nimport time\n\nstart = time.time()\n\n#HEOMFit = BosonicHEOMSolver(Hsys, Q2, ckAR2, ckAI2, vkAR2, vkAI2, NC, options=options)\nHEOMFitC = BosonicHEOMSolver(Hsys, Q2, ckAR, ckAI, vkAR, vkAI, NC, options=options)\nprint(\"hello\")\nend = time.time()\nprint(end - start)", "hello\n470.1895263195038\n" ], [ "\ntlist4 = np.linspace(0, 30*pi/Del, 600)\nrho0 = basis(2,0) * basis(2,0).dag() \n\n\nimport time\n\nstart = time.time()\nresultFit = HEOMFitC.run(rho0, tlist4)\nprint(\"hello\")\nend = time.time()\nprint(end - start)", "_____no_output_____" ], [ "# Define some operators with which we will measure the system\n# 1,1 element of density matrix - corresonding to groundstate\nP11p=basis(2,0) * basis(2,0).dag()\nP22p=basis(2,1) * basis(2,1).dag()\n# 1,2 element of density matrix - corresonding to coherence\nP12p=basis(2,0) * basis(2,1).dag()\n# Calculate expectation values in the bases\nP11expC11k33L = expect(resultFit.states, P11p)\nP22expC11k33L = expect(resultFit.states, P22p)\nP12expC11k33L = expect(resultFit.states, P12p)\n", "_____no_output_____" ], [ 
"qsave(P11expC11k33L,'P11expC12k33L')\nqsave(P11exp11K4NK1TL,'P11exp11K4NK1TL')\nqsave(P11exp11K3NK1TL,'P11exp11K3NK1TL')\nqsave(P11exp11K3NK2TL,'P11exp11K3NK2TL')", "_____no_output_____" ], [ "P11expC11k33L=qload('data/P11expC12k33L')\nP11exp11K4NK1TL=qload('data/P11exp11K4NK1TL')\nP11exp11K3NK1TL=qload('data/P11exp11K3NK1TL')\nP11exp11K3NK2TL=qload('data/P11exp11K3NK2TL')", "Loaded ndarray object.\nLoaded ndarray object.\nLoaded ndarray object.\nLoaded ndarray object.\n" ], [ "matplotlib.rcParams['figure.figsize'] = (7, 5)\nmatplotlib.rcParams['axes.titlesize'] = 25\nmatplotlib.rcParams['axes.labelsize'] = 30\nmatplotlib.rcParams['xtick.labelsize'] = 28\nmatplotlib.rcParams['ytick.labelsize'] = 28\nmatplotlib.rcParams['legend.fontsize'] = 28\nmatplotlib.rcParams['axes.grid'] = False\nmatplotlib.rcParams['savefig.bbox'] = 'tight'\nmatplotlib.rcParams['lines.markersize'] = 5\nmatplotlib.rcParams['font.family'] = 'STIXgeneral' \nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\nmatplotlib.rcParams[\"font.serif\"] = \"STIX\"\nmatplotlib.rcParams['text.usetex'] = False", "_____no_output_____" ], [ "tlist4 = np.linspace(0, 4*pi/Del, 600)\n# Plot the results\nfig, axes = plt.subplots(2, 1, sharex=True, figsize=(12,15))\naxes[0].set_yticks([0.6,0.8,1])\naxes[0].set_yticklabels([0.6,0.8,1]) \naxes[0].plot(tlist4, np.real(P11expC11k33L), 'y', linewidth=2, label=\"Correlation Function Fit $k_R=k_I=3$\")\n\naxes[0].plot(tlist4, np.real(P11exp11K3NK1TL), 'b-.', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $N_k=1$ & Terminator\")\naxes[0].plot(tlist4, np.real(P11exp11K3NK2TL), 'r--', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $N_k=2$ & Terminator\")\naxes[0].plot(tlist4, np.real(P11exp11K4NK1TL), 'g--', linewidth=2, label=\"Spectral Density Fit $k_J=4$, $N_k=1$ & Terminator\")\naxes[0].set_ylabel(r'$\\rho_{11}$',fontsize=30)\n\naxes[0].set_xlabel(r'$t\\;\\omega_c$',fontsize=30)\naxes[0].locator_params(axis='y', nbins=3)\naxes[0].locator_params(axis='x', 
nbins=3)\naxes[0].legend(loc=0, fontsize=25)\n\naxes[1].set_yticks([0,0.01])\naxes[1].set_yticklabels([0,0.01]) \n#axes[0].plot(tlist4, np.real(P11exp11K3NK1TL)-np.real(P11expC11k33L), 'b-.', linewidth=2, label=\"Correlation Function Fit $k_R=k_I=3$\")\n\naxes[1].plot(tlist4, np.real(P11exp11K3NK1TL)-np.real(P11expC11k33L), 'b-.', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $K=1$ & Terminator\")\n\naxes[1].plot(tlist4, np.real(P11exp11K3NK2TL)-np.real(P11expC11k33L), 'r--', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $K=2$ & Terminator\")\naxes[1].plot(tlist4, np.real(P11exp11K4NK1TL)-np.real(P11expC11k33L), 'g--', linewidth=2, label=\"Spectral Density Fit $k_J=4$, $K=1$ & Terminator\")\naxes[1].set_ylabel(r'$\\rho_{11}$ difference',fontsize=30)\n\naxes[1].set_xlabel(r'$t\\;\\omega_c$',fontsize=30)\naxes[1].locator_params(axis='y', nbins=3)\naxes[1].locator_params(axis='x', nbins=3)\n#axes[1].legend(loc=0, fontsize=25)\n\n\n\nfig.savefig(\"figures/figFit.pdf\")", "_____no_output_____" ], [ "tlist4 = np.linspace(0, 4*pi/Del, 600)\n# Plot the results\nfig, axes = plt.subplots(1, 1, sharex=True, figsize=(12,5))\n\naxes.plot(tlist4, np.real(P12expC11k33L), 'y', linewidth=2, label=\"Correlation Function Fit $k_R=k_I=3$\")\n\naxes.plot(tlist4, np.real(P12exp11K3NK1TL), 'b-.', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $K=1$ & Terminator\")\naxes.plot(tlist4, np.real(P12exp11K3NK2TL), 'r--', linewidth=2, label=\"Spectral Density Fit $k_J=3$, $K=1$ & Terminator\")\naxes.plot(tlist4, np.real(P12exp11K4NK1TL), 'g--', linewidth=2, label=\"Spectral Density Fit $k_J=4$, $K=1$ & Terminator\")\naxes.set_ylabel(r'$\\rho_{12}$',fontsize=28)\n\naxes.set_xlabel(r'$t\\;\\omega_c$',fontsize=28)\naxes.locator_params(axis='y', nbins=6)\naxes.locator_params(axis='x', nbins=6)\naxes.legend(loc=0)\n", "_____no_output_____" ], [ "from qutip.ipynbtools import version_table\n\nversion_table()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb2210845b610952cce10bf41bb4a083d1ce4bb7
2,151
ipynb
Jupyter Notebook
matplotlib/gallery_jupyter/axes_grid1/demo_colorbar_of_inset_axes.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
matplotlib/gallery_jupyter/axes_grid1/demo_colorbar_of_inset_axes.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
matplotlib/gallery_jupyter/axes_grid1/demo_colorbar_of_inset_axes.ipynb
kingreatwill/penter
2d027fd2ae639ac45149659a410042fe76b9dab0
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
39.833333
1,140
0.530451
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Demo Colorbar of Inset Axes\n\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes\n\n\ndef get_demo_image():\n from matplotlib.cbook import get_sample_data\n import numpy as np\n f = get_sample_data(\"axes_grid/bivariate_normal.npy\", asfileobj=False)\n z = np.load(f)\n # z is a numpy array of 15x15\n return z, (-3, 4, -4, 3)\n\n\nfig, ax = plt.subplots(figsize=[5, 4])\n\nZ, extent = get_demo_image()\n\nax.set(aspect=1,\n xlim=(-15, 15),\n ylim=(-20, 5))\n\n\naxins = zoomed_inset_axes(ax, zoom=2, loc='upper left')\nim = axins.imshow(Z, extent=extent, interpolation=\"nearest\",\n origin=\"lower\")\n\nplt.xticks(visible=False)\nplt.yticks(visible=False)\n\n\n# colorbar\ncax = inset_axes(axins,\n width=\"5%\", # width = 10% of parent_bbox width\n height=\"100%\", # height : 50%\n loc='lower left',\n bbox_to_anchor=(1.05, 0., 1, 1),\n bbox_transform=axins.transAxes,\n borderpad=0,\n )\n\nfig.colorbar(im, cax=cax)\n\nplt.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
cb222266a5fddb6bf0096e7813356ca1a3eaecff
14,474
ipynb
Jupyter Notebook
genome_in_a_list.ipynb
25shmeckles/scripts
61f2fd82a9500176e2b0d89665fedb936fdd9bc8
[ "MIT" ]
null
null
null
genome_in_a_list.ipynb
25shmeckles/scripts
61f2fd82a9500176e2b0d89665fedb936fdd9bc8
[ "MIT" ]
null
null
null
genome_in_a_list.ipynb
25shmeckles/scripts
61f2fd82a9500176e2b0d89665fedb936fdd9bc8
[ "MIT" ]
1
2018-03-02T08:02:50.000Z
2018-03-02T08:02:50.000Z
25.348511
107
0.408318
[ [ [ "import random\ng = 10 # number of genes\nm = 10 # max length of a gene, a gene is a collection of integers \"v\"\nv = 100 # max value for \"v\"\ngenome = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)]", "_____no_output_____" ], [ "genome", "_____no_output_____" ], [ "def mutate(genome, t='deletion', p=0.1):\n \n if t == 'SNP':\n new_genome = []\n for gene in genome:\n pg = random.random()\n if pg <= p:\n i = random.randint(0,len(gene)-1)\n SNP = gene[:]\n SNP[i] = random.randint(0,100)\n new_genome.append(SNP)\n print('SNP generated! From {} to {}'.format(gene,SNP))\n else:\n new_genome.append(gene)\n return new_genome\n \n \n if t == 'deletion':\n new_genome = genome[:]\n for gene in genome:\n pg = random.random()\n if pg <= p:\n new_genome.remove(gene)\n print('Deleted {}'.format(gene))\n #print('Deleted {} gene/s'.format(len(genome)-len(new_genome)))\n return new_genome\n \n if t == 'inversion':\n new_genome = []\n for gene in genome:\n pg = random.random()\n if pg <= p:\n inverted = gene[::-1]\n new_genome.append(inverted)\n print('Inverted {} to {}'.format(gene,inverted))\n else:\n new_genome.append(gene)\n return new_genome\n \n if t == 'duplication':\n new_genome = []\n for gene in genome:\n pg = random.random()\n if pg <= p:\n duplicated = gene + gene\n new_genome.append(duplicated)\n print('Duplicated {} to {}'.format(gene,duplicated))\n else:\n new_genome.append(gene)\n return new_genome\n \n if t == 'all':\n all_ = ['deletion','inversion','duplication','SNP']\n new_genome = genome[:]\n for _ in all_:\n new_genome = mutate(new_genome, _, p)\n return new_genome\n \ndef translocate(gA,gB, p=0.1):\n \n gA_brks = []\n for _ in range(len(gA)):\n pg = random.random()\n if pg <= p:\n gA_brks.append(_)\n \n gB_brks = []\n for _ in range(len(gB)):\n pg = random.random()\n if pg <= p:\n gB_brks.append(_)\n \n if len(gA_brks) and len(gB_brks):\n iA = random.sample(gA_brks,1)[0]\n iB = random.sample(gB_brks,1)[0]\n print('Breaks 
generated in gA[{}] and gB[{}]'.format(iA,iB))\n derA = gA[:iA]+gB[iB:]\n derB = gB[:iB]+gB[iA:]\n print('Translocation done')\n return derA, derB\n \n print('Translocation failed!')\n return gA, gB", "_____no_output_____" ], [ "genome = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)]\ngenome", "_____no_output_____" ], [ "#SNP test\nSNP_genome = mutate(genome, t='SNP', p=0.5)\nSNP_genome", "7 4\nSNP generated! From [43, 93, 2, 57, 98, 19, 44] to [43, 93, 2, 57, 95, 19, 44]\n7 1\nSNP generated! From [87, 64, 40, 51, 6, 19, 18] to [87, 42, 40, 51, 6, 19, 18]\n4 1\nSNP generated! From [79, 20, 24, 68] to [79, 64, 24, 68]\n6 3\nSNP generated! From [20, 98, 0, 12, 43, 9] to [20, 98, 0, 21, 43, 9]\n2 1\nSNP generated! From [45, 64] to [45, 31]\n" ], [ "#Deletion test\ndel_genome = mutate(genome, t='deletion', p=0.1)\ndel_genome", "Deleted [90, 48, 24, 46, 22]\n" ], [ "#Inversion test\ninv_genome = mutate(genome, t='inversion', p=0.1)\ninv_genome", "_____no_output_____" ], [ "#Duplication test\ndup_genome = mutate(genome, t='duplication', p=0.1)\ndup_genome", "Duplicated [87, 64, 40, 51, 6, 19, 18] to [87, 64, 40, 51, 6, 19, 18, 87, 64, 40, 51, 6, 19, 18]\nDuplicated [90, 48, 24, 46, 22] to [90, 48, 24, 46, 22, 90, 48, 24, 46, 22]\nDuplicated [20, 98, 0, 12, 43, 9] to [20, 98, 0, 12, 43, 9, 20, 98, 0, 12, 43, 9]\n" ], [ "#All test\nall_genome = mutate(genome, t='all', p=0.1)\nall_genome", "Deleted [20, 98, 0, 12, 43, 9]\nDeleted [37, 33]\n1 0\nSNP generated! From [40] to [96]\n4 2\nSNP generated! From [79, 20, 24, 68] to [79, 20, 4, 68]\n9 3\nSNP generated! 
From [63, 83, 11, 86, 81, 28, 98, 19, 96] to [63, 83, 11, 70, 81, 28, 98, 19, 96]\n" ], [ "#Translocation test\ngA = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)]\ngB = [[random.randint(0,v) for _ in range(random.randint(0,m))] for _ in range(g)]", "_____no_output_____" ], [ "gA", "_____no_output_____" ], [ "gB", "_____no_output_____" ], [ "derA,derB = translocate(gA,gB,p=0.5)", "Breaks generated in gA[1] and gB[2]\nTranslocation done\n" ], [ "derA", "_____no_output_____" ], [ "derB", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb222cf2ba277f4a14e9bd18a307dbd9e5814cf7
9,257
ipynb
Jupyter Notebook
spark-training/spark-python/jupyter-advanced-pivoting/Pivoting - Skeleton.ipynb
Code360In/spark-code-examples
181c9906d32571ba6138e63040edfcb4c74ef4bf
[ "MIT" ]
null
null
null
spark-training/spark-python/jupyter-advanced-pivoting/Pivoting - Skeleton.ipynb
Code360In/spark-code-examples
181c9906d32571ba6138e63040edfcb4c74ef4bf
[ "MIT" ]
null
null
null
spark-training/spark-python/jupyter-advanced-pivoting/Pivoting - Skeleton.ipynb
Code360In/spark-code-examples
181c9906d32571ba6138e63040edfcb4c74ef4bf
[ "MIT" ]
null
null
null
29.110063
387
0.582478
[ [ [ "import pyspark.sql\nimport pyspark.sql.functions as sf", "_____no_output_____" ], [ "spark = pyspark.sql.SparkSession.Builder().getOrCreate()", "_____no_output_____" ] ], [ [ "# Watson Sales Product Sample Data\n\nIn this example, we want to have a look at the pivoting capabilities of Spark. Since pivoting is commonly used with sales data containing information for different product categories or countries, we will use a data set called \"Watson Sales Product Sample Data\" which was downloaded from https://www.ibm.com/communities/analytics/watson-analytics-blog/sales-products-sample-data/", "_____no_output_____" ], [ "# 1 Load and inspect data\n\nFirst we load the data, which is provided as a single CSV file, which again is well supported by Apache Spark", "_____no_output_____" ] ], [ [ "basedir = \"s3://dimajix-training/data\"", "_____no_output_____" ], [ "data = spark.read\\\n .option(\"header\", True) \\\n .option(\"inferSchema\", True) \\\n .csv(basedir + \"/watson-sales-products/WA_Sales_Products_2012-14.csv\")", "_____no_output_____" ] ], [ [ "### Inspect schema\n\nSince we used the existing header information and also let Spark infer appropriate data types, let us inspect the schema now.", "_____no_output_____" ] ], [ [ "data.printSchema()", "_____no_output_____" ] ], [ [ "### Inspect pivoting candidates\n\nNow let us find some good candidates for a pivoting column. 
A pivoting column shouldn't have too many distinct entries, otherwise the result probably doesn't make too much sense and doesn't help the business expert in interpretation.\n\nWe can either use\n```\ndata.select(\"Retailer type\").distinct().count()\n```\nwhich will give us the number of distinct values for a single column, or we can use the Spark aggregate function `countDistinct` which allows us to retrieve information for multiple columns within a single `select`.", "_____no_output_____" ] ], [ [ "result = data.select(\n sf.countDistinct(\"Retailer country\"),\n sf.countDistinct(\"Retailer type\"),\n sf.countDistinct(\"Product line\"),\n sf.countDistinct(\"Product type\"),\n sf.countDistinct(\"Quarter\")\n)\n\nresult.toPandas()", "_____no_output_____" ] ], [ [ "# 2 Pivoting by Product Line\n\nThe first example pivots by the product line, since there are only five different distinct values.", "_____no_output_____" ] ], [ [ "revenue_per_product_line = # YOUR CODE HERE\nrevenue_per_product_line.toPandas()", "_____no_output_____" ] ], [ [ "## 2.1 Exercise\n\nCraete an aggragated table with\n* Country and Product Line in Rows\n* The quantity for each quarter in different columns", "_____no_output_____" ] ], [ [ "# YOUR CODE HERE", "_____no_output_____" ] ], [ [ "# 3 Unpivoting again\n\nSometimes you just need the opposite operation: You have a data set in pivoted format and want to unpivot it. 
There is no simple built in function provided by Spark, but you can construct the unpivoted table as follows\n* For every pivoted column:\n * Project data frame onto non-pivot columns\n * Add a new column with an appropriate name containing the name of the pivot column as its value\n * Add a new column with an appropriate name containing the values of the pivot column\n* Union together all these data frames", "_____no_output_____" ], [ "## 3.1 Specific Example\n\nNow let us perform these steps for the pivoted table above", "_____no_output_____" ] ], [ [ "revenue_camping = revenue_per_product_line.select(\n # YOUR CODE HERE\n)\n\nrevenue_golf = revenue_per_product_line.select(\n sf.col(\"Quarter\"),\n sf.col(\"Retailer Country\"),\n sf.lit(\"Golf Equipment\").alias(\"Product line\"),\n sf.col(\"Golf Equipment\").alias(\"Revenue\")\n)\n\nrevenue_mountaineering = revenue_per_product_line.select(\n sf.col(\"Quarter\"),\n sf.col(\"Retailer Country\"),\n sf.lit(\"Mountaineering Equipment\").alias(\"Product line\"),\n sf.col(\"Mountaineering Equipment\").alias(\"Revenue\")\n)\n\nrevenue_outdoor = revenue_per_product_line.select(\n sf.col(\"Quarter\"),\n sf.col(\"Retailer Country\"),\n sf.lit(\"Outdoor Protection\").alias(\"Product line\"),\n sf.col(\"Outdoor Protection\").alias(\"Revenue\")\n)\n\nrevenue_personal = revenue_per_product_line.select(\n sf.col(\"Quarter\"),\n sf.col(\"Retailer Country\"),\n sf.lit(\"Personal Accessories\").alias(\"Product line\"),\n sf.col(\"Personal Accessories\").alias(\"Revenue\")\n)\n\nresult = # YOUR CODE HERE\n\nresult.limit(10).toPandas()", "_____no_output_____" ] ], [ [ "## 3.2 Generic Approach\n\nOf course manually unpivoting is somewhat tedious, but we already see a pattern:\n* Select all non-pivot columns\n* Create a new column containing the pivot column name\n* Create a new column containing the pivot column values\n* Union together everything\n\nThis can be done by writing some small Python functions as follows:", 
"_____no_output_____" ] ], [ [ "import functools\n\n\n# Unpivot a single column, thereby creating one data frame\ndef unpivot_column(df, other, pivot_column, pivot_value, result_column):\n columns = [df[c] for c in other] + \\\n [sf.lit(pivot_value).alias(pivot_column)] + \\\n [df[pivot_value].alias(result_column)]\n return df.select(*columns)\n\n# Unpivot multiple columns by using the above method\ndef unpivot(df, pivot_column, pivot_values, result_column):\n \"\"\"\n df - input data frame\n pivot_column - the name of the new column containg each pivot column name\n pivot_values - the list of pivoted column names\n result_column - the name of the column containing the values of the pivot columns\n \"\"\"\n common_columns = [f.name for f in df.schema.fields if not f.name in pivot_values]\n unpivot_dfs = [unpivot_column(df, common_columns, pivot_column, v, result_column) for v in pivot_values]\n return functools.reduce(lambda x,y: x.union(y), unpivot_dfs)", "_____no_output_____" ] ], [ [ "Let's test the function", "_____no_output_____" ] ], [ [ "product_lines = # YOUR CODE HERE\nresult_per_product_line = # YOUR CODE HERE\n\nresult_per_product_line.toPandas()", "_____no_output_____" ] ], [ [ "## 3.3 Exercise\n\nNow unpivot the result of exercise 2.1. You can do that either manually or try using the generic function defined above.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb222db031a2024b4e4036f1da61168217ec8cb6
10,268
ipynb
Jupyter Notebook
dev/94_notebook_test.ipynb
ctombumila37/fastai_dev
141bffce2cf9f8a019e51199a6363338b135a1a9
[ "Apache-2.0" ]
null
null
null
dev/94_notebook_test.ipynb
ctombumila37/fastai_dev
141bffce2cf9f8a019e51199a6363338b135a1a9
[ "Apache-2.0" ]
null
null
null
dev/94_notebook_test.ipynb
ctombumila37/fastai_dev
141bffce2cf9f8a019e51199a6363338b135a1a9
[ "Apache-2.0" ]
null
null
null
31.209726
113
0.55931
[ [ [ "# export\nfrom local.imports import *\nfrom local.notebook.core import *\nfrom local.notebook.export import *\nimport nbformat,inspect\nfrom nbformat.sign import NotebookNotary\nfrom nbconvert.preprocessors import ExecutePreprocessor\nfrom local.test import *\nfrom local.core import *", "_____no_output_____" ], [ "# default_exp notebook.test", "_____no_output_____" ] ], [ [ "# Extracting tests from notebooks\n\n> The functions that grab the cells containing tests (filtering with potential flags) and execute them", "_____no_output_____" ] ], [ [ "# export\n_re_all_flag = re.compile(\"\"\"\n# Matches any line with #all_something and catches that something in a group:\n^ # beginning of line (since re.MULTILINE is passed)\n\\s* # any number of whitespace\n\\#\\s* # # then any number of whitespace\nall_(\\S+) # all_ followed by a group with any non-whitespace chars\n\\s* # any number of whitespace\n$ # end of line (since re.MULTILINE is passed)\n\"\"\", re.IGNORECASE | re.MULTILINE | re.VERBOSE)", "_____no_output_____" ], [ "# export\ndef check_all_flag(cells):\n for cell in cells:\n if check_re(cell, _re_all_flag): return check_re(cell, _re_all_flag).groups()[0]", "_____no_output_____" ], [ "nb = read_nb(\"35_tutorial_wikitext.ipynb\")\ntest_eq(check_all_flag(nb['cells']), 'slow')\nnb = read_nb(\"91_notebook_export.ipynb\")\nassert check_all_flag(nb['cells']) is None", "_____no_output_____" ], [ "# export\n_re_flags = re.compile(\"\"\"\n# Matches any line with a test flad and catches it in a group:\n^ # beginning of line (since re.MULTILINE is passed)\n\\s* # any number of whitespace\n\\#\\s* # # then any number of whitespace\n(slow|cuda|cpp) # all test flags\n\\s* # any number of whitespace\n$ # end of line (since re.MULTILINE is passed)\n\"\"\", re.IGNORECASE | re.MULTILINE | re.VERBOSE)", "_____no_output_____" ], [ "# export\ndef get_cell_flags(cell):\n if cell['cell_type'] != 'code': return []\n return _re_flags.findall(cell['source'])", 
"_____no_output_____" ], [ "test_eq(get_cell_flags({'cell_type': 'code', 'source': \"#hide\\n# slow\\n\"}), ['slow'])\ntest_eq(get_cell_flags({'cell_type': 'code', 'source': \"#hide\\n# slow\\n # cuda\"}), ['slow', 'cuda'])\ntest_eq(get_cell_flags({'cell_type': 'markdown', 'source': \"#hide\\n# slow\\n # cuda\"}), [])\ntest_eq(get_cell_flags({'cell_type': 'code', 'source': \"#hide\\n\"}), [])", "_____no_output_____" ], [ "# export\ndef _add_import_cell(mod):\n \"Return an import cell for `mod`\"\n return {'cell_type': 'code',\n 'execution_count': None,\n 'metadata': {'hide_input': True},\n 'outputs': [],\n 'source': f\"\\nfrom local.{mod} import *\"}", "_____no_output_____" ], [ "# export\n_re_is_export = re.compile(r\"\"\"\n# Matches any text with #export or #exports flag:\n^ # beginning of line (since re.MULTILINE is passed)\n\\s* # any number of whitespace\n\\#\\s* # # then any number of whitespace\nexports? # export or exports\n\\s* # any number of whitespace\n\"\"\", re.IGNORECASE | re.MULTILINE | re.VERBOSE)", "_____no_output_____" ], [ "# export\n_re_has_import = re.compile(r\"\"\"\n# Matches any text with import statement:\n^ # beginning of line (since re.MULTILINE is passed)\n\\s* # any number of whitespace\nimport # # then any number of whitespace\n\\s+ \n|\n\\s*\nfrom\n\\s+\\S+\\s+\nimport\n\\s+\n\"\"\", re.IGNORECASE | re.MULTILINE | re.VERBOSE)", "_____no_output_____" ], [ "# export\nclass NoExportPreprocessor(ExecutePreprocessor):\n \"An `ExecutePreprocessor` that executes not exported cells\"\n @delegates(ExecutePreprocessor.__init__)\n def __init__(self, flags, **kwargs):\n self.flags = flags\n super().__init__(**kwargs)\n \n def preprocess_cell(self, cell, resources, index):\n if 'source' not in cell or cell['cell_type'] != \"code\": return cell, resources\n #if _re_is_export.search(cell['source']) and not _re_has_import.search(cell['source']): \n # return cell, resources\n for f in get_cell_flags(cell):\n if f not in self.flags: return cell, 
resources\n res = super().preprocess_cell(cell, resources, index)\n return res", "_____no_output_____" ], [ "# export\ndef test_nb(fn, flags=None):\n \"Execute `nb` (or only the `show_doc` cells) with `metadata`\"\n os.environ[\"IN_TEST\"] = '1'\n try:\n nb = read_nb(fn)\n all_flag = check_all_flag(nb['cells'])\n if all_flag is not None and all_flag not in L(flags): return\n mod = find_default_export(nb['cells'])\n #if mod is not None: nb['cells'].insert(0, _add_import_cell(mod))\n ep = NoExportPreprocessor(L(flags), timeout=600, kernel_name='python3')\n pnb = nbformat.from_dict(nb)\n ep.preprocess(pnb)\n except Exception as e: \n print(f\"Error in {fn}\")\n raise e\n finally: os.environ.pop(\"IN_TEST\")", "_____no_output_____" ], [ "test_nb(\"07_vision_core.ipynb\")", "_____no_output_____" ] ], [ [ "## Export-", "_____no_output_____" ] ], [ [ "#hide\nnotebook2script(all_fs=True)", "Converted 00_test.ipynb.\nConverted 01_core.ipynb.\nConverted 01a_dispatch.ipynb.\nConverted 01b_torch_core.ipynb.\nConverted 02_script.ipynb.\nConverted 03_dataloader.ipynb.\nConverted 04_transform.ipynb.\nConverted 05_data_core.ipynb.\nConverted 06_data_transforms.ipynb.\nConverted 07_vision_core.ipynb.\nConverted 08_pets_tutorial.ipynb.\nConverted 09_vision_augment.ipynb.\nConverted 10_data_block.ipynb.\nConverted 11_layers.ipynb.\nConverted 11a_vision_models_xresnet.ipynb.\nConverted 12_optimizer.ipynb.\nConverted 13_learner.ipynb.\nConverted 14_callback_schedule.ipynb.\nConverted 14a_callback_data.ipynb.\nConverted 15_callback_hook.ipynb.\nConverted 16_callback_progress.ipynb.\nConverted 17_callback_tracker.ipynb.\nConverted 18_callback_fp16.ipynb.\nConverted 19_callback_mixup.ipynb.\nConverted 20_metrics.ipynb.\nConverted 21_tutorial_imagenette.ipynb.\nConverted 22_vision_learner.ipynb.\nConverted 23_tutorial_transfer_learning.ipynb.\nConverted 30_text_core.ipynb.\nConverted 31_text_data.ipynb.\nConverted 32_text_models_awdlstm.ipynb.\nConverted 
33_text_models_core.ipynb.\nConverted 34_callback_rnn.ipynb.\nConverted 35_tutorial_wikitext.ipynb.\nConverted 36_text_models_qrnn.ipynb.\nConverted 37_text_learner.ipynb.\nConverted 38_tutorial_ulmfit.ipynb.\nConverted 40_tabular_core.ipynb.\nConverted 41_tabular_model.ipynb.\nConverted 42_tabular_rapids.ipynb.\nThis cell doesn't have an export destination and was ignored:\ne\nConverted 50_data_block_examples.ipynb.\nConverted 90_notebook_core.ipynb.\nConverted 91_notebook_export.ipynb.\nConverted 92_notebook_showdoc.ipynb.\nConverted 93_notebook_export2html.ipynb.\nConverted 94_notebook_test.ipynb.\nConverted 95_index.ipynb.\nConverted 96_data_external.ipynb.\nConverted 97_utils_test.ipynb.\nConverted notebook2jekyll.ipynb.\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb2231a248104e0a68f05909d856eeb0c6168865
56,911
ipynb
Jupyter Notebook
notebooks/measure_n_steps.ipynb
robertjankowski/social-media-influence-on-covid-pandemic
1b04aa4aa88d4788fdfa023eb21b1f00b16f0110
[ "MIT" ]
null
null
null
notebooks/measure_n_steps.ipynb
robertjankowski/social-media-influence-on-covid-pandemic
1b04aa4aa88d4788fdfa023eb21b1f00b16f0110
[ "MIT" ]
null
null
null
notebooks/measure_n_steps.ipynb
robertjankowski/social-media-influence-on-covid-pandemic
1b04aa4aa88d4788fdfa023eb21b1f00b16f0110
[ "MIT" ]
null
null
null
247.43913
50,323
0.910861
[ [ [ "from scripts.constants import SimulationConstants\nfrom scripts.epidemic_metrics import dead_ratio, infected_ratio\n%load_ext autoreload\n%autoreload 2\n\nfrom scripts.simulation import init_run_simulation\nimport scripts.visualization as viz\n\nviz.load_matplotlib()\nplt = viz.plt", "_____no_output_____" ], [ "contants = SimulationConstants()\nmetrics = {'dead_ratio': ('l1_layer', dead_ratio),\n 'infected_ratio': ('l1_layer', infected_ratio)}\n\ncontants.n_agents = 100\ncontants.n_steps = 20000\n\nout1, _, _ = init_run_simulation(contants.n_agents,\n contants.n_additional_virtual_links,\n contants.init_infection_fraction,\n contants.init_aware_fraction,\n contants.n_steps,\n contants.l1_params,\n contants.l2_params,\n contants.l2_voter_params,\n contants.l2_social_media_params,\n metrics)", "_____no_output_____" ], [ "contants.n_agents = 1000\ncontants.n_steps = 20000\n\nout2, _, _ = init_run_simulation(contants.n_agents,\n contants.n_additional_virtual_links,\n contants.init_infection_fraction,\n contants.init_aware_fraction,\n contants.n_steps,\n contants.l1_params,\n contants.l2_params,\n contants.l2_voter_params,\n contants.l2_social_media_params,\n metrics)\n", "_____no_output_____" ], [ "%%time\n\ncontants.n_agents = 5000\ncontants.n_steps = 100000 # ~100k is ok -- around 3 minutes\n\nout3, _, _ = init_run_simulation(contants.n_agents,\n contants.n_additional_virtual_links,\n contants.init_infection_fraction,\n contants.init_aware_fraction,\n contants.n_steps,\n contants.l1_params,\n contants.l2_params,\n contants.l2_voter_params,\n contants.l2_social_media_params,\n metrics)", "CPU times: user 3min 22s, sys: 11.8 ms, total: 3min 22s\nWall time: 3min 22s\n" ], [ "%%time\n\ncontants.n_agents = 10000\ncontants.n_steps = 150000\n\n# around 12 min -- single realisation for N_STEPS = 150k\nout4, _, _ = init_run_simulation(contants.n_agents,\n contants.n_additional_virtual_links,\n contants.init_infection_fraction,\n contants.init_aware_fraction,\n 
contants.n_steps,\n contants.l1_params,\n contants.l2_params,\n contants.l2_voter_params,\n contants.l2_social_media_params,\n metrics)", "CPU times: user 11min 36s, sys: 196 ms, total: 11min 36s\nWall time: 11min 36s\n" ], [ "plt.grid(alpha=0.1)\n\nplt.plot(out1['dead_ratio'], color='black', linewidth=3, label='dead ratio (N=100)')\nplt.plot(out1['infected_ratio'], color='red', linewidth=3, label='infected ratio (N=100)')\n\nplt.plot(out2['dead_ratio'], color='blue', linewidth=3, label='dead ratio (N=1000)')\nplt.plot(out2['infected_ratio'], color='green', linewidth=3, label='infected ratio (N=1000)')\n\nplt.plot(out3['dead_ratio'], color='brown', linewidth=3, label='dead ratio (N=5000)')\nplt.plot(out3['infected_ratio'], color='purple', linewidth=3, label='infected ratio (N=5000)')\n\nplt.plot(out4['dead_ratio'], color='orange', linewidth=3, label='dead ratio (N=10000)')\nplt.plot(out4['infected_ratio'], color='violet', linewidth=3, label='infected ratio (N=10000)')\n\nplt.xlabel('time')\nplt.legend(loc='center right', borderaxespad=0.1, fontsize=12, ncol=1,\n labelspacing=0.02, fancybox=False, columnspacing=0.3)\n# plt.savefig(\"../plots/measure_n_steps_n_agents.pdf\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb22392fef7348d7694dbff9287349dd3eb925be
14,820
ipynb
Jupyter Notebook
ps/ps2.ipynb
minyoungrho/Econometrics2
96142dd9b1db14ddf61f57408ed7505a951135f9
[ "MIT" ]
null
null
null
ps/ps2.ipynb
minyoungrho/Econometrics2
96142dd9b1db14ddf61f57408ed7505a951135f9
[ "MIT" ]
null
null
null
ps/ps2.ipynb
minyoungrho/Econometrics2
96142dd9b1db14ddf61f57408ed7505a951135f9
[ "MIT" ]
2
2021-04-21T09:25:38.000Z
2021-04-26T08:05:15.000Z
30
890
0.517814
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb223b07fceb56a0e2cb27233fa92ac81f50b39f
777,974
ipynb
Jupyter Notebook
image-classifier-project/jupyter-notebook/Image Classifier Project.ipynb
kimcrab/udacity-image-classifer-part-one
78229e8521c89a22699457931550ae960b27d765
[ "MIT" ]
null
null
null
image-classifier-project/jupyter-notebook/Image Classifier Project.ipynb
kimcrab/udacity-image-classifer-part-one
78229e8521c89a22699457931550ae960b27d765
[ "MIT" ]
null
null
null
image-classifier-project/jupyter-notebook/Image Classifier Project.ipynb
kimcrab/udacity-image-classifer-part-one
78229e8521c89a22699457931550ae960b27d765
[ "MIT" ]
null
null
null
862.498891
373,912
0.946183
[ [ [ "# Developing an AI application\n\nGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. \n\nIn this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. \n\n<img src='assets/Flowers.png' width=500px>\n\nThe project is broken down into multiple steps:\n\n* Load and preprocess the image dataset\n* Train the image classifier on your dataset\n* Use the trained classifier to predict image content\n\nWe'll lead you through each part which you'll implement in Python.\n\nWhen you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.\n\nFirst up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. 
As you work through this notebook and find you need to import a package, make sure to add the import up here.", "_____no_output_____" ], [ "## How to apply Deep Learning algorithm \n\n### 1. Exploring the Data\n* Use head, describe, shape functions.\n* Define your transforms for the training, validation, and testing sets.\n\n### 2. Building and training the classifier\n* Load a pre-trained network.\n* Define a new, untrained feed-forward network as a classifier, using activations and dropout.\n* Train the classifier layers using backpropagation using the pre-trained network to get the features.\n* Track the loss and accuracy on the validation set to determine the best hyperparameters.\n\n### 3. Testing your network\n* Do validation on the test set and plot the graph.\n\n### 4. Save the checkpoint\n* Save the model so you can load it later for making predictions.\n\n### 5. Loading the checkpoint\n* You can come back to this project and keep working on it without having to retrain the network.\n\n### 6. Class Prediction\n* What are the strengths of the model; when does it perform well?\n* What are the weaknesses of the model; when does it perform poorly?\n* What makes this model a good candidate for the problem, given what you know about the data?\n\n### 7. Train and compare predicted results between models\n* Compare time on testing set\n* Compare accuracy score on testing set\n* Compare F-score on testing set\n* Compare results based on the size of testing set\n\n### 8. Improve results by tuning model\n* Use grid search to find best parameters\n\n### 9. 
Check feature importance\n* Use feature_importance_ attribute\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport os, random\nimport time\n\nfrom workspace_utils import active_session\nimport torchvision\nfrom torchvision import transforms, datasets, models\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n# allow to plot graph on jupyter notebook \n%matplotlib inline\n#higher resolution\n%config InlineBackend.figure_format = 'retina'\n", "_____no_output_____" ] ], [ [ "## Load the data\n\nHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.\n\nThe validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.\n\nThe pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. 
These values will shift each color channel to be centered at 0 and range from -1 to 1.\n ", "_____no_output_____" ] ], [ [ "data_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'", "_____no_output_____" ], [ "# TODO: Define your transforms for the training, validation, and testing sets\n# Image transformations\nimage_transforms = {\n 'train':\n transforms.Compose([\n # use data augmentation on training sets\n transforms.RandomResizedCrop(224), \n transforms.RandomRotation(degrees=30), \n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]) # Imagenet standards\n ]),\n # no augmentation on validation and test sets\n 'valid':\n transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'test':\n transforms.Compose([\n transforms.Resize(size=256),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n\n}\n\ndata = {\n 'train':\n datasets.ImageFolder(root=train_dir, transform=image_transforms['train']),\n 'valid':\n datasets.ImageFolder(root=valid_dir, transform=image_transforms['valid']),\n 'test':\n datasets.ImageFolder(root=test_dir, transform=image_transforms['test']),\n}\n\n\ndataloaders = {\n 'train': DataLoader(data['train'], batch_size=32, shuffle=True),\n 'val': DataLoader(data['valid'], batch_size=32, shuffle=True),\n 'test': DataLoader(data['test'], batch_size=32, shuffle=True)\n}\n", "_____no_output_____" ] ], [ [ "### Label mapping\n\nYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). 
This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.", "_____no_output_____" ] ], [ [ "import json\n\nwith open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)", "_____no_output_____" ] ], [ [ "# Building and training the classifier\n\nNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.\n\nWe're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:\n\n* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)\n* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout\n* Train the classifier layers using backpropagation using the pre-trained network to get the features\n* Track the loss and accuracy on the validation set to determine the best hyperparameters\n\nWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!\n\nWhen training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. 
Save those hyperparameters to use as default values in the next part of the project.\n\nOne last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to\nGPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.\n\n**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.", "_____no_output_____" ] ], [ [ "# TODO: Build and train your network\n# Check GPU availability\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = models.vgg16(pretrained=True)\n\n# Freeze the parameters of densenet\nfor param in model.parameters():\n param.requires_grad = False\n\n\n# Custom Classifier\nclassifier = nn.Sequential(\n nn.Linear(25088, 4096, bias=True),\n nn.ReLU(),\n nn.Dropout(),\n nn.Linear(4096, 102, bias=True),\n nn.LogSoftmax(dim=1)\n)\n\nmodel.classifier = classifier\n\ncriterion = nn.NLLLoss()\n# Only train the classifier parameters, feature parameters are frozen\noptimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\n\n\n# Move to GPU or CPU mode\nmodel = model.to(device)\n", "_____no_output_____" ], [ "with active_session():\n epochs = 3\n steps = 0\n running_loss = 0\n print_every = 10\n\n train_losses, validation_losses = [], []\n\n for epoch in range(epochs):\n for inputs, labels in dataloaders['train']:\n steps += 1\n # Move inputs and labels to the GPU/CPU\n inputs, labels = inputs.to(device), labels.to(device)\n\n # Set gradient to zero so that you do the parameter update correctly. 
\n # Else the gradient would point in some other direction than the intended direction towards the minimum \n optimizer.zero_grad()\n\n # Propagation\n logps = model.forward(inputs)\n \n # Calculate loss\n loss = criterion(logps, labels)\n \n # Backpropagation\n loss.backward()\n \n # Update parameters based on the current gradient\n optimizer.step()\n \n running_loss += loss.item()\n\n # Print current error and accuracy in every n time\n if steps % print_every == 0:\n val_loss = 0\n val_accuracy = 0\n # Set to evaluation mode\n model.eval()\n # Deactivate autograde engine to save memory and time\n with torch.no_grad():\n for inputs, labels in dataloaders['val']:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n batch_loss = criterion(logps, labels)\n\n val_loss += batch_loss.item()\n\n # Since our model outputs a LogSoftmax, find the real \n # percentages by reversing the log function\n ps = torch.exp(logps)\n \n # Get the top class and probability\n top_p, top_class = ps.topk(1, dim=1)\n \n # Check correct classes\n equals = top_class == labels.view(*top_class.shape)\n \n val_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n train_losses.append(running_loss/len(dataloaders['train']))\n validation_losses.append(val_loss/len(dataloaders['val']))\n\n print(f\"Epoch {epoch+1}/{epochs}.. \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Validation loss: {val_loss/len(dataloaders['val']):.3f}.. \"\n f\"Validation accuracy: {val_accuracy/len(dataloaders['val']):.3f}\")\n running_loss = 0\n # Set to train mode \n model.train()", "Epoch 1/3.. Train loss: 12.929.. Validation loss: 8.427.. Validation accuracy: 0.088\nEpoch 1/3.. Train loss: 7.293.. Validation loss: 4.337.. Validation accuracy: 0.152\nEpoch 1/3.. Train loss: 4.418.. Validation loss: 3.723.. Validation accuracy: 0.203\nEpoch 1/3.. Train loss: 4.033.. Validation loss: 3.395.. Validation accuracy: 0.248\nEpoch 1/3.. Train loss: 3.780.. 
Validation loss: 3.015.. Validation accuracy: 0.325\nEpoch 1/3.. Train loss: 3.521.. Validation loss: 2.635.. Validation accuracy: 0.375\nEpoch 1/3.. Train loss: 3.522.. Validation loss: 2.629.. Validation accuracy: 0.385\nEpoch 1/3.. Train loss: 3.285.. Validation loss: 2.248.. Validation accuracy: 0.469\nEpoch 1/3.. Train loss: 3.264.. Validation loss: 2.255.. Validation accuracy: 0.450\nEpoch 1/3.. Train loss: 3.289.. Validation loss: 2.070.. Validation accuracy: 0.488\nEpoch 1/3.. Train loss: 2.998.. Validation loss: 2.059.. Validation accuracy: 0.482\nEpoch 1/3.. Train loss: 2.993.. Validation loss: 1.851.. Validation accuracy: 0.529\nEpoch 1/3.. Train loss: 2.844.. Validation loss: 1.807.. Validation accuracy: 0.537\nEpoch 1/3.. Train loss: 2.891.. Validation loss: 1.737.. Validation accuracy: 0.574\nEpoch 1/3.. Train loss: 2.867.. Validation loss: 1.654.. Validation accuracy: 0.578\nEpoch 1/3.. Train loss: 2.873.. Validation loss: 1.645.. Validation accuracy: 0.583\nEpoch 1/3.. Train loss: 2.786.. Validation loss: 1.821.. Validation accuracy: 0.538\nEpoch 1/3.. Train loss: 2.830.. Validation loss: 1.573.. Validation accuracy: 0.586\nEpoch 1/3.. Train loss: 2.530.. Validation loss: 1.455.. Validation accuracy: 0.619\nEpoch 1/3.. Train loss: 2.637.. Validation loss: 1.454.. Validation accuracy: 0.611\nEpoch 2/3.. Train loss: 2.649.. Validation loss: 1.542.. Validation accuracy: 0.585\nEpoch 2/3.. Train loss: 2.250.. Validation loss: 1.538.. Validation accuracy: 0.593\nEpoch 2/3.. Train loss: 2.371.. Validation loss: 1.388.. Validation accuracy: 0.651\nEpoch 2/3.. Train loss: 2.495.. Validation loss: 1.342.. Validation accuracy: 0.646\nEpoch 2/3.. Train loss: 2.211.. Validation loss: 1.319.. Validation accuracy: 0.661\nEpoch 2/3.. Train loss: 2.512.. Validation loss: 1.336.. Validation accuracy: 0.654\nEpoch 2/3.. Train loss: 2.477.. Validation loss: 1.276.. Validation accuracy: 0.654\nEpoch 2/3.. Train loss: 2.267.. Validation loss: 1.197.. 
Validation accuracy: 0.679\nEpoch 2/3.. Train loss: 2.286.. Validation loss: 1.224.. Validation accuracy: 0.682\nEpoch 2/3.. Train loss: 2.218.. Validation loss: 1.204.. Validation accuracy: 0.678\nEpoch 2/3.. Train loss: 2.337.. Validation loss: 1.177.. Validation accuracy: 0.667\nEpoch 2/3.. Train loss: 2.390.. Validation loss: 1.172.. Validation accuracy: 0.680\nEpoch 2/3.. Train loss: 2.309.. Validation loss: 1.096.. Validation accuracy: 0.700\nEpoch 2/3.. Train loss: 2.371.. Validation loss: 1.089.. Validation accuracy: 0.706\nEpoch 2/3.. Train loss: 2.344.. Validation loss: 1.015.. Validation accuracy: 0.711\nEpoch 2/3.. Train loss: 1.973.. Validation loss: 1.122.. Validation accuracy: 0.692\nEpoch 2/3.. Train loss: 2.341.. Validation loss: 1.026.. Validation accuracy: 0.717\nEpoch 2/3.. Train loss: 2.245.. Validation loss: 1.107.. Validation accuracy: 0.697\nEpoch 2/3.. Train loss: 2.006.. Validation loss: 1.185.. Validation accuracy: 0.680\nEpoch 2/3.. Train loss: 2.199.. Validation loss: 1.174.. Validation accuracy: 0.692\nEpoch 2/3.. Train loss: 2.258.. Validation loss: 1.150.. Validation accuracy: 0.697\nEpoch 3/3.. Train loss: 2.162.. Validation loss: 1.020.. Validation accuracy: 0.730\nEpoch 3/3.. Train loss: 2.040.. Validation loss: 0.999.. Validation accuracy: 0.727\nEpoch 3/3.. Train loss: 2.058.. Validation loss: 0.973.. Validation accuracy: 0.732\nEpoch 3/3.. Train loss: 1.893.. Validation loss: 0.921.. Validation accuracy: 0.746\nEpoch 3/3.. Train loss: 1.892.. Validation loss: 0.982.. Validation accuracy: 0.731\nEpoch 3/3.. Train loss: 2.343.. Validation loss: 0.980.. Validation accuracy: 0.745\nEpoch 3/3.. Train loss: 1.962.. Validation loss: 0.981.. Validation accuracy: 0.727\nEpoch 3/3.. Train loss: 2.105.. Validation loss: 0.963.. Validation accuracy: 0.723\nEpoch 3/3.. Train loss: 2.219.. Validation loss: 1.093.. Validation accuracy: 0.704\nEpoch 3/3.. Train loss: 2.104.. Validation loss: 1.017.. Validation accuracy: 0.742\nEpoch 3/3.. 
Train loss: 2.073.. Validation loss: 0.946.. Validation accuracy: 0.731\nEpoch 3/3.. Train loss: 1.958.. Validation loss: 0.943.. Validation accuracy: 0.735\nEpoch 3/3.. Train loss: 1.987.. Validation loss: 0.891.. Validation accuracy: 0.757\nEpoch 3/3.. Train loss: 1.999.. Validation loss: 0.929.. Validation accuracy: 0.739\nEpoch 3/3.. Train loss: 1.911.. Validation loss: 1.087.. Validation accuracy: 0.724\nEpoch 3/3.. Train loss: 2.094.. Validation loss: 0.956.. Validation accuracy: 0.738\nEpoch 3/3.. Train loss: 2.288.. Validation loss: 0.968.. Validation accuracy: 0.747\nEpoch 3/3.. Train loss: 1.964.. Validation loss: 0.872.. Validation accuracy: 0.767\nEpoch 3/3.. Train loss: 2.036.. Validation loss: 0.875.. Validation accuracy: 0.774\nEpoch 3/3.. Train loss: 2.036.. Validation loss: 0.908.. Validation accuracy: 0.756\n" ] ], [ [ "## Testing your network\n\nIt's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. 
You should be able to reach around 70% accuracy on the test set if the model has been trained well.", "_____no_output_____" ] ], [ [ "plt.plot(train_losses, label='Training loss')\nplt.plot(validation_losses, label='Validation loss')\nplt.legend(frameon=False)", "_____no_output_____" ], [ "# TODO: Do validation on the test set\ntest_accuracy = 0\nmodel.eval()\nwith torch.no_grad():\n for inputs, labels in dataloaders['test']:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n\n # Calculate accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n test_accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\nprint(f\"Test accuracy: {test_accuracy/len(dataloaders['test']):.3f}\")", "Test accuracy: 0.712\n" ] ], [ [ "## Save the checkpoint\n\nNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.\n\n```model.class_to_idx = image_datasets['train'].class_to_idx```\n\nRemember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. 
You'll likely want to use this trained model in the next part of the project, so best to save it now.", "_____no_output_____" ] ], [ [ "# TODO: Save the checkpoint \nmodel.class_to_idx = data['train'].class_to_idx\n\ncheckpoint = {'input_size': 25088,\n 'hidden_layers':[4096],\n 'output_size': 102,\n 'arch': 'vgg16',\n 'learning_rate': 0.001,\n 'batch_size': 32,\n 'classifier' : classifier,\n 'epochs': epochs,\n 'optimizer': optimizer.state_dict(),\n 'state_dict': model.state_dict(),\n 'class_to_idx': model.class_to_idx}\n\ntorch.save(checkpoint, 'checkpoint.pth')", "_____no_output_____" ] ], [ [ "## Loading the checkpoint\n\nAt this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.", "_____no_output_____" ] ], [ [ "# TODO: Write a function that loads a checkpoint and rebuilds the model\ndef load_checkpoint(filename):\n checkpoint = torch.load(filename)\n learning_rate = checkpoint['learning_rate']\n model = getattr(torchvision.models, checkpoint['arch'])(pretrained=True)\n model.classifier = checkpoint['classifier']\n model.epochs = checkpoint['epochs']\n model.load_state_dict(checkpoint['state_dict'])\n model.class_to_idx = checkpoint['class_to_idx']\n optimizer.load_state_dict(checkpoint['optimizer'])\n \n return model, optimizer", "_____no_output_____" ], [ "nn_filename = 'checkpoint.pth'\n\nmodel, optimizer = load_checkpoint(nn_filename)\n\nsaved_model = print(model)", "VGG(\n (features): Sequential(\n (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (1): ReLU(inplace)\n (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (3): ReLU(inplace)\n (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (6): ReLU(inplace)\n (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), 
padding=(1, 1))\n (8): ReLU(inplace)\n (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (11): ReLU(inplace)\n (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (13): ReLU(inplace)\n (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (15): ReLU(inplace)\n (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (18): ReLU(inplace)\n (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (20): ReLU(inplace)\n (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (22): ReLU(inplace)\n (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (25): ReLU(inplace)\n (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (27): ReLU(inplace)\n (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n (29): ReLU(inplace)\n (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n )\n (classifier): Sequential(\n (0): Linear(in_features=25088, out_features=4096, bias=True)\n (1): ReLU()\n (2): Dropout(p=0.5)\n (3): Linear(in_features=4096, out_features=102, bias=True)\n (4): LogSoftmax()\n )\n)\n" ] ], [ [ "# Inference for classification\n\nNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. 
It should look like \n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```\n\nFirst you'll need to handle processing the input image such that it can be used in your network. \n\n## Image Preprocessing\n\nYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. \n\nFirst, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.\n\nColor channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.\n\nAs before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. \n\nAnd finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). 
The color channel needs to be first and retain the order of the other two dimensions.", "_____no_output_____" ] ], [ [ "def process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n \n # TODO: Process a PIL image for use in a PyTorch model\n img_size = 256\n crop_size = 224\n \n im = Image.open(image)\n im = im.resize((img_size,img_size))\n \n left = (img_size-crop_size)*0.5\n right = left + crop_size\n upper = (img_size-crop_size)*0.5\n lower = upper + crop_size\n \n im = im.crop((left, upper, right, lower))\n im = np.array(im)/255\n\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n im = (im - mean) / std\n \n return im.transpose(2,0,1)", "_____no_output_____" ] ], [ [ "To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).", "_____no_output_____" ] ], [ [ "def imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.numpy().transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n \n ax.imshow(image)\n \n return ax", "_____no_output_____" ] ], [ [ "## Class Prediction\n\nOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. 
You'll want to calculate the class probabilities then find the $K$ largest values.\n\nTo get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.\n\nAgain, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.\n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```", "_____no_output_____" ] ], [ [ "def predict(image_path, model, topk=5):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n \n # TODO: Implement the code to predict the class from an image file\n model.eval()\n model.cpu()\n \n image = process_image(image_path)\n image = torch.from_numpy(np.array([image])).float()\n image = Variable(image)\n \n with torch.no_grad():\n logps = model.forward(image)\n ps = torch.exp(logps)\n probs, labels = torch.topk(ps, topk)\n class_to_idx_rev = {model.class_to_idx[k]: k for k in model.class_to_idx}\n classes = []\n \n for label in labels.numpy()[0]:\n classes.append(class_to_idx_rev[label])\n \n return probs.numpy()[0], classes", "_____no_output_____" ], [ "img = random.choice(os.listdir('./flowers/test/56/'))\nimg_path = './flowers/test/56/' + img\nwith Image.open(img_path) as image:\n plt.imshow(image)\n \nprob, classes = predict(img_path, model)\nprint(prob)\nprint(classes)\nprint([cat_to_name[x] for x in classes])", "[ 9.99985576e-01 5.35655818e-06 3.74382284e-06 
2.03814830e-06\n 7.16588829e-07]\n['56', '5', '59', '46', '66']\n['bishop of llandaff', 'english marigold', 'orange dahlia', 'wallflower', 'osteospermum']\n" ] ], [ [ "## Sanity Checking\n\nNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:\n\n<img src='assets/inference_example.png' width=300px>\n\nYou can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.", "_____no_output_____" ] ], [ [ "# TODO: Display an image along with the top 5 classes\nprobs, classes = predict(img_path, model) \nmax_index = np.argmax(prob)\nmax_probability = prob[max_index]\nlabel = classes[max_index]\n\nfig = plt.figure(figsize=(6,6))\nax1 = plt.subplot2grid((15,9), (0,0), colspan=9, rowspan=9)\nax2 = plt.subplot2grid((15,9), (9,2), colspan=5, rowspan=5)\n\n\nax1.axis('off')\nax1.set_title(cat_to_name[label])\nax1.imshow(Image.open(img_path))\n\nlabels = []\nfor cl in classes:\n labels.append(cat_to_name[cl])\n\ny_pos = np.arange(5)\nax2.set_yticks(y_pos)\nax2.set_yticklabels(labels)\nax2.set_xlabel('Probability')\nax2.invert_yaxis()\nax2.barh(y_pos, prob, xerr=0, align='center', color='blue')\n\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb223f5a1eb55d01df0bdcc5e420229a9ed58491
498,001
ipynb
Jupyter Notebook
example_notebooks/4_efficientdet/train - without val.ipynb
OMKARTT/Monk_Object_Detection
fa4482a9e323b12b443c8825d052fda4cc54350a
[ "Apache-2.0" ]
1
2020-09-24T14:20:48.000Z
2020-09-24T14:20:48.000Z
example_notebooks/4_efficientdet/train - without val.ipynb
Kushagra-awasthi/Monk_Object_Detection
4a9e700cbf0267b8bce2fda5e915b135269248fc
[ "Apache-2.0" ]
null
null
null
example_notebooks/4_efficientdet/train - without val.ipynb
Kushagra-awasthi/Monk_Object_Detection
4a9e700cbf0267b8bce2fda5e915b135269248fc
[ "Apache-2.0" ]
null
null
null
444.247101
147,645
0.871516
[ [ [ "<a href=\"https://colab.research.google.com/github/Tessellate-Imaging/Monk_Object_Detection/blob/master/example_notebooks/4_efficientdet/train%20-%20with%20validation%20dataset.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Installation\n\n - Run these commands\n \n - git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git\n \n - cd Monk_Object_Detection/4_efficientdet/installation\n \n - Select the right requirements file and run\n \n - cat requirements.txt | xargs -n 1 -L 1 pip install", "_____no_output_____" ] ], [ [ "! git clone https://github.com/Tessellate-Imaging/Monk_Object_Detection.git", "_____no_output_____" ], [ "# For colab use the command below\n! cd Monk_Object_Detection/4_efficientdet/installation && cat requirements_colab.txt | xargs -n 1 -L 1 pip install\n\n# For Local systems and cloud select the right CUDA version\n#! cd Monk_Object_Detection/4_efficientdet/installation && cat requirements_cuda9.0.txt | xargs -n 1 -L 1 pip install", "_____no_output_____" ] ], [ [ "# About the network\n\n1. Paper on EfficientDet: https://arxiv.org/abs/1911.09070\n\n2. Blog 1 on EfficientDet: https://towardsdatascience.com/efficientdet-scalable-and-efficient-object-detection-review-4472ffc34fd9\n\n3. 
Blog 2 on EfficientDet: https://medium.com/@nainaakash012/efficientdet-scalable-and-efficient-object-detection-ea05ccd28427", "_____no_output_____" ], [ "# COCO Format - 1\n\n## Dataset Directory Structure\n\n ../sample_dataset (root_dir)\n |\n |------ship (coco_dir) \n | |\n | |----images (img_dir)\n | |\n | |------Train (set_dir) (Train)\n | |\n | |---------img1.jpg\n | |---------img2.jpg\n | |---------..........(and so on)\n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_Train.json (instances_<set_dir>.json)\n | |--------------------classes.txt\n \n \n - instances_Train.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n\nFor TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"ship\";\n - img_dir = \"images\";\n - set_dir = \"Train\";\n\n \n Note: Annotation file name too coincides against the set_dir", "_____no_output_____" ], [ "# COCO Format - 2\n\n## Dataset Directory Structure\n\n ../sample_dataset (root_dir)\n |\n |------ship (coco_dir) \n | |\n | |---ImagesTrain (set_dir)\n | |----|\n | |-------------------img1.jpg\n | |-------------------img2.jpg\n | |-------------------.........(and so on)\n |\n |\n | |---annotations \n | |----|\n | |--------------------instances_ImagesTrain.json (instances_<set_dir>.json)\n | |--------------------classes.txt\n \n \n - instances_Train.json -> In proper COCO format\n - classes.txt -> A list of classes in alphabetical order\n \n For TrainSet\n - root_dir = \"../sample_dataset\";\n - coco_dir = \"ship\";\n - img_dir = \"./\";\n - set_dir = \"ImagesTrain\";\n\n \n Note: Annotation file name too coincides against the set_dir\n ", "_____no_output_____" ], [ "# Sample Dataset Credits\n\n credits: https://www.tejashwi.io/object-detection-with-fizyr-retinanet/", "_____no_output_____" ] ], [ [ "import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/4_efficientdet/lib/\");", "_____no_output_____" ], [ "from train_detector import Detector", 
"../../4_efficientdet/lib/train_detector.py:12: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n from tqdm.autonotebook import tqdm\n" ], [ "gtf = Detector();", "_____no_output_____" ], [ "root_dir = \"Monk_Object_Detection/example_notebooks/sample_dataset\";\ncoco_dir = \"ship\";\nimg_dir = \"./\";\nset_dir = \"Images\";", "_____no_output_____" ], [ "gtf.Train_Dataset(root_dir, coco_dir, img_dir, set_dir, batch_size=8, image_size=512, use_gpu=True)", "loading annotations into memory...\nDone (t=0.01s)\ncreating index...\nindex created!\n" ], [ "# Available models\n# model_name=\"efficientnet-b0\"\n# model_name=\"efficientnet-b1\"\n# model_name=\"efficientnet-b2\"\n# model_name=\"efficientnet-b3\"\n# model_name=\"efficientnet-b4\"\n# model_name=\"efficientnet-b5\"\n# model_name=\"efficientnet-b6\"\n# model_name=\"efficientnet-b7\"\n# model_name=\"efficientnet-b8\"", "_____no_output_____" ], [ "gtf.Model(model_name=\"efficientnet-b0\");\n\n# To resume training\n#gtf.Model(model_name=\"efficientnet-b0\", load_pretrained_model_from=\"path to model.pth\");", "Loaded pretrained weights for efficientnet-b0\n" ], [ "gtf.Set_Hyperparams(lr=0.0001, val_interval=1, es_min_delta=0.0, es_patience=0)", "_____no_output_____" ], [ "gtf.Train(num_epochs=10, model_output_dir=\"trained/\");", "_____no_output_____" ] ], [ [ "# Inference ", "_____no_output_____" ] ], [ [ "import os\nimport sys\nsys.path.append(\"Monk_Object_Detection/4_efficientdet/lib/\");", "_____no_output_____" ], [ "from infer_detector import Infer", "../../4_efficientdet/lib/infer_detector.py:12: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. 
in jupyter console)\n from tqdm.autonotebook import tqdm\n" ], [ "gtf = Infer();", "_____no_output_____" ], [ "gtf.Model(model_dir=\"trained/\")", "_____no_output_____" ], [ "f = open(\"Monk_Object_Detection/example_notebooks/sample_dataset/ship/annotations/classes.txt\", 'r');\nclass_list = f.readlines();\nf.close();\nfor i in range(len(class_list)):\n class_list[i] = class_list[i][:-1]", "_____no_output_____" ], [ "class_list", "_____no_output_____" ], [ "img_path = \"Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img1.jpg\";\nscores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4);", "_____no_output_____" ], [ "from IPython.display import Image\nImage(filename='output.jpg') ", "_____no_output_____" ], [ "img_path = \"../sample_dataset/ship/test/img4.jpg\";\nscores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4);", "_____no_output_____" ], [ "from IPython.display import Image\nImage(filename='output.jpg')", "_____no_output_____" ], [ "img_path = \"Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img5.jpg\";\nscores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4);", "_____no_output_____" ], [ "from IPython.display import Image\nImage(filename='output.jpg')", "_____no_output_____" ], [ "img_path = \"Monk_Object_Detection/example_notebooks/sample_dataset/ship/test/img6.jpg\";\nscores, labels, boxes = gtf.Predict(img_path, class_list, vis_threshold=0.4);", "_____no_output_____" ], [ "from IPython.display import Image\nImage(filename='output.jpg')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb224886e1f0f0d8f1b3041b273622620c0897fd
8,192
ipynb
Jupyter Notebook
.ipynb_checkpoints/Gentrification Paper-checkpoint.ipynb
JanineW/Quantitative-Economics
54577eb68c3e7c373e7376433a8750c34374cf9d
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Gentrification Paper-checkpoint.ipynb
JanineW/Quantitative-Economics
54577eb68c3e7c373e7376433a8750c34374cf9d
[ "MIT" ]
null
null
null
.ipynb_checkpoints/Gentrification Paper-checkpoint.ipynb
JanineW/Quantitative-Economics
54577eb68c3e7c373e7376433a8750c34374cf9d
[ "MIT" ]
null
null
null
25.128834
110
0.434937
[ [ [ "import pandas as pd\nimport os\nimport json\nimport requests\nimport graphlab\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "Seattle = graphlab.SFrame(\"Seattle.csv\")", "_____no_output_____" ], [ "Seattle['2015Number'] = Seattle['2015Number'].astype(int)", "_____no_output_____" ], [ "Seattle['2000Number'] = Seattle['2000Number'].astype(int)", "_____no_output_____" ], [ "Seattle['diff'] = Seattle['2015Number']-Seattle['2000Number']", "_____no_output_____" ], [ "df = pd.DataFrame({'x': Seattle['INDUSTRY'], 'y': Seattle['diff']})", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "my_plot = df.plot(kind='bar')", "_____no_output_____" ], [ "plt.style.use('ggplot')", "_____no_output_____" ], [ "plt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb22496b2f3731fbeebbaba7155afb069cc1388c
18,808
ipynb
Jupyter Notebook
notebooks/conditional-probability-tables-with-constraints.ipynb
munichpavel/fake-data-for-learning
a0ebcce333955174d6d3a551402d46aa29904ac4
[ "MIT" ]
10
2020-01-14T07:30:05.000Z
2022-03-15T18:52:25.000Z
notebooks/conditional-probability-tables-with-constraints.ipynb
munichpavel/fake-data-for-learning
a0ebcce333955174d6d3a551402d46aa29904ac4
[ "MIT" ]
21
2019-06-30T10:49:16.000Z
2022-03-07T11:04:20.000Z
notebooks/conditional-probability-tables-with-constraints.ipynb
munichpavel/fake-data-for-learning
a0ebcce333955174d6d3a551402d46aa29904ac4
[ "MIT" ]
null
null
null
27.740413
464
0.492344
[ [ [ "# Generating conditional probability tables subject to constraints\n", "_____no_output_____" ] ], [ [ "import os\nfrom pathlib import Path\n\nfrom itertools import product\n\nimport numpy as np\nimport pandas as pd\n\nfrom fake_data_for_learning.fake_data_for_learning import (\n BayesianNodeRV, FakeDataBayesianNetwork, SampleValue\n)\nfrom fake_data_for_learning.utils import RandomCpt\nfrom fake_data_for_learning.probability_polytopes import (\n MapMultidimIndexToLinear, ProbabilityPolytope, ExpectationConstraint\n)", "_____no_output_____" ] ], [ [ "Suppose we want to generate data from a discrete Bayesian network, such as\n\nProduct -> Days <- Rating, \n\nwhere e.g. Product is the (insurance) product name, Rating is rating strength (i.e. market price / technical price) for a submission, and Days is the number of days to generate a quote for the submission.\n\nThe number of entries in probability and conditional probability tables to define this Bayesian network is\n\n$ | Product | + | Rating | + | Product | \\times | Rating | \\times | Days |$.\n\nFor example, let us define Industry and Rating as follows", "_____no_output_____" ] ], [ [ "product_values = ['financial', 'liability', 'property']\nproduct_type = BayesianNodeRV('product_type', np.array([0.2, 0.5, 0.3]), values=product_values)\nrating_values = range(2)\nrating = BayesianNodeRV('rating', np.array([0.3, 0.7]))", "_____no_output_____" ] ], [ [ "Suppose that Days is also discrete, e.g.", "_____no_output_____" ] ], [ [ "days_values = range(4)", "_____no_output_____" ] ], [ [ "Then if we choose the ordering of the conditional probability table axes as Product, Rating, Days, we can generate the entries of the conditional probability table for Days conditioned on Industry and Rating with `utils.RandomCpt`:", "_____no_output_____" ] ], [ [ "random_cpt = RandomCpt(len(product_values), len(rating_values), len(days_values))\nX = random_cpt()", "_____no_output_____" ], [ "X[0, 0, :].sum()", 
"_____no_output_____" ] ], [ [ "So the total number of probability table entries to specify is, as in the formula above,", "_____no_output_____" ] ], [ [ "f'Number of probability table entries: {len(product_values) + len(rating_values) + (len(product_values) * len(rating_values) * len(days_values))}'", "_____no_output_____" ] ], [ [ "It would be nice to specify certain properties of the matrix without having to change entries individually. For example, we may want to insist that\n\n\\begin{equation*}\nE(D | P = property) = 3.5 \\\\\nE(D | P = financial) = 1.0 \\\\\nE(D | P= liability) = 2.0\n\\end{equation*}\n\nDenote the entries of the conditional probability table as \n\n$$(\\rho_{p, r | d})$$\n\nThe the above constraints become\n\n\\begin{equation*}\n\\frac{1}{|R|} \\sum_{r, d} d \\, \\rho_{\\mathrm{property},\\, r\\, | d} = 3.5 \\\\\n\\frac{1}{|R|} \\sum_{r, d} d \\, \\rho_{\\mathrm{financial},\\, r\\, | d} = 1.0\\\\\n\\frac{1}{|R|} \\sum_{r, d} d \\, \\rho_{\\mathrm{liability},\\, r\\, | d} = 2.0.\n\\end{equation*}\n\nAs $(\\rho)$ is a conditional probability table, we also have the constraints \n\n\\begin{equation*}\n0 \\leq \\rho_{p,\\,r\\,|d} \\leq 1 \\textrm{ for all }(p,\\,r,\\,d),\\\\\n\\sum_{d} \\rho_{p,\\,r,\\,| d} = 1 \\textrm{ for each pair } (p, \\, r)\n\\end{equation*}\n\nTogether, these constraints define convex polytope contained in (probability) simplex $\\Delta_{R-1} \\subseteq \\mathbb{R}^{R}$, where $R = |Product | \\times | Rating | \\times | Days|$ (see e.g. Chapter 1 of *Lectures on Algebraic Statistics*, Drton, Sturmfels, Sullivant). This polytope is defined as an intersection of half-spaces, i.e. 
using the so-called *H-representation* of the polytope, see *Lectures on Polytopes* by Ziegler, Chapters 0 and 1.\n\nTo generate a random (conditional) probability table to these constraints, the vertex-, or *V-representation* of the probability polytope $P$ is much more useful, because given the a vertex matrix $V$, where each column is a vertex of $P$ in $\\mathbb{R}^R$, and all points in $P$ can be obtained as\n\n$$\n\\begin{equation*}\nx = V \\cdot t\n\\end{equation*}\n$$\n\nwhere $t \\in \\mathbb{R}^N$, with $N$ being the number of vertices for $P$, and $t$ satisfying $0 \\leq t_i \\leq 1$, $\\sum t_i = 1$.\n\nOnce we have determined the V-representation $V$, then the problem of generating conditional probability tables subject to our given expectation value constraints reduces to the much simpler problem of generating points on the non-negative quadrant of the unit (hyper) cube in $R^N$.\n\nBefore we get to our goal of generating these probability tables for our hit ratio problem, let's look at elementary examples.", "_____no_output_____" ], [ "## (Conditional) Probability Polytopes\n\nThe simplest example of a probability polytope is that of a Bernoulli random variable.", "_____no_output_____" ] ], [ [ "bernoulli = ProbabilityPolytope(('outcome',), dict(outcome=range(2)))\nA, b = bernoulli.get_probability_half_planes()\nprint(A, '\\n', b)", "[[ 1. 1.]\n [-1. -1.]\n [ 1. 0.]\n [ 0. 1.]\n [-1. -0.]\n [-0. -1.]] \n [ 1. -1. 1. 1. 0. 
0.]\n" ] ], [ [ "We convert the formulation A x <= b to the V-description", "_____no_output_____" ] ], [ [ "bernoulli.get_vertex_representation()", "_____no_output_____" ], [ "tertiary = ProbabilityPolytope(('outcome',), dict(outcome=range(3)))\ntertiary.get_vertex_representation()", "_____no_output_____" ], [ "conditional_bernoullis = ProbabilityPolytope(\n ('input', 'output'), dict(input=range(2), output=range(2))\n)\nconditional_bernoullis.get_vertex_representation()", "_____no_output_____" ] ], [ [ "The benefit of having the vertex-representation (V-representation) of the probability polytope is that generating random (conditional) probability tables is straightforward, namely, we can get all elements of the probability polytope by taking combinations of the vertex (column) vectors.\n\nIn the flattened coordinates, we have, e.g.", "_____no_output_____" ] ], [ [ "conditional_bernoullis.generate_flat_random_cpt()", "_____no_output_____" ] ], [ [ "In the multidimensional coordinates for conditional probability tables here, we have e.g.", "_____no_output_____" ] ], [ [ "conditional_bernoullis.generate_random_cpt()", "_____no_output_____" ] ], [ [ "## Adding contraints on conditional expectation values", "_____no_output_____" ] ], [ [ "conditional_bernoullis.set_expectation_constraints(\n [ExpectationConstraint(equation=dict(input=1), moment=1, value=0.5)]\n)", "_____no_output_____" ], [ "conditional_bernoullis.get_expect_equations_col_indices(conditional_bernoullis.expect_constraints[0].equation)", "_____no_output_____" ], [ "conditional_bernoullis.get_vertex_representation()", "_____no_output_____" ], [ "conditional_bernoullis.generate_random_cpt()", "_____no_output_____" ], [ "two_input_constrained_polytope = ProbabilityPolytope(\n ('input', 'more_input', 'output'),\n dict(input=['hi', 'low'], more_input=range(2), output=range(2))\n)\ntwo_input_constrained_polytope.set_expectation_constraints(\n [ExpectationConstraint(equation=dict(more_input=0), moment=1, 
value=0.25)]\n)\ntwo_input_constrained_polytope.get_vertex_representation()", "_____no_output_____" ] ], [ [ "## Hit rate polytope again", "_____no_output_____" ] ], [ [ "days_polytope = ProbabilityPolytope(\n ('product', 'rating', 'days'),\n coords = {\n 'product': product_values, \n 'rating': rating_values, \n 'days': days_values\n }\n)\ndays_polytope.set_expectation_constraints(\n [\n ExpectationConstraint(equation=dict(product='financial'), moment=1, value=0.2),\n ExpectationConstraint(equation=dict(product='liability'), moment=1, value=0.9),\n ExpectationConstraint(equation=dict(product='property'), moment=1, value=0.5),\n ]\n)\ndays_cpt = days_polytope.generate_random_cpt()\ndays_cpt", "_____no_output_____" ] ], [ [ "Now we create our Bayesian network with desired constraints on some expectation values", "_____no_output_____" ] ], [ [ "days = BayesianNodeRV('days', days_cpt, parent_names=['product_type', 'rating'])\nbn = FakeDataBayesianNetwork(product_type, rating)#, days)\nbn = FakeDataBayesianNetwork(product_type, rating, days)\nbn.rvs(10)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb22562e050668b1f4a031ca46d005debceabc9a
424,632
ipynb
Jupyter Notebook
convolutional-neural-networks/conv-visualization/custom_filters.ipynb
ahmed-gharib89/ud_deep_learning_v2_pytorch
bb5f66fc8e674daba134f1feba4877f784430486
[ "MIT" ]
1
2020-07-21T19:06:10.000Z
2020-07-21T19:06:10.000Z
convolutional-neural-networks/conv-visualization/custom_filters.ipynb
ahmed-gharib89/ud_deep_learning_v2_pytorch
bb5f66fc8e674daba134f1feba4877f784430486
[ "MIT" ]
2
2020-06-24T22:51:09.000Z
2020-09-26T07:28:35.000Z
convolutional-neural-networks/conv-visualization/custom_filters.ipynb
ahmed-gharib89/ud_deep_learning_v2_pytorch
bb5f66fc8e674daba134f1feba4877f784430486
[ "MIT" ]
null
null
null
1,572.711111
174,860
0.958778
[ [ [ "# Creating a Filter, Edge Detection", "_____no_output_____" ], [ "### Import resources and display image", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport cv2\nimport numpy as np\n\n%matplotlib inline\n\n# Read in the image\nimage = mpimg.imread('data/udacity_sdc.png')\n\nplt.imshow(image)", "_____no_output_____" ] ], [ [ "### Convert the image to grayscale", "_____no_output_____" ] ], [ [ "# Convert to grayscale for filtering\ngray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\nplt.imshow(gray, cmap='gray')", "_____no_output_____" ] ], [ [ "### TODO: Create a custom kernel\n\nBelow, you've been given one common type of edge detection filter: a Sobel operator.\n\nThe Sobel filter is very commonly used in edge detection and in finding patterns in intensity in an image. Applying a Sobel filter to an image is a way of **taking (an approximation) of the derivative of the image** in the x or y direction, separately. The operators look as follows.\n\n<img src=\"notebook_ims/sobel_ops.png\" width=200 height=200>\n\n**It's up to you to create a Sobel x operator and apply it to the given image.**\n\nFor a challenge, see if you can put the image through a series of filters: first one that blurs the image (takes an average of pixels), and then one that detects the edges.", "_____no_output_____" ] ], [ [ "# Create a custom kernel\n\n# 3x3 array for edge detection\nsobel_y = np.array([[ -1, -2, -1], \n [ 0, 0, 0], \n [ 1, 2, 1]])\n\n## TODO: Create and apply a Sobel x operator\nsobel_x = np.array([[ 0, -2, 0],\n [-2, 8, -2],\n [ 0, -2, -1]])\n\nfiltered_image = cv2.filter2D(gray, -1, sobel_x)\n\n# Filter the image using filter2D, which has inputs: (grayscale image, bit-depth, kernel) \nfiltered_image = cv2.filter2D(gray, -1, sobel_y)\n\nplt.imshow(filtered_image, cmap='gray')", "_____no_output_____" ] ], [ [ "### Test out other filters!\n\nYou're encouraged to create other kinds of filters and apply them to see 
what happens! As an **optional exercise**, try the following:\n* Create a filter with decimal value weights.\n* Create a 5x5 filter\n* Apply your filters to the other images in the `images` directory.\n\n", "_____no_output_____" ] ], [ [ "sobel_x = np.array([[-1, 2, .5, 1, 2],\n [-1, 2, 0, 1, 2],\n [-1, 2, .5, 1, 2],\n [-1, 2, 0, 1, 2],\n [-1, 2, 2, 1, 2]])\n\nfiltered_image = cv2.filter2D(gray, -1, sobel_x)\n\nplt.imshow(filtered_image, cmap='gray')\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb225a54248ece6337a0dec06493dd5baafa9c88
262,020
ipynb
Jupyter Notebook
docs/notebooks/analysis/example_anticrossing.ipynb
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
docs/notebooks/analysis/example_anticrossing.ipynb
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
docs/notebooks/analysis/example_anticrossing.ipynb
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
721.818182
47,592
0.950912
[ [ [ "# Example of automatic fitting of anti-crossing", "_____no_output_____" ], [ "Pieter Eendebak <[email protected]>", "_____no_output_____" ] ], [ [ "# import the modules used in this program:\nimport sys, os, time\nimport qcodes\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n%matplotlib inline \n\nimport scipy.optimize\nimport cv2\nimport qtt\nimport qtt.measurements\nfrom qcodes.data.hdf5_format import HDF5Format\nfrom qtt.algorithms.anticrossing import fit_anticrossing, plot_anticrossing", "_____no_output_____" ] ], [ [ "## Load dataset", "_____no_output_____" ] ], [ [ "exampledatadir=os.path.join(qtt.__path__[0], 'exampledata')\n\nqcodes.DataSet.default_io = qcodes.DiskIO(exampledatadir)\ndata = qcodes.load_data('2017-01-10/09-45-15', formatter=HDF5Format())\n\nqtt.measurements.scans.plotData(data, fig=10)", "_____no_output_____" ], [ "fit_results = fit_anticrossing(data)", "straightenImage: size (60, 928) fx 0.1294 fy 2.0000\nstraightenImage: result size (120, 120) mvx 0.2500 mvy 0.2500\nfitModel: score 1618.90 -> 1297.85\nfitModel: score 1297.85 -> 1297.85\nfit_anticrossing: patch size (60, 60)\n" ], [ "plot_anticrossing(data, fit_results)", "_____no_output_____" ], [ "print(fit_results)", "{'labels': ['P4', 'P3'], 'centre': array([[-10.87200927],\n [-12.26911426]]), 'fitpoints': {'centre': array([[-10.87200927],\n [-12.26911426]]), 'lp': array([[-13.08187867],\n [-14.47898366]]), 'hp': array([[ -8.66213987],\n [-10.05924485]]), 'ip': array([[-13.08187867, -13.08187867, -8.66213987, -8.66213987,\n -24.87058186],\n [-14.47898366, -14.47898366, -10.05924485, -10.05924485,\n 2.15483141]]), 'op': array([[ -8.75020813, -21.89998627, -12.61622885, 0.23561125],\n [-23.4921202 , -9.76294947, -0.87419285, -14.62323707]])}, 'fit_params': array([13.99857259, 14.42394567, 3.12522728, 1.12279268, 3.63270088,\n 4.30587599, 0.47394414]), 'params': {}}\n" ] ], [ [ "## Detailed steps (mainly for debugging)", "_____no_output_____" ], [ "### Pre-process image to a 
honeycomb", "_____no_output_____" ] ], [ [ "from qtt.algorithms.images import straightenImage\nfrom qtt.utilities.imagetools import cleanSensingImage\nfrom qtt.utilities.tools import showImage as showIm\nfrom qtt.measurements.scans import fixReversal\nfrom qtt.utilities.imagetools import fitModel, evaluateCross", "_____no_output_____" ], [ "im, tr = qtt.data.dataset2image(data)\nimextent = tr.scan_image_extent()\nmpl_imextent =tr.matplotlib_image_extent()\nistep=.25\n\nimc = cleanSensingImage(im, sigma=0.93, verbose=1)\nimx, (fw, fh, mvx, mvy, Hstraight) = straightenImage(imc, imextent, mvx=istep, verbose=2) \n\nimx = imx.astype(np.float64)*(100./np.percentile(imx, 99)) # scale image\n\nshowIm(imx, fig=100, title='straight image')", "fitBackground: is1d 0, order 3\n checkReversal: 1 (mval 0.1, thr -0.4)\nstraightenImage: size (60, 928) fx 0.1294 fy 2.0000\nstraightenImage: result size (120, 120) mvx 0.2500 mvy 0.2500\n" ] ], [ [ "### Initial input", "_____no_output_____" ] ], [ [ "istepmodel = .5\nksizemv = 31\nparam0 = [(imx.shape[0] / 2 + .5) * istep, (imx.shape[0] / 2 + .5) * istep, \\\n 3.5, 1.17809725, 3.5, 4.3196899, 0.39269908]\nparam0e = np.hstack((param0, [np.pi / 4]))\ncost, patch, r, _ = evaluateCross(param0e, imx, verbose=0, fig=21, istep=istep, istepmodel=istepmodel)", "_____no_output_____" ] ], [ [ "### Find the anti-crossing", "_____no_output_____" ] ], [ [ "t0 = time.time()\nres = qtt.utilities.imagetools.fitModel(param0e, imx, verbose=1, cfig=10, istep=istep,\n istepmodel=istepmodel, ksizemv=ksizemv, w=2.5, use_abs=True)\nparam = res.x\ndt = time.time() - t0\nprint('calculation time: %.2f [s]' % dt)\n\ncost, patch, cdata, _ = evaluateCross(param, imx, verbose=1, fig=25, istep=istep, istepmodel=istepmodel, linewidth=4)", "fitModel: score 1618.90 -> 1297.57\ncalculation time: 2.84 [s]\nevaluateCross: patch shape (60, 60)\n add cost for image cc: 2.0\n" ] ], [ [ "### Show orthogonal line (for polarization scan)", "_____no_output_____" ] ], [ [ 
"showIm(patch, fig=25)\nppV, ccV, slopeV = qtt.utilities.imagetools.Vtrace(cdata, param, fig=25)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb225c7bac4a56902924eb05461e86d5c8c22465
13,358
ipynb
Jupyter Notebook
PyimageTutorial/Module 2 My Own Object Detector/chp2_2_2_object_detection_made_easy/Untitled1.ipynb
wcsodw1/Computer-Vision-with-Artificial-intelligence
1fc58466bf82c33939fae911140737a8d9681ebd
[ "MIT" ]
null
null
null
PyimageTutorial/Module 2 My Own Object Detector/chp2_2_2_object_detection_made_easy/Untitled1.ipynb
wcsodw1/Computer-Vision-with-Artificial-intelligence
1fc58466bf82c33939fae911140737a8d9681ebd
[ "MIT" ]
null
null
null
PyimageTutorial/Module 2 My Own Object Detector/chp2_2_2_object_detection_made_easy/Untitled1.ipynb
wcsodw1/Computer-Vision-with-Artificial-intelligence
1fc58466bf82c33939fae911140737a8d9681ebd
[ "MIT" ]
null
null
null
77.213873
1,550
0.654589
[ [ [ "# python David_2_2_2_train_detector.py --class \"../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/stop_sign_images --annotations ../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/stop_sign_annotations --output ../../../CV_PyImageSearch/Dataset/Chapter_Specific/chp2_2_stop_sign/output/2020_StopSignTest.svm'\n\n# python David_2_2_2_train_detector.py\n\n# import the necessary packages\nfrom __future__ import print_function\nfrom imutils import paths\nfrom scipy.io import loadmat\nfrom skimage import io\nimport argparse\nimport dlib\nimport sys\n\n# handle Python 3 compatibility\nif sys.version_info > (3,):\n\tlong = int\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-c\", \"--class\", required=True,\n\thelp=\"Path to the CALTECH-101 class images\")\nap.add_argument(\"-a\", \"--annotations\", required=True,\n\thelp=\"Path to the CALTECH-101 class annotations\")\nap.add_argument(\"-o\", \"--output\", required=True,\n\thelp=\"Path to the output detector\")\n\n\n# import sys \n# sys.argv[1:] = '-c stop_sign_images -a stop_sign_annotations -o output/stop_sign_detector.svm'.split()\n# sys.argv[1:] = '-c Airplane/image -a Airplane/annotations -o Airplane/output/airplane.svm'.split()\nsys.argv[1:] = '-c ../../../CV_PyImageSearch/Dataset/caltech101/101_ObjectCategories -a ../../../CV_PyImageSearch/Dataset/caltech101/Annotations -o 20200724Face_Detector.svm'.split()\n\nargs = vars(ap.parse_args())\n\n\n# grab the default training options for our HOG + Linear SVM detector initialize the\n# list of images and bounding boxes used to train the classifier\nprint(\"[INFO] gathering images and bounding boxes...\")\noptions = dlib.simple_object_detector_training_options()\nimages = []\nboxes = []", "[INFO] gathering images and bounding boxes...\n" ], [ "# loop over the image paths\nfor imagePath in paths.list_images(args[\"class\"]):\n # extract the image ID from the image path and 
load the annotations file\n imageID = imagePath[imagePath.rfind(\"/\") + 1:].split(\"_\")[1]\n #print(imageID)\n id1= imagePath.find(\"\\\\\")\n #print(id1)\n id2= imagePath[id1+1:]\n #print(id2)\n imageID = id2.replace(\".jpg\", \"\")\n #print(imageID)\n str = imageID[-4:]\n #print(str)\n #dir= \"./Airplane1/annotations/\"\n dir= \"./Face_easy/annotations\"\n #print(dir)\n p = \"{}annotation_{}.mat\".format(dir, str)\n print(p)\n annotations = loadmat(p)[\"box_coord\"]\n #print(annotations)\n", "./Face_easy/annotation_0001.mat\n" ], [ " #print(dir)\n p = \"{}annotation_{}.mat\".format(dir, str)\n #print(p)\n annotations = loadmat(p)[\"box_coord\"]\n #print(annotations)\n bb = [dlib.rectangle(left=long(x), top=long(y), right=long(w), bottom=long(h)) \n for (y, h, x, w) in annotations]\n #print(bb)\n boxes.append(bb) \n #print(boxes)\n #print(len(boxes))\n # add the image to the list of images\n images.append(io.imread(imagePath))\n #print(images)\n\n# train the object detector\nprint(\"[INFO] training detector...\")\ndetector = dlib.train_simple_object_detector(images, boxes, options)\n\n# dump the classifier to file\nprint(\"[INFO] dumping classifier to file...\")\ndetector.save(args[\"output\"])\n\n# visualize the results of the detector\nwin = dlib.image_window()\nwin.set_image(detector)\ndlib.hit_enter_to_continue()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
cb22624617eb256528d47417d8608a7387fbc206
364,654
ipynb
Jupyter Notebook
data_analysis/AP-border_fitting_analysis.ipynb
ecuracosta/Modeling_the_spatiotemporal_control_of_cell_cycle_acceleration
3991cb6b97b1d742971b93f32ef1cca9de7b067f
[ "BSD-3-Clause" ]
2
2020-02-05T23:31:04.000Z
2020-08-29T11:00:26.000Z
data_analysis/AP-border_fitting_analysis.ipynb
ecuracosta/Modeling_the_spatiotemporal_control_of_cell_cycle_acceleration
3991cb6b97b1d742971b93f32ef1cca9de7b067f
[ "BSD-3-Clause" ]
null
null
null
data_analysis/AP-border_fitting_analysis.ipynb
ecuracosta/Modeling_the_spatiotemporal_control_of_cell_cycle_acceleration
3991cb6b97b1d742971b93f32ef1cca9de7b067f
[ "BSD-3-Clause" ]
null
null
null
1,302.335714
67,296
0.955004
[ [ [ "# Spatiotemporal distribution of AxFUCCI cells", "_____no_output_____" ] ], [ [ "# Required libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport scipy\nimport os\nimport seaborn as sns \nfrom pyabc import (Distribution, History)", "_____no_output_____" ], [ "# Experimental data\noutgrowth_df = pd.read_csv('./outgrowth.csv')\noutgrowth_df.set_index(['day', 'tail'], inplace=True)\noutgrowth_mean = outgrowth_df.groupby('day').mean()['outgrowth']\n\npercentage_df = pd.read_csv('./percentage_100um.csv')\ndf = percentage_df\nfor day in range(0,6):\n df.loc[df['day'] == day, 'position'] = (outgrowth_mean[day] - (df.loc[df['day'] == day, 'position']-100)).astype(int)\npercentage_df = df\npercentage_df.set_index(['day', 'tail', 'position'], inplace=True)\npercentage_df = percentage_df.drop(['unlabelled'], axis=1)\nexperiments = percentage_df", "_____no_output_____" ], [ "# Fitting results for each animal tail and day\nmeans,stds = {},{}\nfor day,df_day in percentage_df.groupby(level='day'):\n tails_mean, tails_std = {},{}\n for tail,df_animal in df_day.groupby(level='tail'):\n db_path = (\"sqlite:///\" + os.path.join(\"./fitting_results/\",\n \"sp_fitting-day=\"+str(day)+\"-tail=\"+str(tail)+\".db\"))\n h = History(db_path) \n df = h.get_distribution(m=0)\n tails_mean[tail] = df[0].mean()\n tails_std[tail] = df[0].std() \n means[day] = pd.DataFrame.from_dict(tails_mean, orient='index')\n stds[day] = pd.DataFrame.from_dict(tails_std, orient='index')\nmeans = pd.concat(means, names=['day','tail']) \nmeans['sp'] = outgrowth_mean-means['sp']\nstds = pd.concat(stds, names=['day','tail']) \nday_means = means.groupby('day').mean()\nday_std = means.groupby('day').std()", "_____no_output_____" ], [ "# Kolmogorov-Smirnov statistic\nsignificance = 0.05\nfor day in range(0,6):\n print(\"Day\",day)\n green = scipy.stats.ks_2samp(means.xs(day,level='day')['c1g'],means.xs(day,level='day')['c2g'])\n magenta = 
scipy.stats.ks_2samp(means.xs(day,level='day')['c1m'],means.xs(day,level='day')['c2m'])\n print(\"Green:\",green)\n print(\"H0:\",green[1]>significance)\n print(\"Magenta\",magenta)\n print(\"H0:\",magenta[1]>significance)", "Day 0\nGreen: KstestResult(statistic=0.4, pvalue=0.873015873015873)\nH0: True\nMagenta KstestResult(statistic=0.4, pvalue=0.873015873015873)\nH0: True\nDay 1\nGreen: KstestResult(statistic=0.4, pvalue=0.873015873015873)\nH0: True\nMagenta KstestResult(statistic=0.6, pvalue=0.35714285714285715)\nH0: True\nDay 2\nGreen: KstestResult(statistic=0.8, pvalue=0.07936507936507936)\nH0: True\nMagenta KstestResult(statistic=0.8, pvalue=0.07936507936507936)\nH0: True\nDay 3\nGreen: KstestResult(statistic=0.5, pvalue=0.474025974025974)\nH0: True\nMagenta KstestResult(statistic=0.5, pvalue=0.474025974025974)\nH0: True\nDay 4\nGreen: KstestResult(statistic=1.0, pvalue=0.028571428571428577)\nH0: False\nMagenta KstestResult(statistic=1.0, pvalue=0.028571428571428577)\nH0: False\nDay 5\nGreen: KstestResult(statistic=1.0, pvalue=0.028571428571428577)\nH0: False\nMagenta KstestResult(statistic=1.0, pvalue=0.028571428571428577)\nH0: False\n" ], [ "# Fitting results plots\nfor day in range(0,6):\n sp_mean = means.groupby('day')['sp'].mean().iloc[day]\n sp_std = means.groupby('day')['sp'].std().iloc[day]\n c1g = means.groupby('day')['c1g'].mean().iloc[day]\n c2g = means.groupby('day')['c2g'].mean().iloc[day]\n c1g_std = means.groupby('day')['c1g'].std().iloc[day]\n c2g_std = means.groupby('day')['c2g'].std().iloc[day]\n \n c1m = means.groupby('day')['c1m'].mean().iloc[day]\n c2m = means.groupby('day')['c2m'].mean().iloc[day]\n c1m_std = means.groupby('day')['c1m'].std().iloc[day]\n c2m_std = means.groupby('day')['c2m'].std().iloc[day] \n \n pos = experiments.sort_index().xs(day,level='day').groupby('position').mean().dropna().index\n data = experiments.sort_index().xs(day,level='day').reset_index().dropna()\n\n ax = sns.scatterplot(x='position', y='green', 
data=data,style='tail',color='green')\n ax = sns.lineplot(x='position', y='green', data=data,style='tail',color='green')\n \n ax.step([-3000,sp_mean,sp_mean,3000], [c2g,c2g,c1g,c1g], color='darkgreen',linewidth=5, alpha=0.5)\n \n ax = sns.scatterplot(x='position', y='magenta', data=data,color='magenta',style='tail')\n ax = sns.lineplot(x='position', y='magenta', data=data,color='magenta',style='tail')\n\n ax.step([-3000,sp_mean,sp_mean,3000], [c2m,c2m,c1m,c1m], color='darkmagenta',linewidth=5, alpha=0.5)\n\n if day == 4:\n plt.axvline(-717.65,color='black',linestyle='--')\n plt.axvspan(-717.65-271.9, -717.65+271.9, color='black', alpha=0.1)\n plt.axvspan(-717.65-2*271.9, -717.65+2*271.9, color='black', alpha=0.1)\n if day == 5:\n plt.axvline(-446.43,color='black',linestyle='--')\n plt.axvspan(-446.43-112.46, -446.43+112.46, color='black', alpha=0.1) \n plt.axvspan(-446.43-2*112.46, -446.43+2*112.46, color='black', alpha=0.1)\n title = 'Time = '+ str(day)+\" dpa\"\n plt.xlim(data['position'].min()-100,data['position'].max()+100)\n plt.ylim(0,110)\n plt.xlabel('AP Position' + ' (' + r'$\\mu$'+'m)')\n plt.ylabel('G0/G1 and S/G2 AxFUCCI cells (%)')\n plt.suptitle(title,size='24') \n plt.rcParams.update({'font.size': 14})\n plt.legend([],[], frameon=False)\n plt.savefig('./fit_plot2/ap-border_'+str(day), dpi=300, bbox_inches='tight')\n plt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb2265400b63110153d3d1b22922d06dd3b56233
190,573
ipynb
Jupyter Notebook
notebooks/estimate_betas_CellRegMap_Bcells_noplasma.ipynb
annacuomo/TenK10K_analyses_HPC
2023a3e36d06cf66bc114e6b4d00a2e3345fbc3b
[ "MIT" ]
null
null
null
notebooks/estimate_betas_CellRegMap_Bcells_noplasma.ipynb
annacuomo/TenK10K_analyses_HPC
2023a3e36d06cf66bc114e6b4d00a2e3345fbc3b
[ "MIT" ]
null
null
null
notebooks/estimate_betas_CellRegMap_Bcells_noplasma.ipynb
annacuomo/TenK10K_analyses_HPC
2023a3e36d06cf66bc114e6b4d00a2e3345fbc3b
[ "MIT" ]
null
null
null
70.844981
51,600
0.67115
[ [ [ "import scanpy as sc\nimport pandas as pd\nimport xarray as xr\nfrom numpy import ones\nfrom pandas_plink import read_plink1_bin\nfrom numpy.linalg import cholesky\nimport matplotlib.pyplot as plt\nimport time\nfrom limix.qc import quantile_gaussianize", "Matplotlib created a temporary config/cache directory at /tmp/matplotlib-dnvcosyx because the default path (/home/jovyan/.cache/matplotlib) is not a writable directory; it is highly recommended to set the MPLCONFIGDIR environment variable to a writable directory, in particular to speed up the import of Matplotlib and to better support multiprocessing.\n" ], [ "import cellregmap\ncellregmap ", "_____no_output_____" ], [ "from cellregmap import estimate_betas", "_____no_output_____" ], [ "mydir = \"/share/ScratchGeneral/anncuo/OneK1K/\"\ninput_files_dir = \"/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/\"", "_____no_output_____" ], [ "chrom = 8", "_____no_output_____" ], [ "## sample mapping file\n## this file will map cells to donors \n## here, B cells only\nsample_mapping_file = input_files_dir+\"smf_Bcells_noplasma.csv\"\nsample_mapping = pd.read_csv(sample_mapping_file, dtype={\"individual_long\": str, \"genotype_individual_id\": str, \"phenotype_sample_id\": str}, index_col=0)", "_____no_output_____" ], [ "sample_mapping.shape", "_____no_output_____" ], [ "sample_mapping.head()", "_____no_output_____" ], [ "## extract unique individuals\ndonors0 = sample_mapping[\"genotype_individual_id\"].unique()\ndonors0.sort()\nprint(\"Number of unique donors: {}\".format(len(donors0)))", "Number of unique donors: 981\n" ], [ "#### kinship file", "_____no_output_____" ], [ "## read in GRM (genotype relationship matrix; kinship matrix)\nkinship_file=\"/share/ScratchGeneral/anncuo/OneK1K/input_files_CellRegMap/grm_wide.csv\"\nK = pd.read_csv(kinship_file, index_col=0)\nK.index = K.index.astype('str')\nassert all(K.columns == K.index) #symmetric matrix, donors x donors", 
"/share/ScratchGeneral/anncuo/jupyter/conda_notebooks/envs/cellregmap_notebook/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3457: DtypeWarning: Columns (0) have mixed types.Specify dtype option on import or set low_memory=False.\n exec(code_obj, self.user_global_ns, self.user_ns)\n" ], [ "K = xr.DataArray(K.values, dims=[\"sample_0\", \"sample_1\"], coords={\"sample_0\": K.columns, \"sample_1\": K.index})\nK = K.sortby(\"sample_0\").sortby(\"sample_1\")\ndonors = sorted(set(list(K.sample_0.values)).intersection(donors0))\nprint(\"Number of donors after kinship intersection: {}\".format(len(donors)))", "Number of donors after kinship intersection: 981\n" ], [ "## subset to relevant donors\nK = K.sel(sample_0=donors, sample_1=donors)\nassert all(K.sample_0 == donors)\nassert all(K.sample_1 == donors)", "_____no_output_____" ], [ "plt.matshow(K)", "_____no_output_____" ], [ "## and decompose such as K = hK @ hK.T (using Cholesky decomposition)\nhK = cholesky(K.values)\nhK = xr.DataArray(hK, dims=[\"sample\", \"col\"], coords={\"sample\": K.sample_0.values})\nassert all(hK.sample.values == K.sample_0.values)", "_____no_output_____" ], [ "del K\nprint(\"Sample mapping number of rows BEFORE intersection: {}\".format(sample_mapping.shape[0]))\n## subsample sample mapping file to donors in the kinship matrix\nsample_mapping = sample_mapping[sample_mapping[\"genotype_individual_id\"].isin(donors)]\nprint(\"Sample mapping number of rows AFTER intersection: {}\".format(sample_mapping.shape[0]))", "Sample mapping number of rows BEFORE intersection: 130091\nSample mapping number of rows AFTER intersection: 130091\n" ], [ "## use sel from xarray to expand hK (using the sample mapping file)\nhK_expanded = hK.sel(sample=sample_mapping[\"genotype_individual_id\"].values)\nassert all(hK_expanded.sample.values == sample_mapping[\"genotype_individual_id\"].values)", "_____no_output_____" ], [ "hK_expanded.shape", "_____no_output_____" ], [ "#### phenotype file", 
"_____no_output_____" ], [ "# open anndata \nmy_file = \"/share/ScratchGeneral/anncuo/OneK1K/expression_objects/sce\"+str(chrom)+\".h5ad\"\nadata = sc.read(my_file)\n# sparse to dense\nmat = adata.raw.X.todense()\n# make pandas dataframe\nmat_df = pd.DataFrame(data=mat.T, index=adata.raw.var.index, columns=adata.obs.index)\n# turn into xr array\nphenotype = xr.DataArray(mat_df.values, dims=[\"trait\", \"cell\"], coords={\"trait\": mat_df.index.values, \"cell\": mat_df.columns.values})\nphenotype = phenotype.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)", "_____no_output_____" ], [ "del mat\ndel mat_df", "_____no_output_____" ], [ "phenotype.shape", "_____no_output_____" ], [ "phenotype.head()", "_____no_output_____" ], [ "#### genotype file", "_____no_output_____" ], [ "## read in genotype file (plink format)\nplink_folder = \"/share/ScratchGeneral/anncuo/OneK1K/plink_files/\"\nplink_file = plink_folder+\"plink_chr\"+str(chrom)+\".bed\"\nG = read_plink1_bin(plink_file)", "Mapping files: 100%|██████████| 3/3 [00:01<00:00, 3.00it/s]\n" ], [ "G", "_____no_output_____" ], [ "G.shape", "_____no_output_____" ], [ "# change this to select known eQTLs instead", "_____no_output_____" ], [ "# Filter on specific gene-SNP pairs\n# eQTL from B cells (B IN + B Mem)\nBcell_eqtl_file = input_files_dir+\"fvf_Bcell_eqtls.csv\"\nBcell_eqtl = pd.read_csv(Bcell_eqtl_file, index_col = 0)\nBcell_eqtl.head()", "_____no_output_____" ], [ "## SELL (chr1, index=30)\n## REL (chr2, index=6)\n## BLK (chr8, index=4)\n## ORMDL3 (chr17, index=2)", "_____no_output_____" ], [ "genes = Bcell_eqtl[Bcell_eqtl['chrom']==int(chrom)]['feature'].unique()\ngenes", "_____no_output_____" ], [ "# (1) gene name (feature_id)\ngene_name = genes[4]\ngene_name", "_____no_output_____" ], [ "# select SNPs for a given gene\nleads = Bcell_eqtl[Bcell_eqtl['feature']==gene_name]['snp_id'].unique()\nleads", "_____no_output_____" ], [ "#breakpoint()\nG_sel = G[:,G['snp'].isin(leads)]", "_____no_output_____" ], [ 
"G_sel", "_____no_output_____" ], [ "# expand out genotypes from cells to donors (and select relevant donors in the same step)\nG_expanded = G_sel.sel(sample=sample_mapping[\"individual_long\"].values)\n# assert all(hK_expanded.sample.values == G_expanded.sample.values)", "/share/ScratchGeneral/anncuo/jupyter/conda_notebooks/envs/cellregmap_notebook/lib/python3.7/site-packages/xarray/core/indexing.py:1227: PerformanceWarning: Slicing with an out-of-order index is generating 281 times more chunks\n return self.array[key]\n" ], [ "G_expanded.shape", "_____no_output_____" ], [ "del G", "_____no_output_____" ], [ "#### context file", "_____no_output_____" ], [ "# cells (B cells only) by PCs\nC_file = input_files_dir+\"PCs_Bcells_noplasma.csv\"\nC = pd.read_csv(C_file, index_col = 0)\n# C_file = input_files_dir+\"PCs_Bcells.csv.pkl\"\n# C = pd.read_pickle(C_file)\nC = xr.DataArray(C.values, dims=[\"cell\", \"pc\"], coords={\"cell\": C.index.values, \"pc\": C.columns.values})\nC = C.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)\nassert all(C.cell.values == sample_mapping[\"phenotype_sample_id\"].values)", "_____no_output_____" ], [ "C.shape", "_____no_output_____" ], [ "# C_gauss = quantile_gaussianize(C)", "_____no_output_____" ], [ "# select gene\ny = phenotype.sel(trait=gene_name)", "_____no_output_____" ], [ "[(y == 0).astype(int).sum()/len(y)]", "_____no_output_____" ], [ "plt.hist(y)\nplt.show()", "_____no_output_____" ], [ "y = quantile_gaussianize(y)", "_____no_output_____" ], [ "plt.hist(y)\nplt.show()", "_____no_output_____" ], [ "n_cells = phenotype.shape[1]\nW = ones((n_cells, 1))", "_____no_output_____" ], [ "del phenotype", "_____no_output_____" ], [ "start_time = time.time()\nGG = G_expanded.values\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "--- 0.16577982902526855 seconds ---\n" ], [ "# del G_expanded\ndel G_sel", "_____no_output_____" ], [ "snps = G_expanded[\"snp\"].values\nsnps", "_____no_output_____" ], [ "# get 
MAF\nMAF_dir = \"/share/ScratchGeneral/anncuo/OneK1K/snps_with_maf_greaterthan0.05/\"\nmyfile = MAF_dir+\"chr\"+str(chrom)+\".SNPs.txt\"\ndf_maf = pd.read_csv(myfile, sep=\"\\t\")\ndf_maf.head()", "_____no_output_____" ], [ "import numpy as np", "_____no_output_____" ], [ "mafs = np.array([])\nfor snp in snps:\n mafs = np.append(mafs, df_maf[df_maf[\"SNP\"] == snp][\"MAF\"].values)\nmafs", "_____no_output_____" ], [ "start_time = time.time()\nbetas = estimate_betas(y=y, W=W, E=C.values[:,0:10], G=GG, hK=hK_expanded, maf=mafs)\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "_____no_output_____" ], [ "beta_G = betas[0]\nbeta_GxC = betas[1][0]", "_____no_output_____" ], [ "beta_G_df = pd.DataFrame({\"chrom\":G_expanded.chrom.values,\n \"betaG\":beta_G,\n \"variant\":G_expanded.snp.values})\nbeta_G_df.head()", "_____no_output_____" ], [ "cells = phenotype[\"cell\"].values\nsnps = G_expanded[\"variant\"].values\n\nbeta_GxC_df = pd.DataFrame(data = beta_GxC, columns=snps, index=cells)\nbeta_GxC_df.head()", "_____no_output_____" ], [ "## took over an hour to run for one SNP!", "_____no_output_____" ], [ "gene_name", "_____no_output_____" ], [ "folder = mydir + \"CRM_interaction/Bcells_noplasma_Bcell_eQTLs/betas/\"\noutfilename = f\"{folder}{gene_name}\"\nprint(outfilename)", "_____no_output_____" ], [ "beta_G_df.to_csv(outfilename+\"_betaG.csv\")", "_____no_output_____" ], [ "beta_GxC_df.to_csv(outfilename+\"_betaGxC.csv\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb227bcd913a600a7e6efe995de111c6ea9b1e96
27,488
ipynb
Jupyter Notebook
how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.ipynb
alla15747/MachineLearningNotebooks
cf7bbd693ac956a9c3c6692addf84db3e58df23a
[ "MIT" ]
3
2020-08-04T18:37:21.000Z
2020-09-21T20:09:31.000Z
how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.ipynb
alla15747/MachineLearningNotebooks
cf7bbd693ac956a9c3c6692addf84db3e58df23a
[ "MIT" ]
null
null
null
how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.ipynb
alla15747/MachineLearningNotebooks
cf7bbd693ac956a9c3c6692addf84db3e58df23a
[ "MIT" ]
1
2019-12-27T23:14:38.000Z
2019-12-27T23:14:38.000Z
48.998217
240
0.523174
[ [ [ "# Set up Azure ML Automated Machine Learning on SQL Server 2019 CTP 2.4 big data cluster\r\n\r\n\\# Prerequisites: \r\n\\# - An Azure subscription and resource group \r\n\\# - An Azure Machine Learning workspace \r\n\\# - A SQL Server 2019 CTP 2.4 big data cluster with Internet access and a database named 'automl' \r\n\\# - Azure CLI \r\n\\# - kubectl command \r\n\\# - The https://github.com/Azure/MachineLearningNotebooks repository downloaded (cloned) to your local machine\r\n\r\n\\# In the 'automl' database, create a table named 'dbo.nyc_energy' as follows: \r\n\\# - In SQL Server Management Studio, right-click the 'automl' database, select Tasks, then Import Flat File. \r\n\\# - Select the file AzureMlCli\\notebooks\\how-to-use-azureml\\automated-machine-learning\\forecasting-energy-demand\\nyc_energy.csv. \r\n\\# - Using the \"Modify Columns\" page, allow nulls for all columns. \r\n\r\n\\# Create an Azure Machine Learning Workspace using the instructions at https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-workspace \r\n\r\n\\# Create an Azure service principal. 
You can do this with the following commands: \r\n\r\naz login \r\naz account set --subscription *subscriptionid* \r\n\r\n\\# The following command prints out the **appId** and **tenant**, \r\n\\# which you insert into the indicated cell later in this notebook \r\n\\# to allow AutoML to authenticate with Azure: \r\n\r\naz ad sp create-for-rbac --name *principlename* --password *password*\r\n\r\n\\# Log into the master instance of SQL Server 2019 CTP 2.4: \r\nkubectl exec -it mssql-master-pool-0 -n *clustername* -c mssql-server -- /bin/bash\r\n\r\nmkdir /tmp/aml\r\n\r\ncd /tmp/aml\r\n\r\n\\# **Modify** the following with your subscription_id, resource_group, and workspace_name: \r\ncat > config.json << EOF \r\n{ \r\n \"subscription_id\": \"123456ab-78cd-0123-45ef-abcd12345678\", \r\n \"resource_group\": \"myrg1\", \r\n \"workspace_name\": \"myws1\" \r\n} \r\nEOF\r\n\r\n\\# The directory referenced below is appropriate for the master instance of SQL Server 2019 CTP 2.4.\r\n\r\ncd /opt/mssql/mlservices/runtime/python/bin\r\n\r\n./python -m pip install azureml-sdk[automl]\r\n\r\n./python -m pip install --upgrade numpy \r\n\r\n./python -m pip install --upgrade sklearn\r\n", "_____no_output_____" ], [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/sql-server/setup/auto-ml-sql-setup.png)", "_____no_output_____" ] ], [ [ "-- Enable external scripts to allow invoking Python\r\nsp_configure 'external scripts enabled',1 \r\nreconfigure with override \r\nGO\r\n", "_____no_output_____" ], [ "-- Use database 'automl'\r\nUSE [automl]\r\nGO", "_____no_output_____" ], [ "-- This is a table to hold the Azure ML connection information.\r\nSET ANSI_NULLS ON\r\nGO\r\n\r\nSET QUOTED_IDENTIFIER ON\r\nGO\r\n\r\nCREATE TABLE [dbo].[aml_connection](\r\n [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,\r\n\t[ConnectionName] [nvarchar](255) NULL,\r\n\t[TenantId] [nvarchar](255) 
NULL,\r\n\t[AppId] [nvarchar](255) NULL,\r\n\t[Password] [nvarchar](255) NULL,\r\n\t[ConfigFile] [nvarchar](255) NULL\r\n) ON [PRIMARY]\r\nGO", "_____no_output_____" ] ], [ [ "# Copy the values from create-for-rbac above into the cell below", "_____no_output_____" ] ], [ [ "-- Use the following values:\r\n-- Leave the name as 'Default'\r\n-- Insert <tenant> returned by create-for-rbac above\r\n-- Insert <AppId> returned by create-for-rbac above\r\n-- Insert <password> used in create-for-rbac above\r\n-- Leave <path> as '/tmp/aml/config.json'\r\nINSERT INTO [dbo].[aml_connection] \r\nVALUES (\r\n N'Default', -- Name\r\n N'11111111-2222-3333-4444-555555555555', -- Tenant\r\n N'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', -- AppId\r\n N'insertpasswordhere', -- Password\r\n N'/tmp/aml/config.json' -- Path\r\n );\r\nGO", "_____no_output_____" ], [ "-- This is a table to hold the results from the AutoMLTrain procedure.\r\nSET ANSI_NULLS ON\r\nGO\r\n\r\nSET QUOTED_IDENTIFIER ON\r\nGO\r\n\r\nCREATE TABLE [dbo].[aml_model](\r\n [Id] [int] IDENTITY(1,1) NOT NULL PRIMARY KEY,\r\n [Model] [varchar](max) NOT NULL, -- The model, which can be passed to AutoMLPredict for testing or prediction.\r\n [RunId] [nvarchar](250) NULL, -- The RunId, which can be used to view the model in the Azure Portal.\r\n [CreatedDate] [datetime] NULL,\r\n [ExperimentName] [nvarchar](100) NULL, -- Azure ML Experiment Name\r\n [WorkspaceName] [nvarchar](100) NULL, -- Azure ML Workspace Name\r\n\t[LogFileText] [nvarchar](max) NULL\r\n) \r\nGO\r\n\r\nALTER TABLE [dbo].[aml_model] ADD DEFAULT (getutcdate()) FOR [CreatedDate]\r\nGO\r\n", "_____no_output_____" ], [ "-- This stored procedure uses automated machine learning to train several models\r\n-- and return the best model.\r\n--\r\n-- The result set has several columns:\r\n-- best_run - ID of the best model found\r\n-- experiment_name - training run name\r\n-- fitted_model - best model found\r\n-- log_file_text - console output\r\n-- workspace - name of the 
Azure ML workspace where run history is stored\r\n--\r\n-- An example call for a classification problem is:\r\n-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)\r\n-- exec dbo.AutoMLTrain @input_query='\r\n-- SELECT top 100000 \r\n-- CAST([pickup_datetime] AS NVARCHAR(30)) AS pickup_datetime\r\n-- ,CAST([dropoff_datetime] AS NVARCHAR(30)) AS dropoff_datetime\r\n-- ,[passenger_count]\r\n-- ,[trip_time_in_secs]\r\n-- ,[trip_distance]\r\n-- ,[payment_type]\r\n-- ,[tip_class]\r\n-- FROM [dbo].[nyctaxi_sample] order by [hack_license] ',\r\n-- @label_column = 'tip_class',\r\n-- @iterations=10\r\n-- \r\n-- An example call for forecasting is:\r\n-- insert into dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)\r\n-- exec dbo.AutoMLTrain @input_query='\r\n-- select cast(timeStamp as nvarchar(30)) as timeStamp,\r\n-- demand,\r\n-- \t precip,\r\n-- \t temp,\r\n-- case when timeStamp < ''2017-01-01'' then 0 else 1 end as is_validate_column\r\n-- from nyc_energy\r\n-- where demand is not null and precip is not null and temp is not null\r\n-- and timeStamp < ''2017-02-01''',\r\n-- @label_column='demand',\r\n-- @task='forecasting',\r\n-- @iterations=10,\r\n-- @iteration_timeout_minutes=5,\r\n-- @time_column_name='timeStamp',\r\n-- @is_validate_column='is_validate_column',\r\n-- @experiment_name='automl-sql-forecast',\r\n-- @primary_metric='normalized_root_mean_squared_error'\r\n\r\nSET ANSI_NULLS ON\r\nGO\r\nSET QUOTED_IDENTIFIER ON\r\nGO\r\nCREATE OR ALTER PROCEDURE [dbo].[AutoMLTrain]\r\n (\r\n @input_query NVARCHAR(MAX), -- The SQL Query that will return the data to train and validate the model.\r\n @label_column NVARCHAR(255)='Label', -- The name of the column in the result of @input_query that is the label.\r\n @primary_metric NVARCHAR(40)='AUC_weighted', -- The metric to optimize.\r\n @iterations INT=100, -- The maximum number of pipelines to train.\r\n @task NVARCHAR(40)='classification', -- The type of task. 
Can be classification, regression or forecasting.\r\n @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.\r\n @iteration_timeout_minutes INT = 15, -- The maximum time in minutes for training a single pipeline. \r\n @experiment_timeout_minutes INT = 60, -- The maximum time in minutes for training all pipelines.\r\n @n_cross_validations INT = 3, -- The number of cross validations.\r\n @blacklist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that will not be used.\r\n -- The list of possible models can be found at:\r\n -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings\r\n @whitelist_models NVARCHAR(MAX) = '', -- A comma separated list of algos that can be used.\r\n -- The list of possible models can be found at:\r\n -- https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#configure-your-experiment-settings\r\n @experiment_exit_score FLOAT = 0, -- Stop the experiment if this score is acheived.\r\n @sample_weight_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that gives a sample weight.\r\n @is_validate_column NVARCHAR(255)='', -- The name of the column in the result of @input_query that indicates if the row is for training or validation.\r\n\t -- In the values of the column, 0 means for training and 1 means for validation.\r\n @time_column_name NVARCHAR(255)='', -- The name of the timestamp column for forecasting.\r\n\t@connection_name NVARCHAR(255)='default' -- The AML connection to use.\r\n ) AS\r\nBEGIN\r\n\r\n DECLARE @tenantid NVARCHAR(255)\r\n DECLARE @appid NVARCHAR(255)\r\n DECLARE @password NVARCHAR(255)\r\n DECLARE @config_file NVARCHAR(255)\r\n\r\n\tSELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile\r\n\tFROM aml_connection\r\n\tWHERE ConnectionName = @connection_name;\r\n\r\n\tEXEC 
sp_execute_external_script @language = N'Python', @script = N'import pandas as pd\r\nimport logging \r\nimport azureml.core \r\nimport pandas as pd\r\nimport numpy as np\r\nfrom azureml.core.experiment import Experiment \r\nfrom azureml.train.automl import AutoMLConfig \r\nfrom sklearn import datasets \r\nimport pickle\r\nimport codecs\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication \r\nfrom azureml.core.workspace import Workspace \r\n\r\nif __name__.startswith(\"sqlindb\"):\r\n auth = ServicePrincipalAuthentication(tenantid, appid, password) \r\n \r\n ws = Workspace.from_config(path=config_file, auth=auth) \r\n \r\n project_folder = \"./sample_projects/\" + experiment_name\r\n \r\n experiment = Experiment(ws, experiment_name) \r\n\r\n data_train = input_data\r\n X_valid = None\r\n y_valid = None\r\n sample_weight_valid = None\r\n\r\n if is_validate_column != \"\" and is_validate_column is not None:\r\n data_train = input_data[input_data[is_validate_column] <= 0]\r\n data_valid = input_data[input_data[is_validate_column] > 0]\r\n data_train.pop(is_validate_column)\r\n data_valid.pop(is_validate_column)\r\n y_valid = data_valid.pop(label_column).values\r\n if sample_weight_column != \"\" and sample_weight_column is not None:\r\n sample_weight_valid = data_valid.pop(sample_weight_column).values\r\n X_valid = data_valid\r\n n_cross_validations = None\r\n\r\n y_train = data_train.pop(label_column).values\r\n\r\n sample_weight = None\r\n if sample_weight_column != \"\" and sample_weight_column is not None:\r\n sample_weight = data_train.pop(sample_weight_column).values\r\n\r\n X_train = data_train\r\n\r\n if experiment_timeout_minutes == 0:\r\n experiment_timeout_minutes = None\r\n\r\n if experiment_exit_score == 0:\r\n experiment_exit_score = None\r\n\r\n if blacklist_models == \"\":\r\n blacklist_models = None\r\n\r\n if blacklist_models is not None:\r\n blacklist_models = blacklist_models.replace(\" \", \"\").split(\",\")\r\n\r\n if 
whitelist_models == \"\":\r\n whitelist_models = None\r\n\r\n if whitelist_models is not None:\r\n whitelist_models = whitelist_models.replace(\" \", \"\").split(\",\")\r\n\r\n automl_settings = {}\r\n preprocess = True\r\n if time_column_name != \"\" and time_column_name is not None:\r\n automl_settings = { \"time_column_name\": time_column_name }\r\n preprocess = False\r\n\r\n log_file_name = \"automl_errors.log\"\r\n\t \r\n automl_config = AutoMLConfig(task = task, \r\n debug_log = log_file_name, \r\n primary_metric = primary_metric, \r\n iteration_timeout_minutes = iteration_timeout_minutes, \r\n experiment_timeout_minutes = experiment_timeout_minutes,\r\n iterations = iterations, \r\n n_cross_validations = n_cross_validations, \r\n preprocess = preprocess,\r\n verbosity = logging.INFO, \r\n X = X_train, \r\n y = y_train, \r\n path = project_folder,\r\n blacklist_models = blacklist_models,\r\n whitelist_models = whitelist_models,\r\n experiment_exit_score = experiment_exit_score,\r\n sample_weight = sample_weight,\r\n X_valid = X_valid,\r\n y_valid = y_valid,\r\n sample_weight_valid = sample_weight_valid,\r\n **automl_settings) \r\n \r\n local_run = experiment.submit(automl_config, show_output = True) \r\n\r\n best_run, fitted_model = local_run.get_output()\r\n\r\n pickled_model = codecs.encode(pickle.dumps(fitted_model), \"base64\").decode()\r\n\r\n log_file_text = \"\"\r\n\r\n try:\r\n with open(log_file_name, \"r\") as log_file:\r\n log_file_text = log_file.read()\r\n except:\r\n log_file_text = \"Log file not found\"\r\n\r\n returned_model = pd.DataFrame({\"best_run\": [best_run.id], \"experiment_name\": [experiment_name], \"fitted_model\": [pickled_model], \"log_file_text\": [log_file_text], \"workspace\": [ws.name]}, dtype=np.dtype(np.str))\r\n'\r\n\t, @input_data_1 = @input_query\r\n\t, @input_data_1_name = N'input_data'\r\n\t, @output_data_1_name = N'returned_model'\r\n\t, @params = N'@label_column NVARCHAR(255), \r\n\t @primary_metric 
NVARCHAR(40),\r\n\t\t\t\t @iterations INT, @task NVARCHAR(40),\r\n\t\t\t\t @experiment_name NVARCHAR(32),\r\n\t\t\t\t @iteration_timeout_minutes INT,\r\n\t\t\t\t @experiment_timeout_minutes INT,\r\n\t\t\t\t @n_cross_validations INT,\r\n\t\t\t\t @blacklist_models NVARCHAR(MAX),\r\n\t\t\t\t @whitelist_models NVARCHAR(MAX),\r\n\t\t\t\t @experiment_exit_score FLOAT,\r\n\t\t\t\t @sample_weight_column NVARCHAR(255),\r\n\t\t\t\t @is_validate_column NVARCHAR(255),\r\n\t\t\t\t @time_column_name NVARCHAR(255),\r\n\t\t\t\t @tenantid NVARCHAR(255),\r\n\t\t\t\t @appid NVARCHAR(255),\r\n\t\t\t\t @password NVARCHAR(255),\r\n\t\t\t\t @config_file NVARCHAR(255)'\r\n\t, @label_column = @label_column\r\n\t, @primary_metric = @primary_metric\r\n\t, @iterations = @iterations\r\n\t, @task = @task\r\n\t, @experiment_name = @experiment_name\r\n\t, @iteration_timeout_minutes = @iteration_timeout_minutes\r\n\t, @experiment_timeout_minutes = @experiment_timeout_minutes\r\n\t, @n_cross_validations = @n_cross_validations\r\n\t, @blacklist_models = @blacklist_models\r\n\t, @whitelist_models = @whitelist_models\r\n\t, @experiment_exit_score = @experiment_exit_score\r\n\t, @sample_weight_column = @sample_weight_column\r\n\t, @is_validate_column = @is_validate_column\r\n\t, @time_column_name = @time_column_name\r\n\t, @tenantid = @tenantid\r\n\t, @appid = @appid\r\n\t, @password = @password\r\n\t, @config_file = @config_file\r\nWITH RESULT SETS ((best_run NVARCHAR(250), experiment_name NVARCHAR(100), fitted_model VARCHAR(MAX), log_file_text NVARCHAR(MAX), workspace NVARCHAR(100)))\r\nEND", "_____no_output_____" ], [ "-- This procedure returns a list of metrics for each iteration of a training run.\r\nSET ANSI_NULLS ON\r\nGO\r\nSET QUOTED_IDENTIFIER ON\r\nGO\r\nCREATE OR ALTER PROCEDURE [dbo].[AutoMLGetMetrics]\r\n (\r\n\t@run_id NVARCHAR(250), -- The RunId\r\n @experiment_name NVARCHAR(32)='automl-sql-test', -- This can be used to find the experiment in the Azure Portal.\r\n @connection_name 
NVARCHAR(255)='default' -- The AML connection to use.\r\n ) AS\r\nBEGIN\r\n DECLARE @tenantid NVARCHAR(255)\r\n DECLARE @appid NVARCHAR(255)\r\n DECLARE @password NVARCHAR(255)\r\n DECLARE @config_file NVARCHAR(255)\r\n\r\n\tSELECT @tenantid=TenantId, @appid=AppId, @password=Password, @config_file=ConfigFile\r\n\tFROM aml_connection\r\n\tWHERE ConnectionName = @connection_name;\r\n\r\n EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd\r\nimport logging \r\nimport azureml.core \r\nimport numpy as np\r\nfrom azureml.core.experiment import Experiment \r\nfrom azureml.train.automl.run import AutoMLRun\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication \r\nfrom azureml.core.workspace import Workspace \r\n\r\nauth = ServicePrincipalAuthentication(tenantid, appid, password) \r\n \r\nws = Workspace.from_config(path=config_file, auth=auth) \r\n \r\nexperiment = Experiment(ws, experiment_name) \r\n\r\nml_run = AutoMLRun(experiment = experiment, run_id = run_id)\r\n\r\nchildren = list(ml_run.get_children())\r\niterationlist = []\r\nmetricnamelist = []\r\nmetricvaluelist = []\r\n\r\nfor run in children:\r\n properties = run.get_properties()\r\n if \"iteration\" in properties:\r\n iteration = int(properties[\"iteration\"])\r\n for metric_name, metric_value in run.get_metrics().items():\r\n if isinstance(metric_value, float):\r\n iterationlist.append(iteration)\r\n metricnamelist.append(metric_name)\r\n metricvaluelist.append(metric_value)\r\n \r\nmetrics = pd.DataFrame({\"iteration\": iterationlist, \"metric_name\": metricnamelist, \"metric_value\": metricvaluelist})\r\n'\r\n , @output_data_1_name = N'metrics'\r\n\t, @params = N'@run_id NVARCHAR(250), \r\n\t\t\t\t @experiment_name NVARCHAR(32),\r\n \t\t\t\t @tenantid NVARCHAR(255),\r\n\t\t\t\t @appid NVARCHAR(255),\r\n\t\t\t\t @password NVARCHAR(255),\r\n\t\t\t\t @config_file NVARCHAR(255)'\r\n , @run_id = @run_id\r\n\t, @experiment_name = @experiment_name\r\n\t, 
@tenantid = @tenantid\r\n\t, @appid = @appid\r\n\t, @password = @password\r\n\t, @config_file = @config_file\r\nWITH RESULT SETS ((iteration INT, metric_name NVARCHAR(100), metric_value FLOAT))\r\nEND", "_____no_output_____" ], [ "-- This procedure predicts values based on a model returned by AutoMLTrain and a dataset.\r\n-- It returns the dataset with a new column added, which is the predicted value.\r\nSET ANSI_NULLS ON\r\nGO\r\nSET QUOTED_IDENTIFIER ON\r\nGO\r\nCREATE OR ALTER PROCEDURE [dbo].[AutoMLPredict]\r\n (\r\n @input_query NVARCHAR(MAX), -- A SQL query returning data to predict on.\r\n @model NVARCHAR(MAX), -- A model returned from AutoMLTrain.\r\n @label_column NVARCHAR(255)='' -- Optional name of the column from input_query, which should be ignored when predicting\r\n ) AS \r\nBEGIN \r\n \r\n EXEC sp_execute_external_script @language = N'Python', @script = N'import pandas as pd \r\nimport azureml.core \r\nimport numpy as np \r\nfrom azureml.train.automl import AutoMLConfig \r\nimport pickle \r\nimport codecs \r\n \r\nmodel_obj = pickle.loads(codecs.decode(model.encode(), \"base64\")) \r\n \r\ntest_data = input_data.copy() \r\n\r\nif label_column != \"\" and label_column is not None:\r\n y_test = test_data.pop(label_column).values \r\nX_test = test_data \r\n \r\npredicted = model_obj.predict(X_test) \r\n \r\ncombined_output = input_data.assign(predicted=predicted)\r\n \r\n' \r\n , @input_data_1 = @input_query \r\n , @input_data_1_name = N'input_data' \r\n , @output_data_1_name = N'combined_output' \r\n , @params = N'@model NVARCHAR(MAX), @label_column NVARCHAR(255)' \r\n , @model = @model \r\n\t, @label_column = @label_column\r\nEND", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb2289c4e43d48f29a38114e63c4fc47953203d1
273,598
ipynb
Jupyter Notebook
etc/ScoringAndLearning.ipynb
MangoTeam/mockdown
6f42395b07a3a83d5a3703d30985ef5a5068bf09
[ "MIT" ]
null
null
null
etc/ScoringAndLearning.ipynb
MangoTeam/mockdown
6f42395b07a3a83d5a3703d30985ef5a5068bf09
[ "MIT" ]
2
2022-01-13T03:52:58.000Z
2022-03-12T01:03:41.000Z
etc/ScoringAndLearning.ipynb
MangoTeam/mockdown
6f42395b07a3a83d5a3703d30985ef5a5068bf09
[ "MIT" ]
null
null
null
341.570537
112,788
0.930551
[ [ [ "import itertools\nfrom fractions import Fraction\nfrom typing import Tuple, Sequence\nfrom pprint import pprint\n\nimport numpy as np\nimport scipy.stats as st\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport theano as th\nimport theano.tensor as T\nimport pymc3 as pm\n\nfrom hypothesis import given\nfrom hypothesis import strategies as gen", "_____no_output_____" ], [ "from mockdown.learning.noisetolerant.math import ext_farey", "_____no_output_____" ], [ "# %matplotlib widget\n%config InlineBackend.figure_format='retina'\nsns.set_style(\"darkgrid\")", "_____no_output_____" ] ], [ [ "## Constants", "_____no_output_____" ] ], [ [ "# Maximum y-noise in pixels.\nε_px = 3\n\n# \"Resolution\": Maximum denominator for inferred slopes. \n# Also defines the range of allowed rationals (0 - R_MAX)\nR_MAX = 30\n\nB_MIN = -100\nB_MAX = +100\n\nA_MIN = 0\nA_MAX = R_MAX\n\nEXP_STEPS = 5", "_____no_output_____" ], [ "f_r = ext_farey(R_MAX)", "_____no_output_____" ], [ "TINY = 1e-20 # just a very small number", "_____no_output_____" ] ], [ [ "## Definitions", "_____no_output_____" ] ], [ [ "def q2cf(n1: int, n2: int):\n \"\"\"Yields the continued fraction expansion of the rationals n1/n2.\"\"\"\n while n2:\n n1, (term, n2) = n2, divmod(n1, n2)\n yield term\n\ndef sb_depth(n1: int, n2: int):\n \"\"\"Returns the Stern-Brocot depth of the rationals n1/n2.\"\"\"\n return sum(list(q2cf(n1, n2)))\n\ndef sweep(param_ranges, factor=None):\n \"\"\"\n Returns a dataframe containing model.logp at every combination of possible parameters.\n \"\"\"\n assert factor\n \n logp_fn = factor.logp\n rows = []\n \n keys, ranges = zip(*(param_ranges.items()))\n \n for values in itertools.product(*ranges):\n params = zip(keys, values)\n logp = logp_fn(params)\n rows.append([*values, logp, np.exp(logp)])\n \n return pd.DataFrame(rows, columns=keys + ('logp', 'p'))", "_____no_output_____" ] ], [ [ "## Parameter Space", "_____no_output_____" ] ], [ [ 
"a_space_np = np.array(f_r, dtype=np.object)\na_space_th = th.shared(np.array([(frac.numerator, frac.denominator) for frac in f_r]))\n\nb_space_np = np.arange(-100, 100, 1, dtype=np.int)\nb_space_th = th.shared(b_space_np)", "_____no_output_____" ], [ "print(a_space_th.get_value()[:10])\nprint(b_space_th.get_value()[:10])", "[[ 0 1]\n [ 1 30]\n [ 1 29]\n [ 1 28]\n [ 1 27]\n [ 1 26]\n [ 1 25]\n [ 1 24]\n [ 1 23]\n [ 1 22]]\n[-100 -99 -98 -97 -96 -95 -94 -93 -92 -91]\n" ], [ "sb_depths = np.vectorize(lambda a: sb_depth(a.numerator, a.denominator))(a_space_np)\nsb_depth_hist, _ = np.histogram(sb_depths, bins=R_MAX + 1)", "_____no_output_____" ], [ "plt.bar(np.arange(0, R_MAX + 1, 1), 1/sb_depth_hist);", "_____no_output_____" ], [ "n = R_MAX\nalpha = EXP_STEPS + 1\nbeta = (R_MAX - EXP_STEPS) + 1\n\nbetabin = np.vectorize(lambda k: st.betabinom.pmf(k, n, alpha, beta))(np.arange(0, R_MAX + 1, 1))\nplt.bar(np.arange(0, R_MAX + 1, 1), betabin);", "_____no_output_____" ], [ "foo = np.vectorize(lambda k: st.betabinom.pmf(k, n, alpha, beta))(sb_depths)\nplt.bar(np.arange(0, len(sb_depths), 1), foo);", "_____no_output_____" ], [ "prior = betabin[sb_depths] * (1/sb_depth_hist)[sb_depths]\nplt.bar(np.arange(0, len(sb_depths), 1), prior)", "_____no_output_____" ], [ "prior.sum()", "_____no_output_____" ] ], [ [ "## Generating Data", "_____no_output_____" ] ], [ [ "size = 3\n\na_true = Fraction(1, 3)\nb_true = 15\n\nx = np.linspace(100, 300, size)\ny_true = a_true * x + b_true\ny = y_true + np.random.normal(scale=ε_px/3, size=size)\n\n# Make data piecewise-linear\n# y[[-1, -2, -3, -4]] = y[-5]\n\ndf = pd.DataFrame(zip(x,y), columns=['parent.width', 'child.width'])", "_____no_output_____" ], [ "# a_i_true = np.where((a_choices == [a_i_true.numerator(), a_i_true.denominator()]).all(axis=1))[0][0]\n# a_i_true, a_space_np[a_i_true]\n\n# b_i_true = np.where((b_choices == 5))[0][0]\n# b_i_true, b_space_np[b_i_true]", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(12, 
8))\nax.plot(x, y_true, ':.', color='tab:green', label=\"True\");\nax.plot(x, y, 'o', data=df, label=\"Noisy Samples\")\n\nax.set_xlabel('parent.width')\nax.set_ylabel('child.width')\nax.legend();", "_____no_output_____" ] ], [ [ "## Modeling", "_____no_output_____" ], [ "### Relaxed Model\n\nThe _relaxed_ model is more or less traditional Bayesian linear regression with continuous parameters and priors. The intent of this \"preliminary phase\" is to generate a good estimate as to the location of the best solution in the full model.\n\nThe full model is discrete, and it is therefore computationally expensive to find the maximum a posteriori estimate of (a, b). However, the relaxed model's MAP provides a very good estimate as to the location of the full model's MAP.", "_____no_output_____" ] ], [ [ "def mk_relaxed_model():\n relaxed_model = pm.Model()\n\n with relaxed_model:\n # y ~ N(⍺x + β, σ^2)\n x_data = pm.Data('x_data', x)\n \n σ = pm.HalfCauchy('σ', beta=(ε_px + TINY)/3)\n \n β = pm.Uniform('β', lower=B_MIN, upper=B_MAX)\n α = pm.Uniform('α', lower=A_MIN, upper=A_MAX)\n \n y_obs = pm.Normal('y_obs', mu=α*x_data + β, sigma=σ, observed=y)\n \n return relaxed_model\n \nrelaxed_model = mk_relaxed_model()", "_____no_output_____" ], [ "relaxed_MAP = pm.find_MAP(model=relaxed_model)\na_MAP = relaxed_MAP['α']\nb_MAP = relaxed_MAP['β']\nsigma_MAP = relaxed_MAP['σ']\npprint(relaxed_MAP)", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(12, 8))\nax.plot(x, y, 'o', label=\"noisy Samples\")\nax.plot(x, y_true, ':.', color='tab:green', label=f\"True: y = {a_true}x + {b_true}\");\nax.plot(x, a_MAP*x+b_MAP, ':.', color='tab:orange', label=f\"MAP: y = {a_MAP}x + {b_MAP}\")\nax.fill_between(x, a_MAP*x+b_MAP + 3*sigma_MAP, a_MAP*x+b_MAP - 3*sigma_MAP, color='tab:orange', alpha=.1)\nax.legend()\n\nax.set_xlabel('parent.width')\nax.set_ylabel('child.width')", "_____no_output_____" ], [ "def candidate_bounds(x, y, relaxed_MAP):\n # Note: x and y should be in sorted 
order!\n \n sigma = np.exp(relaxed_MAP['σ'])\n \n x1, x2 = min(x), max(x)\n y1, y2 = min(y), max(y)\n \n # a = (y2 - y1) / (x2 - x1)\n a1 = ((y2 + 3*sigma) - (y1 - 3*sigma)) / (x2 - x1)\n a2 = ((y2 - 3*sigma) - (y1 + 3*sigma)) / (x2 - x1)\n \n # b = y - mx (where x,y = x1, y1 or x2, y2, equivalently)\n b1 = y1 - a1*x1\n b2 = y1 - a2*x1\n \n return min(a1, a2), max(a1, a2), min(b1, b2), max(b1, b2)\n\ndef candidate_bounds_indices(a_space, b_space, a_lower, a_upper, b_lower, b_upper):\n a_i_lower = np.abs(a_space - a_lower).argmin()\n a_i_upper = np.abs(a_space - a_upper).argmin()\n b_i_lower = np.abs(b_space - b_lower).argmin()\n b_i_upper = np.abs(b_space - b_upper).argmin()\n return a_i_lower, a_i_upper, b_i_lower, b_i_upper", "_____no_output_____" ], [ "a_l, a_u, b_l, b_u = candidate_bounds(x, y, relaxed_MAP)\nprint(f\"a: {a_l}...{a_u}, b: {b_l}...{b_u}\")\n\na_i_l, a_i_u, b_i_l, b_i_u = candidate_bounds_indices(a_space_np, b_space_np, a_l, a_u, b_l, b_u)\nprint(a_i_l, a_i_u, b_i_l, b_i_u)\n\nprint(f\"a: {a_space_np[a_i_l]}...{a_space_np[a_i_u]}, b: {b_space_np[b_i_l]}...{b_space_np[b_i_u]}\")", "a: 0.3121612032948158...0.37999673961735647, b: 9.04669765806971...15.83025129032378\n87 105 109 116\na: 5/16...11/29, b: 9...16\n" ] ], [ [ "## Full Model", "_____no_output_____" ] ], [ [ "with pm.Model() as model: \n b_i = pm.DiscreteUniform('b_i', lower=0, upper=len(b_space_np))\n b = pm.Deterministic('b', b_space_th[b_i])\n \n \n \n a_i = pm.DiscreteUniform('a_i', lower=0, upper=len(a_space_np))\n a_p = pm.Deterministic('a_p', a_space_th[a_i][0])\n a_q = pm.Deterministic('a_q', a_space_th[a_i][1])\n\n y_observed = pm.Normal('y_observed', mu=(a_p / a_q) * x + b, sigma=(ε_px + TINY)/3, observed=y)", "_____no_output_____" ], [ "m_logp = model.logp\n\nscores = sweep({\n 'a_i': range(len(a_space_np)), \n 'b_i': range(len(b_space_np))\n}, factor=model)", "_____no_output_____" ], [ "scale = scores['p'].sum()\nscores['p'] = scores['p'] / scale\nscores['p'].describe()", 
"_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(16, 16))\ndata = scores.pivot('b_i', 'a_i', 'p')\nax.imshow(data)", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize=(20, 6))\ndata = scores.pivot('b_i', 'a_i', 'p')\nsns.heatmap(data, \n ax=ax,\n xticklabels=a_space_np, \n yticklabels=b_space_np,\n square=True);\nplt.xlabel(\"a\");\nplt.ylabel(\"b\");\n# ax.hlines(list(range(len(b_choices))), *ax.get_xlim(), colors='darkslategray')\n# ax.vlines(list(range(len(a_choices))), *ax.get_ylim(), colors='darkslategray')\nax.yaxis.set_major_locator(plt.MaxNLocator(30))\nax.xaxis.set_major_locator(plt.MaxNLocator(100))\nax.add_artist(plt.Rectangle((a_i_l, b_i_l), (a_i_u - a_i_l), (b_i_u - b_i_l), color='y', fill=False, zorder=99));", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb2296ba5ca33467067c86e7f26aadddae2d990e
14,473
ipynb
Jupyter Notebook
docs/tutorials/parallel.ipynb
andriyor/emcee
47c7b1704e8a7a986eb2199c898844dea0894280
[ "MIT" ]
null
null
null
docs/tutorials/parallel.ipynb
andriyor/emcee
47c7b1704e8a7a986eb2199c898844dea0894280
[ "MIT" ]
null
null
null
docs/tutorials/parallel.ipynb
andriyor/emcee
47c7b1704e8a7a986eb2199c898844dea0894280
[ "MIT" ]
null
null
null
28.157588
357
0.54771
[ [ [ "(parallel)=\n\n# Parallelization", "_____no_output_____" ] ], [ [ "%config InlineBackend.figure_format = \"retina\"\n\nfrom matplotlib import rcParams\n\nrcParams[\"savefig.dpi\"] = 100\nrcParams[\"figure.dpi\"] = 100\nrcParams[\"font.size\"] = 20\n\nimport multiprocessing\n\nmultiprocessing.set_start_method(\"fork\")", "_____no_output_____" ] ], [ [ ":::{note}\nSome builds of NumPy (including the version included with Anaconda) will automatically parallelize some operations using something like the MKL linear algebra. This can cause problems when used with the parallelization methods described here so it can be good to turn that off (by setting the environment variable `OMP_NUM_THREADS=1`, for example).\n:::", "_____no_output_____" ] ], [ [ "import os\n\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"", "_____no_output_____" ] ], [ [ "With emcee, it's easy to make use of multiple CPUs to speed up slow sampling.\nThere will always be some computational overhead introduced by parallelization so it will only be beneficial in the case where the model is expensive, but this is often true for real research problems.\nAll parallelization techniques are accessed using the `pool` keyword argument in the :class:`EnsembleSampler` class but, depending on your system and your model, there are a few pool options that you can choose from.\nIn general, a `pool` is any Python object with a `map` method that can be used to apply a function to a list of numpy arrays.\nBelow, we will discuss a few options.", "_____no_output_____" ], [ "In all of the following examples, we'll test the code with the following convoluted model:", "_____no_output_____" ] ], [ [ "import time\nimport numpy as np\n\n\ndef log_prob(theta):\n t = time.time() + np.random.uniform(0.005, 0.008)\n while True:\n if time.time() >= t:\n break\n return -0.5 * np.sum(theta**2)", "_____no_output_____" ] ], [ [ "This probability function will randomly sleep for a fraction of a second every time it is called.\nThis is 
meant to emulate a more realistic situation where the model is computationally expensive to compute.\n\nTo start, let's sample the usual (serial) way:", "_____no_output_____" ] ], [ [ "import emcee\n\nnp.random.seed(42)\ninitial = np.random.randn(32, 5)\nnwalkers, ndim = initial.shape\nnsteps = 100\n\nsampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)\nstart = time.time()\nsampler.run_mcmc(initial, nsteps, progress=True)\nend = time.time()\nserial_time = end - start\nprint(\"Serial took {0:.1f} seconds\".format(serial_time))", "100%|██████████| 100/100 [00:21<00:00, 4.71it/s]" ] ], [ [ "## Multiprocessing\n\nThe simplest method of parallelizing emcee is to use the [multiprocessing module from the standard library](https://docs.python.org/3/library/multiprocessing.html).\nTo parallelize the above sampling, you could update the code as follows:", "_____no_output_____" ] ], [ [ "from multiprocessing import Pool\n\nwith Pool() as pool:\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)\n start = time.time()\n sampler.run_mcmc(initial, nsteps, progress=True)\n end = time.time()\n multi_time = end - start\n print(\"Multiprocessing took {0:.1f} seconds\".format(multi_time))\n print(\"{0:.1f} times faster than serial\".format(serial_time / multi_time))", "100%|██████████| 100/100 [00:06<00:00, 15.65it/s]" ] ], [ [ "I have 4 cores on the machine where this is being tested:", "_____no_output_____" ] ], [ [ "from multiprocessing import cpu_count\n\nncpu = cpu_count()\nprint(\"{0} CPUs\".format(ncpu))", "4 CPUs\n" ] ], [ [ "We don't quite get the factor of 4 runtime decrease that you might expect because there is some overhead in the parallelization, but we're getting pretty close with this example and this will get even closer for more expensive models.", "_____no_output_____" ], [ "## MPI\n\nMultiprocessing can only be used for distributing calculations across processors on one machine.\nIf you want to take advantage of a bigger cluster, you'll 
need to use MPI.\nIn that case, you need to execute the code using the `mpiexec` executable, so this demo is slightly more convoluted.\nFor this example, we'll write the code to a file called `script.py` and then execute it using MPI, but when you really use the MPI pool, you'll probably just want to edit the script directly.\nTo run this example, you'll first need to install [the schwimmbad library](https://github.com/adrn/schwimmbad) because emcee no longer includes its own `MPIPool`.", "_____no_output_____" ] ], [ [ "with open(\"script.py\", \"w\") as f:\n f.write(\"\"\"\nimport sys\nimport time\nimport emcee\nimport numpy as np\nfrom schwimmbad import MPIPool\n\ndef log_prob(theta):\n t = time.time() + np.random.uniform(0.005, 0.008)\n while True:\n if time.time() >= t:\n break\n return -0.5*np.sum(theta**2)\n\nwith MPIPool() as pool:\n if not pool.is_master():\n pool.wait()\n sys.exit(0)\n \n np.random.seed(42)\n initial = np.random.randn(32, 5)\n nwalkers, ndim = initial.shape\n nsteps = 100\n\n sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)\n start = time.time()\n sampler.run_mcmc(initial, nsteps)\n end = time.time()\n print(end - start)\n\"\"\")\n\nmpi_time = !mpiexec -n {ncpu} python script.py\nmpi_time = float(mpi_time[0])\nprint(\"MPI took {0:.1f} seconds\".format(mpi_time))\nprint(\"{0:.1f} times faster than serial\".format(serial_time / mpi_time))", "MPI took 8.9 seconds\n2.4 times faster than serial\n" ] ], [ [ "There is often more overhead introduced by MPI than multiprocessing so we get less of a gain this time.\nThat being said, MPI is much more flexible and it can be used to scale to huge systems.", "_____no_output_____" ], [ "## Pickling, data transfer & arguments\n\nAll parallel Python implementations work by spinning up multiple `python` processes with identical environments then and passing information between the processes using `pickle`.\nThis means that the probability function [must be 
picklable](https://docs.python.org/3/library/pickle.html#pickle-picklable).\n\nSome users might hit issues when they use `args` to pass data to their model.\nThese args must be pickled and passed every time the model is called.\nThis can be a problem if you have a large dataset, as you can see here:", "_____no_output_____" ] ], [ [ "def log_prob_data(theta, data):\n a = data[0] # Use the data somehow...\n t = time.time() + np.random.uniform(0.005, 0.008)\n while True:\n if time.time() >= t:\n break\n return -0.5 * np.sum(theta**2)\n\n\ndata = np.random.randn(5000, 200)\n\nsampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob_data, args=(data,))\nstart = time.time()\nsampler.run_mcmc(initial, nsteps, progress=True)\nend = time.time()\nserial_data_time = end - start\nprint(\"Serial took {0:.1f} seconds\".format(serial_data_time))", "100%|██████████| 100/100 [00:21<00:00, 4.70it/s]" ] ], [ [ "We basically get no change in performance when we include the `data` argument here.\nNow let's try including this naively using multiprocessing:", "_____no_output_____" ] ], [ [ "with Pool() as pool:\n sampler = emcee.EnsembleSampler(\n nwalkers, ndim, log_prob_data, pool=pool, args=(data,)\n )\n start = time.time()\n sampler.run_mcmc(initial, nsteps, progress=True)\n end = time.time()\n multi_data_time = end - start\n print(\"Multiprocessing took {0:.1f} seconds\".format(multi_data_time))\n print(\n \"{0:.1f} times faster(?) 
than serial\".format(\n serial_data_time / multi_data_time\n )\n )", "100%|██████████| 100/100 [01:05<00:00, 1.52it/s]" ] ], [ [ "Brutal.\n\nWe can do better than that though.\nIt's a bit ugly, but if we just make `data` a global variable and use that variable within the model calculation, then we take no hit at all.", "_____no_output_____" ] ], [ [ "def log_prob_data_global(theta):\n a = data[0] # Use the data somehow...\n t = time.time() + np.random.uniform(0.005, 0.008)\n while True:\n if time.time() >= t:\n break\n return -0.5 * np.sum(theta**2)\n\n\nwith Pool() as pool:\n sampler = emcee.EnsembleSampler(\n nwalkers, ndim, log_prob_data_global, pool=pool\n )\n start = time.time()\n sampler.run_mcmc(initial, nsteps, progress=True)\n end = time.time()\n multi_data_global_time = end - start\n print(\n \"Multiprocessing took {0:.1f} seconds\".format(multi_data_global_time)\n )\n print(\n \"{0:.1f} times faster than serial\".format(\n serial_data_time / multi_data_global_time\n )\n )", "100%|██████████| 100/100 [00:06<00:00, 14.60it/s]" ] ], [ [ "That's better!\nThis works because, in the global variable case, the dataset is only pickled and passed between processes once (when the pool is created) instead of once for every model evaluation.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb22a0e55df14b2e028634338eedd39766aa0a9f
7,795
ipynb
Jupyter Notebook
floydhub/Full-version/Experiments/full_version_load_model.ipynb
lorelai22/colorizing
7bbf15fa8cb2c248a45b5ff9dbf5b7a96a6a9436
[ "MIT" ]
1
2019-01-25T15:30:58.000Z
2019-01-25T15:30:58.000Z
floydhub/Full-version/Experiments/full_version_load_model.ipynb
tkasasagi/Coloring-greyscale-images
d34592b38b8dcc89ec9e0e7ecb4a1be93ae9b805
[ "MIT" ]
null
null
null
floydhub/Full-version/Experiments/full_version_load_model.ipynb
tkasasagi/Coloring-greyscale-images
d34592b38b8dcc89ec9e0e7ecb4a1be93ae9b805
[ "MIT" ]
null
null
null
34.644444
156
0.607954
[ [ [ "#Note: You need to reset the kernel for the keras installation to take place\n#Todo: Remove this line once it is installed, reset the kernel: Menu > Kernel > Reset & Clear Output\n!git clone https://github.com/fchollet/keras.git && cd keras && python setup.py install --user", "_____no_output_____" ], [ "import keras\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.preprocessing import image\nfrom keras.engine import Layer\nfrom keras.applications.inception_resnet_v2 import preprocess_input\nfrom keras.layers import Conv2D, UpSampling2D, InputLayer, Conv2DTranspose, Input, Reshape, merge, concatenate, Activation, Dense, Dropout, Flatten\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import TensorBoard \nfrom keras.models import Sequential, Model\nfrom keras.layers.core import RepeatVector, Permute\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom skimage.color import rgb2lab, lab2rgb, rgb2gray, gray2rgb\nfrom skimage.transform import resize\nfrom skimage.io import imsave\nimport numpy as np\nimport os\nimport random\nimport tensorflow as tf", "_____no_output_____" ], [ "# Get images\nX = []\nfor filename in os.listdir('colornet/'):\n X.append(img_to_array(load_img('colornet/'+filename)))\nX = np.array(X, dtype=float)\nXtrain = 1.0/255*X\n\n#Load weights\ninception = InceptionResNetV2(weights=None, include_top=True)\ninception.load_weights('/data/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5')\ninception.graph = tf.get_default_graph()", "_____no_output_____" ], [ "def conv_stack(data, filters, s):\n output = Conv2D(filters, (3, 3), strides=s, activation='relu', padding='same')(data)\n #output = BatchNormalization()(output)\n return output\n\nembed_input = Input(shape=(1000,))\n\n#Encoder\nencoder_input = Input(shape=(256, 256, 1,))\nencoder_output = conv_stack(encoder_input, 64, 2)\nencoder_output = conv_stack(encoder_output, 128, 
1)\nencoder_output = conv_stack(encoder_output, 128, 2)\nencoder_output = conv_stack(encoder_output, 256, 1)\nencoder_output = conv_stack(encoder_output, 256, 2)\nencoder_output = conv_stack(encoder_output, 512, 1)\nencoder_output = conv_stack(encoder_output, 512, 1)\nencoder_output = conv_stack(encoder_output, 256, 1)\n\n#Fusion\n# y_mid: (None, 256, 28, 28)\nfusion_output = RepeatVector(32 * 32)(embed_input) \nfusion_output = Reshape(([32, 32, 1000]))(fusion_output)\nfusion_output = concatenate([fusion_output, encoder_output], axis=3) \nfusion_output = Conv2D(256, (1, 1), activation='relu')(fusion_output) \n\n#Decoder\ndecoder_output = conv_stack(fusion_output, 128, 1)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\ndecoder_output = conv_stack(decoder_output, 64, 1)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\ndecoder_output = conv_stack(decoder_output, 32, 1)\ndecoder_output = conv_stack(decoder_output, 16, 1)\ndecoder_output = Conv2D(2, (2, 2), activation='tanh', padding='same')(decoder_output)\ndecoder_output = UpSampling2D((2, 2))(decoder_output)\n\nmodel = Model(inputs=[encoder_input, embed_input], outputs=decoder_output)", "_____no_output_____" ], [ "#Create embedding\ndef create_inception_embedding(grayscaled_rgb):\n grayscaled_rgb_resized = []\n for i in grayscaled_rgb:\n i = resize(i, (299, 299, 3), mode='constant')\n grayscaled_rgb_resized.append(i)\n grayscaled_rgb_resized = np.array(grayscaled_rgb_resized)\n grayscaled_rgb_resized = preprocess_input(grayscaled_rgb_resized)\n with inception.graph.as_default():\n embed = inception.predict(grayscaled_rgb_resized)\n return embed\n\n# Image transformer\ndatagen = ImageDataGenerator(\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=20,\n horizontal_flip=True)\n\n#Generate training data\nbatch_size = 20\n\ndef image_a_b_gen(batch_size):\n for batch in datagen.flow(Xtrain, batch_size=batch_size):\n grayscaled_rgb = gray2rgb(rgb2gray(batch))\n embed = 
create_inception_embedding(grayscaled_rgb)\n lab_batch = rgb2lab(batch)\n X_batch = lab_batch[:,:,:,0]\n X_batch = X_batch.reshape(X_batch.shape+(1,))\n Y_batch = lab_batch[:,:,:,1:] / 128\n yield ([X_batch, create_inception_embedding(grayscaled_rgb)], Y_batch)\n\n#Train model \ntensorboard = TensorBoard(log_dir=\"/output\")\nmodel.compile(optimizer='adam', loss='mse')\nmodel.fit_generator(image_a_b_gen(batch_size), callbacks=[tensorboard], epochs=1000, steps_per_epoch=20)", "_____no_output_____" ], [ "# Save model\nmodel_json = model.to_json()\nwith open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\nmodel.save_weights(\"color_tensorflow_real_mode.h5\")", "_____no_output_____" ], [ "#Make predictions on validation images\ncolor_me = []\nfor filename in os.listdir('/data/images/Test/'):\n color_me.append(img_to_array(load_img('/data/images/Test/'+filename)))\ncolor_me = np.array(color_me, dtype=float)\ncolor_me_embed = create_inception_embedding(color_me)\ncolor_me = rgb2lab(1.0/255*color_me)[:,:,:,0]\ncolor_me = color_me.reshape(color_me.shape+(1,))\n\n\n# Test model\noutput = model.predict([color_me, color_me_embed])\noutput = output * 128\n\n# Output colorizations\nfor i in range(len(output)):\n cur = np.zeros((256, 256, 3))\n cur[:,:,0] = color_me[i][:,:,0]\n cur[:,:,1:] = output[i]\n imsave(\"result/img_\"+str(i)+\".png\", lab2rgb(cur))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb22afb342e74854899965efc7d5beadb64c9df4
15,519
ipynb
Jupyter Notebook
labs/lab_01_Cristobal_Lobos.ipynb
CristobalLobos/MAT281_Portafolio_CristobalLobos
ad5299e534762469cf83bc10eb6e5a1d736ee090
[ "MIT" ]
null
null
null
labs/lab_01_Cristobal_Lobos.ipynb
CristobalLobos/MAT281_Portafolio_CristobalLobos
ad5299e534762469cf83bc10eb6e5a1d736ee090
[ "MIT" ]
null
null
null
labs/lab_01_Cristobal_Lobos.ipynb
CristobalLobos/MAT281_Portafolio_CristobalLobos
ad5299e534762469cf83bc10eb6e5a1d736ee090
[ "MIT" ]
null
null
null
28.527574
459
0.516528
[ [ [ "# MAT281 - Laboratorio N°01 ", "_____no_output_____" ], [ "<a id='p1'></a>\n\n## Problema 01\n\n### a) Calcular el número $\\pi$\n\nEn los siglos XVII y XVIII, James Gregory y Gottfried Leibniz descubrieron una serie infinita que sirve para calcular $\\pi$:\n\n$$\\displaystyle \\pi = 4 \\sum_{k=1}^{\\infty}\\dfrac{(-1)^{k+1}}{2k-1} = 4(1-\\dfrac{1}{3}+\\dfrac{1}{5}-\\dfrac{1}{7} + ...) $$\n\nDesarolle un programa para estimar el valor de $\\pi$ ocupando el método de Leibniz, donde la entrada del programa debe ser un número entero $n$ que indique cuántos términos de la suma se utilizará.\n\n\n* **Ejemplo**: *calcular_pi(3)* = 3.466666666666667, *calcular_pi(1000)* = 3.140592653839794\n", "_____no_output_____" ], [ "### Definir Función\n", "_____no_output_____" ] ], [ [ "def calcular_pi(n:int)->float:\n \n \"\"\"\n calcular_pi(n)\n\n Aproximacion del valor de pi mediante el método de Leibniz\n\n Parameters\n ----------\n n : int\n Numero de terminos.\n\n Returns\n -------\n output : float\n Valor aproximado de pi.\n \n Examples\n --------\n >>> calcular_pi(3)\n 3.466666666666667\n \n >>> calcular_pi(1000) \n 3.140592653839794\n \"\"\"\n \n pi = 0 # valor incial \n for k in range(1,n+1):\n numerador = (-1)**(k+1) # numerador de la iteracion i\n denominador = 2*k-1 # denominador de la iteracion i\n pi+=numerador/denominador # suma hasta el i-esimo termino\n \n return 4*pi", "_____no_output_____" ], [ "# Acceso a la documentación\nhelp(calcular_pi)", "Help on function calcular_pi in module __main__:\n\ncalcular_pi(n: int) -> float\n calcular_pi(n)\n \n Aproximacion del valor de pi mediante el método de Leibniz\n \n Parameters\n ----------\n n : int\n Numero de terminos.\n \n Returns\n -------\n output : float\n Valor aproximado de pi.\n \n Examples\n --------\n >>> calcular_pi(3)\n 3.466666666666667\n \n >>> calcular_pi(1000) \n 3.140592653839794\n\n" ] ], [ [ "### Verificar ejemplos", "_____no_output_____" ] ], [ [ "# ejemplo 01\nassert calcular_pi(3) == 
3.466666666666667, \"ejemplo 01 incorrecto\"", "_____no_output_____" ], [ "# ejemplo 02\nassert calcular_pi(1000) == 3.140592653839794, \"ejemplo 02 incorrecto\"", "_____no_output_____" ] ], [ [ "**Observación**:\n\n* Note que si corre la línea de comando `calcular_pi(3.0)` le mandará un error ... ¿ por qué ?\n* En los laboratorio, no se pide ser tan meticuloso con la documentacion.\n* Lo primero es definir el código, correr los ejemplos y luego documentar correctamente.", "_____no_output_____" ], [ "### b) Calcular el número $e$\n\nEuler realizó varios aportes en relación a $e$, pero no fue hasta 1748 cuando publicó su **Introductio in analysin infinitorum** que dio un tratamiento definitivo a las ideas sobre $e$. Allí mostró que:\n\n\nEn los siglos XVII y XVIII, James Gregory y Gottfried Leibniz descubrieron una serie infinita que sirve para calcular π:\n\n$$\\displaystyle e = \\sum_{k=0}^{\\infty}\\dfrac{1}{k!} = 1+\\dfrac{1}{2!}+\\dfrac{1}{3!}+\\dfrac{1}{4!} + ... $$\n\nDesarolle un programa para estimar el valor de $e$ ocupando el método de Euler, donde la entrada del programa debe ser un número entero $n$ que indique cuántos términos de la suma se utilizará.\n\n\n* **Ejemplo**: *calcular_e(3)* =2.5, *calcular_e(1000)* = 2.7182818284590455", "_____no_output_____" ], [ "### Definir función", "_____no_output_____" ] ], [ [ "def calcular_e(n:int):\n \n if n == 1: #1 termino de suma\n e = 1\n \n if n == 2: #2 terminos de suma\n e = 2\n \n if n >= 3: #3 o mas terminos de suma\n e = 2\n factorial = 1 #def de factorial\n for k in range(2,n): #se suma hasta el termino n-1 de pasos, si fuera n+1 se sumaria hasta n pero la suma de e parte desde 0\n factorial = factorial*k #recursion\n suma = 1/factorial\n e = e+suma\n return e\n\nprint(calcular_e(100))", "2.7182818284590455\n" ] ], [ [ "### Verificar ejemplos", "_____no_output_____" ] ], [ [ "# ejemplo 01\nassert calcular_e(3) == 2.5, \"ejemplo 01 incorrecto\"", "_____no_output_____" ], [ "# ejemplo 02\nassert 
calcular_e(1000) == 2.7182818284590455, \"ejemplo 02 incorrecto\"", "_____no_output_____" ] ], [ [ "<a id='p2'></a>\n\n## Problema 02\n\n\nSea $\\sigma(n)$ definido como la suma de los divisores propios de $n$ (números menores que n que se dividen en $n$).\n\nLos [números amigos](https://en.wikipedia.org/wiki/Amicable_numbers) son enteros positivos $n_1$ y $n_2$ tales que la suma de los divisores propios de uno es igual al otro número y viceversa, es decir, $\\sigma(n_1)=\\sigma(n_2)$ y $\\sigma(n_2)=\\sigma(n_1)$.\n\n\nPor ejemplo, los números 220 y 284 son números amigos.\n* los divisores propios de 220 son 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 y 110; por lo tanto $\\sigma(220) = 284$. \n* los divisores propios de 284 son 1, 2, 4, 71 y 142; entonces $\\sigma(284) = 220$.\n\n\nImplemente una función llamada `amigos` cuyo input sean dos números naturales $n_1$ y $n_2$, cuyo output sea verifique si los números son amigos o no.\n\n* **Ejemplo**: *amigos(220,284)* = True, *amigos(6,5)* = False\n", "_____no_output_____" ], [ "### Definir Función", "_____no_output_____" ] ], [ [ "def amigos(n1:int, n2:int):\n suma1 = 0 #definicion de las cantidades a sumar\n suma2 = 0\n \n for i in range(1,n1+1):\n if n1%i == 0: #Verificador de divisores propios\n suma1 = i+suma1 #Se agrega a la suma si es divisor propio\n \n for i in range(1,n2+1):\n if n2%i == 0: #Verificador de divisores propios\n suma2 = i+suma2 #Se agrega a la suma si es divisor propio\n \n if suma1==suma2: #Son amigos si ambas sumas son iguales \n return True\n \n else: #Caso contrario no lo son\n return False", "_____no_output_____" ] ], [ [ "### Verificar ejemplos", "_____no_output_____" ] ], [ [ "# ejemplo 01\nassert amigos(220,284) == True, \"ejemplo 01 incorrecto\"", "_____no_output_____" ], [ "# ejemplo 02\nassert amigos(6,5) == False, \"ejemplo 02 incorrecto\"", "_____no_output_____" ] ], [ [ "<a id='p3'></a>\n\n## Problema 03\n\nLa [conjetura de Collatz](https://en.wikipedia.org/wiki/Collatz_conjecture), 
conocida también como conjetura $3n+1$ o conjetura de Ulam (entre otros nombres), fue enunciada por el matemático Lothar Collatz en 1937, y a la fecha no se ha resuelto.\n\nSea la siguiente operación, aplicable a cualquier número entero positivo:\n* Si el número es par, se divide entre 2.\n* Si el número es impar, se multiplica por 3 y se suma 1.\n\nLa conjetura dice que siempre alcanzaremos el 1 (y por tanto el ciclo 4, 2, 1) para cualquier número con el que comencemos. \n\nImplemente una función llamada `collatz` cuyo input sea un número natural positivo $N$ y como output devulva la secuencia de números hasta llegar a 1.\n\n* **Ejemplo**: *collatz(9)* = [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1]", "_____no_output_____" ], [ "### Definir Función", "_____no_output_____" ] ], [ [ "def collatz(N:int):\n paso = N #paso inicial\n numeros = [N] #lista donde se guardaran los numeros recorridos \n while paso != 1:\n \n if paso%2 == 0: #Verificacion si el numero en el que estamos es par\n paso = paso//2 #Calculo del paso\n numeros.append(paso) #Se agrega a la lista de pasos\n \n else: #Si no es par, es impar \n paso = paso*3 + 1 #Calculo del paso\n numeros.append(paso) #Se agrega a la lista de pasos\n \n return numeros ", "_____no_output_____" ], [ "### Verificar ejemplos", "_____no_output_____" ], [ "# ejemplo 01\nassert collatz(9) == [9, 28, 14, 7, 22, 11, 34, 17, 52, 26, 13, 40, 20, 10, 5, 16, 8, 4, 2, 1], \"ejemplo 01 incorrecto\"", "_____no_output_____" ] ], [ [ "<a id='p4'></a>\n\n## Problema 04\n\nLa [conjetura de Goldbach](https://en.wikipedia.org/wiki/Goldbach%27s_conjecture) es uno de los problemas abiertos más antiguos en matemáticas. Concretamente, G.H. Hardy, en 1921, en su famoso discurso pronunciado en la Sociedad Matemática de Copenhague, comentó que probablemente la conjetura de Goldbach no es solo uno de los problemas no resueltos más difíciles de la teoría de números, sino de todas las matemáticas. 
Su enunciado es el siguiente:\n\n$$\\textrm{Todo número par mayor que 2 puede escribirse como suma de dos números primos - Christian Goldbach (1742)}$$\n\nImplemente una función llamada `goldbach` cuyo input sea un número natural positivo $N$ y como output devuelva la suma de dos primos ($N1$ y $N2$) tal que: $N1+N2=N$. \n\n * **Ejemplo**: goldbash(4) = (2,2), goldbash(6) = (3,3) , goldbash(8) = (3,5)", "_____no_output_____" ], [ "### Definir función", "_____no_output_____" ] ], [ [ "def goldbach(N:int):\n if N == 2:\n return \"Error\"\n suma = 0 #cantidad a comparar\n primos = [2] #lista para guardar numeros primos\n N1 = 0 \n N2 = 0\n \n for i in range(3,N+1): #ciclo para encontrar todos los numeros primos entre 3 y N\n for j in range(2,i): #comparacion numero a numero\n if (i % j) == 0: #si i tiene un divisor, no es primo\n break \n else: #si no tiene divisores, i es un numero primo y se agrega a la lista\n primos.append(i)\n \n for i in primos: #ciclo para encontrar la suma de primos que de N\n for j in primos: #se fija un numero en la lista y se recorre denuevo\n if (i+j) == N: #si se encuentra una suma de primos que de N, se asignan a N1 y N2 los primos correspondientes\n N1 = j\n N2 = i\n break\n \n return((N1,N2)) #se devuelven los primos encontrados en forma de tupla", "_____no_output_____" ] ], [ [ "### Verificar ejemplos", "_____no_output_____" ] ], [ [ "# ejemplo 01\nassert goldbach(4) == (2,2), \"ejemplo 01 incorrecto\"", "_____no_output_____" ], [ "# ejemplo 02\nassert goldbach(6) == (3,3), \"ejemplo 02 incorrecto\"", "_____no_output_____" ], [ "# ejemplo 03\nassert goldbach(8) == (3,5), \"ejemplo 03 incorrecto\"", "_____no_output_____" ], [ "#Cristobal Lobos; ROL: 201610519-0", "_____no_output_____" ], [ "import time\nstart_time = time.time()\nprint(goldbach(1000000))\nprint(\"--- %s seconds ---\" % (time.time() - start_time))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb22bf67b8d480c6957dac7c3c46405c524087c2
73,896
ipynb
Jupyter Notebook
DepressionDetectionInTwitter.ipynb
peijoy/DetectDepressionInTwitterPosts
2ba04192151eefae7b83f4d051fb719e9f338e8b
[ "MIT" ]
26
2018-09-29T21:27:06.000Z
2021-12-16T18:28:50.000Z
DepressionDetectionInTwitter.ipynb
AsserMazin37/Depression_Detection_via_Twitter
a2567e3d36d8ccecdc04dd1432633b6f21862cf8
[ "MIT" ]
null
null
null
DepressionDetectionInTwitter.ipynb
AsserMazin37/Depression_Detection_via_Twitter
a2567e3d36d8ccecdc04dd1432633b6f21862cf8
[ "MIT" ]
13
2018-09-14T11:08:52.000Z
2022-01-31T23:12:06.000Z
62.570703
19,072
0.72257
[ [ [ "# Depression Detection in Social Media Posts\n\n#### Imports", "_____no_output_____" ] ], [ [ "import warnings\nwarnings.filterwarnings(\"ignore\")\nimport ftfy\nimport matplotlib.pyplot as plt\nimport nltk\nimport numpy as np\nimport pandas as pd\nimport re\n\nfrom math import exp\nfrom numpy import sign\n\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom gensim.models import KeyedVectors\nfrom nltk.corpus import stopwords\nfrom nltk import PorterStemmer\n\nfrom keras.models import Model, Sequential\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers import Conv1D, Dense, Input, LSTM, Embedding, Dropout, Activation, MaxPooling1D\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences", "Using TensorFlow backend.\n" ] ], [ [ "#### Constants", "_____no_output_____" ] ], [ [ "# Reproducibility\nnp.random.seed(1234)\n\nDEPRES_NROWS = 3200 # number of rows to read from DEPRESSIVE_TWEETS_CSV\nRANDOM_NROWS = 12000 # number of rows to read from RANDOM_TWEETS_CSV\nMAX_SEQUENCE_LENGTH = 140 # Max tweet size\nMAX_NB_WORDS = 20000\nEMBEDDING_DIM = 300\nTRAIN_SPLIT = 0.6\nTEST_SPLIT = 0.2\nLEARNING_RATE = 0.1\nEPOCHS= 10", "_____no_output_____" ] ], [ [ "## Section 1: Load Data\n\nLoading depressive tweets scraped from twitter using [TWINT](https://github.com/haccer/twint) and random tweets from Kaggle dataset [twitter_sentiment](https://www.kaggle.com/ywang311/twitter-sentiment/data).\n\n#### File Paths", "_____no_output_____" ] ], [ [ "#DEPRESSIVE_TWEETS_CSV = 'depressive_tweets.csv'\nDEPRESSIVE_TWEETS_CSV = 'depressive_tweets_processed.csv'\nRANDOM_TWEETS_CSV = 'Sentiment Analysis Dataset 2.csv'\nEMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin.gz'", "_____no_output_____" ], [ "depressive_tweets_df = pd.read_csv(DEPRESSIVE_TWEETS_CSV, sep = '|', header = None, usecols = range(0,9), nrows = DEPRES_NROWS)\nrandom_tweets_df = 
pd.read_csv(RANDOM_TWEETS_CSV, encoding = \"ISO-8859-1\", usecols = range(0,4), nrows = RANDOM_NROWS)", "_____no_output_____" ], [ "depressive_tweets_df.head()", "_____no_output_____" ], [ "random_tweets_df.head()", "_____no_output_____" ] ], [ [ "## Section 2: Data Processing", "_____no_output_____" ], [ "### Load Pretrained Word2Vec Model\n\nThe pretrained vectors for the Word2Vec model is from [here](https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit).\nUsing a Keyed Vectors file, we can get the embedding of any word by calling `.word_vec(word)` and we can get all the words in the model's vocabulary through `.vocab`.", "_____no_output_____" ] ], [ [ "word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, binary=True)", "_____no_output_____" ] ], [ [ "### Preprocessing\n\nPreprocessing the tweets in order to:\n* Remove links and images\n* Remove hashtags\n* Remove @ mentions\n* Remove emojis\n* Remove stop words\n* Remove punctuation\n* Get rid of stuff like \"what's\" and making it \"what is'\n* Stem words so they are all the same tense (e.g. 
ran -> run)", "_____no_output_____" ] ], [ [ "# Expand Contraction\ncList = {\n \"ain't\": \"am not\",\n \"aren't\": \"are not\",\n \"can't\": \"cannot\",\n \"can't've\": \"cannot have\",\n \"'cause\": \"because\",\n \"could've\": \"could have\",\n \"couldn't\": \"could not\",\n \"couldn't've\": \"could not have\",\n \"didn't\": \"did not\",\n \"doesn't\": \"does not\",\n \"don't\": \"do not\",\n \"hadn't\": \"had not\",\n \"hadn't've\": \"had not have\",\n \"hasn't\": \"has not\",\n \"haven't\": \"have not\",\n \"he'd\": \"he would\",\n \"he'd've\": \"he would have\",\n \"he'll\": \"he will\",\n \"he'll've\": \"he will have\",\n \"he's\": \"he is\",\n \"how'd\": \"how did\",\n \"how'd'y\": \"how do you\",\n \"how'll\": \"how will\",\n \"how's\": \"how is\",\n \"I'd\": \"I would\",\n \"I'd've\": \"I would have\",\n \"I'll\": \"I will\",\n \"I'll've\": \"I will have\",\n \"I'm\": \"I am\",\n \"I've\": \"I have\",\n \"isn't\": \"is not\",\n \"it'd\": \"it had\",\n \"it'd've\": \"it would have\",\n \"it'll\": \"it will\",\n \"it'll've\": \"it will have\",\n \"it's\": \"it is\",\n \"let's\": \"let us\",\n \"ma'am\": \"madam\",\n \"mayn't\": \"may not\",\n \"might've\": \"might have\",\n \"mightn't\": \"might not\",\n \"mightn't've\": \"might not have\",\n \"must've\": \"must have\",\n \"mustn't\": \"must not\",\n \"mustn't've\": \"must not have\",\n \"needn't\": \"need not\",\n \"needn't've\": \"need not have\",\n \"o'clock\": \"of the clock\",\n \"oughtn't\": \"ought not\",\n \"oughtn't've\": \"ought not have\",\n \"shan't\": \"shall not\",\n \"sha'n't\": \"shall not\",\n \"shan't've\": \"shall not have\",\n \"she'd\": \"she would\",\n \"she'd've\": \"she would have\",\n \"she'll\": \"she will\",\n \"she'll've\": \"she will have\",\n \"she's\": \"she is\",\n \"should've\": \"should have\",\n \"shouldn't\": \"should not\",\n \"shouldn't've\": \"should not have\",\n \"so've\": \"so have\",\n \"so's\": \"so is\",\n \"that'd\": \"that would\",\n \"that'd've\": \"that 
would have\",\n \"that's\": \"that is\",\n \"there'd\": \"there had\",\n \"there'd've\": \"there would have\",\n \"there's\": \"there is\",\n \"they'd\": \"they would\",\n \"they'd've\": \"they would have\",\n \"they'll\": \"they will\",\n \"they'll've\": \"they will have\",\n \"they're\": \"they are\",\n \"they've\": \"they have\",\n \"to've\": \"to have\",\n \"wasn't\": \"was not\",\n \"we'd\": \"we had\",\n \"we'd've\": \"we would have\",\n \"we'll\": \"we will\",\n \"we'll've\": \"we will have\",\n \"we're\": \"we are\",\n \"we've\": \"we have\",\n \"weren't\": \"were not\",\n \"what'll\": \"what will\",\n \"what'll've\": \"what will have\",\n \"what're\": \"what are\",\n \"what's\": \"what is\",\n \"what've\": \"what have\",\n \"when's\": \"when is\",\n \"when've\": \"when have\",\n \"where'd\": \"where did\",\n \"where's\": \"where is\",\n \"where've\": \"where have\",\n \"who'll\": \"who will\",\n \"who'll've\": \"who will have\",\n \"who's\": \"who is\",\n \"who've\": \"who have\",\n \"why's\": \"why is\",\n \"why've\": \"why have\",\n \"will've\": \"will have\",\n \"won't\": \"will not\",\n \"won't've\": \"will not have\",\n \"would've\": \"would have\",\n \"wouldn't\": \"would not\",\n \"wouldn't've\": \"would not have\",\n \"y'all\": \"you all\",\n \"y'alls\": \"you alls\",\n \"y'all'd\": \"you all would\",\n \"y'all'd've\": \"you all would have\",\n \"y'all're\": \"you all are\",\n \"y'all've\": \"you all have\",\n \"you'd\": \"you had\",\n \"you'd've\": \"you would have\",\n \"you'll\": \"you you will\",\n \"you'll've\": \"you you will have\",\n \"you're\": \"you are\",\n \"you've\": \"you have\"\n}\n\nc_re = re.compile('(%s)' % '|'.join(cList.keys()))\n\ndef expandContractions(text, c_re=c_re):\n def replace(match):\n return cList[match.group(0)]\n return c_re.sub(replace, text)", "_____no_output_____" ], [ "def clean_tweets(tweets):\n cleaned_tweets = []\n for tweet in tweets:\n tweet = str(tweet)\n # if url links then dont append to avoid news 
articles\n # also check tweet length, save those > 10 (length of word \"depression\")\n if re.match(\"(\\w+:\\/\\/\\S+)\", tweet) == None and len(tweet) > 10:\n #remove hashtag, @mention, emoji and image URLs\n tweet = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|(\\#[A-Za-z0-9]+)|(<Emoji:.*>)|(pic\\.twitter\\.com\\/.*)\", \" \", tweet).split())\n \n #fix weirdly encoded texts\n tweet = ftfy.fix_text(tweet)\n \n #expand contraction\n tweet = expandContractions(tweet)\n\n #remove punctuation\n tweet = ' '.join(re.sub(\"([^0-9A-Za-z \\t])\", \" \", tweet).split())\n\n #stop words\n stop_words = set(stopwords.words('english'))\n word_tokens = nltk.word_tokenize(tweet) \n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n tweet = ' '.join(filtered_sentence)\n\n #stemming words\n tweet = PorterStemmer().stem(tweet)\n \n cleaned_tweets.append(tweet)\n\n return cleaned_tweets", "_____no_output_____" ] ], [ [ "Applying the preprocessing `clean_text` function to every element in the depressive tweets and random tweets data.", "_____no_output_____" ] ], [ [ "depressive_tweets_arr = [x for x in depressive_tweets_df[5]]\nrandom_tweets_arr = [x for x in random_tweets_df['SentimentText']]\nX_d = clean_tweets(depressive_tweets_arr)\nX_r = clean_tweets(random_tweets_arr)", "_____no_output_____" ] ], [ [ "### Tokenizer\n\nUsing a Tokenizer to assign indices and filtering out unfrequent words. Tokenizer creates a map of every unique word and an assigned index to it. The parameter called num_words indicates that we only care about the top 20000 most frequent words.", "_____no_output_____" ] ], [ [ "tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\ntokenizer.fit_on_texts(X_d + X_r)", "_____no_output_____" ] ], [ [ "Applying the tokenizer to depressive tweets and random tweets data.", "_____no_output_____" ] ], [ [ "sequences_d = tokenizer.texts_to_sequences(X_d)\nsequences_r = tokenizer.texts_to_sequences(X_r)", "_____no_output_____" ] ], [ [ "Number of unique words in tokenizer. 
Has to be <= 20,000.", "_____no_output_____" ] ], [ [ "word_index = tokenizer.word_index\nprint('Found %s unique tokens' % len(word_index))", "Found 21548 unique tokens\n" ] ], [ [ "Pad sequences all to the same length of 140 words.", "_____no_output_____" ] ], [ [ "data_d = pad_sequences(sequences_d, maxlen=MAX_SEQUENCE_LENGTH)\ndata_r = pad_sequences(sequences_r, maxlen=MAX_SEQUENCE_LENGTH)\nprint('Shape of data_d tensor:', data_d.shape)\nprint('Shape of data_r tensor:', data_r.shape)", "Shape of data_d tensor: (2308, 140)\nShape of data_r tensor: (11911, 140)\n" ] ], [ [ "### Embedding Matrix\n\nThe embedding matrix is a `n x m` matrix where `n` is the number of words and `m` is the dimension of the embedding. In this case, `m=300` and `n=20000`. We take the min between the number of unique words in our tokenizer and max words in case there are less unique words than the max we specified.", "_____no_output_____" ] ], [ [ "nb_words = min(MAX_NB_WORDS, len(word_index))\n\nembedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))\n\nfor (word, idx) in word_index.items():\n if word in word2vec.vocab and idx < MAX_NB_WORDS:\n embedding_matrix[idx] = word2vec.word_vec(word)", "_____no_output_____" ] ], [ [ "### Splitting and Formatting Data\n\nAssigning labels to the depressive tweets and random tweets data, and splitting the arrays into test (60%), validation (20%), and train data (20%). 
Combine depressive tweets and random tweets arrays and shuffle.", "_____no_output_____" ] ], [ [ "# Assigning labels to the depressive tweets and random tweets data\nlabels_d = np.array([1] * DEPRES_NROWS)\nlabels_r = np.array([0] * RANDOM_NROWS)\n\n# Splitting the arrays into test (60%), validation (20%), and train data (20%)\nperm_d = np.random.permutation(len(data_d))\nidx_train_d = perm_d[:int(len(data_d)*(TRAIN_SPLIT))]\nidx_test_d = perm_d[int(len(data_d)*(TRAIN_SPLIT)):int(len(data_d)*(TRAIN_SPLIT+TEST_SPLIT))]\nidx_val_d = perm_d[int(len(data_d)*(TRAIN_SPLIT+TEST_SPLIT)):]\n\nperm_r = np.random.permutation(len(data_r))\nidx_train_r = perm_r[:int(len(data_r)*(TRAIN_SPLIT))]\nidx_test_r = perm_r[int(len(data_r)*(TRAIN_SPLIT)):int(len(data_r)*(TRAIN_SPLIT+TEST_SPLIT))]\nidx_val_r = perm_r[int(len(data_r)*(TRAIN_SPLIT+TEST_SPLIT)):]\n\n# Combine depressive tweets and random tweets arrays\ndata_train = np.concatenate((data_d[idx_train_d], data_r[idx_train_r]))\nlabels_train = np.concatenate((labels_d[idx_train_d], labels_r[idx_train_r]))\ndata_test = np.concatenate((data_d[idx_test_d], data_r[idx_test_r]))\nlabels_test = np.concatenate((labels_d[idx_test_d], labels_r[idx_test_r]))\ndata_val = np.concatenate((data_d[idx_val_d], data_r[idx_val_r]))\nlabels_val = np.concatenate((labels_d[idx_val_d], labels_r[idx_val_r]))\n\n# Shuffling\nperm_train = np.random.permutation(len(data_train))\ndata_train = data_train[perm_train]\nlabels_train = labels_train[perm_train]\nperm_test = np.random.permutation(len(data_test))\ndata_test = data_test[perm_test]\nlabels_test = labels_test[perm_test]\nperm_val = np.random.permutation(len(data_val))\ndata_val = data_val[perm_val]\nlabels_val = labels_val[perm_val]", "_____no_output_____" ] ], [ [ "## Section 3: Building the Model", "_____no_output_____" ], [ "### Building Model (LSTM + CNN)\n\nThe model takes in an input and then outputs a single number representing the probability that the tweet indicates depression. 
The model takes in each input sentence, replace it with it's embeddings, then run the new embedding vector through a convolutional layer. CNNs are excellent at learning spatial structure from data, the convolutional layer takes advantage of that and learn some structure from the sequential data then pass into a standard LSTM layer. Last but not least, the output of the LSTM layer is fed into a standard Dense model for prediction.", "_____no_output_____" ] ], [ [ "model = Sequential()\n# Embedded layer\nmodel.add(Embedding(len(embedding_matrix), EMBEDDING_DIM, weights=[embedding_matrix], \n input_length=MAX_SEQUENCE_LENGTH, trainable=False))\n# Convolutional Layer\nmodel.add(Conv1D(filters=32, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Dropout(0.2))\n# LSTM Layer\nmodel.add(LSTM(300))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(1, activation='sigmoid'))", "WARNING:tensorflow:From C:\\Users\\peijo\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\util\\deprecation.py:497: calling conv1d (from tensorflow.python.ops.nn_ops) with data_format=NHWC is deprecated and will be removed in a future version.\nInstructions for updating:\n`NHWC` for data_format is deprecated, use `NWC` instead\n" ] ], [ [ "### Compiling Model", "_____no_output_____" ] ], [ [ "model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['acc'])\nprint(model.summary())", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_1 (Embedding) (None, 140, 300) 6000000 \n_________________________________________________________________\nconv1d_1 (Conv1D) (None, 140, 32) 28832 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 70, 32) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 70, 32) 0 
\n_________________________________________________________________\nlstm_1 (LSTM) (None, 300) 399600 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 300) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 1) 301 \n=================================================================\nTotal params: 6,428,733\nTrainable params: 428,733\nNon-trainable params: 6,000,000\n_________________________________________________________________\nNone\n" ] ], [ [ "## Section 4: Training the Model\n\nThe model is trained `EPOCHS` time, and Early Stopping argument is used to end training if the loss or accuracy don't improve within 3 epochs.", "_____no_output_____" ] ], [ [ "early_stop = EarlyStopping(monitor='val_loss', patience=3)\n\nhist = model.fit(data_train, labels_train, \\\n validation_data=(data_val, labels_val), \\\n epochs=EPOCHS, batch_size=40, shuffle=True, \\\n callbacks=[early_stop])", "Train on 8530 samples, validate on 2845 samples\nEpoch 1/10\n8530/8530 [==============================] - 32s 4ms/step - loss: 0.1128 - acc: 0.9673 - val_loss: 0.0357 - val_acc: 0.9930\nEpoch 2/10\n8530/8530 [==============================] - 32s 4ms/step - loss: 0.0378 - acc: 0.9916 - val_loss: 0.0326 - val_acc: 0.9926\nEpoch 3/10\n8530/8530 [==============================] - 33s 4ms/step - loss: 0.0308 - acc: 0.9926 - val_loss: 0.0333 - val_acc: 0.9933\nEpoch 4/10\n8530/8530 [==============================] - 32s 4ms/step - loss: 0.0250 - acc: 0.9945 - val_loss: 0.0432 - val_acc: 0.9902\nEpoch 5/10\n8530/8530 [==============================] - 32s 4ms/step - loss: 0.0214 - acc: 0.9950 - val_loss: 0.0395 - val_acc: 0.9919\n" ] ], [ [ "### Results", "_____no_output_____" ], [ "Summarize history for accuracy", "_____no_output_____" ] ], [ [ "plt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.title('model 
accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "Summarize history for loss", "_____no_output_____" ] ], [ [ "plt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.show()", "_____no_output_____" ] ], [ [ "Percentage accuracy of model", "_____no_output_____" ] ], [ [ "labels_pred = model.predict(data_test)\nlabels_pred = np.round(labels_pred.flatten())\naccuracy = accuracy_score(labels_test, labels_pred)\nprint(\"Accuracy: %.2f%%\" % (accuracy*100))", "Accuracy: 98.91%\n" ] ], [ [ "f1, precision, and recall scores", "_____no_output_____" ] ], [ [ "print(classification_report(labels_test, labels_pred))", " precision recall f1-score support\n\n 0 0.99 1.00 0.99 2382\n 1 0.98 0.96 0.97 462\n\navg / total 0.99 0.99 0.99 2844\n\n" ] ], [ [ "## Section 5: Comparing the Model to Base Line\n\nIn order to evaluate the effectiveness of the LSTM + CNN model, a logistic regression model is trained with the same train data and the same number of epochs, and tested with the same test data.", "_____no_output_____" ], [ "### Logistic Regression Base Line Model", "_____no_output_____" ] ], [ [ "class LogReg:\n \"\"\"\n Class to represent a logistic regression model.\n \"\"\"\n\n def __init__(self, l_rate, epochs, n_features):\n \"\"\"\n Create a new model with certain parameters.\n\n :param l_rate: Initial learning rate for model.\n :param epoch: Number of epochs to train for.\n :param n_features: Number of features.\n \"\"\"\n self.l_rate = l_rate\n self.epochs = epochs\n self.coef = [0.0] * n_features\n self.bias = 0.0\n\n def sigmoid(self, score, threshold=20.0):\n \"\"\"\n Prevent overflow of exp by capping activation at 20.\n\n :param score: A real valued number to convert into a number between 0 and 1\n \"\"\"\n if abs(score) > threshold:\n 
score = threshold * sign(score)\n activation = exp(score)\n return activation / (1.0 + activation)\n\n def predict(self, features):\n \"\"\"\n Given an example's features and the coefficients, predicts the class.\n\n :param features: List of real valued features for a single training example.\n\n :return: Returns the predicted class (either 0 or 1).\n \"\"\"\n value = sum([features[i]*self.coef[i] for i in range(len(features))]) + self.bias\n return self.sigmoid(value)\n\n def sg_update(self, features, label):\n \"\"\"\n Computes the update to the weights based on a predicted example.\n\n :param features: Features to train on.\n :param label: Corresponding label for features.\n \"\"\"\n yhat = self.predict(features)\n e = label - yhat\n self.bias = self.bias + self.l_rate * e * yhat * (1-yhat)\n for i in range(len(features)):\n self.coef[i] = self.coef[i] + self.l_rate * e * yhat * (1-yhat) * features[i]\n return\n\n def train(self, X, y):\n \"\"\"\n Computes logistic regression coefficients using stochastic gradient descent.\n\n :param X: Features to train on.\n :param y: Corresponding label for each set of features.\n\n :return: Returns a list of model weight coefficients where coef[0] is the bias.\n \"\"\"\n for epoch in range(self.epochs):\n for features, label in zip(X, y):\n self.sg_update(features, label)\n return self.bias, self.coef", "_____no_output_____" ], [ "def get_accuracy(y_bar, y_pred):\n \"\"\"\n Computes what percent of the total testing data the model classified correctly.\n\n :param y_bar: List of ground truth classes for each example.\n :param y_pred: List of model predicted class for each example.\n\n :return: Returns a real number between 0 and 1 for the model accuracy.\n \"\"\"\n correct = 0\n for i in range(len(y_bar)):\n if y_bar[i] == y_pred[i]:\n correct += 1\n accuracy = (correct / len(y_bar)) * 100.0\n return accuracy", "_____no_output_____" ] ], [ [ "Training the logistic regression model", "_____no_output_____" ] ], [ [ "# Logistic 
Model\nlogreg = LogReg(LEARNING_RATE, EPOCHS, len(data_train[0]))\nbias_logreg, weights_logreg = logreg.train(data_train, labels_train)\ny_logistic = [round(logreg.predict(example)) for example in data_test]", "_____no_output_____" ] ], [ [ "Getting the accuracy of the logistic regression model predicting the test data", "_____no_output_____" ] ], [ [ "# Compare accuracies\naccuracy_logistic = get_accuracy(y_logistic, labels_test)\nprint('Logistic Regression Accuracy: {:0.3f}'.format(accuracy_logistic))", "Logistic Regression Accuracy: 83.755\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb22c29d68a0ed8da881b20a5e822131e06cf5b7
1,312
ipynb
Jupyter Notebook
Image Subtraction.ipynb
snipsave/opencv_studies
7e12aafab4e569f25a9e6c64d03f18733a773996
[ "MIT" ]
1
2018-07-28T05:45:16.000Z
2018-07-28T05:45:16.000Z
Image Subtraction.ipynb
snipsave/opencv_studies
7e12aafab4e569f25a9e6c64d03f18733a773996
[ "MIT" ]
null
null
null
Image Subtraction.ipynb
snipsave/opencv_studies
7e12aafab4e569f25a9e6c64d03f18733a773996
[ "MIT" ]
2
2018-10-21T11:51:45.000Z
2020-05-09T20:32:56.000Z
24.754717
90
0.552591
[ [ [ "import numpy as np\nimport cv2\n\nif __name__ == '__main__':\n\n image = cv2.imread('data_6/document_img.png',cv2.IMREAD_GRAYSCALE)\n template = cv2.imread('data_6/template_img.png',cv2.IMREAD_GRAYSCALE)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))\n template = cv2.morphologyEx(template, cv2.MORPH_ERODE, kernel,iterations = 2)\n\n image[template == 0] = 255\n result_img = cv2.subtract(template,image)\n\n cv2.imshow('image', result_img)\n cv2.imshow('tmpl', template)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite('data_6/output.png',image)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb22d0646e3bfae44844e739ddbbec7848c3faa2
98,822
ipynb
Jupyter Notebook
code/acousticData/analysis/2-GLSModels.ipynb
leviner/saildrone-2018
ed50dc0470dc057d6e793d242397146ee69b7405
[ "MIT" ]
null
null
null
code/acousticData/analysis/2-GLSModels.ipynb
leviner/saildrone-2018
ed50dc0470dc057d6e793d242397146ee69b7405
[ "MIT" ]
null
null
null
code/acousticData/analysis/2-GLSModels.ipynb
leviner/saildrone-2018
ed50dc0470dc057d6e793d242397146ee69b7405
[ "MIT" ]
null
null
null
89.756585
23,796
0.804295
[ [ [ "# Index Day/Night - Run in python\nThis only needs to be done once", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport cmocean\nimport warnings\nfrom matplotlib import rcParams\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nwarnings.filterwarnings('ignore')\n#Pickled acoustic data\ndf1022 = pd.read_pickle('processingFiles/df1022.pkl')\ndf1023 = pd.read_pickle('processingFiles/df1023.pkl')\ndfMLS1 = pd.read_pickle('processingFiles/dfMLS1.pkl')\ndfMLS2 = pd.read_pickle('processingFiles/dfMLS2.pkl')\ndfMLS3 = pd.read_pickle('processingFiles/dfMLS3.pkl')\ndfMLS4 = pd.read_pickle('processingFiles/dfMLS4.pkl')\n# ENV data files\ndfEnv1022 = pd.read_csv('processingFiles/Env1022.csv')\ndfEnv1022.dtime = pd.to_datetime(dfEnv1022.dtime)\ndfEnv1023 = pd.read_csv('processingFiles/Env1023.csv')\ndfEnv1023.dtime = pd.to_datetime(dfEnv1023.dtime)\ndef combineEnvAcoustic(dfEnv, dfAcoustic):\n dfEnv.dtime = pd.to_datetime(dfEnv.dtime)\n dfEnv = dfEnv.sort_values('dtime')\n dfEnv = dfEnv.set_index('dtime')\n\n dfAcoustic = dfAcoustic.sort_values('datetime')\n dfAcoustic = dfAcoustic.set_index('datetime')\n\n df = dfEnv.reindex(dfAcoustic.index, method='nearest')\n df = df.merge(dfAcoustic,on='datetime')\n return df\ndf1022All= combineEnvAcoustic(dfEnv1022, df1022)\ndf1023All= combineEnvAcoustic(dfEnv1023, df1023)\ndfSurvey1All = df1022All[(df1022All.index > '07-20-2018') & (df1022All.index < '08-21-2018') ]\ndfSurvey1All.name = 'First Complete Survey'\n\ndf1023CS2 = df1023All[(df1023All.index > '08-24-2018 15:00:00') & (df1023All.index < '09-07-2018')]\ndf1022CS2 = df1022All[(df1022All.index > '08-25-2018') & (df1022All.index < '09-11-2018 22:00:00') ]\ndfSurvey2All = pd.concat([df1023CS2, df1022CS2])\ndfSurvey2All.name = 'Second Complete Survey'\ndfEnvMLS1 = dfEnv1023[(dfEnv1023.dtime > '07-20-2018 20:00:00') & (dfEnv1023.dtime < '08-04-2018 00:00:00')]\ndfEnvMLS1.name 
= 'MLS-1'\ndfEnvMLS2 = dfEnv1022[(dfEnv1022.dtime > '07-23-2018 20:00:00') & (dfEnv1022.dtime < '08-13-2018 00:00:00')]\ndfEnvMLS2.name = 'MLS-2'\ndfEnvMLS3 = pd.concat([dfEnv1023[(dfEnv1023.dtime > '08-13-2018 15:00:00') & (dfEnv1023.dtime < '08-16-2018 20:00:00')], dfEnv1022[(dfEnv1022.dtime > '08-22-2018 00:00:00') & (dfEnv1022.dtime < '08-29-2018 00:00:00')]])\ndfEnvMLS3.name = 'MLS-3'\ndfEnvMLS4 = dfEnv1023[(dfEnv1023.dtime > '08-30-2018 20:00:00') & (dfEnv1023.dtime < '09-11-2018 17:00:00')]\ndfEnvMLS4.name = 'MLS-4'\n\n\ndf1022All= combineEnvAcoustic(dfEnvMLS1, dfMLS1)\ntest = df1022All.resample('H').mean()\ntest['day'] = test.par > 10\ntest['survey'] = [1 for i in range(len(test))]\ncsvMLS1 = test[['mwd','day','survey']]\ncsvMLS1['hour'] = [i for i in range(len(csvMLS1))]\ndf1022All= combineEnvAcoustic(dfEnvMLS2, dfMLS2)\ntest = df1022All.resample('H').mean()\ntest['day'] = test.par > 10\ntest['survey'] = [2 for i in range(len(test))]\ncsvMLS2 = test[['mwd','day','survey']]\ncsvMLS2['hour'] = [i for i in range(len(csvMLS2))]\ndf1022All= combineEnvAcoustic(dfEnvMLS3, dfMLS3)\ntest = df1022All.resample('H').mean()\ntest['day'] = test.par > 10\ntest['survey'] = [3 for i in range(len(test))]\ncsvMLS3 = test[['mwd','day','survey']]\ncsvMLS3['hour'] = [i for i in range(len(csvMLS3))]\ndf1022All= combineEnvAcoustic(dfEnvMLS4, dfMLS4)\ntest = df1022All.resample('H').mean()\ntest['day'] = test.par > 10\ntest['survey'] = [4 for i in range(len(test))]\ncsvMLS4 = test[['mwd','day','survey']]\ncsvMLS4['hour'] = [i for i in range(len(csvMLS4))]\n \ncsvAll = pd.concat([csvMLS1,csvMLS2,csvMLS3,csvMLS4])\ncsvAll['julian']= csvAll.index.to_julian_date() \ncsvAll.julian = csvAll.julian-pd.to_datetime('2017-12-31').to_julian_date()\ncsvAll.day = csvAll.day.astype(int)\ncsvAll.julian = csvAll.julian.astype(float)\ncsvAll.to_csv('processingFiles/daynight.csv')", "_____no_output_____" ] ], [ [ "# Switch to R", "_____no_output_____" ] ], [ [ 
"df<-read.csv('processingFiles/daynight.csv')\ndf <- df[complete.cases(df),]\nlibrary(nlme)\nlibrary(mgcv)\nlibrary(dplyr)\ndf2 = distinct(df,julian,.keep_all=TRUE)\nmod1<-gls(mwd ~ day*julian, data=df2, correl = corCAR1(form = ~julian)) # whether I use the survey as cetegorical or continuous\nsummary(mod1)", "Warning message:\n\"package 'nlme' was built under R version 3.6.3\"Warning message:\n\"package 'mgcv' was built under R version 3.6.3\"This is mgcv 1.8-32. For overview type 'help(\"mgcv-package\")'.\nWarning message:\n\"package 'dplyr' was built under R version 3.6.3\"\nAttaching package: 'dplyr'\n\nThe following object is masked from 'package:nlme':\n\n collapse\n\nThe following objects are masked from 'package:stats':\n\n filter, lag\n\nThe following objects are masked from 'package:base':\n\n intersect, setdiff, setequal, union\n\n" ] ], [ [ "# Spatial model - back to python\nThis only needs to be done once to make csv versions of the dataframes for use in R.", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.read_pickle('processingFiles/dfMLS1Grid.pkl')\ndf.to_csv('processingFiles/MLS1Grid.csv')\ndf = pd.read_pickle('processingFiles/dfMLS2Grid.pkl')\ndf.to_csv('processingFiles/MLS2Grid.csv')\ndf = pd.read_pickle('processingFiles/dfMLS3Grid.pkl')\ndf.to_csv('processingFiles/MLS3Grid.csv')\ndf = pd.read_pickle('processingFiles/dfMLS4Grid.pkl')\ndf.to_csv('processingFiles/MLS4Grid.csv')\ndf = pd.read_pickle('processingFiles/dfS1Grid.pkl')\ndf.to_csv('processingFiles/S1Grid.csv')\ndf = pd.read_pickle('processingFiles/dfS2Grid.pkl')\ndf.to_csv('processingFiles/S2Grid.csv')\n", "_____no_output_____" ] ], [ [ "## R part", "_____no_output_____" ] ], [ [ "library(nlme)\nlibrary(mgcv)\nlibrary(mapproj)\nlibrary(sp)\n# Read in all of the csv files and combine into small survey and big survey 
dataframes\ndf1<-read.csv('processingFiles/MLS1Grid.csv')\ndf1$survey=factor(1)\ndf2<-read.csv('processingFiles/MLS2Grid.csv')\ndf2$survey=factor(2)\ndf3<-read.csv('processingFiles/MLS3Grid.csv')\ndf3$survey=factor(3)\ndf4<-read.csv('processingFiles/MLS4Grid.csv')\ndf4$survey=factor(4)\ndfAllSmall <- rbind(df1, df2,df3,df4)\ndf1<-read.csv('processingFiles/S1Grid.csv')\ndf1$survey=factor(1)\ndf2<-read.csv('processingFiles/S2Grid.csv')\ndf2$survey=factor(2)\ndfAllLarge <- rbind(df1, df2)", "Warning message:\n\"package 'nlme' was built under R version 3.6.3\"Warning message:\n\"package 'mgcv' was built under R version 3.6.3\"This is mgcv 1.8-32. For overview type 'help(\"mgcv-package\")'.\nWarning message:\n\"package 'mapproj' was built under R version 3.6.3\"Loading required package: maps\nWarning message:\n\"package 'sp' was built under R version 3.6.3\"" ], [ "# Let's fit a model for the large scale survey\ndfAll <- dfAllLarge\nf.lm <- lm(nasc ~ survey, data=dfAll)\nx <- mapproject(dfAll$lonC, dfAll$latC, \"albers\", param=c(min(dfAll$latC), max(dfAll$latC)))\ndfAll$x <- x$x\ndfAll$y <- x$y\nf.Gaus <-gls(nasc ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T))\nsummary(f.Gaus)\nplot(Variogram(f.Gaus))\nanova(f.Gaus)", "_____no_output_____" ], [ "# now we can fit the model for the small scale survey\ndfAll <- dfAllSmall\nf.lm <- lm(nasc ~ survey, data=dfAll)\nx <- mapproject(dfAll$lonC, dfAll$latC, \"albers\", param=c(min(dfAll$latC), max(dfAll$latC)))\ndfAll$x <- x$x\ndfAll$y <- x$y\nf.Gaus <-gls(nasc ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T))\nsummary(f.Gaus)\nplot(Variogram(f.Gaus))\nanova(f.Gaus)", "_____no_output_____" ] ], [ [ "# Target Strength\nAs a function of time", "_____no_output_____" ] ], [ [ "# Start with the basic linear model\nlibrary(nlme)\nlibrary(mgcv)\nlibrary(dplyr)\ndf<-read.csv('processingFiles/targets6h.csv') \nmdl <- lm(y ~x,data=df)\nsummary(mdl)", "_____no_output_____" ], [ "# Then check the 
residuals\npar(mfrow=c(2,2))\nplot(mdl)", "_____no_output_____" ], [ "# and finaly take a look at potential autocorrelation effects\npar(mfrow=c(1,2))\nplot(residuals(mdl))\nacf(residuals(mdl), lag.max=24)", "_____no_output_____" ], [ "# The temporal autocorrelation doesn't seem signifiacnt, but here's the model\ndf <- df[complete.cases(df),]\ndf2 = distinct(df,x,.keep_all=TRUE)\nmod1<-gls(y ~ x, data=df2, correl = corCAR1(form = ~x)) \nsummary(mod1)", "_____no_output_____" ] ], [ [ "### Target strength\n\nSpatial version using gridded data", "_____no_output_____" ] ], [ [ "library(nlme)\nlibrary(mgcv)\nlibrary(mapproj)\nlibrary(sp)\n# Read in all of the csv files and combine into small survey and big survey dataframes\ndf1<-read.csv('processingFiles/gridTS1.csv')\ndf1$survey=factor(1)\ndf2<-read.csv('processingFiles/gridTS2.csv')\ndf2$survey=factor(2)\ndf3<-read.csv('processingFiles/gridTS3.csv')\ndf3$survey=factor(3)\ndf4<-read.csv('processingFiles/gridTS4.csv')\ndf4$survey=factor(4)\ndfAllSmall <- rbind(df1, df2,df3,df4)", "_____no_output_____" ], [ "dfAll <- dfAllSmall\nf.lm <- lm(ts ~ survey, data=dfAll)\nx <- mapproject(dfAll$lonC, dfAll$latC, \"albers\", param=c(min(dfAll$latC), max(dfAll$latC)))\ndfAll$x <- x$x\ndfAll$y <- x$y\nf.Gaus <-gls(ts ~ survey, data=dfAll, correl=corGaus(form= ~ x+y | survey, nugget=T))\nsummary(f.Gaus)\nplot(Variogram(f.Gaus))\nanova(f.Gaus)", "_____no_output_____" ], [ "a = summary(f.Gaus)\na$tTable", "_____no_output_____" ], [ "10*log10(a$tTable[1]-a$tTable[5])\n10*log10(a$tTable[1])\n10*log10(a$tTable[1]+a$tTable[5])\n\n10*log10(a$tTable[2]-a$tTable[6])\n10*log10(a$tTable[2])\n10*log10(a$tTable[2]+a$tTable[6])\n\n10*log10(a$tTable[3]-a$tTable[7])\n10*log10(a$tTable[3])\n10*log10(a$tTable[3]+a$tTable[7])\n\n10*log10(a$tTable[4]-a$tTable[8])\n10*log10(a$tTable[4])\n10*log10(a$tTable[4]+a$tTable[8])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb22db3e39a9542bbeb842b3d2be29b704b7e74b
2,292
ipynb
Jupyter Notebook
test/dist-correct-150/exam_65/test-exam.ipynb
chrispyles/jexam
ebe83b170f51c5820e0c93955824c3798922f097
[ "BSD-3-Clause" ]
1
2020-07-25T02:36:38.000Z
2020-07-25T02:36:38.000Z
test/dist-correct/exam_72/test-exam.ipynb
chrispyles/jexam
ebe83b170f51c5820e0c93955824c3798922f097
[ "BSD-3-Clause" ]
null
null
null
test/dist-correct/exam_72/test-exam.ipynb
chrispyles/jexam
ebe83b170f51c5820e0c93955824c3798922f097
[ "BSD-3-Clause" ]
null
null
null
17.630769
251
0.496073
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb22e6e2954e1657a61b7db02f15595932826c4c
30,815
ipynb
Jupyter Notebook
Midterm/p2/Problem2.ipynb
alpha-leo/ComputationalPhysics-Fall2020
737769d4a046b4ecea885cafeaf26e26075f7320
[ "MIT" ]
1
2021-08-10T14:33:35.000Z
2021-08-10T14:33:35.000Z
Midterm/p2/Problem2.ipynb
alpha-leo/ComputationalPhysics-Fall2020
737769d4a046b4ecea885cafeaf26e26075f7320
[ "MIT" ]
null
null
null
Midterm/p2/Problem2.ipynb
alpha-leo/ComputationalPhysics-Fall2020
737769d4a046b4ecea885cafeaf26e26075f7320
[ "MIT" ]
null
null
null
138.183857
8,508
0.889469
[ [ [ "from matplotlib import pyplot as plt\nimport numpy as np\nimport networkx as nx\nfrom scipy.special import factorial", "_____no_output_____" ], [ "k_means = [1, 8, 64, 512]\n\ndef make_clust(k_mean):\n \"\"\" make graph and plot hist of clustering\n then return graph.\n \"\"\"\n N = 10000\n # make graph\n print(\"[info] Generating Graph\")\n G = nx.erdos_renyi_graph(N, k_mean / N)\n\n # clustering\n print(\"[info] Calculating clustering\")\n g_clustering = np.zeros(N)\n clust = nx.clustering(G)\n # convert clustering from dict to numpy array\n for i in range(N):\n g_clustering[i] = clust[i]\n # plot hist\n plt.hist(g_clustering, bins=40, density=0)\n plt.title(\"distribution of clustering for <k>=\" + str(k_mean))\n plt.savefig('clust' + str(k_mean) + '.jpg', bbox_inches='tight')\n plt.close()\n return G, g_clustering\nGs = []\ng_clusts = []\nfor k_mean in k_means:\n G, g_clust = make_clust(k_mean)\n Gs.append(G)\n g_clusts.append(g_clust)\n", "[info] Generating Graph\n[info] Calculating clustering\n[info] Generating Graph\n[info] Calculating clustering\n[info] Generating Graph\n[info] Calculating clustering\n[info] Generating Graph\n[info] Calculating clustering\n" ] ], [ [ "### Part b", "_____no_output_____" ] ], [ [ "# define the poisson distribution function\ndef poisson(mean, k):\n return np.exp(- mean) * (mean ** k) / factorial(k)\n\n# function to compare the deg dist with the poisson dist\ndef comp_deg(G, k_mean):\n N = 10000\n # calculate the degree of each node\n deg = nx.degree(G)\n g_degree = np.zeros(N)\n for i in range(N):\n g_degree[i] = deg[i]\n # use hist to get the data\n dt = plt.hist(g_degree, bins=20, density=1)\n plt.show()\n plt.close()\n prbblty = dt[0]\n bins = dt[1]\n return np.sum(poisson(k_mean, bins[:-1]) - prbblty)", "_____no_output_____" ], [ "for i in range(4):\n print(comp_deg(Gs[i], k_means[i]))\n ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb22ec7a98734200863ac345ea454b71e64f18a9
48,224
ipynb
Jupyter Notebook
notebooks/hypotests/FC_interval_asy.ipynb
scikit-hep/statutils
636d305c1e9d76dea2613ac0ce085789821ee9ad
[ "BSD-3-Clause" ]
36
2019-11-28T10:28:17.000Z
2022-03-12T02:12:26.000Z
notebooks/hypotests/FC_interval_asy.ipynb
scikit-hep/hepstats
76361cabbf3810ea9a951bb90dc45e17ff464406
[ "BSD-2-Clause", "BSD-3-Clause" ]
31
2019-11-21T13:35:10.000Z
2022-01-05T16:17:33.000Z
notebooks/hypotests/FC_interval_asy.ipynb
scikit-hep/hepstats
76361cabbf3810ea9a951bb90dc45e17ff464406
[ "BSD-2-Clause", "BSD-3-Clause" ]
3
2020-03-22T13:28:01.000Z
2021-08-29T05:13:28.000Z
161.284281
36,864
0.866332
[ [ [ "# Feldman and Cousins intervals with asymptotics.\n\nThis is a copy of `FC_interval_freq.ipynb` using the asymptotic formulae instead of toys.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport time\n\nimport zfit\nfrom zfit.loss import UnbinnedNLL\nfrom zfit.minimize import Minuit\nzfit.settings.set_seed(10)\n\nfrom hepstats.hypotests.calculators import AsymptoticCalculator\nfrom hepstats.hypotests import ConfidenceInterval\nfrom hepstats.hypotests.parameters import POIarray\nfrom hepstats.hypotests.exceptions import POIRangeError\n\nfrom utils import one_minus_cl_plot, pltdist, plotfitresult", "/Users/matthieumarinangeli/anaconda3/envs/tfn2/lib/python3.7/site-packages/zfit/util/execution.py:70: UserWarning: Not running on Linux. Determining available cpus for thread can failand be overestimated. Workaround (only if too many cpus are used):`zfit.run.set_n_cpu(your_cpu_number)`\n warnings.warn(\"Not running on Linux. Determining available cpus for thread can fail\"\n" ] ], [ [ "In this example we consider an experiment where the observable $x$ is simply the measured value of $\\mu$ in an experiment with a Gaussian resolution with known width $\\sigma = 1$. We will compute the confidence belt for a 90 % condifdence level for the mean of the Gaussian $\\mu$.\n\nWe define a sampler below for a Gaussian pdf with $\\sigma = 1$ using the `zfit` library, the sampler allows to generate samples for different values of $\\mu$. 
1000 entries are generated for each sample.", "_____no_output_____" ] ], [ [ "bounds = (-10, 10)\nobs = zfit.Space('x', limits=bounds)\n\nmean = zfit.Parameter(\"mean\", 0)\nsigma = zfit.Parameter(\"sigma\", 1.0)\nmodel = zfit.pdf.Gauss(obs=obs, mu=mean, sigma=sigma)\n\ndata = model.create_sampler(1000)\ndata.resample()", "_____no_output_____" ] ], [ [ "Below is defined the negative-likelihood function which is needed to compute Feldman and Cousins intervals as described in [arXiv:1109.0714](https://arxiv.org/abs/1109.0714). The negative-likelihood function is mimised to compute the measured mean $x$ and its uncertainty $\\sigma_x$. ", "_____no_output_____" ] ], [ [ "# Create the negative log likelihood\nnll = UnbinnedNLL(model=model, data=data) \n\n# Instantiate a minuit minimizer\nminimizer = Minuit(verbosity=0)\n\n# minimisation of the loss function\nminimum = minimizer.minimize(loss=nll)\nminimum.hesse();\nprint(minimum)\n\nx_err = minimum.params[mean][\"minuit_hesse\"][\"error\"]", "/Users/matthieumarinangeli/anaconda3/envs/tfn2/lib/python3.7/site-packages/zfit/util/cache.py:283: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray\n return all(np.equal(self.immutable_representation, other.immutable_representation))\n" ] ], [ [ "\n\nTo compute the the confidence belt on $\\mu$ 90 % CL intervals have to be computed for several values of the measured mean $x$. Samples are generated for $\\mu = n \\times \\sigma_x$ with $n = -6, -5, -4, ..., 3, 4, 5, 6$, and fitted to measure the mean $x_n$. \n\n90 % CL intervals are evaluated for each $x_n$ for the two following cases, $\\mu > 0$ and $\\mu$ unbounded.\n\nWith `hepstats`, The intervals are obtained with `ConfidenceInterval` object using a calculator. 
Here the calculator is the `AsymptoticCalculator` which computes the intervals using asymptotic formulae (see [Asymptotic formulae for likelihood-based tests of new physics](https://arxiv.org/pdf/1007.1727.pdf)), an example of a 68 % CL interval with the `AsymptoticCalculator` can be found [here](https://github.com/scikit-hep/hepstats/blob/master/notebooks/hypotests/confidenceinterval_asy_zfit.ipynb).\n\nThe option `qtilde = True` should be used if $\\mu > 0$.", "_____no_output_____" ] ], [ [ "results = {}", "_____no_output_____" ], [ "for n in np.arange(-6, 7, 1.0):\n \n x = n * x_err\n \n if n not in results:\n \n zfit.settings.set_seed(5)\n \n data.resample(param_values={mean: x})\n \n minimum = minimizer.minimize(loss=nll)\n minimum.hesse(); \n \n results_n = {}\n results_n[\"x\"] = minimum.params[mean][\"value\"]\n results_n[\"x_err\"] = minimum.params[mean][\"minuit_hesse\"][\"error\"]\n \n calculator = AsymptoticCalculator(minimum, minimizer)\n \n x_min = results_n[\"x\"] - results_n[\"x_err\"]*3\n x_max = results_n[\"x\"] + results_n[\"x_err\"]*3\n \n if n < -1:\n x_max = max(0.5 * results_n[\"x_err\"], x_max)\n \n poinull = POIarray(mean, np.linspace(x_min, x_max, 50))\n \n results_n[\"calculator\"] = calculator\n results_n[\"poinull\"] = poinull\n \n else:\n results_n = results[n]\n calculator = results_n[\"calculator\"]\n poinull = results_n[\"poinull\"]\n\n if \"mu_lower\" not in results_n:\n \n for qtilde in [True, False]:\n \n while True:\n try:\n ci = ConfidenceInterval(calculator, poinull, qtilde=qtilde)\n interval = ci.interval(alpha=0.05, printlevel=0)\n break\n except POIRangeError:\n values = poinull.values\n poinull = POIarray(mean, np.concatenate([values, [values[-1] + np.diff(values)[0]]]))\n results_n[\"poinull\"] = poinull\n \n if qtilde:\n results_n[\"mu_lower\"] = interval[\"lower\"]\n results_n[\"mu_upper\"] = interval[\"upper\"]\n else:\n results_n[\"mu_lower_unbound\"] = interval[\"lower\"]\n results_n[\"mu_upper_unbound\"] = 
interval[\"upper\"]\n \n results[n] = results_n", "/Users/matthieumarinangeli/anaconda3/envs/tfn2/lib/python3.7/site-packages/hepstats/hypotests/core/confidence_interval.py:116: UserWarning: Multiple roots have been founds.\n warnings.warn(msg_warn)\n" ] ], [ [ "The plot of the confidence belt of $\\mu$ at 90 % CL as function of the measured mean values $x$ (in unit of $\\sigma_x$), for the bounded and unbounded case are shown below.", "_____no_output_____" ] ], [ [ "f = plt.figure(figsize=(9, 8))\n\nplt.plot([v[\"x\"]/v[\"x_err\"] for v in results.values()], \n [v[\"mu_upper_unbound\"]/v[\"x_err\"] for v in results.values()], color=\"black\", label=\"90 % CL, no boundaries\")\nplt.plot([v[\"x\"]/v[\"x_err\"] for v in results.values()], \n [v[\"mu_lower_unbound\"]/v[\"x_err\"] for v in results.values()], color=\"black\")\nplt.plot([v[\"x\"]/v[\"x_err\"] for v in results.values()], \n [v[\"mu_upper\"]/v[\"x_err\"] for v in results.values()], \"--\", color=\"crimson\", label=\"90 % CL, $\\mu > 0$\")\nplt.plot([v[\"x\"]/v[\"x_err\"] for v in results.values()], \n [v[\"mu_lower\"]/v[\"x_err\"] for v in results.values()], \"--\", color=\"crimson\")\nplt.ylim(0.)\nplt.legend(fontsize=15)\nplt.ylabel(\"Mean $\\mu$\", fontsize=15)\nplt.xlabel(\"Measured mean $x$\", fontsize=15);", "_____no_output_____" ] ], [ [ "For the unbounded and the $\\mu > 0$ cases the plot reproduces the figure 3 and 10, respectively, of [A Unified Approach to the Classical Statistical Analysis of Small Signals, Gary J. Feldman, Robert D. Cousins](https://arxiv.org/pdf/physics/9711021.pdf).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb22ee1926a15dc9401cdaaea75ac3fbd3ae028c
8,760
ipynb
Jupyter Notebook
sparkstreaming/sparkstreaming.ipynb
felipedmnq/streaming-weather-kafka
246daa08dbb41e75a700cb355ee10bded62e8716
[ "MIT" ]
2
2022-01-20T09:22:46.000Z
2022-03-26T06:53:51.000Z
sparkstreaming/sparkstreaming.ipynb
felipedmnq/streaming-weather-kafka
246daa08dbb41e75a700cb355ee10bded62e8716
[ "MIT" ]
null
null
null
sparkstreaming/sparkstreaming.ipynb
felipedmnq/streaming-weather-kafka
246daa08dbb41e75a700cb355ee10bded62e8716
[ "MIT" ]
null
null
null
68.976378
1,682
0.656279
[ [ [ "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *", "_____no_output_____" ], [ "spark = SparkSession.builder.appName(\"StreamingFromKafka\").master(\"local[*]\").getOrCreate()", "22/01/11 13:41:46 WARN Utils: Your hostname, Felipes-MacBook-Pro.local resolves to a loopback address: 127.0.0.1; using 192.168.8.101 instead (on interface en0)\n22/01/11 13:41:46 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address\nUsing Spark's default log4j profile: org/apache/spark/log4j-defaults.properties\nSetting default log level to \"WARN\".\nTo adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).\n22/01/11 13:41:47 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable\n" ], [ "main_schema = StructType([\n StructField('_id', StringType(), False),\n StructField('created_at', TimestampType(), False),\n StructField('city_id', IntegerType(), True),\n StructField('lat', DoubleType(), True),\n StructField('lon', DoubleType(), True),\n StructField('country', StringType(), True),\n StructField('temp', DoubleType(), True),\n StructField('max_temp', DoubleType(), True),\n StructField('min_temp', DoubleType(), True),\n StructField('feels_like', DoubleType(), True),\n StructField('humidity', IntegerType(), True)\n])", "_____no_output_____" ], [ "\nstreaming = spark.readStream.format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"openweather\") \\\n .load()", "zsh: no such option: packages org.apache.spark:spark_sql_kafka_0_10_2.11:2.0.2\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb22f8749846b90b646238fb75c43ee77da8f7b5
7,498
ipynb
Jupyter Notebook
projections/tas/TAS_compute.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
1
2021-08-04T09:59:37.000Z
2021-08-04T09:59:37.000Z
projections/tas/TAS_compute.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
null
null
null
projections/tas/TAS_compute.ipynb
peterm790/MASTERS
4c57b25252af138a40415697dc50dbc389f844b3
[ "MIT" ]
null
null
null
38.060914
221
0.540944
[ [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport glob\nimport os\nimport scipy.stats\nfrom matplotlib import cm\nimport seaborn as sns\nimport dask\nimport pickle\nfrom datetime import datetime\nimport ast", "_____no_output_____" ], [ "from dask.distributed import Client, LocalCluster\nif __name__ == \"__main__\":\n cluster=LocalCluster(host=\"tcp://127.0.0.1:2459\",dashboard_address=\"127.0.0.1:2461\",n_workers=4)\n client = Client(cluster)", "_____no_output_____" ], [ "models = [x.split('/')[-1] for x in glob.glob(\"/terra/data/cmip5/global/rcp85/*\")] ", "_____no_output_____" ], [ "dask.config.set(**{'array.slicing.split_large_chunks': False})", "_____no_output_____" ], [ "import warnings\nwarnings.simplefilter(\"ignore\")\n#annoying cftime serialization warning ", "_____no_output_____" ], [ "dic = {}\nfor model in models:\n try:\n rcp85_files = sorted(glob.glob(\"/terra/data/cmip5/global/rcp85/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))\n rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n rcp85 = rcp85.sel(time = slice('2000','2250'))\n hist_files = sorted(glob.glob(\"/terra/data/cmip5/global/historical/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))\n hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n x = xr.concat([hist,rcp85],dim='time').load()\n x = x.sortby(x.time)\n x = x.resample(time='M').mean()\n dic[model] = x - hist.sel(time=slice('1979','2005')).mean(dim='time')\n except:\n if model == 'BNU-ESM': # no historical monthly data \n rcp85_files = sorted(glob.glob(\"/terra/data/cmip5/global/rcp85/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))\n rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n rcp85 = rcp85.sel(time = slice('2000','2250'))\n 
hist_files = sorted(glob.glob(\"/terra/data/cmip5/global/historical/\"+str(model)+\"/r1i1p1/day/native/tas_*\"))\n hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n hist = hist.resample(time='M').mean()\n x = xr.concat([hist,rcp85],dim='time').load()\n x = x.sortby(x.time)\n x = x.resample(time='M').mean()\n dic[model] = x - hist.sel(time=slice('1979','2005')).mean(dim='time')\n elif model == 'MPI-ESM-LR': # a problem with the later than 2100 data\n rcp85_files = sorted(glob.glob(\"/terra/data/cmip5/global/rcp85/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))[0]\n rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n rcp85 = rcp85.sel(time = slice('2000','2250'))\n hist_files = sorted(glob.glob(\"/terra/data/cmip5/global/historical/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))\n hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n x = xr.concat([hist,rcp85],dim='time').load()\n x = x.sortby(x.time)\n x = x.resample(time='M').mean()\n dic[model] = x - (x.sel(time=slice('1979','2005')).mean(dim='time'))\n elif model == 'CNRM-CM5': # a problem with the later than 2100 data\n rcp85_files = sorted(glob.glob(\"/terra/data/cmip5/global/rcp85/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))[:2]\n rcp85 = xr.open_mfdataset(rcp85_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n rcp85 = rcp85.sel(time = slice('2000','2250'))\n hist_files = sorted(glob.glob(\"/terra/data/cmip5/global/historical/\"+str(model)+\"/r1i1p1/mon/native/tas_*\"))\n hist = xr.open_mfdataset(hist_files, decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').tas\n x = xr.concat([hist,rcp85],dim='time').load()\n x = x.sortby(x.time)\n x = x.resample(time='M').mean()\n dic[model] = x - 
(x.sel(time=slice('1979','2005')).mean(dim='time'))", "_____no_output_____" ], [ "#NOAA\nx = xr.open_mfdataset('/home/pmarsh/NOAA_2deg/air.2m.mon.mean.nc', decode_cf=True).sel(lat = -34, method = 'nearest').sel(lon = 18, method = 'nearest').air\nx = x.sortby(x.time)\nx = x.resample(time='M').mean()\nx = x.sel(time=slice('1940','2016'))\ndic['NOAA'] = x - (x.sel(time=slice('1979','2005')).mean(dim='time'))", "_____no_output_____" ], [ "#ERA5 - 1hr - daily avalable but missing some data \nx = xr.open_mfdataset(sorted(glob.glob('/terra/data/reanalysis/global/reanalysis/ECMWF/ERA5/1hr/native/tas_*')), decode_cf=True).sel(latitude = -34, method = 'nearest').sel(longitude = 18, method = 'nearest').tas\nx = x.resample(time='M').mean()\nx = x.sortby(x.time).load()\ndic['ERA5'] = x - (x.sel(time=slice('1979','2005')).mean(dim='time'))", "_____no_output_____" ], [ "pickle.dump(dic, open( \"monthly_tas_dic.p\", \"wb\" ) )", "_____no_output_____" ], [ "client.close()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb23005d1d2d9b2ef4abd289d1d7c131aa4e7758
28,121
ipynb
Jupyter Notebook
site/es/tutorials/keras/regression.ipynb
kweinmeister/docs-l10n
e87bd94e18f7df4e3614cbcfece4b051c910bdca
[ "Apache-2.0" ]
null
null
null
site/es/tutorials/keras/regression.ipynb
kweinmeister/docs-l10n
e87bd94e18f7df4e3614cbcfece4b051c910bdca
[ "Apache-2.0" ]
null
null
null
site/es/tutorials/keras/regression.ipynb
kweinmeister/docs-l10n
e87bd94e18f7df4e3614cbcfece4b051c910bdca
[ "Apache-2.0" ]
null
null
null
31.992036
498
0.526048
[ [ [ "##### Copyright 2018 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ], [ "#@title MIT License\n#\n# Copyright (c) 2017 François Chollet\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ], [ [ "# Regresion Basica: Predecir eficiencia de gasolina", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/keras/regression\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/es/tutorials/keras/regression.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs-l10n/blob/master/site/es/tutorials/keras/regression.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/es/tutorials/keras/regression.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "Note: Nuestra comunidad de Tensorflow ha traducido estos documentos. 
Como las traducciones de la comunidad\nson basados en el \"mejor esfuerzo\", no hay ninguna garantia que esta sea un reflejo preciso y actual \nde la [Documentacion Oficial en Ingles](https://www.tensorflow.org/?hl=en).\nSi tienen sugerencias sobre como mejorar esta traduccion, por favor envian un \"Pull request\"\nal siguiente repositorio [tensorflow/docs](https://github.com/tensorflow/docs).\nPara ofrecerse como voluntario o hacer revision de las traducciones de la Comunidad\npor favor contacten al siguiente grupo [[email protected] list](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).", "_____no_output_____" ], [ "En un problema de *regresion*, buscamos predecir la salida de un valor continuo como la probabilidad de un precio. En contraste en un problema de *Clasificacion*, buscamos seleccionar una clase de una lista de clases (por ejemplo, en donde una imagen contenga una manzana o una naranja queremos reconocer cual es la fruta en la imagen).\n\nEste libro usa el set de datos clasico [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) y construye un modelo para predecir la eficiencia de vehiculos de 1970 y 1980. Para hacer esto proveeremos el modelo con una descripcion de muchos automoviles de ese periodo. 
Esta descripcion incluye atributos como: Cilindros, desplazamiento, potencia y peso.\n\nEste ejemplo usa el API `tf.keras` , revise [Esta Guia](https://www.tensorflow.org/guide/keras) para obtener mas detalles.", "_____no_output_____" ] ], [ [ "# Use seaborn for pairplot\n!pip install seaborn", "_____no_output_____" ], [ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pathlib\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nprint(tf.__version__)", "_____no_output_____" ] ], [ [ "## El set de Datos de MPG\n\nel set de datos esta disponible de el siguiente repositorio [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/).\n\n", "_____no_output_____" ], [ "### Obtenga la data\nPrimero descargue el set de datos.", "_____no_output_____" ] ], [ [ "dataset_path = keras.utils.get_file(\"auto-mpg.data\", \"http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data\")\ndataset_path", "_____no_output_____" ] ], [ [ "Importelo usando pandas.", "_____no_output_____" ] ], [ [ "column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',\n 'Acceleration', 'Model Year', 'Origin']\nraw_dataset = pd.read_csv(dataset_path, names=column_names,\n na_values = \"?\", comment='\\t',\n sep=\" \", skipinitialspace=True)\n\ndataset = raw_dataset.copy()\ndataset.tail()", "_____no_output_____" ] ], [ [ "### Limpie la data\n\nEl set de datos contiene algunos valores desconocidos.", "_____no_output_____" ] ], [ [ "dataset.isna().sum()", "_____no_output_____" ] ], [ [ "**Para** Mantener este tutorial inicial sencillo eliminemos las siguientes filas.", "_____no_output_____" ] ], [ [ "dataset = dataset.dropna()", "_____no_output_____" ] ], [ [ "La columna de `\"Origin\"` realmente 
es categorica, no numerica. Entonces conviertala a un \"one-hot\":", "_____no_output_____" ] ], [ [ "origin = dataset.pop('Origin')", "_____no_output_____" ], [ "dataset['USA'] = (origin == 1)*1.0\ndataset['Europe'] = (origin == 2)*1.0\ndataset['Japan'] = (origin == 3)*1.0\ndataset.tail()", "_____no_output_____" ] ], [ [ "### Dividamos la data en entrenamiento y prueba\n\nAhora divida el set de datos en un set de entrenamiento y otro de pruebas.\n\nUsaremos el set de pruebas en la evaluacion final de nuestro modelo.", "_____no_output_____" ] ], [ [ "train_dataset = dataset.sample(frac=0.8,random_state=0)\ntest_dataset = dataset.drop(train_dataset.index)", "_____no_output_____" ] ], [ [ "### Inspeccione la data\n\nRevise rapidamente la distribucion conjunta de un par de columnas de el set de entrenamiento.", "_____no_output_____" ] ], [ [ "sns.pairplot(train_dataset[[\"MPG\", \"Cylinders\", \"Displacement\", \"Weight\"]], diag_kind=\"kde\")", "_____no_output_____" ] ], [ [ "Tambien revise las estadisticas generales:", "_____no_output_____" ] ], [ [ "train_stats = train_dataset.describe()\ntrain_stats.pop(\"MPG\")\ntrain_stats = train_stats.transpose()\ntrain_stats", "_____no_output_____" ] ], [ [ "### Separe las caracteristicas de las etiquetas.\n\nSepare el valor objetivo, o la \"etiqueta\" \nde las caracteristicas. Esta etiqueta es el valor que entrenara el modelo para predecir.", "_____no_output_____" ] ], [ [ "train_labels = train_dataset.pop('MPG')\ntest_labels = test_dataset.pop('MPG')", "_____no_output_____" ] ], [ [ "### Normalice la data\n\nRevise otra vez el bloque de `train_stats` que se presento antes y note la diferencia de rangos de cada caracteristica.", "_____no_output_____" ], [ "Es una buena práctica normalizar funciones que utilizan diferentes escalas y rangos. 
Aunque el modelo * podría * converger sin normalización de características, dificulta el entrenamiento y hace que el modelo resultante dependa de la elección de las unidades utilizadas en la entrada.\n\nNota: Aunque generamos intencionalmente estas estadísticas solo del conjunto de datos de entrenamiento, estas estadísticas también se utilizarán para normalizar el conjunto de datos de prueba. Necesitamos hacer eso para proyectar el conjunto de datos de prueba en la misma distribución en la que el modelo ha sido entrenado.", "_____no_output_____" ] ], [ [ "def norm(x):\n return (x - train_stats['mean']) / train_stats['std']\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)", "_____no_output_____" ] ], [ [ "Estos datos normalizados es lo que usaremos para entrenar el modelo.\n\nPrecaución: las estadísticas utilizadas para normalizar las entradas aquí (media y desviación estándar) deben aplicarse a cualquier otro dato que se alimente al modelo, junto con la codificación de un punto que hicimos anteriormente. Eso incluye el conjunto de pruebas, así como los datos en vivo cuando el modelo se usa en producción.", "_____no_output_____" ], [ "## El modelo", "_____no_output_____" ], [ "### Construye el modelo\n\nConstruyamos nuestro modelo. Aquí, utilizaremos un modelo `secuencial` con dos capas ocultas densamente conectadas y una capa de salida que devuelve un único valor continuo. 
Los pasos de construcción del modelo se envuelven en una función, `build_model`, ya que crearemos un segundo modelo, más adelante.", "_____no_output_____" ] ], [ [ "def build_model():\n model = keras.Sequential([\n layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),\n layers.Dense(64, activation='relu'),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n model.compile(loss='mse',\n optimizer=optimizer,\n metrics=['mae', 'mse'])\n return model", "_____no_output_____" ], [ "model = build_model()", "_____no_output_____" ] ], [ [ "### Inspeccione el modelo\n\nUse el método `.summary` para imprimir una descripción simple del modelo", "_____no_output_____" ] ], [ [ "model.summary()", "_____no_output_____" ] ], [ [ "\nAhora pruebe el modelo. Tome un lote de ejemplos `10` de los datos de entrenamiento y llame a` model.predict` en él.", "_____no_output_____" ] ], [ [ "example_batch = normed_train_data[:10]\nexample_result = model.predict(example_batch)\nexample_result", "_____no_output_____" ] ], [ [ "Parece estar funcionando, y produce un resultado de la forma y tipo esperados.", "_____no_output_____" ], [ "### Entrenar a la modelo\n\nEntrene el modelo durante 1000 épocas y registre la precisión de entrenamiento y validación en el objeto `history`.", "_____no_output_____" ] ], [ [ "# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\nEPOCHS = 1000\n\nhistory = model.fit(\n normed_train_data, train_labels,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[PrintDot()])", "_____no_output_____" ] ], [ [ "Visualice el progreso de entrenamiento del modelo usando las estadísticas almacenadas en el objeto `history`.", "_____no_output_____" ] ], [ [ "hist = pd.DataFrame(history.history)\nhist['epoch'] = history.epoch\nhist.tail()", "_____no_output_____" 
], [ "def plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Abs Error [MPG]')\n plt.plot(hist['epoch'], hist['mae'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mae'],\n label = 'Val Error')\n plt.ylim([0,5])\n plt.legend()\n\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$MPG^2$]')\n plt.plot(hist['epoch'], hist['mse'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mse'],\n label = 'Val Error')\n plt.ylim([0,20])\n plt.legend()\n plt.show()\n\n\nplot_history(history)", "_____no_output_____" ] ], [ [ "Este gráfico muestra poca mejora, o incluso degradación en el error de validación después de aproximadamente 100 épocas. Actualicemos la llamada `model.fit` para detener automáticamente el entrenamiento cuando el puntaje de validación no mejore. Utilizaremos una * devolución de llamada de EarlyStopping * que pruebe una condición de entrenamiento para cada época. Si transcurre una cantidad determinada de épocas sin mostrar mejoría, entonces detiene automáticamente el entrenamiento.\n\nPuedes obtener más información sobre esta devolución de llamada [Aca](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/callbacks/EarlyStopping).", "_____no_output_____" ] ], [ [ "model = build_model()\n\n# The patience parameter is the amount of epochs to check for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\n\nhistory = model.fit(normed_train_data, train_labels, epochs=EPOCHS,\n validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])\n\nplot_history(history)", "_____no_output_____" ] ], [ [ "El gráfico muestra que en el conjunto de validación, el error promedio generalmente es de alrededor de +/- 2 MPG. ¿Es esto bueno? 
Le dejaremos esa decisión a usted.\n\nVeamos qué tan bien generaliza el modelo al usar el conjunto ** test **, que no usamos al entrenar el modelo. Esto nos dice qué tan bien podemos esperar que el modelo prediga cuándo lo usamos en el mundo real.", "_____no_output_____" ] ], [ [ "loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} MPG\".format(mae))", "_____no_output_____" ] ], [ [ "### Haga Predicciones\n\nFinalmente, prediga los valores de MPG utilizando datos en el conjunto de pruebas:", "_____no_output_____" ] ], [ [ "test_predictions = model.predict(normed_test_data).flatten()\n\nplt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [MPG]')\nplt.ylabel('Predictions [MPG]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\n_ = plt.plot([-100, 100], [-100, 100])\n", "_____no_output_____" ] ], [ [ "Parece que nuestro modelo predice razonablemente bien. Echemos un vistazo a la distribución de errores.", "_____no_output_____" ] ], [ [ "error = test_predictions - test_labels\nplt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [MPG]\")\n_ = plt.ylabel(\"Count\")", "_____no_output_____" ] ], [ [ "No es del todo gaussiano, pero podríamos esperar eso porque el número de muestras es muy pequeño.", "_____no_output_____" ], [ "## Conclusion\n\nEste cuaderno introdujo algunas técnicas para manejar un problema de regresión.\n\n* El error cuadrático medio (MSE) es una función de pérdida común utilizada para problemas de regresión (se utilizan diferentes funciones de pérdida para problemas de clasificación).\n* Del mismo modo, las métricas de evaluación utilizadas para la regresión difieren de la clasificación. 
Una métrica de regresión común es el error absoluto medio (MAE).\n* Cuando las características de datos de entrada numéricos tienen valores con diferentes rangos, cada característica debe escalarse independientemente al mismo rango.\n* Si no hay muchos datos de entrenamiento, una técnica es preferir una red pequeña con pocas capas ocultas para evitar el sobreajuste.\n* La detención temprana es una técnica útil para evitar el sobreajuste.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb23020983f8cf3fb9fe0993bfa494760e32e82a
737,396
ipynb
Jupyter Notebook
pytorch-lightning_ipynb/cnn/cnn-vgg16.ipynb
khoih-prog/deeplearning-models
707e00e34a440cdb3f12d0e37acb0c376f0aa3d5
[ "MIT" ]
1
2022-03-22T11:55:59.000Z
2022-03-22T11:55:59.000Z
pytorch-lightning_ipynb/cnn/cnn-vgg16.ipynb
khoih-prog/deeplearning-models
707e00e34a440cdb3f12d0e37acb0c376f0aa3d5
[ "MIT" ]
null
null
null
pytorch-lightning_ipynb/cnn/cnn-vgg16.ipynb
khoih-prog/deeplearning-models
707e00e34a440cdb3f12d0e37acb0c376f0aa3d5
[ "MIT" ]
null
null
null
386.678553
457,716
0.931154
[ [ [ "%load_ext watermark\n%watermark -p torch,pytorch_lightning,torchvision,torchmetrics,matplotlib", "torch : 1.10.1\npytorch_lightning: 1.6.0.dev0\ntorchvision : 0.11.2\ntorchmetrics : 0.6.2\nmatplotlib : 3.3.4\n\n" ], [ "%load_ext pycodestyle_magic\n%flake8_on --ignore W291,W293,E703", "_____no_output_____" ] ], [ [ "<a href=\"https://pytorch.org\"><img src=\"https://raw.githubusercontent.com/pytorch/pytorch/master/docs/source/_static/img/pytorch-logo-dark.svg\" width=\"90\"/></a> &nbsp; &nbsp;&nbsp;&nbsp;<a href=\"https://www.pytorchlightning.ai\"><img src=\"https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/docs/source/_static/images/logo.svg\" width=\"150\"/></a>\n\n# Model Zoo -- VGG16 Trained on CIFAR-10", "_____no_output_____" ], [ "This notebook implements the VGG16 convolutional network [1] and applies it to CIFAR-10 digit classification.\n\n![](../pytorch_ipynb/images/vgg16/vgg16-arch-table.png)", "_____no_output_____" ], [ "\n### References\n\n- [1] Simonyan, K., & Zisserman, A. (2014). [Very deep convolutional networks for large-scale image recognition](https://arxiv.org/abs/1409.1556). arXiv preprint arXiv:1409.1556.", "_____no_output_____" ], [ "## General settings and hyperparameters", "_____no_output_____" ], [ "- Here, we specify some general hyperparameter values and general settings\n- Note that for small datatsets, it is not necessary and better not to use multiple workers as it can sometimes cause issues with too many open files in PyTorch. 
So, if you have problems with the data loader later, try setting `NUM_WORKERS = 0` instead.", "_____no_output_____" ] ], [ [ "BATCH_SIZE = 256\nNUM_EPOCHS = 25\nLEARNING_RATE = 0.001\nNUM_WORKERS = 4", "_____no_output_____" ] ], [ [ "## Implementing a Neural Network using PyTorch Lightning's `LightningModule`", "_____no_output_____" ], [ "- In this section, we set up the main model architecture using the `LightningModule` from PyTorch Lightning.\n- When using PyTorch Lightning, we can start with defining our neural network model in pure PyTorch, and then we use it in the `LightningModule` to get all the extra benefits that PyTorch Lightning provides.\n- In this case, since Torchvision already offers a nice and efficient PyTorch implementation of MobileNet-v2, let's load it from the Torchvision hub:", "_____no_output_____" ] ], [ [ "import torch.nn as nn\n\n\nclass PyTorchVGG16(nn.Module):\n\n def __init__(self, num_classes):\n super().__init__()\n \n # calculate same padding:\n # (w - k + 2*p)/s + 1 = o\n # => p = (s(o-1) - w + k)/2\n \n self.block_1 = nn.Sequential(\n nn.Conv2d(in_channels=3,\n out_channels=64,\n kernel_size=(3, 3),\n stride=(1, 1),\n # (1(32-1)- 32 + 3)/2 = 1\n padding=1), \n nn.ReLU(),\n nn.Conv2d(in_channels=64,\n out_channels=64,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2),\n stride=(2, 2))\n )\n \n self.block_2 = nn.Sequential(\n nn.Conv2d(in_channels=64,\n out_channels=128,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=128,\n out_channels=128,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2),\n stride=(2, 2))\n )\n \n self.block_3 = nn.Sequential( \n nn.Conv2d(in_channels=128,\n out_channels=256,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=256,\n out_channels=256,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n 
nn.Conv2d(in_channels=256,\n out_channels=256,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=(2, 2),\n stride=(2, 2))\n )\n \n self.block_4 = nn.Sequential( \n nn.Conv2d(in_channels=256,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.Conv2d(in_channels=512,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.Conv2d(in_channels=512,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.MaxPool2d(kernel_size=(2, 2),\n stride=(2, 2))\n )\n \n self.block_5 = nn.Sequential(\n nn.Conv2d(in_channels=512,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.Conv2d(in_channels=512,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.Conv2d(in_channels=512,\n out_channels=512,\n kernel_size=(3, 3),\n stride=(1, 1),\n padding=1),\n nn.ReLU(), \n nn.MaxPool2d(kernel_size=(2, 2),\n stride=(2, 2)) \n )\n \n self.features = nn.Sequential(\n self.block_1, self.block_2, \n self.block_3, self.block_4, \n self.block_5\n )\n \n self.classifier = nn.Sequential(\n nn.Linear(512, 4096),\n nn.ReLU(True),\n nn.Dropout(p=0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(p=0.5),\n nn.Linear(4096, num_classes),\n )\n \n # self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n \n for m in self.modules():\n if isinstance(m, torch.nn.Conv2d):\n #n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n #m.weight.data.normal_(0, np.sqrt(2. 
/ n))\n m.weight.detach().normal_(0, 0.05)\n if m.bias is not None:\n m.bias.detach().zero_()\n elif isinstance(m, torch.nn.Linear):\n m.weight.detach().normal_(0, 0.05)\n m.bias.detach().detach().zero_()\n \n def forward(self, x):\n\n x = self.features(x)\n # x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n logits = self.classifier(x)\n\n return logits", "137:17: E265 block comment should start with '# '\n138:17: E265 block comment should start with '# '\n" ] ], [ [ "- Next, we can define our `LightningModule` as a wrapper around our PyTorch model:", "_____no_output_____" ] ], [ [ "import pytorch_lightning as pl\nimport torchmetrics\n\n\n# LightningModule that receives a PyTorch model as input\nclass LightningModel(pl.LightningModule):\n def __init__(self, model, learning_rate):\n super().__init__()\n\n self.learning_rate = learning_rate\n # The inherited PyTorch module\n self.model = model\n\n # Save settings and hyperparameters to the log directory\n # but skip the model parameters\n self.save_hyperparameters(ignore=['model'])\n\n # Set up attributes for computing the accuracy\n self.train_acc = torchmetrics.Accuracy()\n self.valid_acc = torchmetrics.Accuracy()\n self.test_acc = torchmetrics.Accuracy()\n \n # Defining the forward method is only necessary \n # if you want to use a Trainer's .predict() method (optional)\n def forward(self, x):\n return self.model(x)\n \n # A common forward step to compute the loss and labels\n # this is used for training, validation, and testing below\n def _shared_step(self, batch):\n features, true_labels = batch\n logits = self(features)\n loss = torch.nn.functional.cross_entropy(logits, true_labels)\n predicted_labels = torch.argmax(logits, dim=1)\n\n return loss, true_labels, predicted_labels\n\n def training_step(self, batch, batch_idx):\n loss, true_labels, predicted_labels = self._shared_step(batch)\n self.log(\"train_loss\", loss)\n \n # To account for Dropout behavior during evaluation\n self.model.eval()\n with 
torch.no_grad():\n _, true_labels, predicted_labels = self._shared_step(batch)\n self.train_acc.update(predicted_labels, true_labels)\n self.log(\"train_acc\", self.train_acc, on_epoch=True, on_step=False)\n self.model.train()\n return loss # this is passed to the optimzer for training\n\n def validation_step(self, batch, batch_idx):\n loss, true_labels, predicted_labels = self._shared_step(batch)\n self.log(\"valid_loss\", loss)\n self.valid_acc(predicted_labels, true_labels)\n self.log(\"valid_acc\", self.valid_acc,\n on_epoch=True, on_step=False, prog_bar=True)\n\n def test_step(self, batch, batch_idx):\n loss, true_labels, predicted_labels = self._shared_step(batch)\n self.test_acc(predicted_labels, true_labels)\n self.log(\"test_acc\", self.test_acc, on_epoch=True, on_step=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n return optimizer", "_____no_output_____" ] ], [ [ "## Setting up the dataset", "_____no_output_____" ], [ "- In this section, we are going to set up our dataset.", "_____no_output_____" ], [ "### Inspecting the dataset", "_____no_output_____" ] ], [ [ "from torchvision import datasets\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\n\n\ntrain_dataset = datasets.CIFAR10(root='./data', \n train=True, \n transform=transforms.ToTensor(),\n download=True)\n\ntrain_loader = DataLoader(dataset=train_dataset, \n batch_size=BATCH_SIZE, \n num_workers=NUM_WORKERS,\n drop_last=True,\n shuffle=True)\n\ntest_dataset = datasets.CIFAR10(root='./data', \n train=False,\n transform=transforms.ToTensor())\n\ntest_loader = DataLoader(dataset=test_dataset, \n batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS,\n drop_last=False,\n shuffle=False)", "Files already downloaded and verified\n" ], [ "from collections import Counter\n\n\ntrain_counter = Counter()\nfor images, labels in train_loader:\n train_counter.update(labels.tolist())\n\nprint('\\nTraining label 
distribution:')\nsorted(train_counter.items(), key=lambda pair: pair[0])", "\nTraining label distribution:\n" ], [ "test_counter = Counter()\nfor images, labels in test_loader:\n test_counter.update(labels.tolist())\n\nprint('\\nTest label distribution:')\nsorted(test_counter.items(), key=lambda pair: pair[0])", "\nTest label distribution:\n" ] ], [ [ "### A quick visual check", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision", "_____no_output_____" ], [ "for images, labels in train_loader: \n break\n\nplt.figure(figsize=(8, 8))\nplt.axis(\"off\")\nplt.title(\"Training Images\")\nplt.imshow(np.transpose(torchvision.utils.make_grid(\n images[:64], \n padding=2,\n normalize=True),\n (1, 2, 0)))\nplt.show()", "_____no_output_____" ] ], [ [ "### Performance baseline", "_____no_output_____" ], [ "- Especially for imbalanced datasets, it's quite useful to compute a performance baseline.\n- In classification contexts, a useful baseline is to compute the accuracy for a scenario where the model always predicts the majority class -- you want your model to be better than that!", "_____no_output_____" ] ], [ [ "majority_class = test_counter.most_common(1)[0]\nmajority_class", "_____no_output_____" ] ], [ [ "- (To be fair, the classes in the test set are perfectly evenly distributed, so the majority class is an arbitrary choice in this case)", "_____no_output_____" ] ], [ [ "baseline_acc = majority_class[1] / sum(test_counter.values())\nprint('Accuracy when always predicting the majority class:')\nprint(f'{baseline_acc:.2f} ({baseline_acc*100:.2f}%)')", "Accuracy when always predicting the majority class:\n0.10 (10.00%)\n" ] ], [ [ "### Setting up a `DataModule`", "_____no_output_____" ], [ "- There are three main ways we can prepare the dataset for Lightning. We can\n 1. make the dataset part of the model;\n 2. 
set up the data loaders as usual and feed them to the fit method of a Lightning Trainer -- the Trainer is introduced in the next subsection;\n 3. create a LightningDataModule.\n- Here, we are going to use approach 3, which is the most organized approach. The `LightningDataModule` consists of several self-explanatory methods as we can see below:\n", "_____no_output_____" ] ], [ [ "import os\n\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\n\n\nclass DataModule(pl.LightningDataModule):\n def __init__(self, data_path='./'):\n super().__init__()\n self.data_path = data_path\n \n def prepare_data(self):\n datasets.CIFAR10(root=self.data_path,\n download=True)\n\n self.train_transform = torchvision.transforms.Compose([\n # torchvision.transforms.Resize((70, 70)),\n # torchvision.transforms.RandomCrop((64, 64)),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n self.test_transform = torchvision.transforms.Compose([\n # torchvision.transforms.Resize((70, 70)), \n # torchvision.transforms.CenterCrop((64, 64)), \n torchvision.transforms.ToTensor(), \n torchvision.transforms.Normalize(\n (0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n return\n\n def setup(self, stage=None):\n train = datasets.CIFAR10(root=self.data_path, \n train=True, \n transform=self.train_transform,\n download=False)\n\n self.test = datasets.CIFAR10(root=self.data_path, \n train=False, \n transform=self.test_transform,\n download=False)\n\n self.train, self.valid = random_split(train, lengths=[45000, 5000])\n\n def train_dataloader(self):\n train_loader = DataLoader(dataset=self.train, \n batch_size=BATCH_SIZE, \n drop_last=True,\n shuffle=True,\n num_workers=NUM_WORKERS)\n return train_loader\n\n def val_dataloader(self):\n valid_loader = DataLoader(dataset=self.valid, \n batch_size=BATCH_SIZE, \n drop_last=False,\n shuffle=False,\n num_workers=NUM_WORKERS)\n return 
valid_loader\n\n def test_dataloader(self):\n test_loader = DataLoader(dataset=self.test, \n batch_size=BATCH_SIZE, \n drop_last=False,\n shuffle=False,\n num_workers=NUM_WORKERS)\n return test_loader", "_____no_output_____" ] ], [ [ "- Note that the `prepare_data` method is usually used for steps that only need to be executed once, for example, downloading the dataset; the `setup` method defines the the dataset loading -- if you run your code in a distributed setting, this will be called on each node / GPU. \n- Next, lets initialize the `DataModule`; we use a random seed for reproducibility (so that the data set is shuffled the same way when we re-execute this code):", "_____no_output_____" ] ], [ [ "import torch\n\n\ntorch.manual_seed(1) \ndata_module = DataModule(data_path='./data')", "_____no_output_____" ] ], [ [ "## Training the model using the PyTorch Lightning Trainer class", "_____no_output_____" ], [ "- Next, we initialize our model.\n- Also, we define a call back so that we can obtain the model with the best validation set performance after training.\n- PyTorch Lightning offers [many advanced logging services](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) like Weights & Biases. 
Here, we will keep things simple and use the `CSVLogger`:", "_____no_output_____" ] ], [ [ "from pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import CSVLogger\n\n\npytorch_model = PyTorchVGG16(num_classes=10)\n\nlightning_model = LightningModel(\n pytorch_model, learning_rate=LEARNING_RATE)\n\ncallbacks = [ModelCheckpoint(\n save_top_k=1, mode='max', monitor=\"valid_acc\")] # save top 1 model \nlogger = CSVLogger(save_dir=\"logs/\", name=\"my-model\")", "_____no_output_____" ] ], [ [ "- Now it's time to train our model:", "_____no_output_____" ] ], [ [ "import time\n\n\ntrainer = pl.Trainer(\n max_epochs=NUM_EPOCHS,\n callbacks=callbacks,\n progress_bar_refresh_rate=50, # recommended for notebooks\n accelerator=\"auto\", # Uses GPUs or TPUs if available\n devices=\"auto\", # Uses all available GPUs/TPUs if applicable\n logger=logger,\n log_every_n_steps=100)\n\nstart_time = time.time()\ntrainer.fit(model=lightning_model, datamodule=data_module)\n\nruntime = (time.time() - start_time)/60\nprint(f\"Training took {runtime:.2f} min in total.\")", "/home/jovyan/conda/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/callback_connector.py:90: LightningDeprecationWarning: Setting `Trainer(progress_bar_refresh_rate=50)` is deprecated in v1.5 and will be removed in v1.7. Please pass `pytorch_lightning.callbacks.progress.TQDMProgressBar` with `refresh_rate` directly to the Trainer's `callbacks` argument instead. 
Or, to disable the progress bar pass `enable_progress_bar = False` to the Trainer.\n rank_zero_deprecation(\nGPU available: True, used: True\nTPU available: False, using: 0 TPU cores\nIPU available: False, using: 0 IPUs\n" ] ], [ [ "## Evaluating the model", "_____no_output_____" ], [ "- After training, let's plot our training ACC and validation ACC using pandas, which, in turn, uses matplotlib for plotting (you may want to consider a [more advanced logger](https://pytorch-lightning.readthedocs.io/en/latest/extensions/logging.html) that does that for you):", "_____no_output_____" ] ], [ [ "import pandas as pd\n\n\nmetrics = pd.read_csv(f\"{trainer.logger.log_dir}/metrics.csv\")\n\naggreg_metrics = []\nagg_col = \"epoch\"\nfor i, dfg in metrics.groupby(agg_col):\n agg = dict(dfg.mean())\n agg[agg_col] = i\n aggreg_metrics.append(agg)\n\ndf_metrics = pd.DataFrame(aggreg_metrics)\ndf_metrics[[\"train_loss\", \"valid_loss\"]].plot(\n grid=True, legend=True, xlabel='Epoch', ylabel='Loss')\ndf_metrics[[\"train_acc\", \"valid_acc\"]].plot(\n grid=True, legend=True, xlabel='Epoch', ylabel='ACC')", "_____no_output_____" ] ], [ [ "- The `trainer` automatically saves the model with the best validation accuracy automatically for us, we which we can load from the checkpoint via the `ckpt_path='best'` argument; below we use the `trainer` instance to evaluate the best model on the test set:", "_____no_output_____" ] ], [ [ "trainer.test(model=lightning_model, datamodule=data_module, ckpt_path='best')", "Files already downloaded and verified\n" ] ], [ [ "## Predicting labels of new data", "_____no_output_____" ], [ "- You can use the `trainer.predict` method on a new `DataLoader` or `DataModule` to apply the model to new data.\n- Alternatively, you can also manually load the best model from a checkpoint as shown below:", "_____no_output_____" ] ], [ [ "path = trainer.checkpoint_callback.best_model_path\nprint(path)", "logs/my-model/version_66/checkpoints/epoch=19-step=3499.ckpt\n" 
], [ "lightning_model = LightningModel.load_from_checkpoint(\n path, model=pytorch_model)\nlightning_model.eval();", "_____no_output_____" ] ], [ [ "- Note that our PyTorch model, which is passed to the Lightning model requires input arguments. However, this is automatically being taken care of since we used `self.save_hyperparameters()` in our PyTorch model's `__init__` method.\n- Now, below is an example applying the model manually. Here, pretend that the `test_dataloader` is a new data loader.", "_____no_output_____" ] ], [ [ "test_dataloader = data_module.test_dataloader()\n\nall_true_labels = []\nall_predicted_labels = []\nfor batch in test_dataloader:\n features, labels = batch\n \n with torch.no_grad():\n logits = lightning_model(features)\n\n predicted_labels = torch.argmax(logits, dim=1)\n all_predicted_labels.append(predicted_labels)\n all_true_labels.append(labels)\n \nall_predicted_labels = torch.cat(all_predicted_labels)\nall_true_labels = torch.cat(all_true_labels)\nall_predicted_labels[:5]", "_____no_output_____" ] ], [ [ "Just as an internal check, if the model was loaded correctly, the test accuracy below should be identical to the test accuracy we saw earlier in the previous section.", "_____no_output_____" ] ], [ [ "test_acc = torch.mean((all_predicted_labels == all_true_labels).float())\nprint(f'Test accuracy: {test_acc:.4f} ({test_acc*100:.2f}%)')", "Test accuracy: 0.7237 (72.37%)\n" ] ], [ [ "## Inspecting Failure Cases", "_____no_output_____" ], [ "- In practice, it is often informative to look at failure cases like wrong predictions for particular training instances as it can give us some insights into the model behavior and dataset.\n- Inspecting failure cases can sometimes reveal interesting patterns and even highlight dataset and labeling issues.", "_____no_output_____" ] ], [ [ "# Append the folder that contains the \n# helper_data.py, helper_plotting.py, and helper_evaluate.py\n# files so we can import from them\n\nimport 
sys\nsys.path.append('../pytorch_ipynb')", "_____no_output_____" ], [ "from helper_data import UnNormalize\nfrom helper_plotting import show_examples", "_____no_output_____" ], [ "class_dict = {0: 'airplane',\n 1: 'automobile',\n 2: 'bird',\n 3: 'cat',\n 4: 'deer',\n 5: 'dog',\n 6: 'frog',\n 7: 'horse',\n 8: 'ship',\n 9: 'truck'}\n\n# We normalized each channel during training; here \n# we are reverting the normalization so that we \n# can plot them as images\nunnormalizer = UnNormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n\nshow_examples(\n model=lightning_model,\n data_loader=test_dataloader,\n unnormalizer=unnormalizer,\n class_dict=class_dict)", "_____no_output_____" ], [ "from torchmetrics import ConfusionMatrix\n\n\ncmat = ConfusionMatrix(num_classes=len(class_dict))\n\nfor x, y in test_dataloader:\n pred = lightning_model(x)\n cmat(pred, y)\n\ncmat_tensor = cmat.compute()", "_____no_output_____" ], [ "from helper_plotting import plot_confusion_matrix\n\n\nplot_confusion_matrix(\n cmat_tensor.numpy(),\n class_names=class_dict.values())\nplt.show()", "_____no_output_____" ] ], [ [ "## Single-image usage", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "- Assume we have a single image as shown below:", "_____no_output_____" ] ], [ [ "from PIL import Image\n\n\nimage = Image.open('data/cifar10_pngs/90_airplane.png')\nplt.imshow(image, cmap='Greys')\nplt.show()", "_____no_output_____" ] ], [ [ "- Note that we have to use the same image transformation that we used earlier in the `DataModule`. 
\n- While we didn't apply any image augmentation, we could use the `to_tensor` function from the torchvision library; however, as a general template that provides flexibility for more complex transformation chains, let's use the `Compose` class for this:", "_____no_output_____" ] ], [ [ "transform = data_module.train_transform\n\nimage_chw = transform(image)", "_____no_output_____" ] ], [ [ "- Note that `ToTensor` returns the image in the CHW format. CHW refers to the dimensions and stands for channel, height, and width.", "_____no_output_____" ] ], [ [ "print(image_chw.shape)", "torch.Size([3, 32, 32])\n" ] ], [ [ "- However, the PyTorch / PyTorch Lightning model expectes images in NCHW format, where N stands for the number of images (e.g., in a batch).\n- We can add the additional channel dimension via `unsqueeze` as shown below:", "_____no_output_____" ] ], [ [ "image_nchw = image_chw.unsqueeze(0)\nprint(image_nchw.shape)", "torch.Size([1, 3, 32, 32])\n" ] ], [ [ "- Now that we have the image in the right format, we can feed it to our classifier:", "_____no_output_____" ] ], [ [ "with torch.no_grad(): # since we don't need to backprop\n logits = lightning_model(image_nchw)\n probas = torch.softmax(logits, axis=1)\n predicted_label = torch.argmax(probas)", "_____no_output_____" ], [ "int_to_str = {\n 0: 'airplane',\n 1: 'automobile',\n 2: 'bird',\n 3: 'cat',\n 4: 'deer',\n 5: 'dog',\n 6: 'frog',\n 7: 'horse',\n 8: 'ship',\n 9: 'truck'}", "_____no_output_____" ], [ "print(f'Predicted label: {int_to_str[predicted_label.item()]}')\nprint(f'Class-membership probability {probas[0][predicted_label]*100:.2f}%')", "Predicted label: airplane\nClass-membership probability 99.54%\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb231eeed5afc248a33e1879648eb1c0a01af167
218,334
ipynb
Jupyter Notebook
figure_3.ipynb
surchs/ASD_high_risk_endophenotype_code_supplement
c9a1544c507fc46c48824968eecf21b7783012c9
[ "CC-BY-4.0" ]
null
null
null
figure_3.ipynb
surchs/ASD_high_risk_endophenotype_code_supplement
c9a1544c507fc46c48824968eecf21b7783012c9
[ "CC-BY-4.0" ]
null
null
null
figure_3.ipynb
surchs/ASD_high_risk_endophenotype_code_supplement
c9a1544c507fc46c48824968eecf21b7783012c9
[ "CC-BY-4.0" ]
null
null
null
210.950725
33,120
0.904344
[ [ [ "# Investigate HPS profile", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import sys\nsys.path.append('/home/surchs/Repositories/abide_univariate/')", "_____no_output_____" ], [ "import asdfc\nimport pandas as pd\nimport scipy as sp\nimport numpy as np\nimport patsy as pat\nimport nibabel as nib\nimport pathlib as pal\nimport seaborn as sbn\nimport matplotlib as mpl\nfrom matplotlib import gridspec\nimport nilearn.input_data as nii\nfrom nilearn import plotting as nlp\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap", "_____no_output_____" ], [ "root_p = pal.Path('/home/surchs/Repositories/autismclassification/20190524_Validation_Data/')\nmodel1_p = root_p / 'validation_net_split_1_model_1_combined_p_values.tsv'\npheno_valid_p = root_p / 'ABIDE_2_Pheno_PSM_matched_ados.tsv'\nresid_valid_p = root_p / 'abide_2_validation_residuals_regressed_with_abide_1.npy'\nseed_valid_p = root_p / 'Sebastian Urchs - abide_2_seed_maps_no_cereb_for_hien.npy'\n\nmask_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcellations/MIST_mask_nocereb.nii.gz'\natlas_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcellations/MIST_20_nocereb.nii.gz'\nlabels_p = '/home/surchs/Repositories/abide_univariate/data/ATLAS/MIST/Parcel_Information/MIST_20_nocereb.csv'\n\ncompleted_pheno_p = root_p / 'completed_abide_2_validation.tsv'\n\nfigure_p = pal.Path('/home/surchs/VM_Transfer/HCS_paper_figures/fig4/')\nif not figure_p.is_dir():\n figure_p.mkdir(parents=True)", "_____no_output_____" ], [ "p_test = '/home/surchs/Repositories/abide_univariate/data/pheno/ABIDEII_Composite_Phenotypic.csv'\np1_test = '/home/surchs/Repositories/abide_univariate/data/pheno/ABIDEII_Composite_Phenotypic.csv'\n\npp = pd.read_csv(p_test, encoding = \"ISO-8859-1\")\npp1 = pd.read_csv(p1_test, encoding = \"ISO-8859-1\")", "_____no_output_____" ], [ "def 
mm2in(mm):\n return mm/25.4", "_____no_output_____" ] ], [ [ "## Load and preparation", "_____no_output_____" ] ], [ [ "pheno_valid = pd.read_csv(pheno_valid_p, sep='\\t')\nmodel1 = pd.read_csv(model1_p, sep='\\t')\nmodel1.rename(columns={'V1': 'p_ASD', 'V2': 'p_TDC'}, inplace=True)\nresid_valid = np.load(resid_valid_p)\nseed_valid = np.load(resid_valid_p)\n\nlabels = pd.read_csv(labels_p, sep=';')\nmask_i = nib.load(str(mask_p))\natlas_i = nib.load(str(atlas_p))\nvoxel_masker = nii.NiftiMasker(mask_img=mask_i, standardize=False)\nvoxel_masker.fit()", "_____no_output_____" ] ], [ [ "# Regress the seed maps locally\nregressors = ' + '.join(['SITE_ID', 'AGE_AT_SCAN', 'fd_scrubbed'])\ndesign_matrix = pat.dmatrix(regressors, data=pheno_valid)\nresiduals = asdfc.stats.nuisance_correction(seed_valid, design_matrix, n_jobs=4)", "_____no_output_____" ] ], [ [ "# These need to be the values because the model 1 dataframe starts with index 1\npheno_valid.loc[:, 'is_hps'] = ((model1.p_ASD>0.2) & (model1.p_TDC<=0.2)).values\npheno_valid.loc[:, 'p_ASD'] = model1.loc[:, 'p_ASD'].values\npheno_valid.loc[:, 'p_TDC'] = model1.loc[:, 'p_TDC'].values", "_____no_output_____" ], [ "# Compute actual indices to avoid trusting the DF indices\nhps_idx = np.where(pheno_valid.is_hps)[0]\nnon_hps_idx = np.where(~pheno_valid.is_hps)\nnon_hps_asd_idx = np.where((pheno_valid.is_hps) & (pheno_valid.DX_GROUP=='Autism'))[0]\nnon_hps_con_idx = np.where((pheno_valid.is_hps) & (pheno_valid.DX_GROUP=='Control'))[0]", "_____no_output_____" ] ], [ [ "## Describe symptom profile\n- with ADOS raw totals\n- with ADOS raw domain totals\n- with SRS raw totals T (standardized)", "_____no_output_____" ] ], [ [ "p_hps_asd = pheno_valid.query('is_hps and DX_GROUP==\"Autism\"')\np_hps_con = pheno_valid.query('is_hps and DX_GROUP==\"Control\"')\np_idi_asd = pheno_valid.query('not is_hps and DX_GROUP==\"Autism\"')\np_idi_con = pheno_valid.query('not is_hps and DX_GROUP==\"Control\"')", "_____no_output_____" ], [ "def 
get_counts(pheno, var, ados_values=range(1,11)):\n values = pheno[var].value_counts().keys().tolist()\n counts = pheno[var].value_counts().tolist()\n # Compute counts for each value in case there aren't\n return [counts[values.index(val)] if val in values else 0 for val in ados_values]", "_____no_output_____" ], [ "def get_relative_counts(counts):\n return [count/sum(counts) for count in counts]", "_____no_output_____" ] ], [ [ "## Boxplot + Swarmplot raw ADOS TOTAL", "_____no_output_____" ] ], [ [ "asd_idi_color = 'lightcoral'\nasd_hps_color = 'firebrick'\ncon_idi_color = 'lightgreen'\ncon_hps_color = 'darkgreen'", "_____no_output_____" ], [ "from matplotlib import rcParams\nrcParams['font.sans-serif'] = ['Arial']", "_____no_output_____" ], [ "dpi = 300\nscale_factor = 1\nheight = 55 * scale_factor\nwidth = 55 * scale_factor\n\nf = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=True)\nax = f.add_subplot(111)\n\nsbn.boxplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('not is_hps'), ax=ax, \n palette=['white', 'white'], saturation=1, linewidth=1.2)\nsbn.stripplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('not is_hps'), size=4,ax=ax,\n palette=['#8F09A7', 'lightgrey'], linewidth=0, alpha=0.3, jitter=0.25)\nsbn.swarmplot(x='DX_GROUP', y='ADOS_RAW_TOTAL_combined', data=pheno_valid.query('is_hps'), size=8,ax=ax,\n palette=['#8F09A7', 'lightgrey'], linewidth=1.5, edgecolor='#FE9D08')\n\n#for i,box in enumerate(ax.artists):\n# box.set_edgecolor('black')\n# # iterate over whiskers and median lines\n# for j in range(6*i,6*(i+1)):\n# ax.lines[j].set_color('black')\n\nax.set_xticklabels(['ASD', 'NTC'], fontsize=10);\nax.set_xlabel('Diagnosis', fontsize=10)\n\nax.set_yticks(np.arange(0,26,5))\nax.set_ylim([-1, 26])\nax.set_ylabel('raw ADOS total', fontsize=10)\nf.savefig(figure_p / 'fig4_boxplot_rawADOS_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\nf.savefig(figure_p / 
'fig4_boxplot_rawADOS_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)", "_____no_output_____" ], [ "# Make a stupid legend by hand\ndpi = 300\nscale_factor = 1\nheight = 30 * scale_factor\nwidth = 30 * scale_factor\n\nf = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=False)\nax = f.add_subplot(111)\n\nax.scatter(0,-0.5, s=100, color='lightgrey', edgecolor='#FE9D08', linewidth=0)\nax.scatter(0,0.5, s=100, color='#8F09A7', edgecolor='#FE9D08', linewidth=0)\nax.scatter(0,1.5, s=100, color='white', edgecolor='#FE9D08', linewidth=1.5)\nax.set_ylim([-1,2])\nax.set_xlim([-1,4])\n#ax.set_xlim([-0.2,2])\nax.set_axis_off()\n\nax.annotate(f'identified by HRS', xy=(0.5,0.82), xytext=(0, 0), ha='left', va='center', \n xycoords='axes fraction', textcoords='offset points', fontsize=10)\nax.annotate(f'ASD', xy=(0.5,0.49), xytext=(0, 0), ha='left', va='center', \n xycoords='axes fraction', textcoords='offset points', fontsize=10)\nax.annotate(f'NTC', xy=(0.5,0.16), xytext=(0, 0), ha='left', va='center', \n xycoords='axes fraction', textcoords='offset points', fontsize=10)\n \n\nf.savefig(figure_p / 'fig4_boxplot_legend_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\nf.savefig(figure_p / 'fig4_boxplot_legend_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)", "_____no_output_____" ] ], [ [ "## Boxplot + Swarmplot proxy ADOS CSS", "_____no_output_____" ] ], [ [ "dpi = 300\nscale_factor = 1\nheight = 55 * scale_factor\nwidth = 55 * scale_factor\n\nf = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=True)\nax = f.add_subplot(111)\n\n\nsbn.boxplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('not is_hps'), ax=ax, \n palette=['white', 'white'], saturation=1, linewidth=1.2)\nsbn.stripplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('not is_hps'), size=4,ax=ax,\n palette=['#8F09A7', 'lightgrey'], linewidth=0, alpha=0.17, 
jitter=0.3)\nsbn.swarmplot(x='DX_GROUP', y='ADOS_CSS_proxy_fully', data=pheno_valid.query('is_hps'), size=8,ax=ax,\n palette=['#8F09A7', 'lightgrey'], linewidth=1.5, edgecolor='#FE9D08')\n\n#for i,box in enumerate(ax.artists):\n# box.set_edgecolor('black')\n# # iterate over whiskers and median lines\n# for j in range(6*i,6*(i+1)):\n# ax.lines[j].set_color('black')\n\nax.set_xticklabels(['ASD', 'NTC'], fontsize=10);\nax.set_xlabel('Diagnosis', fontsize=10)\nax.set_yticks(np.arange(1,11))\nax.set_ylabel('proxy ADOS CSS', fontsize=10)\n\n\nf.savefig(figure_p / 'fig4_boxplot_proxyADOS_new.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\nf.savefig(figure_p / 'fig4_boxplot_proxyADOS_new.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)", "_____no_output_____" ] ], [ [ "dpi = 300\nscale_factor = 1\nheight = 100 * scale_factor\nwidth = 160* scale_factor\n\n# Setup\nrad = 10\nstep = (rad/220)*2.5\n\ny_asd = [0.01, 0.07, 0.13]\ny_asd_lim = [0, 0.14]\ny_con = [0.1, 0.3, 0.5]\ny_con_lim = [0, 0.6]\n\nfs_label = 10\nfs_tick = 8\n\n# Make the figure\nf = plt.figure(figsize=(mm2in(width),mm2in(height)), constrained_layout=True)\ngs = gridspec.GridSpec(nrows=2, ncols=1, hspace=0.2)\n\nax_asd = f.add_subplot(gs[0])\nax_con = f.add_subplot(gs[1])\n\n# Plot the barplots\nax_asd.bar(ados_values, total_airc, color=asd_idi_color, label='unselected ASD')\nax_con.bar(ados_values, total_circ, color=con_idi_color, label='unselected NTC')\n\n# Set up the axes\nax_asd.set_xticks(ados_values);\nax_asd.set_xticklabels(ados_values, fontsize=fs_tick);\n\nax_con.set_xticks(ados_values);\nax_con.set_xticklabels(ados_values, fontsize=fs_tick);\nax_con.set_xlabel('raw ADOS total', fontsize=fs_label, labelpad=5)\nax_con.set_ylabel('% of sample', 
fontsize=fs_label)\nax_con.yaxis.set_label_coords(-0.1,1.1)\n\nax_asd.set_ylim(y_asd_lim)\nax_con.set_ylim(y_con_lim)\n\nax_asd.set_yticks(y_asd)\nax_con.set_yticks(y_con)\n\nax_asd.spines['right'].set_visible(False)\nax_asd.spines['top'].set_visible(False)\n\nax_con.spines['right'].set_visible(False)\nax_con.spines['top'].set_visible(False)\n\n# Get the range of the y axis (sorry)\nasd_yr = np.subtract(*ax_asd.get_ylim()[::-1])\ncon_yr = np.subtract(*ax_con.get_ylim()[::-1])\nasd_step = step*asd_yr\ncon_step = step*con_yr\n\n# ASD HCS\nfor val, count in zip(ados_values, total_ahc):\n for i in range(count):\n ax_asd.plot(val,i*asd_step+asd_step/2, marker='o', color=asd_hps_color, \n linestyle='none', fillstyle='full',markersize=rad, label='HCS_ASD')\n\n# Control HCS\nfor val, count in zip(ados_values, total_chc):\n for i in range(count):\n ax_con.plot(val,i*con_step+con_step/2, marker='o', color=con_hps_color, \n linestyle='none', fillstyle='full',markersize=rad, label='HCS_NTC')\n \n \n# Build a legend\nh1, l1 = ax_asd.get_legend_handles_labels()\nh2, l2 = ax_con.get_legend_handles_labels()\n# Only show one element\nlgd = ax_con.legend(h1[-2:]+h2[-2:], l1[-2:]+l2[-2:], loc='center left', bbox_to_anchor=(1.02, 1.1),\n ncol=1, fancybox=False, shadow=False, borderpad=0, fontsize=fs_label);\n\nlgd.get_frame().set_linewidth(0.0)\n\nf.savefig(figure_p / 'Fig4_HCS_profile_ADOS_raw_total.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\nf.savefig(figure_p / 'Fig4_HCS_profile_ADOS_raw_total.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)", "_____no_output_____" ] ], [ [ "# The brain profile\n- go through each seed region\n- average the HPS individuals\n- show the map", "_____no_output_____" ] ], [ [ "# Networks that we are actually using:\n# These are in R notation and thus start with 1\nused_networks = np.array([18, 3, 9, 5, 16, 1, 13, 4, 12])-1\nnetworks_not_used = np.array([2, 7, 10, 11, 6, 17, 8, 14, 15])-1\nused_numbers = 
[labels.iloc[n].roi for n in used_networks]\nnot_used_numbers = [labels.iloc[n].roi for n in networks_not_used]", "_____no_output_____" ], [ "atlas = voxel_masker.transform(atlas_i)\nnew_atlas = np.zeros_like(atlas)\nfor n in used_numbers:\n new_atlas[atlas==n] = 1\nnew_atlas_i = voxel_masker.inverse_transform(new_atlas)", "_____no_output_____" ], [ "def mm2in(mm):\n return mm/25.4", "_____no_output_____" ], [ "def seed_map_overlay(width, height, img, over_img, coords, path):\n fig = plt.figure(figsize=(width, height), constrained_layout=True)\n ax = fig.add_subplot(111)\n gp = nlp.plot_stat_map(img, cut_coords=coords, axes=ax, colorbar=False)\n gp.add_contours(over_img, filled=False, alpha=1, levels=[0.5], colors='limegreen', linewidths=2)\n fig.savefig(path, bbox_inches='tight', dpi=300, transparent=True, pad_inches=0)", "_____no_output_____" ] ], [ [ "# Repeat colors for members of the same cluster 7 group\nnet_colors = [sbn.xkcd_rgb[col] for col in \n ['pastel blue'] + ['neon blue']*4 + ['dark sky blue'] + ['blurple']*3]", "_____no_output_____" ] ], [ [ "net_colors = ['#FFB647', # Light Orange\n '#FF907E', '#FF6047', '#BF1A00', '#881200', # 2nd group, shades of red\n '#BF7400', # Burnt Orange\n '#FFEC7E', '#FFE347', '#BFA200', # 4th group, shades of yellow\n '#6C98CA', '#3B72B0', '#09407D', # 5th group (prot), shades of blue\n '#9EEB75', '#77E13E', '#3AA600', # 6th group (prot), shades of green\n '#9D6ECD', '#783CB5', '#440981'] # 7th group (prot), shades of purple", "_____no_output_____" ], [ "import matplotlib.patches as patches", "_____no_output_____" ], [ "dpi=300\nscale_factor = 1\nheight = 20 * scale_factor\nwidth = 55 * scale_factor\n\n\ncoords = [(-2, 24, 24),\n (-2, 32, -18),\n (-22, -4, -16),\n (-41, -6, -34),\n (-48, -30, -4),\n (-18, -2, 4),\n (0, -57, 32),\n (0, 45, 0),\n (4, 42, 40)]\n\nfor fig_idx, nid in enumerate(used_networks):\n\n fig = plt.figure(figsize=(mm2in(width),mm2in(height)))\n gs = gridspec.GridSpec(nrows=1, ncols=1)\n ax_img = 
fig.add_subplot(gs[0])\n\n a = np.mean(resid_valid[hps_idx, :, nid], 0)\n img = voxel_masker.inverse_transform(a)\n # Make an image for just this seed region\n seed_vec = (voxel_masker.transform(atlas_i)==labels.iloc[nid].roi).squeeze()\n seed_img = voxel_masker.inverse_transform(seed_vec)\n \n gp = nlp.plot_stat_map(img, cut_coords=coords[fig_idx], axes=ax_img, \n colorbar=False, draw_cross=False, annotate=False, black_bg=False)\n gp.add_contours(seed_img, filled=False, alpha=1, levels=[0.5], colors=net_colors[fig_idx], linewidths=1)\n \n net_name = labels.iloc[nid][\"label\"] \n \n ax_img.set_axis_on()\n ax_img.patch.set_alpha(0)\n for spine in ax_img.spines.values():\n spine.set_visible(False)\n ax_img.set_xticks([])\n ax_img.set_yticks([])\n \n ax_img.annotate(f'{net_name}', xy=(0,0), xytext=(17, -5), ha='left', va='top',\n xycoords='axes fraction', textcoords='offset points', fontsize=10)\n ax_img.add_patch(plt.Rectangle((0,-0.32),0.07, 0.2,facecolor=net_colors[fig_idx],\n edgecolor='black', linewidth = 1, clip_on=False))\n \n fig.savefig(figure_p / f'hcs_avg_{net_name}_outline.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)\n fig.savefig(figure_p / f'hcs_avg_{net_name}_outline.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)", "_____no_output_____" ] ], [ [ "dpi=300\nscale_factor = 1\nheight = 20 * scale_factor\nwidth = 55 * scale_factor\n\n\ncoords = [(-2, 24, 24),\n (-2, 32, -18),\n (-22, -4, -16),\n (-41, -6, -34),\n (-48, -30, -4),\n (-18, -2, 4),\n (0, -57, 32),\n (0, 45, 0),\n (4, 42, 40)]\n\nfor fig_idx, nid in enumerate(used_networks):\n\n fig = plt.figure(figsize=(mm2in(width),mm2in(height)))\n gs = gridspec.GridSpec(nrows=1, ncols=1)\n ax_img = fig.add_subplot(gs[0])\n\n a = np.mean(resid_valid[hps_idx, :, nid], 0)\n img = voxel_masker.inverse_transform(a)\n \n gp = nlp.plot_stat_map(img, cut_coords=coords[fig_idx], axes=ax_img, \n colorbar=False, draw_cross=False, annotate=False, black_bg=False)\n \n 
ax_img.set_axis_on()\n ax_img.patch.set_alpha(0)\n for spine in ax_img.spines.values():\n spine.set_visible(True)\n spine.set_edgecolor(net_colors[fig_idx])\n spine.set_linewidth(3)\n ax_img.set_xticks([])\n ax_img.set_yticks([])\n\n net_name = labels.iloc[nid][\"label\"]\n fig.savefig(figure_p / f'hcs_avg_{net_name}_box.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)\n fig.savefig(figure_p / f'hcs_avg_{net_name}_box.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)", "_____no_output_____" ], [ "dpi=300\nscale_factor = 1\nheight = 20 * scale_factor\nwidth = 55 * scale_factor\n\n\ncoords = [(-2, 24, 24),\n (-2, 32, -18),\n (-22, -4, -16),\n (-41, -6, -34),\n (-48, -30, -4),\n (-18, -2, 4),\n (0, -57, 32),\n (0, 45, 0),\n (4, 42, 40)]\n\nfor fig_idx, nid in enumerate(used_networks):\n\n fig = plt.figure(figsize=(mm2in(width),mm2in(height)))\n gs = gridspec.GridSpec(nrows=1, ncols=1)\n ax_img = fig.add_subplot(gs[0])\n\n a = np.mean(resid_valid[hps_idx, :, nid], 0)\n img = voxel_masker.inverse_transform(a)\n \n gp = nlp.plot_stat_map(img, cut_coords=coords[fig_idx], axes=ax_img, \n colorbar=False, draw_cross=False, annotate=False, black_bg=False)\n\n net_name = labels.iloc[nid][\"label\"]\n fig.savefig(figure_p / f'hcs_avg_{net_name}_blank.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)\n fig.savefig(figure_p / f'hcs_avg_{net_name}_blank.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)", "_____no_output_____" ] ], [ [ "## Plot the brain atlas with corresponding colors", "_____no_output_____" ] ], [ [ "order = np.array([18, 3, 9, 5, 16, 1, 13, 4, 12, 2, 7, 10, 11, 6, 17, 8, 14,\n 15])-1", "_____no_output_____" ], [ "# Manual partition\npart_dict = {1: [18],\n 2: [3, 9, 5, 16],\n 3: [1],\n 4: [13, 4, 12],\n 5: [2, 7, 10],\n 6: [11, 6, 17],\n 7: [8, 14, 15]\n }\nmanual_part = np.array([p for i in order for p, v in part_dict.items() if i+1 in v])", "_____no_output_____" ], [ "atlas = 
atlas_i.get_fdata().astype(int)\natlas_masked = np.zeros(shape=(atlas.shape))\natlas_ens1 = np.zeros(shape=(atlas.shape))\natlas_ens2 = np.zeros(shape=(atlas.shape))\nidx = 1\nfor i in np.arange(1,8): \n for j in part_dict[i]:\n # convert j to real j\n real_j = labels.iloc[j-1].roi\n if i <=4:\n atlas_ens1[atlas==real_j] = idx\n atlas_masked[atlas==real_j] = 1\n idx +=1\n else:\n atlas_ens2[atlas==real_j] = i\natlas_masked_i = nib.Nifti1Image(atlas_masked, affine=atlas_i.affine, header=atlas_i.header)\natlas_ens1_i = nib.Nifti1Image(atlas_ens1, affine=atlas_i.affine, header=atlas_i.header)", "_____no_output_____" ], [ "cmp = LinearSegmentedColormap.from_list('lala', net_colors, N=18)", "_____no_output_____" ], [ "cut_coords = (10,5,0)\ndpi=300\nscale_factor = 1\nheight = 20 * scale_factor\nwidth = 55 * scale_factor\n\nfig = plt.figure(figsize=(mm2in(width),mm2in(height)))\nax = fig.add_subplot(111)\ngp = nlp.plot_roi(atlas_ens1_i, cmap=cmp, cut_coords=cut_coords, axes=ax, vmin=1, vmax=18, draw_cross=False, annotate=False);\ngp.add_contours(atlas_masked_i, filled=False, alpha=1, levels=[0.5], colors='black', linewidths=0.5)\nfig.savefig('/home/surchs/VM_Folders/HCS_paper_figures/fig4_ensemble1_maps_for_montage.png', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\nfig.savefig('/home/surchs/VM_Folders/HCS_paper_figures/fig4_ensemble1_maps_for_montage.pdf', bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)\n\nfig.savefig(figure_p / 'fig4_ensemble1_maps_for_montage.png', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)\nfig.savefig(figure_p / 'fig4_ensemble1_maps_for_montage.pdf', bbox_inches='tight', dpi=dpi, transparent=False, pad_inches=0)", "_____no_output_____" ], [ "cut_coords = (0,0,-10)\ndpi=300\nscale_factor = 1\nheight = 40 * scale_factor\nwidth = 110 * scale_factor\n\nfig = plt.figure(figsize=(mm2in(width),mm2in(height)))\nax = fig.add_subplot(111)\ngp = nlp.plot_roi(atlas_ens1_i, cmap=cmp, cut_coords=cut_coords, 
axes=ax, vmin=1, vmax=18, draw_cross=False, annotate=False);\ngp.add_contours(atlas_masked_i, filled=False, alpha=1, levels=[0.5], colors='black', linewidths=0.5)", "_____no_output_____" ] ], [ [ "## Color bar\n", "_____no_output_____" ] ], [ [ "dpi=300\nscale_factor = 1\nheight = 20 * scale_factor\nwidth = 1.5 * scale_factor\n\na = np.array([[0,1]])\nplt.figure(figsize=(mm2in(width),mm2in(height)))\nimg = plt.imshow(a, cmap=nlp.cm.cold_hot)\nplt.gca().set_visible(False)\ncax = plt.axes([0.1, 0.2, 0.8, 0.6])\nplt.colorbar(cax=cax)\ncax.set_ylabel('a.u.', fontsize=10)\ncax.set_axis_on()\ncax.set_yticklabels(('', '', ''), fontsize=8);\nplt.savefig(figure_p / \"colorbar.pdf\", bbox_inches='tight', dpi=dpi, transparent=True, pad_inches=0)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "raw", "code", "markdown", "code", "markdown", "code", "markdown", "code", "raw", "markdown", "code", "raw", "code", "raw", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "raw" ], [ "code", "code", "code" ], [ "raw", "raw" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb2329c3e48eb933ad60e409ed9920c29ed607ee
5,407
ipynb
Jupyter Notebook
hw_controlOfEx.ipynb
anachkheidze/ComputationalThinking_Gov_1
a540a4ced9e0964bdc7c029a357b2f13e96b0491
[ "MIT" ]
null
null
null
hw_controlOfEx.ipynb
anachkheidze/ComputationalThinking_Gov_1
a540a4ced9e0964bdc7c029a357b2f13e96b0491
[ "MIT" ]
null
null
null
hw_controlOfEx.ipynb
anachkheidze/ComputationalThinking_Gov_1
a540a4ced9e0964bdc7c029a357b2f13e96b0491
[ "MIT" ]
null
null
null
24.802752
136
0.402071
[ [ [ "import pandas", "_____no_output_____" ], [ "names=[\"Tomás\", \"Pauline\", \"Pablo\", \"Bjork\",\"Alan\",\"Juana\"]\nwoman=[False,True,False,False,False,True]\nages=[32,33,28,30,32,27]\ncountry=[\"Chile\", \"Senegal\", \"Spain\", \"Norway\",\"Peru\",\"Peru\"]\neducation=[\"Bach\", \"Bach\", \"Master\", \"PhD\",\"Bach\",\"Master\"]", "_____no_output_____" ], [ "d={'names':names, 'woman':woman, 'ages':ages, 'country':country, 'education':education}", "_____no_output_____" ], [ "friends=pandas.DataFrame.from_dict(d)\nfriends", "_____no_output_____" ], [ "#Implement a for loop to get the count of men that have a Bach degree in the data frame. I recommend the use of zip (somwehere)\n\ncountofmen=0\n\nfor woman,education in zip(friends.woman, friends.education):\n if (woman==False) & (education=='Bach'):\n countofmen +=1\ncountofmen", "_____no_output_____" ], [ "#Implement a for loop to get the count of people whose current age is an even number\n\ncountofage=0\n\nfor ages in friends.ages:\n if ages%2==0:\n countofage+=1\n \ncountofage\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
cb233229db10c1926dbb08e9ed5563f3e9cf4141
319,609
ipynb
Jupyter Notebook
Edgeworth Box.ipynb
yyuta0001/Plotting_Intro_Micro_Econ
4ca5ac220c7e70d0c6696f09f04cd7ec03315bd5
[ "MIT" ]
4
2018-10-24T14:54:54.000Z
2020-02-09T22:06:27.000Z
Edgeworth Box.ipynb
yyuta0001/Plotting_Intro_Micro_Econ
4ca5ac220c7e70d0c6696f09f04cd7ec03315bd5
[ "MIT" ]
null
null
null
Edgeworth Box.ipynb
yyuta0001/Plotting_Intro_Micro_Econ
4ca5ac220c7e70d0c6696f09f04cd7ec03315bd5
[ "MIT" ]
null
null
null
940.026471
92,026
0.937743
[ [ [ "## Drawing Edgeworth Box", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n%matplotlib inline", "_____no_output_____" ], [ "def Edgeworth_box(u1, u2, util1, util2, MRS1, MRS2, ttl, ttl_margin= 1, top=0.7, fname=None):\n l1 = 0.00000001\n x1 = np.arange(l1, u1, 0.01)\n x2 = np.arange(l1, u2, 0.01)\n X1, X2 = np.meshgrid(x1, x2)\n\n V1 = util1(X1, X2)\n V2 = util2(u1-X1, u2-X2)\n \n x, y = contract_curve(u1,u2, MRS1,MRS2,num_indiff = 10)\n xr = u1-x[::-1] # to use the same contours for Consumer 2.\n yr = u2-y[::-1] # to use the same contours for Consumer 2.\n clev1 = util1(x,y)\n clev2 = util2(xr, yr)\n\n Draw_Edgeworth_box(u1, u2, X1, X2, V1, V2, clev1,clev2, ttl, ttl_margin, top,contract1=x, contract2=y)\n if fname != None:\n plt.savefig(fname)", "_____no_output_____" ], [ "from scipy.optimize import fsolve\nfrom scipy.optimize import fminbound\n\ndef contract_curve(u1, u2, MRS1, MRS2, num_indiff = 10):\n xs = np.linspace(0,u1,num_indiff+2)\n xs = xs[1:-1]\n ys = []\n for x in xs:\n y = fminbound(lambda y: (MRS1(x,y) - MRS2(u1-x, u2-y))**2, 0, u2)\n ys.append(min(max(np.asscalar(y),0),u2))\n ys = np.asarray(ys)\n return xs, ys", "_____no_output_____" ], [ "def Draw_Edgeworth_box(u1, u2,X1, X2, V1, V2, clev1,clev2, ttl=[], ttl_margin= 1, top=0.7,contract1=[], contract2=[]):\n # u1: the total amount of good 1\n # u2: the total amount of good 2\n # V1: levels of utility for consumer 1\n # V2: levels of utility for consumer 2\n # clev1: levels of contours (for consumer 1) you are interested in\n # clev2: levels of contours (for consumer 2) you are interested in\n # ttl: a title\n # ttl_margin: space added for the title\n # top: a location where the graph starts (if top=1, the title and the graph will overlap)\n xtcks = np.arange(0, u1+1)\n ytcks = np.arange(0,u2+1)\n\n # Adjustment of the title is bit annoying when we try to set xlabel on the top\n if len(ttl)>0:\n fig = 
plt.figure(figsize = (u1, u2/top))\n ax1 = fig.add_subplot(1,1,1)\n plt.subplots_adjust(top=top)\n fig.suptitle(ttl)\n #plt.title(ttl)\n else:\n fig = plt.figure(figsize = (u1, u2))\n\n col1 = 'tab:red'\n col2 = 'tab:blue'\n \n plt.contour(X1, X2, V1,clev1, linewidths = 1, colors=col1, linestyles = 'dashed')\n plt.contour(X1, X2, V2,clev2, linewidths = 1, colors = col2, linestyles = 'dashed')\n plt.xlim([0,u1])\n plt.ylim([0,u2])\n plt.xlabel('$x_{1,1}$', color = col1, fontsize = 13)\n plt.ylabel('$x_{1,2}$', color = col1, fontsize = 13)\n plt.xticks(xtcks, color = col1)\n plt.yticks(ytcks, color = col1)\n xplt = np.linspace(0,u1,12)\n yplt = np.linspace(0,u2,12)\n if len(contract1)>0:\n xplt[1:-1]=contract1\n yplt[1:-1]=contract2\n plt.plot(xplt,yplt, 'k--', alpha = 0.7)\n\n\n ax1 = plt.gca()\n ax2 = plt.twinx(ax1)\n plt.ylabel('$x_{2,2}$', color=col2 , fontsize = 13)\n plt.yticks(ytcks,ytcks[::-1], color = col2)\n\n # It's a bit hacky, but the following looks an easy way. \n ax3 = plt.twiny(ax1)\n plt.xlabel('$x_{2,1}$', color=col2, fontsize = 13)\n plt.xticks(xtcks,xtcks[::-1], color = col2)\n\n return fig", "_____no_output_____" ] ], [ [ "### Edgeworth Box under Homothetic Preferences\n- ##### Q: Is a contract curve of homothetic preferences always a diagonal line of the Edgeworth Box ?\n- ##### A: If preferences are identical and strictly convex (i.e., strictly (quasi-) concave utility functions), Yes", "_____no_output_____" ], [ "##### Example with CES\n(with the same weights on two goods. 
)", "_____no_output_____" ] ], [ [ "def CES(x,y,dlt):\n return (x**dlt)/dlt + (y**dlt)/dlt\ndef Cobb_Douglas(x, y, al, bet):\n return (x**al)*(y**bet)\n\ndef MRS_CES(x,y,dlt):\n return (x**(dlt-1))/(y**(dlt-1))\ndef MRS_CD(x, y, al, bet):\n return (al*y)/(bet*x)", "_____no_output_____" ], [ "dlt1 = 0.5\ndlt2 = dlt1\nEdgeworth_box(u1=6, u2=3,\n util1 = lambda x,y: CES(x, y, dlt1),\n util2 =lambda x,y: CES(x, y, dlt2), \n MRS1 =lambda x, y: MRS_CES(x,y,dlt1),\n MRS2 =lambda x, y: MRS_CES(x,y,dlt2),\n ttl='$u_1=x^{\\delta_1}/{\\delta_1} + y^{\\delta_1}/{\\delta_1}$ : ${\\delta_1}=$' + '{}\\n'.format(dlt1)\n +'$u_2=x^{\\delta_2}/\\delta_2 + y^{\\delta_2}/\\delta_2$ : ${\\delta_2}=$' + '{}\\n'.format(dlt2),\n ttl_margin= 1, top=0.7, fname='Edgeworth_identical1.png')", "_____no_output_____" ], [ "dlt1 = 0.5\ndlt2 = -3\nEdgeworth_box(u1=6, u2=3,\n util1 = lambda x,y: CES(x, y, dlt1),\n util2 =lambda x,y: CES(x, y, dlt2), \n MRS1 =lambda x, y: MRS_CES(x,y,dlt1),\n MRS2 =lambda x, y: MRS_CES(x,y,dlt2),\n ttl='$u_1=x^{\\delta_1}/{\\delta_1} + y^{\\delta_1}/{\\delta_1}$ : ${\\delta_1}=$' + '{}\\n'.format(dlt1)\n +'$u_2=x^{\\delta_2}/\\delta_2 + y^{\\delta_2}/\\delta_2$ : ${\\delta_2}=$' + '{}'.format(dlt2),\n ttl_margin= 1, top=0.7, fname='Edgeworth_not_identical1.png')", "_____no_output_____" ] ], [ [ "In the above graph, weights on two goods are the same, but complementarity for each consumer are different. 
(Consumer 2 feels higher complementaryty than Consumer 1) \n\n##### Example with Cobb-Douglas\n(With different weights for two goods.)", "_____no_output_____" ] ], [ [ "al = 0.3\nbet = 0.7\nEdgeworth_box(u1=5, u2=5,\n util1 = lambda x,y: Cobb_Douglas(x, y, al,bet),\n util2 =lambda x,y: Cobb_Douglas(x, y, al,bet), \n MRS1 =lambda x, y: MRS_CD(x,y, al,bet),\n MRS2 =lambda x, y: MRS_CD(x,y, al,bet),\n ttl=r'$u_1=x^{\\alpha_1}y^{\\beta_1} : {\\alpha_1}$'+ '={}, '.format(al) + r'$\\beta_1$'+ '={}\\n'.format(bet)\n +r'$u_2=x^{\\alpha_2}y^{\\beta_2} : {\\alpha_2}$'+ '={}, '.format(al) + r'$\\beta_2$'+ '={}'.format(bet),\n ttl_margin= 1, top=0.8, fname='Edgeworth_identical2.png')", "_____no_output_____" ] ], [ [ "Even if goods 2 is more important, the contract curve is still diagonal since prefernces are identical and homothetic (and strictly convex) \n\n\nIf we use different weigts for two consumers, then we have a different contract curve.", "_____no_output_____" ] ], [ [ "al = 0.3\nbet = 0.7\nasym = 0.4\nEdgeworth_box(u1=5, u2=5,\n util1 = lambda x,y: Cobb_Douglas(x, y, al,bet),\n util2 =lambda x,y: Cobb_Douglas(x, y, al+asym,bet-asym), \n MRS1 =lambda x, y: MRS_CD(x,y, al,bet),\n MRS2 =lambda x, y: MRS_CD(x,y, al+asym,bet-asym),\n ttl=r'$u_1=x^{\\alpha_1}y^{\\beta_1} : {\\alpha_1}$'+ '={:02.1f}, '.format(al) + r'$\\beta_1$'+ '={:02.1f}\\n'.format(bet)\n +r'$u_2=x^{\\alpha_2}y^{\\beta_2} : {\\alpha_2}$'+ '={:02.1f}, '.format(al+asym) + r'$\\beta_2$'+ '={:02.1f}'.format(bet-asym),\n ttl_margin= 1, top=0.8, fname='Edgeworth_not_identical2.png')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb233aee1051bc06c42e8bbac0f7a58845843b93
1,041,845
ipynb
Jupyter Notebook
nets/notebooks/guncontrol_daily_classifier.ipynb
andrei-comanescu/msc_thesis
4f94c216b6c879bc4e7f810891018f459eb3eac7
[ "MIT" ]
null
null
null
nets/notebooks/guncontrol_daily_classifier.ipynb
andrei-comanescu/msc_thesis
4f94c216b6c879bc4e7f810891018f459eb3eac7
[ "MIT" ]
null
null
null
nets/notebooks/guncontrol_daily_classifier.ipynb
andrei-comanescu/msc_thesis
4f94c216b6c879bc4e7f810891018f459eb3eac7
[ "MIT" ]
null
null
null
1,576.164902
1,020,644
0.962
[ [ [ "import os\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport pickle as pkl\nimport itertools\n\nfrom sklearn.manifold import TSNE\n\nimport stellargraph as sg\nfrom stellargraph import StellarGraph\nfrom stellargraph import globalvar\nfrom stellargraph.mapper import GraphSAGENodeGenerator\nfrom stellargraph.layer import GraphSAGE\n\nfrom tensorflow.keras import layers, optimizers, losses, metrics, Model\nfrom sklearn import preprocessing, feature_extraction, model_selection\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "### Read the graph and the data", "_____no_output_____" ] ], [ [ "data_ab = pd.read_csv(\"../../../generated_csvs/guncontrol_daily_polarities_base.csv\")\ngraph_ab = nx.read_gexf(\"../../../graph/guncontrol_daily_largest.gexf\")", "_____no_output_____" ] ], [ [ "### Divide the data into classes", "_____no_output_____" ] ], [ [ "# n_classes can be 2, 4, 6 or 8\nn_classes = 2\n\n# polarity_type can be either \"Polarity Neighbours\" or \"Polarity Following\"\npolarity_type = \"Polarity Neighbours\"", "_____no_output_____" ], [ "base_polarities = data_ab[polarity_type].dropna()\nbase_polarities[base_polarities > 2.42] = 2.42\nbase_polarities[base_polarities < -2.42] = -2.42\n\nif n_classes == 2:\n base_polarities = base_polarities.to_frame()\n base_polarities['Class'] = 0\n base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1\n \nelif n_classes == 4:\n base_polarities = base_polarities.to_frame()\n base_polarities['Class'] = 0\n\n base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1\n positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = 0.5)\n \n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles] = 2\n\n base_polarities['Class'][base_polarities[polarity_type] < 0] = 3\n negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = 0.5)\n \n 
base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles] = 4\n \n \nelif n_classes == 6:\n base_polarities = base_polarities.to_frame()\n base_polarities['Class'] = 0\n\n base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1\n positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = [0.33, 0.66])\n\n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[0]] = 2\n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[1]] = 3\n\n base_polarities['Class'][base_polarities[polarity_type] < 0] = 4\n negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = [0.33, 0.66])\n\n base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[1]] = 5\n base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[0]] = 6\n \n \nelif n_classes == 8:\n base_polarities = base_polarities.to_frame()\n base_polarities['Class'] = 0\n\n base_polarities['Class'][base_polarities[polarity_type] >= 0] = 1\n positive_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] >= 0], q = [0.25, 0.5, 0.75])\n\n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[0]] = 2\n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[1]] = 3\n base_polarities['Class'][base_polarities[polarity_type] >= positive_quantiles[2]] = 4\n\n base_polarities['Class'][base_polarities[polarity_type] < 0] = 5\n negative_quantiles = np.quantile(base_polarities[polarity_type][base_polarities[polarity_type] < 0], q = [0.25, 0.5, 0.75])\n\n base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[2]] = 6\n base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[1]] = 7\n base_polarities['Class'][base_polarities[polarity_type] <= negative_quantiles[0]] = 8", 
"/home/adc/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:8: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n" ] ], [ [ "### Get the features", "_____no_output_____" ] ], [ [ "# relabel nodes as indexes\nd = dict(zip(data_ab['0'].values, data_ab['0'].index))\ng2 = nx.relabel_nodes(graph_ab, d)", "_____no_output_____" ] ], [ [ "#### Use the node degrees as features", "_____no_output_____" ] ], [ [ "features_idx = np.load(\"../../../npy/guncontrol_daily/index.npy\", allow_pickle = True)\nfeatures_mat = np.load(\"../../../npy/guncontrol_daily/node_features_pca.npy\", allow_pickle = True)", "_____no_output_____" ], [ "map_nodes_to_features = {}\nfor idx, feature in zip(range(0, len(features_idx)), features_idx):\n map_nodes_to_features[d[feature]] = idx\n \nfor i in list(g2.nodes()):\n g2.nodes[i]['features'] = features_mat[map_nodes_to_features[i]]\n \nfor node in list(g2.nodes()):\n g2.node[node]['label'] = 'default'", "_____no_output_____" ] ], [ [ "#### Alternative: use the eigenvectors as features", "_____no_output_____" ] ], [ [ "with open(\"../../../pickle/guncontrol_daily_eig.pkl\", 'rb') as _p:\n _, eigvec = pkl.load(_p)\n \nmap_nodes_to_eigvec = {}\nfor idx, node_name in zip(range(0, len(list(graph_ab.nodes()))), list(graph_ab.nodes())):\n map_nodes_to_eigvec[d[node_name]] = idx\n \nfor i in list(g2.nodes()):\n g2.nodes[i]['features'] = eigvec[map_nodes_to_eigvec[i], 1:]\n \nfor node in list(g2.nodes()):\n g2.node[node]['label'] = 'default'", "_____no_output_____" ] ], [ [ "#### Alternative 2: use both the node degrees and the eigenvectors as features", "_____no_output_____" ] ], [ [ "features_idx = np.load(\"../../../npy/guncontrol_daily/index.npy\", allow_pickle = True)\nfeatures_mat = np.load(\"../../../npy/guncontrol_daily/node_features_pca.npy\", 
allow_pickle = True)\n\nmap_nodes_to_features = {}\nfor idx, feature in zip(range(0, len(features_idx)), features_idx):\n map_nodes_to_features[d[feature]] = idx\n \nwith open(\"../../../pickle/guncontrol_daily_eig.pkl\", 'rb') as _p:\n _, eigvec = pkl.load(_p)\n \nmap_nodes_to_eigvec = {}\nfor idx, node_name in zip(range(0, len(list(graph_ab.nodes()))), list(graph_ab.nodes())):\n map_nodes_to_eigvec[d[node_name]] = idx\n \nfor i in list(g2.nodes()):\n g2.nodes[i]['features'] = np.concatenate((features_mat[map_nodes_to_features[i]], eigvec[map_nodes_to_eigvec[i], 1:]))\n \nfor node in list(g2.nodes()):\n g2.node[node]['label'] = 'default'", "_____no_output_____" ] ], [ [ "### Train the model", "_____no_output_____" ] ], [ [ "# get the subgraph containing the known nodes\nstellar = StellarGraph.from_networkx(g2, node_features = 'features')\nsubg = stellar.subgraph(base_polarities['Class'].index)", "_____no_output_____" ], [ "train_labels, test_labels = model_selection.train_test_split(\n base_polarities['Class'],\n train_size=0.7,\n test_size=None,\n stratify=base_polarities['Class'],\n random_state=42,\n)\nval_labels, test_labels = model_selection.train_test_split(\n test_labels, train_size=0.2, test_size=None, stratify=test_labels, random_state=100,\n)", "_____no_output_____" ], [ "target_encoding = preprocessing.LabelBinarizer()\n\ntrain_targets = target_encoding.fit_transform(train_labels)\nval_targets = target_encoding.transform(val_labels)\ntest_targets = target_encoding.transform(test_labels)\n\nbatch_size = 50\nnum_samples = [10, 10]\n\ngenerator = GraphSAGENodeGenerator(subg, batch_size, num_samples)\ntrain_gen = generator.flow(train_labels.index, train_targets, shuffle=True)\n\ngraphsage_model = GraphSAGE(\n layer_sizes=[32, 32], generator=generator, bias=True, dropout=0.5,\n)\n\n# choose the activation function and the loss based on the number of classes:\nif n_classes == 2:\n x_inp, x_out = graphsage_model.in_out_tensors()\n prediction = 
layers.Dense(units=train_targets.shape[1], activation=\"sigmoid\")(x_out)\n \n model = Model(inputs=x_inp, outputs=prediction)\n model.compile(\n optimizer=optimizers.Adam(lr=0.005),\n loss=losses.binary_crossentropy,\n metrics=[\"acc\"],\n )\n \nelse:\n x_inp, x_out = graphsage_model.in_out_tensors()\n prediction = layers.Dense(units=train_targets.shape[1], activation=\"softmax\")(x_out)\n \n model = Model(inputs=x_inp, outputs=prediction)\n model.compile(\n optimizer=optimizers.Adam(lr=0.005),\n loss=losses.categorical_crossentropy,\n metrics=[\"acc\"],\n )\n \nval_gen = generator.flow(val_labels.index, val_targets)\nhistory = model.fit(\n train_gen, epochs=20, validation_data=val_gen, verbose=2, shuffle=False\n)", "Epoch 1/20\n1078/1078 - 138s - loss: 0.2794 - acc: 0.8840 - val_loss: 0.1972 - val_acc: 0.9205\nEpoch 2/20\n1078/1078 - 137s - loss: 0.2284 - acc: 0.9043 - val_loss: 0.1967 - val_acc: 0.9195\nEpoch 3/20\n1078/1078 - 136s - loss: 0.2167 - acc: 0.9102 - val_loss: 0.1881 - val_acc: 0.9251\nEpoch 4/20\n1078/1078 - 136s - loss: 0.2115 - acc: 0.9125 - val_loss: 0.1831 - val_acc: 0.9231\nEpoch 5/20\n1078/1078 - 135s - loss: 0.2094 - acc: 0.9130 - val_loss: 0.1894 - val_acc: 0.9221\nEpoch 6/20\n1078/1078 - 135s - loss: 0.2032 - acc: 0.9143 - val_loss: 0.1724 - val_acc: 0.9294\nEpoch 7/20\n1078/1078 - 140s - loss: 0.2033 - acc: 0.9151 - val_loss: 0.1716 - val_acc: 0.9307\nEpoch 8/20\n1078/1078 - 139s - loss: 0.2006 - acc: 0.9157 - val_loss: 0.1811 - val_acc: 0.9266\nEpoch 9/20\n1078/1078 - 138s - loss: 0.1983 - acc: 0.9166 - val_loss: 0.1785 - val_acc: 0.9244\nEpoch 10/20\n1078/1078 - 138s - loss: 0.1964 - acc: 0.9185 - val_loss: 0.1713 - val_acc: 0.9260\nEpoch 11/20\n1078/1078 - 136s - loss: 0.1969 - acc: 0.9189 - val_loss: 0.1697 - val_acc: 0.9296\nEpoch 12/20\n1078/1078 - 138s - loss: 0.1944 - acc: 0.9183 - val_loss: 0.1681 - val_acc: 0.9301\nEpoch 13/20\n1078/1078 - 141s - loss: 0.1931 - acc: 0.9195 - val_loss: 0.1707 - val_acc: 0.9290\nEpoch 
14/20\n1078/1078 - 138s - loss: 0.1944 - acc: 0.9183 - val_loss: 0.1674 - val_acc: 0.9327\nEpoch 15/20\n1078/1078 - 136s - loss: 0.1923 - acc: 0.9195 - val_loss: 0.1624 - val_acc: 0.9370\nEpoch 16/20\n1078/1078 - 140s - loss: 0.1879 - acc: 0.9224 - val_loss: 0.1710 - val_acc: 0.9301\nEpoch 17/20\n1078/1078 - 142s - loss: 0.1909 - acc: 0.9199 - val_loss: 0.1732 - val_acc: 0.9303\nEpoch 18/20\n1078/1078 - 136s - loss: 0.1905 - acc: 0.9214 - val_loss: 0.1651 - val_acc: 0.9325\nEpoch 19/20\n1078/1078 - 135s - loss: 0.1927 - acc: 0.9207 - val_loss: 0.1748 - val_acc: 0.9270\nEpoch 20/20\n1078/1078 - 134s - loss: 0.1865 - acc: 0.9212 - val_loss: 0.1690 - val_acc: 0.9296\n" ], [ "# Save the train / validation training history\nhistory_save_path = \"../../../npy/classifier_history/guncontrol_daily/history_2_neighbours_node_degree.npy\"\nnp.save(history_save_path, history.history)", "_____no_output_____" ] ], [ [ "### Test the model", "_____no_output_____" ] ], [ [ "test_gen = generator.flow(test_labels.index, test_targets)\n\ntest_metrics = model.evaluate(test_gen)\nprint(\"\\nTest Set Metrics:\")\nfor name, val in zip(model.metrics_names, test_metrics):\n print(\"\\t{}: {:0.4f}\".format(name, val))", "370/370 [==============================] - 36s 97ms/step - loss: 0.1670 - acc: 0.9326\n\nTest Set Metrics:\n\tloss: 0.1670\n\tacc: 0.9326\n" ] ], [ [ "### Predict the unlabeled data", "_____no_output_____" ] ], [ [ "generator = GraphSAGENodeGenerator(stellar, batch_size, num_samples)\n\nhold_out_nodes = data_ab.index.difference(base_polarities.index)\n\ndata_ab['Class'] = 0\n\nhold_out_targets = target_encoding.transform(data_ab.loc[hold_out_nodes][\"Class\"])\nhold_out_gen = generator.flow(hold_out_nodes, hold_out_targets)\n\nhold_out_predictions = model.predict(hold_out_gen)\nhold_out_predictions = target_encoding.inverse_transform(hold_out_predictions)", "_____no_output_____" ], [ "pred = data_ab\npred['Class'] = base_polarities['Class']\npred.loc[hold_out_nodes, 'Class'] 
= hold_out_predictions", "_____no_output_____" ] ], [ [ "### Save the model and the predictions", "_____no_output_____" ] ], [ [ "model.save(\"../../../keras/guncontrol_daily/model\")\npred[['0', 'Class']].to_csv(\"../../../keras/guncontrol_daily/predictions.csv\", index = False)", "WARNING:tensorflow:From /home/adc/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py:1817: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\nINFO:tensorflow:Assets written to: ../../../keras/guncontrol_daily/model/assets\n" ] ], [ [ "### TSNE visualization", "_____no_output_____" ] ], [ [ "all_nodes = pred['Class'].index\nall_mapper = generator.flow(all_nodes)\n\nembedding_model = Model(inputs = x_inp, outputs = x_out)\nemb = embedding_model.predict(all_mapper)", "_____no_output_____" ], [ "# save the embeddings\nnp.save(\"../../../npy/embeddings/guncontrol_daily/emb.npy\", emb)", "_____no_output_____" ], [ "emb_trans = TSNE(n_components = 2, n_jobs = -1).fit_transform(emb)", "_____no_output_____" ], [ "colours = []\nfor iter in range(0, len(map_nodes_to_features)):\n if pred.loc[map_nodes_to_features[iter], 'Class'] == 0: \n colours.append('b')\n \n else:\n colours.append('r')", "_____no_output_____" ], [ "plt.figure(figsize = (20, 10))\nplt.rcParams.update({'font.size': 22})\nplt.scatter(emb_trans[:,0], emb_trans[:,1], c = colours, alpha = 0.5)\nplt.tick_params(labelsize = 20)\nplt.title(\"TSNE embedding for the 'gun control' data set\")\nplt.xlabel(\"Dimension 1\")\nplt.ylabel(\"Dimension 2\")\nplt.grid()\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize = (20, 10))\nplt.rcParams.update({'font.size': 22})\nplt.scatter(emb_trans[:,0], emb_trans[:,1], c = colours, alpha = 0.5)\nplt.tick_params(labelsize = 20)\nplt.title(\"TSNE embedding for the 'gun 
control' data set\")\nplt.xlabel(\"Dimension 1\")\nplt.ylabel(\"Dimension 2\")\nplt.grid()\n\nplt.savefig(\"../../../plots/guncontrol_daily/tsne_30_neighbours.png\")\nplt.close()", "_____no_output_____" ] ], [ [ "#### Save map to features from node degree", "_____no_output_____" ] ], [ [ "with open(\"../../../keras/guncontrol_daily/map_nodes_to_features.pkl\", \"wb\") as _pkl:\n pkl.dump(map_nodes_to_features, _pkl)", "_____no_output_____" ] ], [ [ "#### Save map to features from eigenvectors", "_____no_output_____" ] ], [ [ "with open(\"../../../keras/guncontrol_daily/map_nodes_to_eigvec.pkl\", \"wb\") as _pkl:\n pkl.dump(map_nodes_to_eigvec, _pkl)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb234382ae42d785ea2d8f785c43d87f9b23406c
603,472
ipynb
Jupyter Notebook
monthly_means_GNSS_RO.ipynb
johanmeh/master
ea9b4d3b67e3fc806b6e4c824dfc79562c721e2e
[ "BSD-2-Clause" ]
null
null
null
monthly_means_GNSS_RO.ipynb
johanmeh/master
ea9b4d3b67e3fc806b6e4c824dfc79562c721e2e
[ "BSD-2-Clause" ]
null
null
null
monthly_means_GNSS_RO.ipynb
johanmeh/master
ea9b4d3b67e3fc806b6e4c824dfc79562c721e2e
[ "BSD-2-Clause" ]
null
null
null
725.326923
102,460
0.941439
[ [ [ "## Test monthly means with leap year for GNSS-RO data", "_____no_output_____" ] ], [ [ "import xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom netCDF4 import num2date", "_____no_output_____" ], [ "import func as func", "_____no_output_____" ], [ "ds_obs = xr.open_dataset('GPS-RO__CP_LR_5x5_2007-2018.nc')\nds_era5 = xr.open_dataset('FULL-ERA5.monthmean.2007-2018.concat_new.nc')\nds_erai = xr.open_dataset('erai.tp.monmean.nc')", "_____no_output_____" ], [ "obs_y = ds_obs.resample(time='M', keep_attrs=True).mean()\nobs_y = obs_y.sel(lat=slice(-20,20))\n\n#era_y = ds_era5.resample(time='Y', keep_attrs=True).mean()\nera_y = ds_era5.sel(lat=slice(-20,20))", "_____no_output_____" ], [ "ds_obs", "_____no_output_____" ], [ "#erai_y = ds_erai.resample(time='Y', keep_attrs=True).mean()\nerai_y = ds_erai.sel(lat=slice(20,-20), time=slice('2007-01-01T00:00:00.000000000', '2018-12-31T00:00:00.000000000'))", "_____no_output_____" ], [ "#era_y_mean= f.year_mean(era_y)\n#erai_y_mean = f.year_mean(erai_y)\n#obs_y_mean = f.year_mean(obs_y, obs=True)", "_____no_output_____" ], [ "era_y_mean = func.w_average(era_y, ['ctpt', 'ctpz', 'tpt', 'tpz'])\nerai_y_mean = func.w_average(erai_y, ['ctpt', 'ctpz', 'tpt', 'tpz'])\nobs_y_mean = func.w_average(obs_y, ['CP_T', 'CP_z', 'LR_T', 'LR_z'])", "_____no_output_____" ], [ "len(erai_y_mean[0])", "_____no_output_____" ], [ "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\nplt.suptitle('Cold point tropopause \\n2007-2018 annual mean', fontsize=12)\n\nyr = np.linspace(2007,2018,144)\nlab1 = 'Era5'\nlab2 = 'Era-I'\nlab3 = 'GNSS-RO'\n\nax1.plot(yr,era_y_mean[0], lw = 2,label = lab1)\nax1.plot(yr, erai_y_mean[0], lw = 2, label = lab2)\nax1.plot(yr,obs_y_mean[0] , lw = 2, label = lab3)\nax1.legend()\nax1.set(ylabel = 'Temperature [K]', xlabel = 'Year')\n\n\nax2.plot(yr, era_y_mean[1], lw=2, label = lab1)\nax2.plot(yr, erai_y_mean[1], lw=2, label = lab2)\nax2.plot(yr, obs_y_mean[1], lw = 2, label = 
lab3)\nax2.legend()\nax2.set(ylabel = 'Height [km]', xlabel='Year')\n\nplt.tight_layout()\nplt.subplots_adjust(top=0.85)\n#plt.subplots_adjust(top=2)\n#plt.savefig('yearly_mean_cpt.png')", "_____no_output_____" ], [ "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\nplt.suptitle('Lapse rate tropopause \\n2007-2018 annual mean',fontsize=12)\n\nyr = np.linspace(2007,2018,144)\n\nax1.plot(yr,era_y_mean[2], lw=2, label = lab1)\nax1.plot(yr, erai_y_mean[2], lw = 2,label = lab2)\nax1.plot(yr,obs_y_mean[2], lw = 2, label = lab3)\nax1.legend()\nax1.set(ylabel='Temperature [K]', xlabel='years')\n\n\n\nax2.plot(yr, era_y_mean[3], lw = 2,label = lab1)\nax2.plot(yr, erai_y_mean[3], lw = 2,label = lab2)\nax2.plot(yr, obs_y_mean[3], lw = 2,label = lab3)\nax2.legend()\nax2.set(ylabel='Height [km]', xlabel='years')\n\nplt.tight_layout()\nplt.subplots_adjust(top=0.85)\n\n#plt.savefig('yearly_mean_lrt.png')", "_____no_output_____" ], [ "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\ndlab1 = 'ERA5 - GNSS-RO'\ndlab2 = 'ERA-I - GNSS-RO'\n\nyr = np.linspace(2007,2018,144)\n\nax1.plot(yr,era_y_mean[0] - obs_y_mean[0], lw = 1.5, label = dlab1)\nax1.plot(yr, erai_y_mean[0] - obs_y_mean[0], lw = 1.5, label = dlab2)\n#plt.ylim(-0.75,0.75)\nax1.set(title='Difference ERA5 & ERA-I and GNSS-RO \\n Cold point temperature',\n ylabel = 'Temperature difference [K]')\nax1.legend()\n\nax2.plot(yr, era_y_mean[2] - obs_y_mean[2], lw = 1.5, label = dlab1)\nax2.plot(yr, erai_y_mean[2] - obs_y_mean[2], lw = 1.5, label = dlab2)\nax2.set(title='Difference ERA5 & ERA-I and GNSS-RO \\n Lapse rate temperature',\n ylabel = 'Temperature difference [K]')\nax2.legend()\n\nplt.tight_layout()\n\n#plt.savefig('monthly_diff_temp_cpt_lrt.png')", "_____no_output_____" ], [ "f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))\n\n\nax1.plot(yr,era_y_mean[1] - obs_y_mean[1], lw = 1.5, label = dlab1)\nax1.plot(yr, erai_y_mean[1] - obs_y_mean[1], lw = 1.5, label= 
dlab2)\n#plt.ylim(-0.75,0.75)\nax1.set(title='Difference ERA5 & ERA-I and GNSS-RO \\n Cold point height',\n ylabel='Height difference [km]')\nax1.legend()\n\nax2.plot(yr, era_y_mean[3] - obs_y_mean[3], lw = 1.5, label = dlab1)\nax2.plot(yr, erai_y_mean[3] - obs_y_mean[3], lw = 1.5, label = dlab2)\nax2.set(title='Difference ERA5 & ERA-I and GNSS-RO \\n Lapse rate height',\n ylabel='Height difference [km]')\nax2.legend()\n\nplt.tight_layout()\n\n#plt.savefig('yearly_diff_height_cpt_lrt.png')", "_____no_output_____" ], [ "f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n\nera_y.ctpt[12,:,:].plot(ax=ax1)\nerai_y.ctpt[12,:,:].plot(ax=ax2)\nobs_y.CP_T[12,:,:].plot(ax=ax3)\n\nplt.tight_layout()\n#f.savefig('latlon_cpt_jan08.png')", "_____no_output_____" ], [ "f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n\nera_y.ctpt[11,:,:].plot(ax=ax1)\nerai_y.ctpt[11,:,:].plot(ax=ax2)\nobs_y.CP_T[11,:,:].plot(ax=ax3)\n\nplt.tight_layout()\n#f.savefig('latlon_cpt_dec07')", "_____no_output_____" ], [ "obs_y.CP_T[3,:,:].plot()", "_____no_output_____" ], [ "erai_pre = ds_erai.sel(time=slice('2000-01-15', '2010-12-15'))", "_____no_output_____" ], [ "ds_erai.time[200:]", "_____no_output_____" ], [ "erai_pre.ctpt.time", "_____no_output_____" ], [ "era_pre_av = func.w_average(erai_pre, ['ctpt'])", "_____no_output_____" ], [ "era_pre_av[0]", "_____no_output_____" ], [ "xax = np.linspace(2006,2010,59, dtype=int)\n\nplt.figure()\nplt.plot(xax,era_pre_av[0])", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb2344126cb06966e1e7517cf8d619989b4cab9c
125,657
ipynb
Jupyter Notebook
Farmacia.ipynb
taynnamello/Jupyter-Estatistica
36238f88ad1cac4f76107443bfd2b8c22cfe8435
[ "MIT" ]
null
null
null
Farmacia.ipynb
taynnamello/Jupyter-Estatistica
36238f88ad1cac4f76107443bfd2b8c22cfe8435
[ "MIT" ]
null
null
null
Farmacia.ipynb
taynnamello/Jupyter-Estatistica
36238f88ad1cac4f76107443bfd2b8c22cfe8435
[ "MIT" ]
null
null
null
46.764793
9,110
0.400853
[ [ [ "<a href=\"https://colab.research.google.com/github/taynnamello/jupyter-estatistica/blob/main/Farmacia.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import pandas as pd\nurl_dados = 'https://github.com/alura-cursos/imersaodados3/blob/main/dados/dados_experimentos.zip?raw=true'\ndados = pd.read_csv(url_dados, compression = 'zip')\ndados", "_____no_output_____" ], [ "dados.head()", "_____no_output_____" ], [ "dados['tratamento']", "_____no_output_____" ], [ "dados['tratamento'].unique()", "_____no_output_____" ], [ "dados['tempo'].unique()", "_____no_output_____" ], [ "dados['dose'].unique()", "_____no_output_____" ], [ "dados['droga'].unique()", "_____no_output_____" ], [ "dados['g-0'].unique()", "_____no_output_____" ], [ "dados['tratamento'].value_counts()", "_____no_output_____" ], [ "dados['dose'].value_counts()", "_____no_output_____" ], [ "dados['tratamento'].value_counts(normalize= True)", "_____no_output_____" ], [ "dados['dose'].value_counts(normalize= True)", "_____no_output_____" ], [ "dados['tratamento'].value_counts().plot.pie()", "_____no_output_____" ], [ "dados['tempo'].value_counts().plot.pie()", "_____no_output_____" ], [ "dados['tempo'].value_counts().plot.bar()", "_____no_output_____" ], [ "dados_filtrados = dados[dados['g-0'] >0]\ndados_filtrados.head()", "_____no_output_____" ] ], [ [ "###Desafio 01: Ivestigar por que a classe tratamento é tão desbalanceado\n###Desafio 02: Plotar as cinco últimas linhas da tabela\n###Desafio 03: Proporção das classes tratamento\n###Desafio 04: Quantos tipos de drogas foram investigados\n###Desafio 05: Procurar na documentação o método query(pandas)\n###Desafio 06: Colocar descrição no gráfico\n###Desafio 07: Renomear as colunas tirando o hífen", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb234660652010830ec56610e4d9f071af1b12d0
977,851
ipynb
Jupyter Notebook
doc/transform.ipynb
jbusecke/xgcm
f362bf0a629712ac3f448fffb7f9be1bdc1c59bc
[ "MIT" ]
null
null
null
doc/transform.ipynb
jbusecke/xgcm
f362bf0a629712ac3f448fffb7f9be1bdc1c59bc
[ "MIT" ]
2
2022-03-22T00:53:59.000Z
2022-03-30T09:12:29.000Z
doc/transform.ipynb
jbusecke/xgcm
f362bf0a629712ac3f448fffb7f9be1bdc1c59bc
[ "MIT" ]
null
null
null
121.865778
163,516
0.750221
[ [ [ "# Transforming Vertical Coordinates\n\nA common need in the analysis of ocean and atmospheric data is to transform the vertical coordinate from its original coordinate (e.g. depth) to a new coordinate (e.g. density).\nXgcm supports this sort of one-dimensional coordinate transform on `Axis` and `Grid` objects using the `transform` method.\nTwo algorithms are implemented:\n\n- _Linear interpolation:_ Linear interpolation is designed to interpolate intensive quantities (e.g. temperature) from one coordinate to another. This method is suitable when the target coordinate is monotonically increasing or decreasing and the data variable is intensive. For example, you want to visualize oxygen on density surfaces from a z-coordinate ocean model.\n- _Conservative remapping:_ This algorithm is designed to conserve extensive quantities (e.g. transport, heat content). It requires knowledge of cell bounds in both the source and target coordinate. It also handles non-monotonic target coordinates.\n\nOn this page, we explain how to use these coordinate transformation capabilities.", "_____no_output_____" ] ], [ [ "from xgcm import Grid\nimport xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## 1D Toy Data Example\n\nFirst we will create a simple, one-dimensional dataset to illustrate how the `transform` function works.\nThis dataset contains\n\n- a coordinate called `z`, representing the original depth coordinate\n\n- a data variable called `theta`, a function of `z`, which we want as our new vertical coordinate\n\n- a data variable called `phi`, akso a function of `z`, which represents the data we want to transform into this new coordinate space\n\nIn an oceanic context `theta` might be density and `phi` might be oxygen.\nIn an atmospheric context, `theta` might be potential temperature and `phi` might be potential vorticity.", "_____no_output_____" ] ], [ [ "z = np.arange(2, 12)\ntheta = xr.DataArray(np.log(z), 
dims=['z'], coords={'z': z})\nphi = xr.DataArray(np.flip(np.log(z)*0.5+ np.random.rand(len(z))), dims=['z'], coords={'z':z})\nds = xr.Dataset({'phi': phi, 'theta': theta})\nds", "_____no_output_____" ] ], [ [ "Let's plot this data. Note that, for a simple 1D profile, we can easily visualize `phi` in `theta` space by simply plotting `phi` vs. `theta`. This is essentially a form of linear interpolation, performed automatically by matplotlib when it draws lines between the discrete points of our data.", "_____no_output_____" ] ], [ [ "def plot_profile():\n fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=[8,5])\n ds.theta.plot(ax=ax1, y='z', marker='.', yincrease=False)\n ds.phi.plot(ax=ax2, y='z', marker='.', yincrease=False)\n ds.swap_dims({'z': 'theta'}).phi.plot(ax=ax3, y='theta', marker='.', yincrease=False)\n fig.subplots_adjust(wspace=0.5)\n return ax3\n\nplot_profile();", "_____no_output_____" ] ], [ [ "### Linear transformation\n\nOk now lets transform `phi` to `theta` coordinates using linear interpolation.\nA key part of this is to define specific `theta` levels onto which we want to interpolate the data.", "_____no_output_____" ] ], [ [ "# First create an xgcm grid object\ngrid = Grid(ds, coords={'Z': {'center':'z'}}, periodic=False)\n\n# define the target values in density, linearly spaced\ntheta_target = np.linspace(0, 3, 20)\n\n# and transform\nphi_transformed = grid.transform(ds.phi, 'Z', theta_target, target_data=ds.theta)\nphi_transformed", "_____no_output_____" ] ], [ [ "Now let's see what the result looks like.", "_____no_output_____" ] ], [ [ "ax = plot_profile()\nphi_transformed.plot(ax=ax, y='theta', marker='.')", "_____no_output_____" ] ], [ [ "Not too bad. 
We can increase the number of interpolation levels to capture more of the small scale structure.", "_____no_output_____" ] ], [ [ "target_theta = np.linspace(0,3, 100)\nphi_transformed = grid.transform(ds.phi, 'Z', target_theta, target_data=ds.theta)\nax = plot_profile()\nphi_transformed.plot(ax=ax, y='theta', marker='.')", "_____no_output_____" ] ], [ [ "Note that by default, values of `theta_target` which lie outside the range of `theta` have been masked (set to `NaN`).\nTo disable this behavior, you can pass `mask_edges=False`; values outside the range of `theta` will be filled with the nearest valid value.", "_____no_output_____" ] ], [ [ "target_theta = np.linspace(0,3, 60)\nphi_transformed = grid.transform(ds.phi, 'Z', target_theta, target_data=ds.theta, mask_edges=False)\nax = plot_profile()\nphi_transformed.plot(ax=ax, y='theta', marker='.')", "_____no_output_____" ] ], [ [ "### Conservative transformation\n\nConservative transformation is designed to preseve the total sum of `phi` over the `Z` axis.\nIt presumes that `phi` is an _extensive quantity_, i.e. a quantity that is already volume weighted, with respect to the Z axis: for example, units of `Kelvins * meters` for heat content, rather than just `Kelvins`.\nThe conservative method requires more input data at the moment.\nYou have to not only specify the coordinates of the cell centers, but also the cell faces (or bounds/boundaries). 
In xgcm we achieve this by defining the bounding coordinates as the `outer` axis position.\nThe target `theta` values are likewise intepreted as cell boundaries in `theta`-space.\nIn this way, conservative transformation is similar to calculating a histogram.", "_____no_output_____" ] ], [ [ "# define the cell bounds in depth\nzc = np.arange(1,12)+0.5\n\n# add them to the existing dataset\nds = ds.assign_coords({'zc': zc})\nds", "_____no_output_____" ], [ "# Recreate the grid object with a staggered `center`/`outer` coordinate layout\ngrid = Grid(ds, coords={'Z':{'center':'z', 'outer':'zc'}},\n periodic=False)\ngrid", "_____no_output_____" ] ], [ [ "Currently the `target_data`(`theta` in this case) has to be located on the `outer` coordinate for the conservative method (compared to the `center` for the linear method).\n\nWe can easily interpolate `theta` on the outer coordinate with the grid object.", "_____no_output_____" ] ], [ [ "ds['theta_outer'] = grid.interp(ds.theta, 'Z', boundary='fill')\nds['theta_outer']", "_____no_output_____" ] ], [ [ "Now lets transform the data using the conservative method. 
Note that the target values will now be interpreted as cell bounds and not cell centers as before.", "_____no_output_____" ] ], [ [ "# define the target values in density\ntheta_target = np.linspace(0,3, 20)\n\n# and transform\nphi_transformed_cons = grid.transform(ds.phi,\n 'Z',\n theta_target,\n method='conservative',\n target_data=ds.theta_outer)\nphi_transformed_cons", "_____no_output_____" ], [ "phi_transformed_cons.plot(y='theta_outer', marker='.', yincrease=False)", "_____no_output_____" ] ], [ [ "There is no point in comparing `phi_transformed_cons` directly to `phi` or the results of linear interoplation, since here we have reinterpreted `phi` as an extensive quantity.\nHowever, we can verify that the sum of the two quantities over the Z axis is exactly the same.", "_____no_output_____" ] ], [ [ "ds.phi.sum().values", "_____no_output_____" ], [ "phi_transformed_cons.sum().values", "_____no_output_____" ] ], [ [ "## Realistic Data Example\n\nTo illustrate these features in a more realistic example, we use data from the [NCEP Global Ocean Data Assimilation](https://www.cpc.ncep.noaa.gov/products/GODAS/) (GODAS).\nThis data are available from the [Pangeo Cloud Data Library](https://catalog.pangeo.io/browse/master/ocean/GODAS/).\nWe can see that this is a full, global, 4D ocean dataset.", "_____no_output_____" ] ], [ [ "from intake import open_catalog\n\ncat = open_catalog(\"https://raw.githubusercontent.com/pangeo-data/pangeo-datastore/master/intake-catalogs/ocean.yaml\")\nds = cat[\"GODAS\"].to_dask()\nds", "_____no_output_____" ] ], [ [ "The grid is missing an `outer` coordinate for the Z axis, so we will construct one.\nThis will be needed for conservative interpolation.", "_____no_output_____" ] ], [ [ "level_outer_data = np.pad(ds.level_w.values, [1, 0])\nlevel_outer = xr.DataArray(\n level_outer_data,\n dims=['level_outer'],\n coords={'level_outer': ('level_outer', level_outer_data)}\n)\nds = ds.assign_coords({'level_outer': level_outer})", 
"_____no_output_____" ] ], [ [ "Now we create a `Grid` object for this dataset.", "_____no_output_____" ] ], [ [ "grid = Grid(ds, coords={'Z': {'center': 'level', 'outer': 'level_outer'},\n 'X': {'center': 'lon', 'right': 'lon_u'},\n 'Y': {'center': 'lat', 'right': 'lat_u'}},\n periodic=['X'])\ngrid", "_____no_output_____" ] ], [ [ "### Linear Interpolation\n\nTo illustrate linear interpolation, we will interpolate salinity onto temperature surface.", "_____no_output_____" ] ], [ [ "# convert to standard units\ntheta = ds.pottmp - 273.15 \nsalt = 1000 * ds.salt", "_____no_output_____" ], [ "target_theta_levels = np.arange(-2, 36)\nsalt_on_theta = grid.transform(salt, 'Z', target_theta_levels, target_data=theta, method='linear')\nsalt_on_theta", "_____no_output_____" ] ], [ [ "Note that the computation is lazy. (No data has been downloaded or computed yet.)\nWe can trigger computation by plotting something.", "_____no_output_____" ] ], [ [ "salt_on_theta.isel(time=0).sel(pottmp=18).plot()", "/home/jovyan/xgcm/xgcm/transform.py:60: RuntimeWarning: invalid value encountered in _interp_1d_linear\n return _interp_1d_linear(phi, theta, target_theta_levels, mask_edges)\n" ], [ "salt_on_theta.isel(time=0).mean(dim='lon').plot(x='lat')", "/home/jovyan/xgcm/xgcm/transform.py:60: RuntimeWarning: invalid value encountered in _interp_1d_linear\n return _interp_1d_linear(phi, theta, target_theta_levels, mask_edges)\n/srv/conda/envs/notebook/lib/python3.7/site-packages/dask/array/numpy_compat.py:41: RuntimeWarning: invalid value encountered in true_divide\n x = np.divide(x1, x2, out)\n" ] ], [ [ "## Conservative Interpolation\n\nTo do conservative interpolation, we will attempt to calculate the meridional overturning in temperature space.\nNote that this is not a perfectly precise calculation, since the GODAS data are not exactly volume conserving as provided.\nHowever, it's sufficient to illustrate the basic principles of the calculation.\n\nTo use conservative interpolation, we 
have to go from an intensive quantity (velocity) to an extensive one (velocity times cell thickness).\nWe fill any missing values with 0, since they don't contribute to the transport.", "_____no_output_____" ] ], [ [ "thickness = grid.diff(ds.level_outer, 'Z')\nv_transport = ds.vcur * thickness\nv_transport = v_transport.fillna(0.).rename('v_transport')\nv_transport", "_____no_output_____" ] ], [ [ "We also need to interpolate `theta`, our target data for interoplation, to the same horizontal position as `v_transport`. This means moving from cell center to cell corner.\nThis step introduces some considerable errors, particularly near the boundaries of bathymetry.\n(Xgcm currently has no special treatment for internal boundary conditions--see issue [222](https://github.com/xgcm/xgcm/issues/240).)", "_____no_output_____" ] ], [ [ "theta = grid.interp(theta,['X', 'Y'], boundary='extend').rename('theta')\ntheta", "_____no_output_____" ], [ "v_transport_theta = grid.transform(v_transport, 'Z', target_theta_levels,\n target_data=theta, method='conservative')\nv_transport_theta", "/home/jovyan/xgcm/xgcm/grid.py:909: UserWarning: The `target data` input is not located on the cell bounds. This method will continue with linear interpolation with repeated boundary values. For most accurate results provide values on cell bounds.\n UserWarning,\n" ] ], [ [ "Notice that this produced a warning. 
The `conservative` transformation method natively needs `target_data` to be provided on the cell bounds (here `level_outer`).\nSince transforming onto tracer coordinates is a very common scenario, xgcm uses linear interpolation to infer the values on the `outer` axis position.\n\nTo demonstrate how to provide provide `target_data` on the outer grid position, we reproduce the steps xgcm executes internally:", "_____no_output_____" ] ], [ [ "theta_outer = grid.interp(theta,['Z'], boundary='extend')\n# the data cannot be chunked along the transformation axis\ntheta_outer = theta_outer.chunk({'level_outer': -1}).rename('theta')\ntheta_outer", "_____no_output_____" ] ], [ [ "When we apply the transformation we can see that the results in this case are equivalent:", "_____no_output_____" ] ], [ [ "v_transport_theta_manual = grid.transform(v_transport, 'Z', target_theta_levels,\n target_data=theta_outer, method='conservative')\n\n# Warning: this step takes a long time to compute. We will only compare the first time value\nxr.testing.assert_allclose(v_transport_theta_manual.isel(time=0), v_transport_theta.isel(time=0))", "_____no_output_____" ] ], [ [ "Now we verify visually that the vertically integrated transport is conserved under this transformation.", "_____no_output_____" ] ], [ [ "v_transport.isel(time=0).sum(dim='level').plot(robust=True)", "_____no_output_____" ], [ "v_transport_theta.isel(time=0).sum(dim='theta').plot(robust=True)", "_____no_output_____" ] ], [ [ "Finally, we attempt to plot a crude meridional overturning streamfunction for a single timestep.", "_____no_output_____" ] ], [ [ "dx = 110e3 * np.cos(np.deg2rad(ds.lat_u))\n(v_transport_theta.isel(time=0) * dx).sum(dim='lon_u').cumsum(dim='theta').plot.contourf(x='lat_u', levels=31)", "/srv/conda/envs/notebook/lib/python3.7/site-packages/xarray/plot/utils.py:624: MatplotlibDeprecationWarning: The 'extend' parameter to Colorbar has no effect because it is overridden by the mappable; it is deprecated 
since 3.3 and will be removed two minor releases later.\n cbar = fig.colorbar(primitive, **cbar_kwargs)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb2356d7da12dcfada80bd664f50e2cf4219621b
6,025
ipynb
Jupyter Notebook
4_6_Matrices_and_Transformation_of_State/5_matrix_transpose.ipynb
cutillav/CVND_Localization_Exercises
0fd3c381a860f439a1cde7ec2d54dff1a8b2b661
[ "MIT" ]
null
null
null
4_6_Matrices_and_Transformation_of_State/5_matrix_transpose.ipynb
cutillav/CVND_Localization_Exercises
0fd3c381a860f439a1cde7ec2d54dff1a8b2b661
[ "MIT" ]
null
null
null
4_6_Matrices_and_Transformation_of_State/5_matrix_transpose.ipynb
cutillav/CVND_Localization_Exercises
0fd3c381a860f439a1cde7ec2d54dff1a8b2b661
[ "MIT" ]
null
null
null
35.441176
290
0.470705
[ [ [ "# Transpose of a Matrix\n\nIn this set of exercises, you will work with the transpose of a matrix.\n\nYour first task is to write a function that takes the transpose of a matrix. Think about how to use nested for loops efficiently.\n\nThe second task will be to write a new matrix multiplication function that takes advantage of your matrix transposition function.", "_____no_output_____" ] ], [ [ "### TODO: Write a function called transpose() that \n### takes in a matrix and outputs the transpose of the matrix\n\ndef transpose(matrix):\n matrix_transpose = []\n for i in range(len(matrix[0])):\n matrix_transpose.append([row[i] for row in matrix])\n return matrix_transpose", "_____no_output_____" ], [ "### TODO: Run the code in the cell below. If there is no \n### output, then your answers were as expected\n\nassert transpose([[5, 4, 1, 7], [2, 1, 3, 5]]) == [[5, 2], [4, 1], [1, 3], [7, 5]]\nassert transpose([[5]]) == [[5]]\nassert transpose([[5, 3, 2], [7, 1, 4], [1, 1, 2], [8, 9, 1]]) == [[5, 7, 1, 8], [3, 1, 1, 9], [2, 4, 2, 1]]\n", "_____no_output_____" ] ], [ [ "### Matrix Multiplication\n\nNow that you have your transpose function working, write a matrix multiplication function that takes advantage of the transpose. \n\nAs part of the matrix multiplication code, you might want to re-use your dot product function from the matrix multiplication exercises. 
But you won't need your get_row and get_column functions anymore because the tranpose essentially takes care of turning columns into row vectors.\n\nRemember that if matrix A is mxn and matrix B is nxp, then the resulting product will be mxp.", "_____no_output_____" ] ], [ [ "### TODO: Write a function called matrix_multiplication() that\n### takes in two matrices and outputs the product of the two\n### matrices\n\n### TODO: Copy your dot_product() function here so that you can\n### use it in your matrix_multiplication function\ndef dot_product(matrixA, matrixB):\n return sum([x[0]*x[1] for x in zip(matrixA, matrixB)])\n\ndef matrix_multiplication(matrixA, matrixB):\n product = []\n \n \n ## TODO: Take the transpose of matrixB and store the result\n ## in a new variable\n \n \n ## TODO: Use a nested for loop to iterate through the rows\n ## of matrix A and the rows of the tranpose of matrix B\n \n ## TODO: Calculate the dot product between each row of matrix A\n ## with each row in the transpose of matrix B\n \n ## TODO: As you calculate the results inside your for loops,\n ## store the results in the product variable\n matrixB_transpose = transpose(matrixB)\n for i in range(len(matrixA)):\n product_row = []\n for j in range(len(matrixB_transpose)):\n product_row.append(dot_product(matrixA[i],matrixB_transpose[j]))\n product.append(product_row)\n\n return product", "_____no_output_____" ], [ "### TODO: Run the code in the cell below. 
If there is no \n### output, then your answers were as expected\n\nassert matrix_multiplication([[5, 3, 1], \n [6, 2, 7]], \n [[4, 2], \n [8, 1], \n [7, 4]]) == [[51, 17], \n [89, 42]]\n\nassert matrix_multiplication([[5]], [[4]]) == [[20]]\n\nassert matrix_multiplication([[2, 8, 1, 2, 9],\n [7, 9, 1, 10, 5],\n [8, 4, 11, 98, 2],\n [5, 5, 4, 4, 1]], \n [[4], \n [2], \n [17], \n [80], \n [2]]) == [[219], [873], [8071], [420]]\n\n\nassert matrix_multiplication([[2, 8, 1, 2, 9],\n [7, 9, 1, 10, 5],\n [8, 4, 11, 98, 2],\n [5, 5, 4, 4, 1]], \n [[4, 1, 2], \n [2, 3, 1], \n [17, 8, 1], \n [1, 3, 0], \n [2, 1, 4]]) == [[61, 49, 49], [83, 77, 44], [329, 404, 39], [104, 65, 23]]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb2368bba81657a92bf0ea0f04fdb7febbaf46c3
44,325
ipynb
Jupyter Notebook
slides/handson-ml2-01.ipynb
codingalzi/hands-on-ml
fde88fe5a195b496a5a56d84c0f4ac7238889c79
[ "Apache-2.0" ]
null
null
null
slides/handson-ml2-01.ipynb
codingalzi/hands-on-ml
fde88fe5a195b496a5a56d84c0f4ac7238889c79
[ "Apache-2.0" ]
null
null
null
slides/handson-ml2-01.ipynb
codingalzi/hands-on-ml
fde88fe5a195b496a5a56d84c0f4ac7238889c79
[ "Apache-2.0" ]
4
2021-05-11T16:51:39.000Z
2022-02-17T08:23:21.000Z
17.389172
169
0.454123
[ [ [ "# 1장 한눈에 보는 머신러닝", "_____no_output_____" ], [ "#### 감사의 글", "_____no_output_____" ], [ "자료를 공개한 저자 오렐리앙 제롱과 강의자료를 지원한 한빛아카데미에게 진심어린 감사를 전합니다.", "_____no_output_____" ], [ "## 1.1 머신러닝이란?", "_____no_output_____" ], [ "- 아서 새뮤얼(Artuhr Samuel), 1959\n\n> 머신러닝은 **명시적인 프로그래밍** 없이 컴퓨터가 학습하는 능력을 갖추게 하는 연구 분야\n", "_____no_output_____" ], [ "- 톰 미첼(Tom Michell), 1977\n\n> 어떤 작업 T에 대한 컴퓨터 프로그램의 성능을 P로 측정했을 때\n> 경험 E로 인해 성능이 향상되었다면, \n> 이 컴퓨터 프로그램은 __작업 T와 성능 측정 P에 대해 경험 E로부터 학습한다__고 말한다.", "_____no_output_____" ], [ "### 머신러닝 프로그램 예제", "_____no_output_____" ], [ "* 스팸 필터: 스팸(spam)과 스팸이 아닌 메일(ham)의 샘플을 이용하여 스팸 메일 구분법 학습 ", "_____no_output_____" ], [ "### 기본 용어", "_____no_output_____" ], [ "* __훈련 세트__(training set): 머신러닝 프로그램이 훈련(학습)하는 데 사용하는 데이터 집합", "_____no_output_____" ], [ "* __훈련 사례__ 혹은 __샘플__: 각각의 훈련 데이터", "_____no_output_____" ], [ "* 톰 미첼의 정의와의 연계\n - 작업 T: 새로운 메일이 스팸 여부 판단\n - 경험 E: 훈련 데이터셋\n - 성능 P: 예를 들어, 정확히 분류된 메일의 비율\n (다른 성능 측정 기준도 가능. 3장 참조)", "_____no_output_____" ], [ "## 1.2 왜 머신러닝을 사용하는가?", "_____no_output_____" ], [ "### 전통적인 프로그래밍", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-01.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 전통적인 프로그래밍 접근 방법은 다음과 같다.\n * **문제 연구**: 누군가가 문제를 해결하기 위해 해결책을 찾음 \n * **규칙 작성**: 결정된 규칙을 개발자가 프로그램을 작성\n * **평가**: 만들어진 프로그램을 테스트\n * 문제가 없다면 **론칭**, 문제가 있다면 **오차를 분석**한 후 처음 과정부터 다시 실시", "_____no_output_____" ], [ "#### 예제: 스팸 메일 분류", "_____no_output_____" ], [ "* **특정 단어**가 들어가면 스팸 메일로 처리", "_____no_output_____" ], [ "* 프로그램이 론칭된 후 새로운 스팸단어가 생겼을 때 소프트웨어는 이 단어를 자동으로 분류할 수 없음", "_____no_output_____" ], [ "* 개발자가 새로운 규칙을 업데이트 시켜줘야 함", "_____no_output_____" ], [ "* **새로운 규칙이 생겼을 때 사용자가 매번 업데이트를** 시켜줘야하기 때문에 유지 보수가 어려움", "_____no_output_____" ], [ "### 머신러닝", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-02.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "#### 예제: 스팸 메일 분류", "_____no_output_____" ], [ "* 사용자가 스팸으로 지정한 메일에 '4U', 'For U', 
'Buy Drugs\" 등의 표현이 자주 등장하는 경우 \n 그런 표현을 자동으로 인식하고 메일을 스팸으로 분류하도록 프로그램 스스로 학습", "_____no_output_____" ], [ "### 머신러닝 학습 자동화", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-03.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 머신러닝 작업 흐름의 전체를 __머신러닝 파이프라인__ \n 또는 __MLOps(Machine Learning Operations, 머신러닝 운영)__라 부르며 자동화가 가능함.\n \n* 참조: [MLOps: 머신러닝의 지속적 배포 및 자동화 파이프라인](https://cloud.google.com/solutions/machine-learning/mlops-continuous-delivery-and-automation-pipelines-in-machine-learning)", "_____no_output_____" ], [ "### 머신러닝의 장점", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-04.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 전통적인 방식으로는 해결 방법이 없는 너무 복잡한 문제 해결", "_____no_output_____" ], [ "* 새로운 데이터에 쉽게 적응 가능한 시스템", "_____no_output_____" ], [ "* 데이터 마이닝(data mining): 복잡한 문제와 대량의 데이터에서 통찰 얻기", "_____no_output_____" ], [ "## 1.3 적용 사례", "_____no_output_____" ], [ "### 대표적인 머신러닝 적용 사례", "_____no_output_____" ], [ "* 이미지 분류 작업: 생산 라인에서 제품 이미지를 분석해 자동으로 분류", "_____no_output_____" ], [ "* 시맨틱 분할 작업: 뇌를 스캔하여 종양 진단", "_____no_output_____" ], [ "* 텍스트 분류(자연어 처리): 자동으로 뉴스 기사 분류", "_____no_output_____" ], [ "* 텍스트 분류: 토론 포럼에서 부정적인 코멘트를 자동으로 구분", "_____no_output_____" ], [ "* 텍스트 요약: 긴 문서를 자동으로 요약", "_____no_output_____" ], [ "* 자연어 이해 : 챗봇(chatbot) 또는 개인 비서 만들기", "_____no_output_____" ], [ "* 회귀 분석: 회사의 내년도 수익을 예측하기", "_____no_output_____" ], [ "* 음성 인식: 음성 명령에 반응하는 앱", "_____no_output_____" ], [ "* 이상치 탐지: 신용 카드 부정 거래 감지", "_____no_output_____" ], [ "* 군집 작업: 구매 이력을 기반 고객 분류 후 다른 마케팅 전략 계획", "_____no_output_____" ], [ "* 데이터 시각화: 고차원의 복잡한 데이터셋을 그래프로 효율적 표현", "_____no_output_____" ], [ "* 추천 시스템: 과거 구매 이력 관심 상품 추천", "_____no_output_____" ], [ "* 강화 학습: 지능형 게임 봇(bot) 만들기", "_____no_output_____" ], [ "## 1.4 머신러닝 시스템 종류", "_____no_output_____" ], [ "### 머신러닝 시스템 분류 기준", "_____no_output_____" ], [ "* 기준 1: 훈련 지도 여부\n * 지도 학습\n * 비지도 학습\n * 준지도 학습\n * 강화 학습", "_____no_output_____" ], [ "* 기준 
2: 실시간 훈련 여부\n * 온라인 학습\n * 배치 학습", "_____no_output_____" ], [ "* 기준 3: 예측 모델 사용 여부\n * 사례 기반 학습\n * 모델 기반 학습", "_____no_output_____" ], [ "#### 분류 기준의 비배타성", "_____no_output_____" ], [ "* 분류 기준이 상호 배타적이지는 않음.", "_____no_output_____" ], [ "* 스팸 필터 예제\n * 심층 신경망 모델 활용 실시간 스팸 메일 분류 학습 가능\n * 지도 학습 + 온라인 학습 + 모델 기반 학습", "_____no_output_____" ], [ "### 훈련 지도 여부 구분", "_____no_output_____" ], [ "### 지도 학습", "_____no_output_____" ], [ "* 훈련 데이터에 __레이블(label)__이라는 답 포함\n * 레이블 대신에 __타깃(target)__이란 표현도 사용됨.", "_____no_output_____" ], [ "* 대표적 지도 학습\n * 분류\n * 회귀", "_____no_output_____" ], [ "#### 분류", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-05.png\" width=\"500\"/></div>\n\n* 특성을 사용한 데이터 분류", "_____no_output_____" ], [ "* 예제: 스팸 필터\n * 특성: 소속 정보, 특정 단어 포함 여부 등\n * 레이블(타깃): 스팸 또는 햄 ", "_____no_output_____" ], [ "#### 회귀", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-06.png\" width=\"400\"/></div>\n\n* 특성을 사용하여 타깃(target) 수치 예측", "_____no_output_____" ], [ "* 예제: 중고차 가격 예측\n * 특성: 주행거리, 연식, 브랜드 등\n * 타깃: 중고차 가격", "_____no_output_____" ], [ "#### 중요한 지도학습 알고리즘들 ", "_____no_output_____" ], [ "* k-최근접 이웃(k-NNs)\n* 선형 회귀(linear regression)\n* 로지스틱 회귀(logistic regression)\n* 서포트 벡터 머신(support vector machines, SVCs)\n* 결정 트리(decision trees)와 랜덤 포레스트(random forests)\n* 신경망(neural networks)", "_____no_output_____" ], [ "#### 주의사항", "_____no_output_____" ], [ "* 일부 회귀/분류 알고리즘을 분류/회귀에 사용 가능\n* 예제: 로지스틱 회귀, SVM 등등", "_____no_output_____" ], [ "### 비지도 학습", "_____no_output_____" ], [ "* __레이블 없는 훈련 데이터__를 이용하여 시스템 스스로 학습", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-07.png\" width=\"410\"/></div>", "_____no_output_____" ], [ "* 대표적 비지도 학습\n * 군집\n * 시각화\n * 차원 축소\n * 연관 규칙 학습", "_____no_output_____" ], [ "#### 군집", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-08.png\" width=\"400\"/></div>\n\n* 데이터를 비슷한 특징을 가진 몇 개의 그룹으로 나누는 것", 
"_____no_output_____" ], [ "* 예제: 블로그 방문자들을 그룹으로 묶기: 남성, 여성, 주말, 주중, 만화책, SF 등등", "_____no_output_____" ], [ "* 대표적 군집 알고리즘 \n * k-평균\n * DBSCAN\n * 계층 군집 분석", "_____no_output_____" ], [ "#### 시각화", "_____no_output_____" ], [ "* 다차원 특성을 가진 데이터셋을 2D 또는 3D로 표현하기\n\n* 시각화를 하기 위해서는 데이터의 특성을 2가지로 줄여야함\n\n* 데이터가 구성 패턴에 대한 정보 획득 가능", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-09.png\" width=\"650\"/></div>", "_____no_output_____" ], [ "#### 차원 축소", "_____no_output_____" ], [ "* 데이터의 특성 수 줄이기", "_____no_output_____" ], [ "* 예제\n * 특성 추출: 상관관계가 있는 여러 특성을 하나로 합치기\n * 자동차의 주행거리와 연식은 상관관계가 높음. 따라서 차의 '마모정도'라는 하나의 특성으로 합칠 수 있음.", "_____no_output_____" ], [ "* 차원 축소의 장점: 머신러닝 알고리즘의 성능 향상\n * 훈련 실행 속도 빨라짐\n * 메모리 사용 공간 줄어듬", "_____no_output_____" ], [ "#### 시각화와 차원축소 알고리즘 ", "_____no_output_____" ], [ "* 주성분 분석(PCA)\n* 커널 PCA\n* 지역적 선형 임베딩\n* t-SNE", "_____no_output_____" ], [ "#### 이상치 탐지(Outlier detection)", "_____no_output_____" ], [ "* 정상 샘플을 이용하여 훈련 후 입력 샘플의 정상여부 판단 후 이상치(outliers) 자동 제거", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-10.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 예제: 부정거래 사용 감지, 제조 결함 잡아내기 등등", "_____no_output_____" ], [ "#### 특이치 탐지(Novelty detection)", "_____no_output_____" ], [ "* 전혀 '오염되지 않은'(clean) 훈련 세트 활용 후 훈련 세트에 포함된 데이터와 달라 보이는 테이터 감지하기", "_____no_output_____" ], [ "#### 이상치 탐지 vs. 
특이치 탐지", "_____no_output_____" ], [ "* 예제: 수 천장의 강아지 사진에 치와와 사진이 1%정도 포함되어 있는 경우", "_____no_output_____" ], [ "* 특이치 탐지 알고리즘은 새로운 치와와 사진을 특이한 것으로 간주하지 않음.", "_____no_output_____" ], [ "* 반면에 비정상 탐지 알고리즘은 새로운 치와와 사진을 다른 강아지들과 다른 종으로 간주할 수 있음.", "_____no_output_____" ], [ "#### 연관 규칙 학습", "_____no_output_____" ], [ "* 데이터 특성 간의 흥미로운 관계 찾기", "_____no_output_____" ], [ "* 예제: 마트 판매 기록\n * 바비규 소스와 감자 구매와 스테이크 구매 사의 연관성이 밝혀지면 상품을 서로 가까이 진열해야 함.", "_____no_output_____" ], [ "### 준지도 학습", "_____no_output_____" ], [ "* 레이블이 적용된 적은 수의 샘플이 주어졌을 때 유횽함.", "_____no_output_____" ], [ "* 비지도 학습을 통해 군집을 분류한 후 샘플들을 활용해 지도 학습 실행", "_____no_output_____" ], [ "* 대부분 지도 학습과 비지도 학습 혼합 사용", "_____no_output_____" ], [ "#### 준지도 학습 예제", "_____no_output_____" ], [ "* 아래 그림 참조: 새로운 사례 `X`를 세모에 더 가깝다고 판단함.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-11.png\" width=\"400\"/></div>", "_____no_output_____" ], [ "* 구글 포토 호스팅: 가족 사진 몇 장에만 레이블 적용. 이후 모든 사진에서 가족사진 확인 가능.", "_____no_output_____" ], [ "### 강화 학습", "_____no_output_____" ], [ "* 에이전트(학습 시스템)가 취한 행동에 대해 보상 또는 벌점을 주어 가장 큰 보상을 받는 방향으로 유도하기", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-12.png\" width=\"400\"/></div>", "_____no_output_____" ], [ "* 예제: 딥마인드(DeepMind)의 알파고(AlphaGo)", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/alphago01.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "### 실시간 훈련 여부 구분", "_____no_output_____" ], [ "### 배치 학습(batch learning)", "_____no_output_____" ], [ "* 주어진 훈련 세트 전체를 사용해 오프라인에서 훈련", "_____no_output_____" ], [ "* 먼저 시스템을 훈련시킨 후 더 이상의 학습 없이 제품 시스템에 적용", "_____no_output_____" ], [ "* 단점\n * 컴퓨팅 자원(cpu, gpu, 메모리, 저장장치 등)이 충분한 경우에만 사용 가능\n * 새로운 데이터가 들어오면 처음부터 새롭게 학습해야 함. 
\n * 하지만 MLOps 등을 이용한 자동화 가능", "_____no_output_____" ], [ "### 온라인 학습(online learing)", "_____no_output_____" ], [ "* 하나씩 또는 적은 양의 데이터 묶음(미니배치, mini-batch)를 사용해 점진적으로 훈련", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-13.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 단점\n * 나쁜 데이터가 주입되는 경우 시스템 성능이 점진적으로 떨어질 수 있음.\n * 지속적인 시스템 모니터링 필요", "_____no_output_____" ], [ "* 예제\n * 주식가격 시스템 등 실시간 반영이 중요한 시스템\n * 스마트폰 등 제한된 자원의 시스템\n * 외부 메모리 학습: 매우 큰 데이터셋 활용하는 시스템", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-14.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "### 예측 모델 사용 여부 구분", "_____no_output_____" ], [ "* 훈련 모델의 __일반화(generalization)__ 방식에 따른 분류\n* 일반화 = '새로운 데이터에 대한 예측'", "_____no_output_____" ], [ "### 사례 기반 학습", "_____no_output_____" ], [ "* **샘플을 기억**하는 것이 훈련의 전부", "_____no_output_____" ], [ "* 예측을 위해 기존 샘플과의 **유사도** 측정", "_____no_output_____" ], [ "* 예제: k-최근접 이웃(k-NN, k-nearest neighbors) 알고리즘", "_____no_output_____" ], [ "* k-NN 활용 예제: 새로운 샘플 `X`가 기존에 세모인 샘플과의 유사도가 높기 때문에 세모로 분류.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-15.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "### 모델 기반 학습", "_____no_output_____" ], [ "* 모델을 미리 지정한 후 훈련 세트를 사용해서 모델을 훈련시킴", "_____no_output_____" ], [ "* 훈련된 모델을 사용해 새로운 데이터에 대한 예측 실행", "_____no_output_____" ], [ "* 예제: 이 책에서 다루는 대부분의 알고리즘", "_____no_output_____" ], [ "* 예제: 학습된 모델을 이용하여 새로운 데이터 `X`를 세모 클래스로 분류", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-16.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "#### 선형 모델 학습 예제", "_____no_output_____" ], [ "* 목표: OECD 국가의 1인당 GDP(1인당 국가총생산)와 삶의 만족도 사이의 관계 파악", "_____no_output_____" ], [ "* 1인당 GDP가 증가할 수록 삶의 만족도가 선형으로 증가하는 것처럼 보임.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-17.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 데이터를 대표하는 하나의 직선(선형 모델)을 찾기\n\n $$\n 
\\text{'삶의만족도'} = \\theta_0 + \\theta_1 \\times \\text{'1인당GDP'}\n $$", "_____no_output_____" ], [ "* 데이터를 대표할 수 있는 선형 방정식을 찾아야 함", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-18.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 학습되는 모델의 성능 평가 기준을 측정하여 가장 적합한 모델 학습\n * 효용 함수: 모델이 얼마나 좋은지 측정\n * 비용 함수: 모델이 얼마나 나쁜지 측정", "_____no_output_____" ], [ "* 아래 선형 모델이 최적!\n\n<div align=\"center\"><img src=\"images/ch01/homl01-19.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "## 1.5 머신러닝의 주요 도전 과제", "_____no_output_____" ], [ "### 충분하지 않은 양의 훈련 데이터", "_____no_output_____" ], [ "* 간단한 문제라도 수천 개 이상의 데이터가 필요", "_____no_output_____" ], [ "* 이미지나 음성 인식 같은 문제는 수백만 개가 필요할 수도 있음", "_____no_output_____" ], [ "* 데이터가 부족하면 알고리즘 성능 향성 어려움", "_____no_output_____" ], [ "* 일반적으로 데이터가 많을 수록 모델의 성능 높아짐.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-20.png\" width=\"400\"/></div>", "_____no_output_____" ], [ "### 대표성 없는 훈련 데이터", "_____no_output_____" ], [ "* 샘플링 잡음: 우연에 의해 추가된 대표성이 없는 데이터", "_____no_output_____" ], [ "* 샘플링 편향: 표본 추출 방법이 잘못되어 한 쪽으로 쏠린 대표성이 없는 데이터", "_____no_output_____" ], [ "* 예제: 1인당 GDP와 삶의 만족도 관계\n - 잡음: 빨강 네모 데이터가 추가 될 경우 선형 모델 달라짐.\n - 편향: OECD 국가중에서 이름에 영어 알파벳 W가 포함된 국가들은 삶의 만족도가 매우 높음. 
하지만 일반화는 불가능.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-21.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "### 낮은 품질의 데이터 처리", "_____no_output_____" ], [ "* 이상치 샘플이라면 고치거나 무시", "_____no_output_____" ], [ "* 특성이 누락되었다면\n * 해당 특성을 제외\n * 해당 샘플을 제외\n * 누락된 값을 채움\n * 해당 특성을 넣은 경우와 뺀 경우 각기 모델을 훈련", "_____no_output_____" ], [ "### 관련이 없는 특성", "_____no_output_____" ], [ "* 풀려는 문제에 관련이 높은 특성을 찾아야 함", "_____no_output_____" ], [ "* 특성 선택: 준비되어 있는 특성 중 가장 유용한 특성을 찾음", "_____no_output_____" ], [ "* 특성 추출: 특성을 조합하여 새로운 특성을 만듦", "_____no_output_____" ], [ "### 과대적합", "_____no_output_____" ], [ "* 훈련 세트에 특화되어 일반화 성능이 떨어지는 현상", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-22.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "* 규제를 적용해 과대적합을 감소시킬 수 있음 ", "_____no_output_____" ], [ "* 파라미터를 조정되는 과정에 규제 적용", "_____no_output_____" ], [ "* 파랑 점선이 규제를 적용해 훈련된 선형 모델임.", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch01/homl01-23.png\" width=\"500\"/></div>", "_____no_output_____" ], [ "### 과소적합", "_____no_output_____" ], [ "* 모델이 너무 단순해서 훈련 세트를 잘 학습하지 못함", "_____no_output_____" ], [ "<div align=\"center\"><img src=\"images/ch04/homl04-06.png\" width=\"400\"/></div>", "_____no_output_____" ], [ "* 해결 방법\n * 보다 많은 모델 파라미터를 사용하는 모델 적용\n * 보다 좋은 특성 활용\n * 보다 규제 강도 적용", "_____no_output_____" ], [ "## 1.6 테스트와 검증", "_____no_output_____" ], [ "### 검증", "_____no_output_____" ], [ "* 훈련된 모델의 성능 평가: 테스트 세트 활용", "_____no_output_____" ], [ "* 전체 데이터셋을 훈련 세트(80%)와 테스트 세트(20%)로 구분\n * 훈련 세트: 모델 훈련용.\n * 테스트 세트: 모델 테스트용\n * 데이터셋이 매우 크면 테스트 세트 비율을 낮출 수 있음.", "_____no_output_____" ], [ "* 검증 기준: __일반화 오차__\n * 새로운 샘플에 대한 오류 비율\n * 학습된 모델의 일반화 성능의 기준", "_____no_output_____" ], [ "* 과대 적합: 훈련 오차에 비해 일반화 오차가 높은 경우", "_____no_output_____" ], [ "### 하이퍼파라미터(hyper-parameter)", "_____no_output_____" ], [ "* 알고리즘 학습 모델을 지정에 사용되는 파라미터", "_____no_output_____" ], [ "* 훈련 과정에 변하는 파라미터가 아님", 
"_____no_output_____" ], [ "* 하이퍼파라미터를 조절하면서 가장 좋은 성능의 모델 선정", "_____no_output_____" ], [ "### 교차 검증", "_____no_output_____" ], [ "* 예비표본(홀드아웃, holdout) 검증\n * 예비표본(검증세트): 훈련 세트의 일부로 만들어진 데이터셋\n * 다양한 하이퍼파라미터 값을 사용하는 후보 모델을 평가하는 용도로 예비표본을 활용하는 기법", "_____no_output_____" ], [ "* 교차 검증\n * 여러 개의 검증세트를 사용한 반복적인 예비표본 검증 적용 기법\n * 장점: 교차 검증 후 모든 모델의 평가를 평균하면 훨씬 정확한 성능 측정 가능\n * 단점: 훈련 시간이 검증 세트의 개수에 비례해 늘어남", "_____no_output_____" ], [ "### 검증 예제: 데이터 불일치", "_____no_output_____" ], [ "* 모델 훈련에 사용된 데이터가 실전에 사용되는 데이터를 완벽하게 대변하지 못하는 경우", "_____no_output_____" ], [ "* 예제: 꽃이름 확인 알고리즘 \n * 인터넷으로 구한 꽃사진으로 모델 훈련\n * 이후 직접 촬영한 사진으로 진행한 성능측정이 낮게 나오면 __데이터 불일치__ 가능성 높음", "_____no_output_____" ], [ "* 데이터 불일치 여부 확인 방법\n * 훈련-개발 세트: 예를 들어, 인터넷에서 다운로드한 꽃사진의 일부로 이루어진 데이터셋\n * 훈련-개발 세트를 제외한 나머지 꽃사진으로 모델 훈련 후, 훈련-개발 세트를 이용한 성능 평가 진행", "_____no_output_____" ], [ "* 훈련-개발 세트에 대한 평가가 좋은 경우: 과대적합 아님\n * 훈련-개발 세티트에 평가는 좋지만 (실제 찍은 사진으로 이루어진) 검증 세트에 대한 평가 나쁜 경우: 데이터 불일치\n * 다운로드한 사진을 실제 찍은 사진처럼 보이도록 전처리 한 후에 다시 훈련시키면 성능 향상시킬 수 있음.", "_____no_output_____" ], [ "* 훈련-개발 세트에 대한 평가가 나쁜 경우: 과대적합\n * 모델에 규제를 적용하거나 더 많은 훈련 데이터 활용해야 함.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", 
"markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb236f1f3f56ca650d6238cb2a9b650058714d13
3,889
ipynb
Jupyter Notebook
nbs/datasets/datasets.ipynb
Jaume-JCI/block-types
868eb97afa41cd4fee1ec2e0a2a97489e21783e8
[ "MIT" ]
null
null
null
nbs/datasets/datasets.ipynb
Jaume-JCI/block-types
868eb97afa41cd4fee1ec2e0a2a97489e21783e8
[ "MIT" ]
37
2021-11-17T22:38:36.000Z
2022-03-05T15:21:54.000Z
nbs/datasets/datasets.ipynb
Jaume-JCI/block-types
868eb97afa41cd4fee1ec2e0a2a97489e21783e8
[ "MIT" ]
null
null
null
23.287425
83
0.509128
[ [ [ "# hide\n# default_exp datasets.datasets\nfrom nbdev.showdoc import *\nfrom dsblocks.utils.nbdev_utils import nbdev_setup, TestRunner\n\nnbdev_setup ()\ntst = TestRunner (targets=['dummy'])", "_____no_output_____" ] ], [ [ "# Datasets\n\n> Classes for loading datasets", "_____no_output_____" ] ], [ [ "#export\nfrom pathlib import Path\nimport abc\nimport pandas as pd\nimport numpy as np\nimport logging\n\n# dsblocks API\nfrom dsblocks.config import bt_defaults as dflt\nfrom dsblocks.utils.utils import set_logger", "_____no_output_____" ], [ "#for tests\nimport pytest \nimport logging", "_____no_output_____" ] ], [ [ "## DataSet", "_____no_output_____" ] ], [ [ "# export\nclass DataSet (metaclass=abc.ABCMeta):\n \"\"\"Abstract DataSet class.\"\"\"\n def __init__ (self, logger = None, verbose=dflt.verbose, **kwargs):\n \"\"\"\n Initialize common attributes and fields, in particular the logger.\n \n Parameters\n ----------\n logger : logging.Logger or None, optional\n Logger used to write messages\n verbose : int, optional\n Verbosity, 0: warning or critical, 1: info, 2: debug.\n \"\"\"\n if logger is None:\n self.logger = set_logger ('dsblocks', verbose=verbose)\n else:\n self.logger = logger\n @abc.abstractmethod\n def load (self):\n pass", "_____no_output_____" ] ], [ [ "### Usage example", "_____no_output_____" ] ], [ [ "# export tests.datasets.test_datasets\nclass MyDataSet (DataSet):\n def __init__ (self, **kwargs):\n super().__init__ (**kwargs)\n def load (self):\n return [2, 1, 3]\ndef test_dataset ():\n dataset = MyDataSet ()\n x = dataset.load ()\n assert x==[2, 1, 3]\n assert isinstance(dataset.logger, logging.Logger)", "_____no_output_____" ], [ "tst.run (test_dataset, tag='dummy')", "running test_dataset\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb238756f6ddbfa6780c8c96a14b6f60d22d20eb
57,600
ipynb
Jupyter Notebook
CORGISIntro/Combine.ipynb
copl68/367FirstAssignment
f7d79bed68a9c622a1d672dcf97c62c2f5c2cfd8
[ "MIT" ]
null
null
null
CORGISIntro/Combine.ipynb
copl68/367FirstAssignment
f7d79bed68a9c622a1d672dcf97c62c2f5c2cfd8
[ "MIT" ]
1
2021-02-25T01:01:04.000Z
2021-02-25T05:40:23.000Z
CORGISIntro/Combine.ipynb
copl68/367FirstAssignment
f7d79bed68a9c622a1d672dcf97c62c2f5c2cfd8
[ "MIT" ]
null
null
null
41.830065
170
0.3175
[ [ [ "#Setup and imports\nimport pandas as pd\nimport numpy as np\ndf = pd.read_csv('raw/jantojun2020.csv', dtype=object)\nexisting = pd.read_csv('raw/airlines-corgis.csv')", "_____no_output_____" ], [ "#For looking at our new dataset\ndf.head(5).iloc[:,0:]", "_____no_output_____" ], [ "#For looking at the old dataset\nexisting.head(5)\nexisting.iloc[0:10, 5:]", "_____no_output_____" ], [ "#Start similar dataframe from new data\nnew = pd.DataFrame()\nnew['Airport.Code'] = df[\"ORIGIN\"]\n\n#Create dictionary of airport codes : airport names\ncodes = pd.Series(existing['Airport.Name'].values, existing['Airport.Code'].values).to_dict()\n\n#Add airport names to the new dataframe\nL = list(codes.keys()) + list(codes.values())\nnew['Airport.Name'] = new['Airport.Code'].str.extract('(' + '|'.join(L) + ')', expand=False).replace(codes)", "_____no_output_____" ], [ "#Add date information\nnew['Time.Label'] = df['YEAR'] + '/0' + df['MONTH']\nnew['Time.Month'] = df['MONTH']\nmonths = {'1':'January', '2':'February', '3':'March', '4':'April', '5':'May', '6':'June'}\nnew['Time.Month Name'] = new['Time.Month'].replace(months)\nnew['Time.Year'] = df['YEAR']\n\n#Create dataframe that will hold info about each individual flight\nnew_indiv = new.copy(deep=True)\n\n#Ignore flights from airports not in original dataset\nnew = new.dropna(subset=['Airport.Name'])\n\n#Reduce rows to one for each Airport.Code/Time.Label combo (like exisiting dataframe)\nnew = new.drop_duplicates(subset=['Airport.Code', 'Time.Label'])", "_____no_output_____" ], [ "#Copy over data from jantojun2020... 
Currently in delay per minutes\nnew_indiv['Statistics.# of Delays.Carrier'] = df['CARRIER_DELAY']\nnew_indiv['Statistics.# of Delays.Late Aircraft'] = df['LATE_AIRCRAFT_DELAY']\nnew_indiv['Statistics.# of Delays.National Aviation System'] = df['NAS_DELAY']\nnew_indiv['Statistics.# of Delays.Security'] = df['SECURITY_DELAY']\nnew_indiv['Statistics.# of Delays.Weather'] = df['WEATHER_DELAY']\n\n#Replace all delays of 0.0 minutes (no delays) with 0, otherwise replace with 1 (meaning there was a delay)\nnew_indiv['Statistics.# of Delays.Carrier'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Carrier']]\nnew_indiv['Statistics.# of Delays.Late Aircraft'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Late Aircraft']]\nnew_indiv['Statistics.# of Delays.National Aviation System'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.National Aviation System']]\nnew_indiv['Statistics.# of Delays.Security'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Security']]\nnew_indiv['Statistics.# of Delays.Weather'] = [1 if float(x) > 0.0 else 0 for x in new_indiv['Statistics.# of Delays.Weather']]", "_____no_output_____" ], [ "#Create dict to map airline codes to names\nairlines = {'AA':'American Airlines Inc.', 'AS':'Alaska Airlines Inc.', 'B6':'JetBlue Airways', \n 'DL':'Delta Air Lines Inc.', 'F9':'Frontier Airlines Inc.', 'G4': np.nan, 'HA':'Hawaiian Airlines Inc.',\n 'NK':'Spirit Air Lines', 'UA':'United Air Lines Inc.', 'WN': 'Southwest Airlines Co.'}\n\n#Transfer over airline names\nnew_indiv['Statistics.Carriers.Names'] = df['MKT_UNIQUE_CARRIER']\n\n#Replace airline codes with names. 
Allegiant airlines not in original dataset, so its name is repalced with NaN\nnew_indiv['Statistics.Carriers.Names'] = new_indiv['Statistics.Carriers.Names'].replace(airlines)", "_____no_output_____" ], [ "#Transfer cancelled flights data\nnew_indiv['Statistics.Flights.Cancelled'] = df['CANCELLED'].astype('int64')\n\n#Add delayed flights data (1=Delayed, 0=Not Delayed)\ndf['DEP_DELAY_NEW'] = df['DEP_DELAY_NEW'].astype('float64')\nnew_indiv['Statistics.Flights.Delayed'] = [1 if x > 0 else 0 for x in df['DEP_DELAY_NEW']]\n#[x if y == 0 else 0 for x in new_indiv['Statistics.Flights.Delayed'] for y in new_indiv['Statistics.Flights.Cancelled']]\n\n#No data in the new dataset on diverted or on-time flights\n#We cannot account for diverted, but we will count on-time as being neither cancelled or delayed\n#(1 = On-Time, 0 = Not On-Time)\nnew_indiv['Statistics.Flights.On Time'] = new_indiv['Statistics.Flights.Cancelled'] + new_indiv['Statistics.Flights.Delayed']\nnew_indiv['Statistics.Flights.On Time'] = new_indiv['Statistics.Flights.On Time'].replace(2, 1)\nnew_indiv['Statistics.Flights.On Time'] = [1-x for x in new_indiv['Statistics.Flights.On Time']]\n\n#Every flight that is cancelled has its delay status set to 0 (for sake of not double-counting in totals)\nnew_indiv.loc[new_indiv['Statistics.Flights.Cancelled'] > 0, 'Statistics.Flights.Delayed'] = 0", "_____no_output_____" ], [ "#Transfer data of minutes of delays over\nnew_indiv['Statistics.Minutes Delayed.Carrier'] = df['CARRIER_DELAY']\nnew_indiv['Statistics.Minutes Delayed.Late Aircraft'] = df['LATE_AIRCRAFT_DELAY']\nnew_indiv['Statistics.Minutes Delayed.National Aviation System'] = df['NAS_DELAY']\nnew_indiv['Statistics.Minutes Delayed.Security'] = df['SECURITY_DELAY']\nnew_indiv['Statistics.Minutes Delayed .Weather'] = df['WEATHER_DELAY']", "_____no_output_____" ], [ "#Drop and ignore and flights that are at airports not in the original dataset\nnew_indiv = new_indiv.dropna(subset=['Airport.Name'])\n\n#Sort 
\"new\" df entries by data, then by airport\nnew = new.sort_values(['Time.Label', 'Airport.Code'])\n\n#Adding total carrier delay info for each airport/data combo entry\ndelays_car = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Carrier'].sum())\nnew['Statistics.# of Delays.Carrier'] = delays_car\n\n#Adding total late aircraft delay info for each airport/data combo entry\ndelays_late = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Late Aircraft'].sum())\nnew['Statistics.# of Delays.Late Aircraft'] = delays_late\n\n#Adding total national aviation system delay info for each airport/data combo entry\ndelays_nas = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.National Aviation System'].sum())\nnew['Statistics.# of Delays.National Aviation System'] = delays_nas\n\n#Adding total security delay info for each airport/data combo entry\ndelays_sec = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Security'].sum())\nnew['Statistics.# of Delays.Security'] = delays_sec\n\n#Adding total weather delay info for each airport/data combo entry\ndelays_wthr = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.# of Delays.Weather'].sum())\nnew['Statistics.# of Delays.Weather'] = delays_wthr", "_____no_output_____" ], [ "#Added as a placeholder for now\nnew['Statistics.Carriers.Names'] = ''\n\n#Add number of carriers for each airport/date combo \ncarriers = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.Carriers.Names'].nunique())\nnew['Statistics.Carriers.Total'] = carriers\n\n##Add flights delayed for each airport/date combo \ncancelled = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Statistics.Flights.Cancelled'].sum())\nnew['Statistics.Flights.Cancelled'] = cancelled\n\n#Add flights cancelled for each airport/date combo\ndelays = list(new_indiv.groupby(['Time.Label', 
'Airport.Code'])['Statistics.Flights.Delayed'].sum())\nnew['Statistics.Flights.Delayed'] = delays\n\n#Diverted flights arent in new dataset, so set to null\nnew['Statistics.Flights.Diverted'] = np.nan\n\n#Placeholder for now\nnew['Statistics.Flights.On Time'] = ''\n\n#Add total number of flights for each airport/date combo\ntotals = list(new_indiv.groupby(['Time.Label', 'Airport.Code'])['Airport.Name'].count())\nnew['Statistics.Flights.Total'] = totals", "_____no_output_____" ], [ "new.to_csv('airlines-corgis.csv', header=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb23968f95ac8a8321e94b4d2edafadef4e27ea0
460,510
ipynb
Jupyter Notebook
Lab 2/MB4_2.ipynb
klean2050/Neural_Networks_NTUA
f68cc88610d998cf6e1e822e8779aff816c85875
[ "MIT" ]
3
2020-03-25T18:11:30.000Z
2021-11-25T14:22:33.000Z
Lab 2/MB4_2.ipynb
klean2050/Neural_Networks_NTUA
f68cc88610d998cf6e1e822e8779aff816c85875
[ "MIT" ]
null
null
null
Lab 2/MB4_2.ipynb
klean2050/Neural_Networks_NTUA
f68cc88610d998cf6e1e822e8779aff816c85875
[ "MIT" ]
1
2020-12-02T15:44:41.000Z
2020-12-02T15:44:41.000Z
272.329982
381,508
0.902293
[ [ [ "#### ΠΡΟΣΟΧΗ:\nΤα joblib dumps των τελικών `corpus_tf_idf.pkl` και `som.pkl` δεν περιέχονται στο zip file καθώς είχαν απαγορευτικά μεγάλο μέγεθος. Αυτό ΔΕΝ οφείλεται σε δική μας ελλιπή υλοποίηση, αλλά σε μια ιδιομορφία του corpus που μας αντιστοιχεί και αναγκάζει ορισμένους πίνακες να αντιστοιχίζονται αχρείαστα σε float64. Το πρόβλημα το έχει δει ο κ. Σιόλας, ο οποίος μας έδωσε την άδεια να ανεβάσουμε τα pickles σε ένα drive ώστε να έχετε πρόσβαση σε αυτά. Μας διαβεβαίωσε πως δε θα υπάρξει βαθμολογική ποινή. Τα links των αρχείων:\n\n* `corpus_tf_idf.pkl` : https://drive.google.com/open?id=1q5G1fRPwNBhNUzkWNTAqvAZCzY1B0tJF\n* `som.pkl` : https://drive.google.com/open?id=1V5Je-RfpvQyCgm-F5UDGaPD88gXbdad8\n\nΓια οποιαδήποτε απορία ή πρόβλημα στα Links επικοινωνήστε μαζί μας. Για το θέμα που προέκυψε μπορείτε να απευθυνείτε στον κ. Σιόλα.", "_____no_output_____" ], [ "# Neural Networks ECE NTUA Course 2019-20 ~ Team M.B.4\n## Lab Assingment #2: Unsupervised Learning (Recommendation System & SOM)", "_____no_output_____" ], [ "### A. The Team\n* Αβραμίδης Κλεάνθης ~ 03115117\n* Κρατημένος Άγγελος ~ 03115025\n* Πανίδης Κωνσταντίνος ~ 03113602", "_____no_output_____" ], [ "### Requested Imports", "_____no_output_____" ] ], [ [ "import pandas as pd, numpy as np, scipy as sp\nimport nltk, string, collections\nimport time, joblib\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\nimport somoclu, matplotlib\n%matplotlib inline", "Warning: the binary library cannot be imported. You cannot train maps, but you can load and analyze ones that you have already saved.\nIf you installed Somoclu with pip on Windows, this typically means missing DLLs. 
Please refer to the documentation.\n" ] ], [ [ "## Εισαγωγή του Dataset", "_____no_output_____" ], [ "Το σύνολο δεδομένων με το οποίο θα δουλέψουμε είναι βασισμένο στο [Carnegie Mellon Movie Summary Corpus](http://www.cs.cmu.edu/~ark/personas/). Πρόκειται για ένα dataset με περίπου 40.000 περιγραφές ταινιών. Η περιγραφή κάθε ταινίας αποτελείται από τον τίτλο της, μια ή περισσότερες ετικέτες που χαρακτηρίζουν το είδος της ταινίας και τέλος τη σύνοψη της υπόθεσής της. Αρχικά εισάγουμε το dataset (χρησιμοποιήστε αυτούσιο τον κώδικα, δεν χρειάζεστε το αρχείο csv) στο dataframe `df_data_1`: ", "_____no_output_____" ] ], [ [ "dataset_url = \"https://drive.google.com/uc?export=download&id=1PdkVDENX12tQliCk_HtUnAUbfxXvnWuG\"\ndf_data_1 = pd.read_csv(dataset_url, sep='\\t', header=None, quoting=3, error_bad_lines=False)", "_____no_output_____" ] ], [ [ "Κάθε ομάδα θα δουλέψει σε ένα μοναδικό υποσύνολο 5.000 ταινιών (διαφορετικό dataset για κάθε ομάδα) ως εξής\n\n1. Κάθε ομάδα μπορεί να βρει [εδώ](https://docs.google.com/spreadsheets/d/1oEr3yuPg22lmMeqDjFtWjJRzmGQ8N57YIuV-ZOvy3dM/edit?usp=sharing) τον μοναδικό αριθμό της \"Seed\" από 1 έως 78. \n\n2. Το data frame `df_data_2` έχει 78 γραμμές (ομάδες) και 5.000 στήλες. Σε κάθε ομάδα αντιστοιχεί η γραμμή του πίνακα με το `team_seed_number` της. Η γραμμή αυτή θα περιλαμβάνει 5.000 διαφορετικούς αριθμούς που αντιστοιχούν σε ταινίες του αρχικού dataset. \n\n3. Στο επόμενο κελί αλλάξτε τη μεταβλητή `team_seed_number` με το Seed της ομάδας σας από το Google Sheet.\n\n4. Τρέξτε τον κώδικα. 
Θα προκύψουν τα μοναδικά για κάθε ομάδα titles, categories, catbins, summaries και corpus με τα οποία θα δουλέψετε.", "_____no_output_____" ] ], [ [ "# βάλτε το seed που αντιστοιχεί στην ομάδα σας\nteam_seed_number = 19\n\nmovie_seeds_url = \"https://drive.google.com/uc?export=download&id=1RRoiOjhD0JB3l4oHNFOmPUqZHDphIdwL\"\ndf_data_2 = pd.read_csv(movie_seeds_url, header=None, error_bad_lines=False)\n\n# επιλέγεται \nmy_index = df_data_2.iloc[team_seed_number,:].values\n\ntitles = df_data_1.iloc[:, [2]].values[my_index] # movie titles (string)\ncategories = df_data_1.iloc[:, [3]].values[my_index] # movie categories (string)\nbins = df_data_1.iloc[:, [4]]\ncatbins = bins[4].str.split(',', expand=True).values.astype(np.float)[my_index] # movie categories in binary form\nsummaries = df_data_1.iloc[:, [5]].values[my_index] # movie summaries (string)\ncorpus = summaries[:,0].tolist() # list form of summaries", "_____no_output_____" ] ], [ [ "- Ο πίνακας **titles** περιέχει τους τίτλους των ταινιών. Παράδειγμα: 'Sid and Nancy'.\n- O πίνακας **categories** περιέχει τις κατηγορίες (είδη) της ταινίας υπό τη μορφή string. Παράδειγμα: '\"Tragedy\", \"Indie\", \"Punk rock\", \"Addiction Drama\", \"Cult\", \"Musical\", \"Drama\", \"Biopic \\[feature\\]\", \"Romantic drama\", \"Romance Film\", \"Biographical film\"'. Παρατηρούμε ότι είναι μια comma separated λίστα strings, με κάθε string να είναι μια κατηγορία.\n- Ο πίνακας **catbins** περιλαμβάνει πάλι τις κατηγορίες των ταινιών αλλά σε δυαδική μορφή ([one hot encoding](https://hackernoon.com/what-is-one-hot-encoding-why-and-when-do-you-have-to-use-it-e3c6186d008f)). Έχει διαστάσεις 5.000 x 322 (όσες οι διαφορετικές κατηγορίες). Αν η ταινία ανήκει στο συγκεκριμένο είδος η αντίστοιχη στήλη παίρνει την τιμή 1, αλλιώς παίρνει την τιμή 0.\n- Ο πίνακας **summaries** και η λίστα **corpus** περιλαμβάνουν τις συνόψεις των ταινιών (η corpus είναι απλά ο summaries σε μορφή λίστας). Κάθε σύνοψη είναι ένα (συνήθως μεγάλο) string. 
Παράδειγμα: *'The film is based on the real story of a Soviet Internal Troops soldier who killed his entire unit as a result of Dedovschina. The plot unfolds mostly on board of the prisoner transport rail car guarded by a unit of paramilitary conscripts.'*\n- Θεωρούμε ως **ID** της κάθε ταινίας τον αριθμό γραμμής της ή το αντίστοιχο στοιχείο της λίστας. Παράδειγμα: για να τυπώσουμε τη σύνοψη της ταινίας με `ID=99` (την εκατοστή) θα γράψουμε `print(corpus[99])`.", "_____no_output_____" ] ], [ [ "ID = 99\nprint(titles[ID])\nprint(categories[ID])\nprint(catbins[ID])\nprint(corpus[ID])", "[\"Confessions d'un Barjo\"]\n['\"Drama\", \"Comedy of manners\", \"Comedy-drama\", \"Domestic Comedy\"']\n[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\nBarjo is eccentric, naive and obsessive. After he accidentally burns down his house during a \"scientific\" experiment, he moves in with his impulsive twin sister Fanfan , who is married to Charles \"the Aluminum King\" . 
In his new surroundings, Barjo continues his old habits: cataloging old science magazines, testing bizarre inventions and filling his notebooks with his observations about human behavior and his thoughts about the end of the world. Through Barjo's journals we see the development of conflict and sexual tension between Fanfan and Charles, and the descent of Charles into madness.\n" ] ], [ [ "# Υλοποίηση συστήματος συστάσεων ταινιών βασισμένο στο περιεχόμενο\n\n<img src=\"http://clture.org/wp-content/uploads/2015/12/Netflix-Streaming-End-of-Year-Posts.jpg\" width=\"70%\">\n\nΗ πρώτη εφαρμογή που θα αναπτύξετε θα είναι ένα [σύστημα συστάσεων](https://en.wikipedia.org/wiki/Recommender_system) ταινιών βασισμένο στο περιεχόμενο (content based recommender system). Τα συστήματα συστάσεων στοχεύουν στο να προτείνουν αυτόματα στο χρήστη αντικείμενα από μια συλλογή τα οποία ιδανικά θέλουμε να βρει ενδιαφέροντα ο χρήστης. Η κατηγοριοποίηση των συστημάτων συστάσεων βασίζεται στο πώς γίνεται η επιλογή (filtering) των συστηνόμενων αντικειμένων. Οι δύο κύριες κατηγορίες είναι η συνεργατική διήθηση (collaborative filtering) όπου το σύστημα προτείνει στο χρήστη αντικείμενα που έχουν αξιολογηθεί θετικά από χρήστες που έχουν παρόμοιο με αυτόν ιστορικό αξιολογήσεων και η διήθηση με βάση το περιεχόμενο (content based filtering), όπου προτείνονται στο χρήστη αντικείμενα με παρόμοιο περιεχόμενο (με βάση κάποια χαρακτηριστικά) με αυτά που έχει προηγουμένως αξιολογήσει θετικά.\n\nΤο σύστημα συστάσεων που θα αναπτύξετε θα βασίζεται στο **περιεχόμενο** και συγκεκριμένα στις συνόψεις των ταινιών (corpus). 
\n", "_____no_output_____" ], [ "## Προσθήκη stop words που βρέθηκαν εμπειρικά οτι βελτιώνουν τις συστάσεις\n\nΠροστέθηκαν ονόματα και συχνές λέξεις στην περιγραφή ταινιών (πχ plot,story,film) που δεν προσθέτουν αξία στο περιεχόμενο της περιγραφής:", "_____no_output_____" ] ], [ [ "nltk.download(\"stopwords\")\n\nname_file = open(\"stopwords.txt\",'r')\nnames = [line.split(',') for line in name_file.readlines()]\nname_stopwords = names[0]\n\nfor i in range(len(name_stopwords)): name_stopwords[i]=name_stopwords[i].strip()\n \nmovie_words=[\"story\",\"film\",\"plot\",\"about\",\"movie\",'000','mother','father','sister','brother','daughter','son','village'\n '10', '12', '15', '20','00', '01', '02', '04', '05', '06', '07', '08', '09', '100', '1000', '10th', '11', \n '120', '13', '13th', '14', '14th', '150', '15th', '16', '16th', '17', '18']\n\nmy_stopwords = stopwords.words('english') + movie_words + name_stopwords", "[nltk_data] Downloading package stopwords to\n[nltk_data] C:\\Users\\kavra\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ] ], [ [ "## Stemming & TF-IDF\n\nΠροχωρούμε σε περαιτέρω επεξεργασία του corpus μας εστιάζοντας στο stem των λέξεων και αγνοώντας σχετικά λήμματα, προς αύξηση της αποδοτικότητας. 
Στη συνέχεια κάνουμε τη ζητούμενη μετατροπή σε tf-idf:", "_____no_output_____" ] ], [ [ "def thorough_filter(words):\n filtered_words = []\n for word in words:\n pun = []\n for letter in word: pun.append(letter in string.punctuation)\n if not all(pun): filtered_words.append(word)\n return filtered_words\n\ndef preprocess_document(document):\n words = nltk.word_tokenize(document.lower())\n porter_stemmer = PorterStemmer()\n stemmed_words = [porter_stemmer.stem(word) for word in words] \n return (\" \".join(stemmed_words))", "_____no_output_____" ], [ "# απαραίτητα download για τους stemmer/lemmatizer/tokenizer\nnltk.download('wordnet')\nnltk.download('rslp')\nnltk.download('punkt')\n\nstemmed_corpus = [preprocess_document(corp) for corp in corpus]\n\nvectorizer = TfidfVectorizer(max_df=0.2, min_df=0.01, analyzer='word', stop_words = my_stopwords, ngram_range=(1,1))\ncorpus_tf_idf = vectorizer.fit_transform(stemmed_corpus).toarray()\nprint(\"corpus after tf-idf\",corpus_tf_idf.shape)\n\njoblib.dump(corpus_tf_idf, 'corpus_tf_idf.pkl') ", "[nltk_data] Downloading package wordnet to\n[nltk_data] C:\\Users\\kavra\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package wordnet is already up-to-date!\n[nltk_data] Downloading package rslp to\n[nltk_data] C:\\Users\\kavra\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package rslp is already up-to-date!\n[nltk_data] Downloading package punkt to\n[nltk_data] C:\\Users\\kavra\\AppData\\Roaming\\nltk_data...\n[nltk_data] Package punkt is already up-to-date!\n" ] ], [ [ "Η συνάρτηση [TfidfVectorizer](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) όπως καλείται εδώ **είναι βελτιστοποιημένη**. 
Οι επιλογές των μεθόδων και παραμέτρων που κάναμε έχουν **σημαντική επίδραση στην ποιότητα των συστάσεων, στη διαστατικότητα και τον όγκο των δεδομένων, κατά συνέπεια και στους χρόνους εκπαίδευσης**.", "_____no_output_____" ], [ "## Υλοποίηση του συστήματος συστάσεων\n\nΤο σύστημα συστάσεων που θα παραδώσετε θα είναι μια συνάρτηση `content_recommender` με δύο ορίσματα `target_movie` και `max_recommendations`. Στην `target_movie` περνάμε το ID μιας ταινίας-στόχου για την οποία μας ενδιαφέρει να βρούμε παρόμοιες ως προς το περιεχόμενο (τη σύνοψη) ταινίες, `max_recommendations` στο πλήθος.\nΥλοποιήστε τη συνάρτηση ως εξής: \n- για την ταινία-στόχο, από το `corpus_tf_idf` υπολογίστε την [ομοιότητα συνημιτόνου](https://en.wikipedia.org/wiki/Cosine_similarity) της με όλες τις ταινίες της συλλογής σας\n- με βάση την ομοιότητα συνημιτόνου που υπολογίσατε, δημιουργήστε ταξινομημένο πίνακα από το μεγαλύτερο στο μικρότερο, με τα indices (`ID`) των ταινιών. Παράδειγμα: αν η ταινία με index 1 έχει ομοιότητα συνημιτόνου με 3 ταινίες \\[0.2 1 0.6\\] (έχει ομοιότητα 1 με τον εαύτό της) ο ταξινομημένος αυτός πίνακας indices θα είναι \\[1 2 0\\].\n- Για την ταινία-στόχο εκτυπώστε: id, τίτλο, σύνοψη, κατηγορίες (categories)\n- Για τις `max_recommendations` ταινίες (πλην της ίδιας της ταινίας-στόχου που έχει cosine similarity 1 με τον εαυτό της) με τη μεγαλύτερη ομοιότητα συνημιτόνου (σε φθίνουσα σειρά), τυπώστε σειρά σύστασης (1 πιο κοντινή, 2 η δεύτερη πιο κοντινή κλπ), id, τίτλο, σύνοψη, κατηγορίες (categories)\n", "_____no_output_____" ] ], [ [ "def movie_info(movie_id):\n print(*titles[movie_id].flatten(),\"~ ID:\",movie_id)\n print(\"Category: \",*categories[movie_id].flatten())\n\ndef get_distances(target_movie_id,corpus):\n distances = np.zeros((corpus.shape[0]))\n for i in range(corpus.shape[0]): distances[i]=sp.spatial.distance.cosine(corpus[target_movie_id],corpus[i])\n return distances\n\ndef content_recommender(target_movie,max_recomendations):\n\n distances = 
get_distances(target_movie,corpus_tf_idf)\n similarity = np.argsort(distances) # similarity = 1-distance\n similarity = similarity[:max_recomendations+1]\n \n for i in similarity:\n if i==target_movie:\n print(\"Target Movie:\",end=\" \")\n movie_info(i)\n print(\"\\nRecommendations:\\n\")\n else: movie_info(i); print()", "_____no_output_____" ] ], [ [ "## Βελτιστοποίηση\n\nΑφού υλοποιήσετε τη συνάρτηση `content_recommender` χρησιμοποιήστε τη για να βελτιστοποιήσετε την `TfidfVectorizer`. Συγκεκριμένα, αρχικά μπορείτε να δείτε τι επιστρέφει το σύστημα για τυχαίες ταινίες-στόχους και για ένα μικρό `max_recommendations` (2 ή 3). Αν σε κάποιες ταινίες το σύστημα μοιάζει να επιστρέφει σημασιολογικά κοντινές ταινίες σημειώστε το `ID` τους. Δοκιμάστε στη συνέχεια να βελτιστοποιήσετε την `TfidfVectorizer` για τα συγκεκριμένα `ID` ώστε να επιστρέφονται σημασιολογικά κοντινές ταινίες για μεγαλύτερο αριθμό `max_recommendations`. Παράλληλα, όσο βελτιστοποιείτε την `TfidfVectorizer`, θα πρέπει να λαμβάνετε καλές συστάσεις για μεγαλύτερο αριθμό τυχαίων ταινιών. Μπορείτε επίσης να βελτιστοποιήσετε τη συνάρτηση παρατηρώντας πολλά φαινόμενα που το σύστημα εκλαμβάνει ως ομοιότητα περιεχομένου ενώ επί της ουσίας δεν είναι επιθυμητό να συνυπολογίζονται (δείτε σχετικά το [FAQ](https://docs.google.com/document/d/1-E4eQkVnTxa3Jb0HL9OAs11bugYRRZ7RNWpu7yh9G4s/edit?usp=sharing)). 
Ταυτόχρονα, μια άλλη κατεύθυνση της βελτιστοποίησης είναι να χρησιμοποιείτε τις παραμέτρους του `TfidfVectorizer` έτσι ώστε να μειώνονται οι διαστάσεις του Vector Space Model μέχρι το σημείο που θα αρχίσει να εμφανίζονται ποιοτικές επιπτώσεις.", "_____no_output_____" ] ], [ [ "corpus_tf_idf = joblib.load('corpus_tf_idf.pkl')\ncontent_recommender(120,5)", "Target Movie: Ride Beyond Vengeance ~ ID: 120\nCategory: \"Action/Adventure\", \"Western\", \"Action\"\n\nRecommendations:\n\nFull Clip ~ ID: 3045\nCategory: \"Thriller\", \"Action/Adventure\", \"Action Thrillers\", \"Action\"\n\nForty Guns ~ ID: 283\nCategory: \"Action/Adventure\", \"Western\"\n\nOne Girl's Confession ~ ID: 1232\nCategory: \"Crime Fiction\", \"Drama\", \"Black-and-white\", \"Film noir\"\n\nThe Thief Lord ~ ID: 4274\nCategory: \"Fantasy Adventure\", \"Adventure\", \"Children's/Family\", \"Fantasy\", \"Family Film\", \"Family-Oriented Adventure\"\n\nWarlock ~ ID: 2174\nCategory: \"Action/Adventure\", \"Western\", \"Drama\", \"Time travel\"\n\n" ] ], [ [ "## Επεξήγηση επιλογών και ποιοτική ερμηνεία\n\nΠεριγράψτε πώς προχωρήσατε στις επιλογές σας για τη βελτιστοποίηση της `TfidfVectorizer`. Δώστε 10 παραδείγματα (IDs) που επιστρέφουν καλά αποτελέσματα μέχρι `max_recommendations` (5 και παραπάνω) και σημειώστε ποια είναι η θεματική που ενώνει τις ταινίες.", "_____no_output_____" ], [ "### Διαδικασία βελτιστοποίησης\nΑκολουθήσαμε την πρόταση της εκφώνησης της εργασίας και ξεκινήσαμε την τροποποίηση των παραμέτρων του TfidfVectorizer αρχικά για 2 προτάσεις, και στην συνέχεια για 5 προτάσεις.\nΑρχικά παρατηρήσαμε οτι η παράμετρος max_df του TfidfVectorizer δεν αφαιρούσε περισσότερα features για τιμές άνω του 0.4, και για τιμές χαμηλότερες απο αυτό μείωνε την ποιότητα των προτάσεων. Επίσης η παράμετρος ngram_range δοκιμάστηκε με τιμές (1,1)-only unigrams , (1,2)-unigrams and bigrams , (1,3)-unigrams bigrams and trigrams αλλα δεν επηρέασε θετικά τα αποτελέσματα οπότε αφέθηκε στην τιμη (1,1). 
Οπότε, η βελτιστοποίηση του TfidfVectorizer έγινε με την τροποποίηση της παραμέτρου min_df και την εισαγωγή stop words με ταυτόχρονη παρατήρηση των προτάσεων που προκύπτουν. Τα 10 ζητούμενα παραδείγματα:", "_____no_output_____" ] ], [ [ "movie_list = [10,20,21,100,120,222,540,2020,2859,3130]\ncats = [\"Crime Fiction\",\"Action/Adventure\",\"Drama\",\"Crime Fiction\",\"Adventure\",\n \"Thriller\",\"Horror\",\"Science Fiction\",\"Drama\",\"Science Fiction\"]\nfor i in range(10):\n content_recommender(movie_list[i],5)\n print(\"Common category:\",cats[i],\" -------------------------------------------------------------\\n\")", "Target Movie: Shield for Murder ~ ID: 10\nCategory: \"Crime Fiction\", \"Detective fiction\", \"Detective\", \"Mystery\", \"Drama\", \"Film noir\"\n\nRecommendations:\n\nIt's a Mad, Mad, Mad, Mad World ~ ID: 1753\nCategory: \"Crime Fiction\", \"Ensemble Film\", \"Adventure\", \"Chase Movie\", \"Comedy\", \"Family Film\", \"Action\"\n\nNerrukku Ner ~ ID: 3065\nCategory: \"Musical\", \"Drama\", \"Bollywood\", \"World cinema\"\n\nThe Enforcer ~ ID: 4285\nCategory: \"Crime Fiction\", \"Drama\", \"Film noir\"\n\nRunning Out Of Time ~ ID: 1112\nCategory: \"Thriller\", \"Action/Adventure\", \"Action\", \"Chinese Movies\", \"World cinema\"\n\nFilantropica ~ ID: 3216\nCategory: \"Comedy\"\n\nCommon category: Crime Fiction -------------------------------------------------------------\n\nTarget Movie: Eagle's Wing ~ ID: 20\nCategory: \"Action/Adventure\", \"Western\"\n\nRecommendations:\n\nThe Only Good Indian ~ ID: 3749\nCategory: \"Indie\", \"Period piece\", \"Action/Adventure\", \"Revisionist Western\", \"Western\", \"Drama\"\n\nSouth Sea Woman ~ ID: 123\nCategory: \"Adventure\", \"Comedy-drama\", \"Black-and-white\", \"Action/Adventure\", \"Adventure Comedy\", \"Comedy\", \"War film\", \"Action\"\n\nTarzan, the Ape Man ~ ID: 728\nCategory: \"Action/Adventure\", \"Action\", \"Adventure\", \"Comedy\"\n\nThe White Buffalo ~ ID: 4468\nCategory: \"Natural 
horror films\", \"Western\", \"Drama\", \"Indian Western\"\n\nEpic Movie ~ ID: 4659\nCategory: \"Parody\", \"Adventure\", \"Action/Adventure\", \"Comedy\", \"Action\", \"Slapstick\"\n\nCommon category: Action/Adventure -------------------------------------------------------------\n\nTarget Movie: Annie's Coming Out ~ ID: 21\nCategory: \"Drama\"\n\nRecommendations:\n\nThe Beautiful City ~ ID: 3711\nCategory: \"Drama\"\n\nYou, Me & Marley ~ ID: 4425\nCategory: \"Drama\", \"Indie\"\n\nSalim Langde Pe Mat Ro ~ ID: 1851\nCategory: \"Musical\", \"Drama\", \"Bollywood\", \"World cinema\"\n\nSaddle the Wind ~ ID: 3817\nCategory: \"Action/Adventure\", \"Western\", \"Action\"\n\nNight and Day ~ ID: 3109\nCategory: \"Drama\", \"World cinema\"\n\nCommon category: Drama -------------------------------------------------------------\n\nTarget Movie: Mad Money ~ ID: 100\nCategory: \"Crime Fiction\", \"Heist\", \"Crime Comedy\", \"Caper story\", \"Action/Adventure\", \"Comedy\", \"Action\"\n\nRecommendations:\n\nPenelope ~ ID: 142\nCategory: \"Crime Fiction\", \"Comedy\"\n\nThe Perfect Clown ~ ID: 1764\nCategory: \"Silent film\", \"Indie\", \"Black-and-white\", \"Comedy\"\n\nLeikkikalugangsteri ~ ID: 4995\nCategory: \"Crime Fiction\", \"Comedy film\"\n\nSilver Bears ~ ID: 2497\nCategory: \"Ensemble Film\", \"Crime Comedy\", \"Comedy\"\n\nOcean's Thirteen ~ ID: 3397\nCategory: \"Thriller\", \"Crime Fiction\", \"Ensemble Film\", \"Caper story\", \"Master Criminal Films\", \"Comedy\"\n\nCommon category: Crime Fiction -------------------------------------------------------------\n\nTarget Movie: Ride Beyond Vengeance ~ ID: 120\nCategory: \"Action/Adventure\", \"Western\", \"Action\"\n\nRecommendations:\n\nFull Clip ~ ID: 3045\nCategory: \"Thriller\", \"Action/Adventure\", \"Action Thrillers\", \"Action\"\n\nForty Guns ~ ID: 283\nCategory: \"Action/Adventure\", \"Western\"\n\nOne Girl's Confession ~ ID: 1232\nCategory: \"Crime Fiction\", \"Drama\", \"Black-and-white\", \"Film 
noir\"\n\nThe Thief Lord ~ ID: 4274\nCategory: \"Fantasy Adventure\", \"Adventure\", \"Children's/Family\", \"Fantasy\", \"Family Film\", \"Family-Oriented Adventure\"\n\nWarlock ~ ID: 2174\nCategory: \"Action/Adventure\", \"Western\", \"Drama\", \"Time travel\"\n\nCommon category: Adventure -------------------------------------------------------------\n\nTarget Movie: Milano calibro 9 ~ ID: 222\nCategory: \"Crime Fiction\", \"Thriller\", \"World cinema\", \"Action/Adventure\", \"Drama\", \"Crime Thriller\", \"Action Thrillers\", \"Action\"\n\nRecommendations:\n\nGangster No. 1 ~ ID: 1755\nCategory: \"Thriller\", \"Crime Fiction\", \"Gangster Film\", \"Action/Adventure\", \"Period piece\", \"Drama\", \"Crime Thriller\", \"Action\"\n\nSisters ~ ID: 3228\nCategory: \"Crime Fiction\", \"Thriller\", \"Action\", \"Gangster Film\", \"Drama\"\n\nThe Twin Diamonds ~ ID: 2164\nCategory: \"Thriller\"\n\nÀ nous la liberté ~ ID: 4687\nCategory: \"World cinema\", \"Workplace Comedy\", \"Black-and-white\", \"Musical\", \"Satire\", \"Fantasy\", \"Comedy\"\n\nYou Can't See 'round Corners ~ ID: 4925\nCategory: \"Drama\"\n\nCommon category: Thriller -------------------------------------------------------------\n\nTarget Movie: Andre the Butcher ~ ID: 540\nCategory: \"Comedy film\", \"Horror\"\n\nRecommendations:\n\nJack Frost ~ ID: 2883\nCategory: \"Monster movie\", \"Science Fiction\", \"B-movie\", \"Comedy\", \"Horror\", \"Slasher\"\n\nMoon of the Wolf ~ ID: 944\nCategory: \"Thriller\", \"Horror\", \"Indie\", \"Supernatural\", \"Creature Film\", \"Mystery\", \"Suspense\", \"Crime Thriller\"\n\nWrong Turn 5 ~ ID: 54\nCategory: \"Thriller\", \"Horror\"\n\nThe California Kid ~ ID: 610\nCategory: \"Thriller\", \"Drama\", \"Chase Movie\"\n\nA Man Alone ~ ID: 1010\nCategory: \"Western\"\n\nCommon category: Horror -------------------------------------------------------------\n\nTarget Movie: Conquest of the Planet of the Apes ~ ID: 2020\nCategory: \"Cult\", \"Science Fiction\", 
\"Action\", \"Adventure\"\n\nRecommendations:\n\nYour Friend the Rat ~ ID: 3722\nCategory: \"Computer Animation\", \"Short Film\", \"Comedy\"\n\nThe Creation of the Humanoids ~ ID: 3130\nCategory: \"Science Fiction\", \"Action\", \"Indie\"\n\nBattlefield Earth ~ ID: 211\nCategory: \"Space opera\", \"Alien Film\", \"Science Fiction\", \"Action\", \"Film adaptation\"\n\nUnderworld: Awakening ~ ID: 4568\nCategory: \"Action/Adventure\", \"Fantasy\", \"Action\", \"Horror\", \"Thriller\"\n\nGalerians:Rion ~ ID: 1224\nCategory: \"Computer Animation\", \"Japanese Movies\", \"Science Fiction\", \"Action\", \"Horror\"\n\nCommon category: Science Fiction -------------------------------------------------------------\n\nTarget Movie: Nam Naadu ~ ID: 2859\nCategory: \"Drama\"\n\nRecommendations:\n\nOonche Log ~ ID: 4950\nCategory: \"Drama\", \"World cinema\"\n\nOnce Upon A Time In Mumbai ~ ID: 2742\nCategory: \"Crime Fiction\", \"Romance Film\", \"Drama\", \"World cinema\"\n\nSallapam ~ ID: 3221\nCategory: \"Romantic drama\", \"Musical\", \"World cinema\"\n\nLanka ~ ID: 429\nCategory: \"Drama\"\n\nThe Business Man ~ ID: 437\nCategory: \"Crime Fiction\", \"Action\", \"World cinema\"\n\nCommon category: Drama -------------------------------------------------------------\n\nTarget Movie: The Creation of the Humanoids ~ ID: 3130\nCategory: \"Science Fiction\", \"Action\", \"Indie\"\n\nRecommendations:\n\nTest pilota Pirxa ~ ID: 2136\nCategory: \"Science Fiction\", \"Drama\"\n\nGalerians:Rion ~ ID: 1224\nCategory: \"Computer Animation\", \"Japanese Movies\", \"Science Fiction\", \"Action\", \"Horror\"\n\nRobot Stories ~ ID: 1788\nCategory: \"Science Fiction\", \"Indie\", \"Comedy-drama\", \"Drama\", \"Comedy\", \"Romantic drama\", \"Romance Film\"\n\nUnderworld: Awakening ~ ID: 4568\nCategory: \"Action/Adventure\", \"Fantasy\", \"Action\", \"Horror\", \"Thriller\"\n\nHollywood ~ ID: 775\nCategory: \"Science Fiction\"\n\nCommon category: Science Fiction 
-------------------------------------------------------------\n\n" ] ], [ [ "# Τοπολογική και σημασιολογική απεικόνιση της ταινιών με χρήση SOM\n\n<img src=\"https://drive.google.com/uc?export=download&id=1R1R7Ds9UEfhjOY_fk_3wcTjsM0rI4WLl\" width=\"60%\">", "_____no_output_____" ], [ "## Δημιουργία dataset\nΣτη δεύτερη εφαρμογή θα βασιστούμε στις τοπολογικές ιδιότητες των Self Organizing Maps (SOM) για να φτιάξουμε ενά χάρτη (grid) δύο διαστάσεων όπου θα απεικονίζονται όλες οι ταινίες της συλλογής της ομάδας με τρόπο χωρικά συνεκτικό ως προς το περιεχόμενο και κυρίως το είδος τους. \n\nΗ `build_final_set` αρχικά μετατρέπει την αραιή αναπαράσταση tf-idf της εξόδου της `TfidfVectorizer()` σε πυκνή (η [αραιή αναπαράσταση](https://en.wikipedia.org/wiki/Sparse_matrix) έχει τιμές μόνο για τα μη μηδενικά στοιχεία). Στη συνέχεια ενώνει την πυκνή `dense_tf_idf` αναπαράσταση και τις binarized κατηγορίες `catbins` των ταινιών ως επιπλέον στήλες (χαρακτηριστικά). Συνεπώς, κάθε ταινία αναπαρίσταται στο Vector Space Model από τα χαρακτηριστικά του TFIDF και τις κατηγορίες της. Τέλος, δέχεται ένα ορισμα για το πόσες ταινίες να επιστρέψει, με default τιμή όλες τις ταινίες (5000). Αυτό είναι χρήσιμο για να μπορείτε αν θέλετε να φτιάχνετε μικρότερα σύνολα δεδομένων ώστε να εκπαιδεύεται ταχύτερα το SOM.", "_____no_output_____" ] ], [ [ "def build_final_set(doc_limit=5000, tf_idf_only=False):\n # convert sparse tf_idf to dense tf_idf representation\n dense_tf_idf = corpus_tf_idf[0:doc_limit,:]\n if tf_idf_only:\n # use only tf_idf\n final_set = dense_tf_idf\n else:\n # append the binary categories features horizontaly to the (dense) tf_idf features\n final_set = np.hstack((dense_tf_idf, catbins[0:doc_limit,:]))\n # η somoclu θέλει δεδομένα σε float32\n return np.array(final_set, dtype=np.float32)", "_____no_output_____" ], [ "final_set = build_final_set()", "_____no_output_____" ] ], [ [ "Τυπώνουμε τις διαστάσεις του τελικού dataset μας. 
Χωρίς βελτιστοποίηση του TFIDF θα έχουμε περίπου 50.000 χαρακτηριστικά.", "_____no_output_____" ] ], [ [ "final_set.shape", "_____no_output_____" ] ], [ [ "Με βάση την εμπειρία σας στην προετοιμασία των δεδομένων στην επιβλεπόμενη μάθηση, υπάρχει κάποιο βήμα προεπεξεργασίας που θα μπορούσε να εφαρμοστεί σε αυτό το dataset; \n\n\n\n\n>Θα μπορούσαμε με PCA να μειώσουμε τις διαστάσεις:\n\n", "_____no_output_____" ] ], [ [ "pca = PCA(n_components=0.97)\npca_final_set = pca.fit_transform(final_set)\nprint(pca_final_set.shape)\nprint(\"Decrease of components:\", round ((1-pca_final_set.shape[1]/final_set.shape[1])*100, 2 ),\"%\")", "(5000, 1233)\nDecrease of components: 42.17 %\n" ] ], [ [ "Παρατηρούμε οτι με διατήρηση 97% της διασποράς των χαρακτηριστικών μπορούμε να μειώσουμε τις διαστάσεις πάνω από 40%.", "_____no_output_____" ], [ "## Εκπαίδευση χάρτη SOM\n\nΘα δουλέψουμε με τη βιβλιοθήκη SOM [\"Somoclu\"](http://somoclu.readthedocs.io/en/stable/index.html). Καταρχάς διαβάστε το [function reference](http://somoclu.readthedocs.io/en/stable/reference.html) του somoclu. Θα δoυλέψουμε με χάρτη τύπου planar, παραλληλόγραμμου σχήματος νευρώνων με τυχαία αρχικοποίηση (όλα αυτά είναι default). Μπορείτε να δοκιμάσετε διάφορα μεγέθη χάρτη ωστόσο όσο ο αριθμός των νευρώνων μεγαλώνει, μεγαλώνει και ο χρόνος εκπαίδευσης. Για το training δεν χρειάζεται να ξεπεράσετε τα 100 epochs. Σε γενικές γραμμές μπορούμε να βασιστούμε στις default παραμέτρους μέχρι να έχουμε τη δυνατότητα να οπτικοποιήσουμε και να αναλύσουμε ποιοτικά τα αποτελέσματα. Ξεκινήστε με ένα χάρτη 10 x 10, 100 epochs training και ένα υποσύνολο των ταινιών (π.χ. 2000). Χρησιμοποιήστε την `time` για να έχετε μια εικόνα των χρόνων εκπαίδευσης. 
Ενδεικτικά, με σωστή κωδικοποίηση tf-idf, μικροί χάρτες για λίγα δεδομένα (1000-2000) παίρνουν γύρω στο ένα λεπτό ενώ μεγαλύτεροι χάρτες με όλα τα δεδομένα μπορούν να πάρουν 10-15 λεπτά ή και περισσότερο.", "_____no_output_____" ] ], [ [ "n_rows, n_columns = 30, 30\nsom = somoclu.Somoclu(n_columns, n_rows, compactsupport=False)\n%time som.train(final_set,epochs=100)", "_____no_output_____" ] ], [ [ "Λόγω του προβλήματος στο training των μοντέλων Somoclu που αναφέρεται στο import του μοντέλου, η εκπαίδευση έγινε στο Colab. Αποθηκεύουμε το προκύπτον μοντέλο και το λαμβάνουμε εδώ:", "_____no_output_____" ] ], [ [ "som = joblib.load(\"som.pkl\")", "_____no_output_____" ] ], [ [ "\n## Best matching units\n\nΜετά από κάθε εκπαίδευση αποθηκεύστε σε μια μεταβλητή τα best matching units (bmus) για κάθε ταινία. Τα bmus μας δείχνουν σε ποιο νευρώνα ανήκει η κάθε ταινία. Προσοχή: η σύμβαση των συντεταγμένων των νευρώνων είναι (στήλη, γραμμή) δηλαδή το ανάποδο από την Python. Με χρήση της [np.unique](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.unique.html) (μια πολύ χρήσιμη συνάρτηση στην άσκηση) αποθηκεύστε τα μοναδικά best matching units και τους δείκτες τους (indices) προς τις ταινίες. Σημειώστε ότι μπορεί να έχετε λιγότερα μοναδικά bmus από αριθμό νευρώνων γιατί μπορεί σε κάποιους νευρώνες να μην έχουν ανατεθεί ταινίες. Ως αριθμό νευρώνα θα θεωρήσουμε τον αριθμό γραμμής στον πίνακα μοναδικών bmus.\n", "_____no_output_____" ] ], [ [ "bmus, indices = np.unique(som.bmus,axis=0,return_index=True)", "_____no_output_____" ] ], [ [ "\n## Ομαδοποίηση (clustering)\n\nΤυπικά, η ομαδοποίηση σε ένα χάρτη SOM προκύπτει από το unified distance matrix (U-matrix): για κάθε κόμβο υπολογίζεται η μέση απόστασή του από τους γειτονικούς κόμβους. 
Εάν χρησιμοποιηθεί μπλε χρώμα στις περιοχές του χάρτη όπου η τιμή αυτή είναι χαμηλή (μικρή απόσταση) και κόκκινο εκεί που η τιμή είναι υψηλή (μεγάλη απόσταση), τότε μπορούμε να πούμε ότι οι μπλε περιοχές αποτελούν clusters και οι κόκκινες αποτελούν σύνορα μεταξύ clusters.\n\nTo somoclu δίνει την επιπρόσθετη δυνατότητα να κάνουμε ομαδοποίηση των νευρώνων χρησιμοποιώντας οποιονδήποτε αλγόριθμο ομαδοποίησης του scikit-learn. Στην άσκηση θα χρησιμοποιήσουμε τον k-Means. Για τον αρχικό σας χάρτη δοκιμάστε ένα k=20 ή 25. Οι δύο προσεγγίσεις ομαδοποίησης είναι διαφορετικές, οπότε περιμένουμε τα αποτελέσματα να είναι κοντά αλλά όχι τα ίδια.\n", "_____no_output_____" ] ], [ [ "som.cluster(KMeans(n_clusters=30))", "_____no_output_____" ] ], [ [ "\n## Αποθήκευση του SOM\n\nΕπειδή η αρχικοποίηση του SOM γίνεται τυχαία και το clustering είναι και αυτό στοχαστική διαδικασία, οι θέσεις και οι ετικέτες των νευρώνων και των clusters θα είναι διαφορετικές κάθε φορά που τρέχετε τον χάρτη, ακόμα και με τις ίδιες παραμέτρους. Για να αποθηκεύσετε ένα συγκεκριμένο som και clustering χρησιμοποιήστε και πάλι την `joblib`. Μετά την ανάκληση ενός SOM θυμηθείτε να ακολουθήσετε τη διαδικασία για τα bmus.\n", "_____no_output_____" ] ], [ [ "joblib.dump(som,'som.pkl')", "_____no_output_____" ] ], [ [ "## Οπτικοποίηση U-matrix, clustering και μέγεθος clusters\n\nΓια την εκτύπωση του U-matrix χρησιμοποιήστε τη `view_umatrix` με ορίσματα `bestmatches=True` και `figsize=(15, 15)` ή `figsize=(20, 20)`. Τα διαφορετικά χρώματα που εμφανίζονται στους κόμβους αντιπροσωπεύουν τα διαφορετικά clusters που προκύπτουν από τον k-Means. Μπορείτε να εμφανίσετε τη λεζάντα του U-matrix με το όρισμα `colorbar`. 
Μην τυπώνετε τις ετικέτες (labels) των δειγμάτων, είναι πολύ μεγάλος ο αριθμός τους.", "_____no_output_____" ] ], [ [ "som.view_umatrix(bestmatches=True, colorbar=True,figsize=(15, 15)); matplotlib.pyplot.show()", "_____no_output_____" ] ], [ [ "Για μια δεύτερη πιο ξεκάθαρη οπτικοποίηση του clustering τυπώστε απευθείας τη μεταβλητή `clusters`.", "_____no_output_____" ] ], [ [ "print(som.clusters)", "[[16 16 4 20 20 20 8 8 14 14 9 9 9 9 19 19 19 19 19 23 23 23 23 23\n 15 15 15 15 15 15]\n [16 16 4 4 20 20 20 14 14 9 9 9 9 9 19 19 19 19 19 23 23 23 23 23\n 15 15 15 15 15 15]\n [16 16 4 11 11 11 20 14 14 9 9 9 9 9 9 19 19 19 19 19 23 23 23 23\n 15 15 15 15 15 15]\n [16 16 11 11 11 11 11 11 21 9 9 9 9 9 9 19 19 19 19 25 25 25 23 23\n 15 15 15 15 15 15]\n [ 6 6 11 11 11 11 11 21 21 10 10 10 9 9 9 19 19 25 25 25 25 25 25 23\n 15 15 15 15 15 15]\n [ 6 6 11 11 11 11 11 21 21 10 10 10 10 9 3 3 3 25 25 25 25 25 25 25\n 23 14 20 15 15 15]\n [ 6 6 6 11 11 11 21 21 21 21 10 10 3 3 3 3 3 26 25 25 25 25 25 23\n 23 14 20 20 20 11]\n [ 6 6 6 6 6 6 21 21 21 21 21 3 3 3 3 3 3 26 26 25 25 25 23 23\n 23 23 20 20 20 11]\n [ 6 6 6 6 6 6 21 21 21 21 3 3 3 3 3 3 26 26 26 26 26 23 23 23\n 23 14 20 20 20 20]\n [ 6 6 6 6 6 6 6 21 5 5 5 3 3 3 3 3 26 26 26 26 26 26 23 23\n 14 14 20 20 20 20]\n [ 6 6 6 6 6 2 2 5 5 5 5 5 3 3 3 13 26 26 26 26 26 26 23 23\n 14 14 20 20 20 20]\n [ 6 6 6 6 6 16 16 5 5 5 5 5 13 3 13 13 13 26 26 26 26 26 23 11\n 11 11 20 20 20 20]\n [ 6 6 6 6 16 16 16 5 5 5 5 13 13 13 13 13 13 26 26 27 27 27 27 11\n 11 11 14 4 20 20]\n [29 29 29 17 17 16 16 16 5 5 5 13 13 13 13 13 13 13 27 27 27 27 27 14\n 14 14 14 4 4 4]\n [29 29 29 17 17 17 16 24 5 24 24 13 13 13 13 13 13 13 27 27 27 27 8 14\n 14 14 14 4 4 4]\n [29 29 29 17 17 17 24 24 24 24 24 13 13 13 13 13 13 13 27 27 27 8 8 14\n 14 14 14 14 4 4]\n [29 29 29 29 17 17 24 24 24 24 24 24 13 13 13 13 13 13 27 27 8 8 8 14\n 14 14 14 14 4 4]\n [29 29 29 29 29 18 24 24 24 24 24 12 13 13 13 13 13 7 27 8 8 8 8 14\n 14 14 14 14 14 14]\n [29 
29 18 18 18 18 24 24 24 24 24 12 28 28 13 13 7 9 10 8 8 8 8 14\n 14 14 14 14 14 14]\n [29 18 18 18 18 18 12 24 12 12 12 28 28 28 28 28 28 10 10 10 8 8 8 14\n 14 14 14 14 14 14]\n [18 18 18 18 18 18 12 12 12 12 12 28 28 28 28 28 28 10 10 10 10 8 14 14\n 14 14 14 14 14 14]\n [18 18 18 18 18 18 12 12 12 12 12 28 28 28 28 28 28 7 10 10 14 14 14 14\n 14 14 14 14 14 14]\n [ 0 0 0 18 18 18 18 12 12 12 22 28 28 28 28 28 28 7 7 7 14 14 14 17\n 14 14 14 14 14 14]\n [ 0 0 0 18 18 18 18 22 22 22 22 22 28 28 28 28 28 7 7 7 7 7 7 14\n 14 14 14 1 14 14]\n [ 0 0 0 0 18 18 22 22 22 22 22 22 22 22 28 28 7 7 7 7 7 7 7 7\n 14 14 1 1 1 1]\n [ 0 0 0 0 0 0 22 22 22 22 22 22 22 22 28 28 7 7 7 7 7 7 7 7\n 7 7 1 1 1 1]\n [ 0 0 0 0 0 0 0 22 22 22 22 22 22 22 7 7 7 7 7 7 7 7 7 7\n 7 7 1 1 1 1]\n [ 0 0 0 0 0 0 2 2 2 2 22 22 22 22 7 7 7 7 7 7 7 7 7 7\n 7 7 7 1 1 1]\n [ 0 0 0 0 0 0 2 2 2 2 2 22 22 7 7 7 7 7 7 7 7 7 7 7\n 7 7 11 7 1 1]\n [ 0 0 0 0 0 0 2 2 2 2 2 22 22 7 7 7 7 7 7 7 7 7 7 7\n 7 7 7 7 1 1]]\n" ] ], [ [ "Τέλος, χρησιμοποιώντας πάλι την `np.unique` (με διαφορετικό όρισμα) και την `np.argsort` (υπάρχουν και άλλοι τρόποι υλοποίησης) εκτυπώστε τις ετικέτες των clusters (αριθμοί από 0 έως k-1) και τον αριθμό των νευρώνων σε κάθε cluster, με φθίνουσα ή αύξουσα σειρά ως προς τον αριθμό των νευρώνων. Ουσιαστικά είναι ένα εργαλείο για να βρίσκετε εύκολα τα μεγάλα και μικρά clusters. 
", "_____no_output_____" ] ], [ [ "print(\"\\nClusters sorted by increasing number of neurons: (cluster_index,num_neurons)\")\nvalues,counts = np.unique(som.clusters,return_counts=True)\nsorted_counts = np.argsort(counts)\nprint(np.array([list(values[sorted_counts]),list(counts[sorted_counts])]))", "\nClusters sorted by increasing number of neurons: (cluster_index,num_neurons)\n[[17 4 2 16 12 21 27 10 1 8 19 29 5 25 24 9 26 3 11 18 20 15 23 28\n 22 6 0 13 7 14]\n [11 15 16 17 19 19 19 19 20 20 21 21 21 24 25 26 27 29 31 32 32 33 33 35\n 37 40 41 45 84 88]]\n" ] ], [ [ "## Σημασιολογική ερμηνεία των clusters\n\nΠροκειμένου να μελετήσουμε τις τοπολογικές ιδιότητες του SOM και το αν έχουν ενσωματώσει σημασιολογική πληροφορία για τις ταινίες διαμέσου της διανυσματικής αναπαράστασης με το tf-idf και των κατηγοριών, χρειαζόμαστε ένα κριτήριο ποιοτικής επισκόπησης των clusters. Θα υλοποιήσουμε το εξής κριτήριο: Λαμβάνουμε όρισμα έναν αριθμό (ετικέτα) cluster. Για το cluster αυτό βρίσκουμε όλους τους νευρώνες που του έχουν ανατεθεί από τον k-Means. Για όλους τους νευρώνες αυτούς βρίσκουμε όλες τις ταινίες που τους έχουν ανατεθεί (για τις οποίες αποτελούν bmus). Για όλες αυτές τις ταινίες τυπώνουμε ταξινομημένη τη συνολική στατιστική όλων των ειδών (κατηγοριών) και τις συχνότητές τους. Αν το cluster διαθέτει καλή συνοχή και εξειδίκευση, θα πρέπει κάποιες κατηγορίες να έχουν σαφώς μεγαλύτερη συχνότητα από τις υπόλοιπες. Θα μπορούμε τότε να αναθέσουμε αυτήν/ές την/τις κατηγορία/ες ως ετικέτες κινηματογραφικού είδους στο cluster.\n\nΜπορείτε να υλοποιήσετε τη συνάρτηση αυτή όπως θέλετε. Μια πιθανή διαδικασία θα μπορούσε να είναι η ακόλουθη:\n\n1. Ορίζουμε συνάρτηση `print_categories_stats` που δέχεται ως είσοδο λίστα με ids ταινιών. Δημιουργούμε μια κενή λίστα συνολικών κατηγοριών. 
Στη συνέχεια, για κάθε ταινία επεξεργαζόμαστε το string `categories` ως εξής: δημιουργούμε μια λίστα διαχωρίζοντας το string κατάλληλα με την `split` και αφαιρούμε τα whitespaces μεταξύ ετικετών με την `strip`. Προσθέτουμε τη λίστα αυτή στη συνολική λίστα κατηγοριών με την `extend`. Τέλος χρησιμοποιούμε πάλι την `np.unique` για να μετρήσουμε συχνότητα μοναδικών ετικετών κατηγοριών και ταξινομούμε με την `np.argsort`. Τυπώνουμε τις κατηγορίες και τις συχνότητες εμφάνισης ταξινομημένα. Χρήσιμες μπορεί να σας φανούν και οι `np.ravel`, `np.nditer`, `np.array2string` και `zip`.", "_____no_output_____" ] ], [ [ "def print_categories_stats(ID_list):\n total_categories = []\n for i in ID_list:\n cat = [category.strip(\" \").strip('\"') for category in categories[i][0].split(\",\")]\n total_categories.extend(cat)\n result,counts = np.unique(total_categories,return_counts=True)\n sorted_counts = np.argsort(-counts)\n final = [(result[n],counts[n]) for n in sorted_counts]\n print(\"Overall Cluster Genres stats:\")\n print(final); return ", "_____no_output_____" ] ], [ [ "2. Ορίζουμε τη βασική μας συνάρτηση `print_cluster_neurons_movies_report` που δέχεται ως όρισμα τον αριθμό ενός cluster. Με τη χρήση της `np.where` μπορούμε να βρούμε τις συντεταγμένες των bmus που αντιστοιχούν στο cluster και με την `column_stack` να φτιάξουμε έναν πίνακα bmus για το cluster. Προσοχή στη σειρά (στήλη - σειρά) στον πίνακα bmus. Για κάθε bmu αυτού του πίνακα ελέγχουμε αν υπάρχει στον πίνακα μοναδικών bmus που έχουμε υπολογίσει στην αρχή συνολικά και αν ναι προσθέτουμε το αντίστοιχο index του νευρώνα σε μια λίστα. Χρήσιμες μπορεί να είναι και οι `np.rollaxis`, `np.append`, `np.asscalar`. 
Επίσης πιθανώς να πρέπει να υλοποιήσετε ένα κριτήριο ομοιότητας μεταξύ ενός bmu και ενός μοναδικού bmu από τον αρχικό πίνακα bmus.", "_____no_output_____" ] ], [ [ "def where_is_same(a,b):\n return np.where(np.all(a==b,axis=1))[0]\n\ndef print_cluster_neurons_movies_report(cluster):\n cluster_bmus = np.column_stack(np.where(som.clusters==cluster)[::-1])\n li = [indices[where_is_same(bmu,bmus)] for bmu in cluster_bmus if bmu in bmus]\n li = [ind[0] for ind in li if len(ind)]\n return cluster_bmus", "_____no_output_____" ] ], [ [ "3. Υλοποιούμε μια βοηθητική συνάρτηση `neuron_movies_report`. Λαμβάνει ένα σύνολο νευρώνων από την `print_cluster_neurons_movies_report` και μέσω της `indices` φτιάχνει μια λίστα με το σύνολο ταινιών που ανήκουν σε αυτούς τους νευρώνες. Στο τέλος καλεί με αυτή τη λίστα την `print_categories_stats` που τυπώνει τις στατιστικές των κατηγοριών.", "_____no_output_____" ] ], [ [ "def neuron_movies_report(neurons):\n id_list = []\n index = [where_is_same(som.bmus,neuron) for neuron in neurons]\n index = [list(i) for i in index if len(i)]\n for i in index: id_list += i\n return id_list", "_____no_output_____" ] ], [ [ "Μπορείτε βέβαια να προσθέσετε οποιαδήποτε επιπλέον έξοδο σας βοηθάει. Μια χρήσιμη έξοδος είναι πόσοι νευρώνες ανήκουν στο cluster και σε πόσους και ποιους από αυτούς έχουν ανατεθεί ταινίες. Θα επιτελούμε τη σημασιολογική ερμηνεία του χάρτη καλώντας την `print_cluster_neurons_movies_report` με τον αριθμό ενός cluster που μας ενδιαφέρει. 
Παράδειγμα εξόδου για ένα cluster (μη βελτιστοποιημένος χάρτης):\n\n```\nOverall Cluster Genres stats: \n[('\"Horror\"', 86), ('\"Science Fiction\"', 24), ('\"B-movie\"', 16), ('\"Monster movie\"', 10), ('\"Creature Film\"', 10), ('\"Indie\"', 9), ('\"Zombie Film\"', 9), ('\"Slasher\"', 8), ('\"World cinema\"', 8), ('\"Sci-Fi Horror\"', 7), ('\"Natural horror films\"', 6), ('\"Supernatural\"', 6), ('\"Thriller\"', 6), ('\"Cult\"', 5), ('\"Black-and-white\"', 5), ('\"Japanese Movies\"', 4), ('\"Short Film\"', 3), ('\"Drama\"', 3), ('\"Psychological thriller\"', 3), ('\"Crime Fiction\"', 3), ('\"Monster\"', 3), ('\"Comedy\"', 2), ('\"Western\"', 2), ('\"Horror Comedy\"', 2), ('\"Archaeology\"', 2), ('\"Alien Film\"', 2), ('\"Teen\"', 2), ('\"Mystery\"', 2), ('\"Adventure\"', 2), ('\"Comedy film\"', 2), ('\"Combat Films\"', 1), ('\"Chinese Movies\"', 1), ('\"Action/Adventure\"', 1), ('\"Gothic Film\"', 1), ('\"Costume drama\"', 1), ('\"Disaster\"', 1), ('\"Docudrama\"', 1), ('\"Film adaptation\"', 1), ('\"Film noir\"', 1), ('\"Parody\"', 1), ('\"Period piece\"', 1), ('\"Action\"', 1)]```\n ", "_____no_output_____" ] ], [ [ "cluster = 7\ncluster_neurons = print_cluster_neurons_movies_report(cluster)\nid_list = neuron_movies_report(cluster_neurons)\nprint_categories_stats(id_list)", "Overall Cluster Genres stats:\n[('Drama', 728), ('War film', 54), ('Family Film', 43), ('Action', 42), ('Comedy film', 35), ('Adventure', 30), ('Fantasy', 24), ('Family Drama', 22), ('Film adaptation', 20), ('Historical fiction', 19), ('Melodrama', 19), ('Musical', 19), ('Short Film', 18), ('Sports', 16), ('Docudrama', 13), ('Chinese Movies', 13), ('Romance Film', 12), ('Science Fiction', 11), ('Western', 10), ('Political drama', 10), ('Coming of age', 10), ('Comedy-drama', 10), ('Biography', 10), ('Japanese Movies', 9), ('Romantic drama', 9), ('Biographical film', 9), ('History', 8), ('Teen', 8), ('Mystery', 8), ('Horror', 8), ('Silent film', 7), ('Documentary', 7), ('Music', 7), ('Black 
comedy', 6), ('Satire', 6), ('Art film', 6), ('Combat Films', 5), ('Historical drama', 5), ('Christian film', 5), (\"Children's/Family\", 5), ('Marriage Drama', 5), ('Costume drama', 4), ('Spy', 4), ('Prison', 4), ('Black-and-white', 4), (\"Children's\", 4), ('Courtroom Drama', 4), ('Ensemble Film', 3), ('Historical Epic', 3), ('Action/Adventure', 3), ('Slice of life story', 3), ('Filipino Movies', 3), ('Disaster', 3), ('Period piece', 3), ('Crime Drama', 3), ('Film noir', 3), ('Social problem film', 3), ('Martial Arts Film', 3), ('Medical fiction', 2), ('Surrealism', 2), ('Television movie', 2), ('Romantic comedy', 2), ('Musical comedy', 2), ('Anime', 2), ('Doomsday film', 2), ('Anti-war', 2), ('Bollywood', 2), ('Addiction Drama', 2), ('Domestic Comedy', 2), ('Anti-war film', 2), ('Christmas movie', 2), ('Erotic Drama', 2), ('Americana', 2), ('Blaxploitation', 2), ('Epic', 2), ('Biopic [feature]', 2), ('Chase Movie', 1), ('Romantic fantasy', 1), ('Sex comedy', 1), ('Comedy', 1), ('Animal Picture', 1), ('Roadshow theatrical release', 1), ('Childhood Drama', 1), ('Buddy film', 1), ('Feminist Film', 1), ('Boxing', 1), ('Road movie', 1), ('Biker Film', 1), ('Tamil cinema', 1), ('Avant-garde', 1), ('Thriller', 1), ('Tragedy', 1), ('Tragicomedy', 1), ('British New Wave', 1), ('Religious Film', 1), ('Propaganda film', 1), ('Psychological horror', 1), ('Film u00e0 clef', 1), ('Hip hop movies', 1), ('Experimental film', 1), ('Existentialism', 1), ('Escape Film', 1), ('Holiday Film', 1), ('Hybrid Western', 1), ('Indian Western', 1), ('Education', 1), ('Dystopia', 1), ('Psychological thriller', 1), ('Dogme 95', 1), ('Detective fiction', 1), ('Detective', 1), ('Natural horror films', 1), ('Nuclear warfare', 1), ('Crime Fiction', 1), ('Crime', 1), ('Political satire', 1), ('Political thriller', 1), ('Comedy of manners', 1), ('Animation', 1), ('Musical Drama', 1), ('World cinema', 1)]\n" ] ], [ [ "Βλέπουμε πως το 7ο cluster συγκεντρώνει κυρίως ταινίες δράματος, με τις υπόλοιπες 
κατηγορίες να είναι αρκετά σπανιότερες.", "_____no_output_____" ], [ "\n## Tips για το SOM και το clustering\n\n- Για την ομαδοποίηση ένα U-matrix καλό είναι να εμφανίζει και μπλε-πράσινες περιοχές (clusters) και κόκκινες περιοχές (ορίων). Παρατηρήστε ποια σχέση υπάρχει μεταξύ αριθμού ταινιών στο final set, μεγέθους grid και ποιότητας U-matrix.\n- Για το k του k-Means προσπαθήστε να προσεγγίζει σχετικά τα clusters του U-matrix (όπως είπαμε είναι διαφορετικοί μέθοδοι clustering). Μικρός αριθμός k δεν θα σέβεται τα όρια. Μεγάλος αριθμός θα δημιουργεί υπο-clusters εντός των clusters που φαίνονται στο U-matrix. Το τελευταίο δεν είναι απαραίτητα κακό, αλλά μεγαλώνει τον αριθμό clusters που πρέπει να αναλυθούν σημασιολογικά.\n- Σε μικρούς χάρτες και με μικρά final sets δοκιμάστε διαφορετικές παραμέτρους για την εκπαίδευση του SOM. Σημειώστε τυχόν παραμέτρους που επηρεάζουν την ποιότητα του clustering για το dataset σας ώστε να τις εφαρμόσετε στους μεγάλους χάρτες.\n- Κάποια τοπολογικά χαρακτηριστικά εμφανίζονται ήδη σε μικρούς χάρτες. Κάποια άλλα χρειάζονται μεγαλύτερους χάρτες. Δοκιμάστε μεγέθη 20x20, 25x25 ή και 30x30 και αντίστοιχη προσαρμογή των k. Όσο μεγαλώνουν οι χάρτες, μεγαλώνει η ανάλυση του χάρτη αλλά μεγαλώνει και ο αριθμός clusters που πρέπει να αναλυθούν.\n", "_____no_output_____" ], [ "## Ανάλυση τοπολογικών ιδιοτήτων χάρτη SOM\n\nΜετά το πέρας της εκπαίδευσης και του clustering θα έχετε ένα χάρτη με τοπολογικές ιδιότητες ως προς τα είδη των ταίνιών της συλλογής σας, κάτι αντίστοιχο με την εικόνα στην αρχή της Εφαρμογής 2 αυτού του notebook (η συγκεκριμένη εικόνα είναι μόνο για εικονογράφιση, δεν έχει καμία σχέση με τη συλλογή δεδομένων και τις κατηγορίες μας).\n\nΓια τον τελικό χάρτη SOM που θα παράξετε για τη συλλογή σας, αναλύστε σε markdown με συγκεκριμένη αναφορά σε αριθμούς clusters και τη σημασιολογική ερμηνεία τους τις εξής τρεις τοπολογικές ιδιότητες του SOM: \n\n1. 
*Δεδομένα που έχουν μεγαλύτερη πυκνότητα πιθανότητας στο χώρο εισόδου τείνουν να απεικονίζονται με περισσότερους νευρώνες στο χώρο μειωμένης διαστατικότητας. Δώστε παραδείγματα από συχνές και λιγότερο συχνές κατηγορίες ταινιών. Χρησιμοποιήστε τις στατιστικές των κατηγοριών στη συλλογή σας και τον αριθμό κόμβων που χαρακτηρίζουν.*", "_____no_output_____" ] ], [ [ "def categories_in_neuron(neuron):\n ID_list = neuron_movies_report([neuron])\n total_categories = []\n for i in ID_list:\n cat = [category.strip(\" \").strip('\"') for category in categories[i][0].split(\",\")]\n total_categories.extend(cat)\n result = np.unique(total_categories)\n sorted_counts = np.argsort(counts)\n return result\n\ncategories_in_neurons = np.asarray([categories_in_neuron(i) for i in range(20)])\n\ndef number_of_neurons_in_category(category):\n i=0\n for c in categories_in_neurons:\n if category in c: i+=1\n return i", "_____no_output_____" ], [ "# list of all categories \ndistinct_categories=[]\nfor i in range(5000):\n cat = [category.strip(\" \").strip('\"') for category in categories[i][0].split(\",\")]\n distinct_categories.extend(cat)\ndistinct_categories=np.asarray(distinct_categories)\ndistinct_categories=np.unique(distinct_categories)\n\n\n# all categories and their number of neurons \nncategories=[]\nfor c in distinct_categories:\n k=number_of_neurons_in_category(c)\n ncategories.append([c,k])\n\nncategories=np.asarray(ncategories)\nsorted_indices = np.argsort(ncategories[:,1])\nncategories = ncategories[sorted_indices]\nprint(\"Most popular categories and their number of neurons:\\n\", ncategories[::-1][:5])\nprint(\"\\nLeast popular categories and their number of neurons:\\n\", ncategories[::-1][-5:])", "Most popular categories and their number of neurons:\n [['Crime Fiction' '9']\n ['Action' '7']\n ['Drama' '7']\n ['World cinema' '5']\n ['Family Film' '5']]\n\nLeast popular categories and their number of neurons:\n [['Law & Crime' '0']\n ['Language & Literature' '0']\n 
['Kitchen sink realism' '0']\n ['Kafkaesque' '0']\n ['Hagiography' '0']]\n" ] ], [ [ "Είναι εμφανές πως οι περισσότερο συχνές και οικείες κατηγορίες έχουν αντιστοιχηθεί σε μεγαλύτερο αριθμό νευρώνων.\n\n2. *Μακρινά πρότυπα εισόδου τείνουν να απεικονίζονται απομακρυσμένα στο χάρτη. Υπάρχουν χαρακτηριστικές κατηγορίες ταινιών που ήδη από μικρούς χάρτες τείνουν να τοποθετούνται σε διαφορετικά ή απομονωμένα σημεία του χάρτη.*", "_____no_output_____" ] ], [ [ "cluster = 16\ncluster_neurons = print_cluster_neurons_movies_report(cluster)\nid_list = neuron_movies_report(cluster_neurons)\nprint_categories_stats(id_list)", "Overall Cluster Genres stats:\n[('Comedy', 124), ('Black-and-white', 106), ('Short Film', 73), ('Musical', 32), ('Slapstick', 21), ('Silent film', 21), ('Indie', 19), ('Family Film', 10), ('Satire', 7), ('Screwball comedy', 5), ('Romance Film', 3), ('Comedy of Errors', 3), ('Parody', 3), ('Science Fiction', 3), ('Teen', 2), ('Road movie', 2), ('Musical comedy', 2), ('Western', 2), ('Workplace Comedy', 2), ('Fantasy', 2), ('World cinema', 2), (\"Children's/Family\", 2), ('Adventure', 2), ('Family-Oriented Adventure', 2), ('Action', 1), ('War film', 1), ('Thriller', 1), ('Spy', 1), ('Slice of life story', 1), ('Adventure Comedy', 1), ('Animal Picture', 1), ('Animation', 1), ('Black comedy', 1), ('Buddy film', 1), ('Fantasy Comedy', 1), ('Comedy of manners', 1), ('Remake', 1), ('Comedy-drama', 1), ('Operetta', 1), ('Cult', 1), ('Documentary', 1), ('Mondo film', 1), ('Melodrama', 1), ('Drama', 1), ('Horror', 1), ('Comedy Western', 1), ('Absurdism', 1)]\n" ] ], [ [ "Βλέπουμε πως για τα δύο μακρινά clusters 7, 16 όπως φαίνεται στον παραπάνω πίνακα των clusters, το είδος των ταινιών που αντιπροσωπεύεται διαφέρει σημαντικά (δράμα και κωμωδία).", "_____no_output_____" ], [ "3. *Κοντινά πρότυπα εισόδου τείνουν να απεικονίζονται κοντά στο χάρτη. 
Σε μεγάλους χάρτες εντοπίστε είδη ταινιών και κοντινά τους υποείδη.*", "_____no_output_____" ] ], [ [ "cluster = 11\ncluster_neurons = print_cluster_neurons_movies_report(cluster)\nid_list = neuron_movies_report(cluster_neurons)\nprint_categories_stats(id_list)", "Overall Cluster Genres stats:\n[('Family Film', 137), ('Comedy', 88), ('Adventure', 76), (\"Children's/Family\", 60), ('Fantasy', 52), ('Animation', 40), ('Family-Oriented Adventure', 24), (\"Children's\", 21), ('Drama', 20), (\"Children's Fantasy\", 19), ('Science Fiction', 19), ('Musical', 11), ('Romance Film', 10), ('Computer Animation', 9), ('Fantasy Adventure', 9), ('Family Drama', 7), ('Coming of age', 7), ('Action', 7), ('Japanese Movies', 7), ('Mystery', 6), ('Animal Picture', 6), ('Slapstick', 6), ('Television movie', 6), ('Christmas movie', 6), ('Teen', 5), ('Fantasy Comedy', 5), ('Fairy tale', 5), ('Animated Musical', 5), ('Costume drama', 4), ('Holiday Film', 4), ('Domestic Comedy', 4), ('Sports', 4), ('Music', 3), ('Buddy film', 3), ('Sci-Fi Adventure', 3), ('Short Film', 3), ('Christian film', 3), ('Adventure Comedy', 3), ('Action/Adventure', 3), ('Thriller', 3), ('Creature Film', 3), ('Childhood Drama', 3), ('Romantic comedy', 2), ('Animated cartoon', 2), ('Road movie', 2), ('Period piece', 2), ('Parody', 2), ('Mythological Fantasy', 2), ('Anime', 2), ('Time travel', 2), ('Indie', 2), ('World cinema', 2), ('Crime Fiction', 2), ('Documentary', 2), ('Film adaptation', 2), ('Biography', 2), ('Black comedy', 2), ('Comedy-drama', 1), (\"Children's Entertainment\", 1), ('Doomsday film', 1), ('Sword and sorcery', 1), ('Supernatural', 1), ('Stop motion', 1), ('Educational', 1), ('Alien Film', 1), ('Ensemble Film', 1), ('War film', 1), ('Humour', 1), ('Satire', 1), ('Chase Movie', 1), ('Biopic [feature]', 1), ('Prison', 1), ('Biographical film', 1), ('Film noir', 1), ('Baseball', 1), ('Haunted House Film', 1), ('Jungle Film', 1), ('Epic', 1), ('Bengali Cinema', 1)]\n" ] ], [ [ "Παρατηρούμε όντως πως 
το cluster 11 που είναι γειτονικό του 16 έχει επίσης κλίση προς τηνκατηγορία \"Κωμωδία\" ενώ διευρύνει το είδος και προς τη συγγενή κατηγορία των οικογνειακών ταινιών. Προφανώς τοποθέτηση σε 2 διαστάσεις που να σέβεται μια απόλυτη τοπολογία δεν είναι εφικτή, αφενός γιατί δεν υπάρχει κάποια απόλυτη εξ ορισμού για τα κινηματογραφικά είδη ακόμα και σε πολλές διαστάσεις, αφετέρου γιατί πραγματοποιούμε μείωση διαστατικότητας. Εντοπίστε μεγάλα clusters και μικρά clusters που δεν έχουν σαφή χαρακτηριστικά.\n\n*Εντοπίστε clusters συγκεκριμένων ειδών που μοιάζουν να μην έχουν τοπολογική συνάφεια με γύρω περιοχές. Προτείνετε πιθανές ερμηνείες. Τέλος, εντοπίστε clusters που έχουν κατά την άποψή σας ιδιαίτερο ενδιαφέρον στη συλλογή της ομάδας σας και σχολιάστε:*", "_____no_output_____" ] ], [ [ "cluster = 14\ncluster_neurons = print_cluster_neurons_movies_report(cluster)\nid_list = neuron_movies_report(cluster_neurons)\nprint_categories_stats(id_list)", "Overall Cluster Genres stats:\n[('Documentary', 104), ('Comedy film', 86), ('Action', 68), ('Romance Film', 61), ('Silent film', 41), ('Musical', 32), ('Adventure', 26), ('Family Film', 26), ('Fantasy', 25), ('Chinese Movies', 21), ('Culture & Society', 20), ('War film', 19), ('Western', 17), ('World cinema', 16), ('Biography', 15), ('Indie', 14), ('Music', 14), ('Romantic comedy', 14), (\"Children's/Family\", 14), ('Bollywood', 13), ('Historical fiction', 10), ('History', 9), ('Rockumentary', 9), ('Martial Arts Film', 9), ('Political cinema', 8), ('Animation', 7), ('Sports', 7), ('Comedy-drama', 7), ('Social issues', 7), ('Science Fiction', 6), ('Romantic drama', 6), ('Historical drama', 6), ('Filipino Movies', 5), ('Experimental film', 5), ('Avant-garde', 5), ('Television movie', 4), ('Black-and-white', 4), ('Art film', 4), (\"Children's Fantasy\", 4), ('Wuxia', 3), ('Epic', 3), ('Spaghetti Western', 3), ('Period piece', 3), ('Action/Adventure', 3), ('Tragedy', 3), ('Crime Fiction', 3), ('Costume drama', 3), ('Law & Crime', 
3), (\"Children's\", 3), ('Black comedy', 3), ('Dance', 3), ('Fantasy Adventure', 3), ('Historical Epic', 2), ('Interpersonal Relationships', 2), ('Satire', 2), ('Psychological thriller', 2), ('Short Film', 2), ('Mystery', 2), ('Drama', 2), ('Fairy tale', 2), ('Animals', 2), ('Auto racing', 2), ('Biographical film', 2), ('Biopic [feature]', 2), (\"Children's Entertainment\", 2), ('Computers', 2), ('Costume Adventure', 2), ('Propaganda film', 2), ('Disaster', 2), ('Political drama', 2), ('Spy', 2), ('Environmental Science', 2), ('Stop motion', 2), ('Surrealism', 1), ('World History', 1), ('Social problem film', 1), ('Travel', 1), ('Religious Film', 1), ('Road-Horror', 1), ('Thriller', 1), ('Screwball comedy', 1), ('Slasher', 1), ('Tamil cinema', 1), ('Sword and Sandal', 1), ('Tragicomedy', 1), ('Teen', 1), ('Gender Issues', 1), ('Outlaw biker film', 1), ('Action Comedy', 1), ('Alien Film', 1), ('Animal Picture', 1), ('Animated cartoon', 1), ('Anime', 1), ('Anthropology', 1), ('B-Western', 1), ('Biker Film', 1), ('Blaxploitation', 1), ('Christmas movie', 1), ('Comedy', 1), ('Computer Animation', 1), ('Concert film', 1), ('Crime Drama', 1), ('Doomsday film', 1), ('Education', 1), ('Ensemble Film', 1), ('News', 1), ('Nature', 1), ('Mythological Fantasy', 1), ('Musical comedy', 1), ('Mockumentary', 1), ('Media Satire', 1), ('Political thriller', 1), ('Language & Literature', 1), ('Health & Fitness', 1), ('Film noir', 1), ('Family-Oriented Adventure', 1), ('Family & Personal Relationships', 1), ('Extreme Sports', 1), ('Essay Film', 1), ('Holiday Film', 1), ('Zombie Film', 1)]\n" ] ], [ [ "Παραπάνω έχουμε ένα παράδειγμα cluster (14) κατηγοριών κυρίως Ντοκιμαντέρ και κωμωδίες που δεν έχουν τόση σχέση με \"Δράμα\" ενώ βρίσκονται γειτονικά στον αντίστοιχο χάρτη (7). Η ερμηνεία που δίνουμε για την τοπολογική τους συνάφεια είναι η δευτερεύουσα παρουσία ταινιών \"Action\", \"Family\" και στα 2 cluster που γεφυρώνει το χάσμα τους. 
Σύμφωνα με την ερώτηση 1, ένα ενδιαφέρον cluster για εμάς θα ήταν αυτό που αντιπροσωπεύει το \"Crime fiction\". Με inspection επιβεβαιώσαμε πως πρόκειται για τo cluster 27 που ωστόσο δεν έχει απόλυτη τοπολογική συνάφεια με τη γειτονιά του, ούτε μεγάλη έκταση. Εκτιμούμε πως αυτό οφείλεται στην ιδιότητα του είδους να υπάγεται σε μεγαλύτερες κατηγορίες όπως Drama, Thriller κλπ, κατηγορίες που είναι τοπολογικά κοντινές στα συγκεκριμένα clusters.", "_____no_output_____" ] ], [ [ "cluster = 27\ncluster_neurons = print_cluster_neurons_movies_report(cluster)\nid_list = neuron_movies_report(cluster_neurons)\nprint_categories_stats(id_list)", "Overall Cluster Genres stats:\n[('Crime Fiction', 57), ('Thriller', 22), ('Mystery', 20), ('Comedy film', 6), ('Crime Thriller', 6), ('Adventure', 5), ('Romance Film', 4), ('World cinema', 3), ('Television movie', 3), ('Crime Drama', 3), ('Detective', 3), ('Detective fiction', 3), ('Film noir', 3), ('Science Fiction', 2), ('Musical', 2), ('Fantasy', 2), ('Comedy', 2), ('Chinese Movies', 2), ('Whodunit', 2), ('Escape Film', 1), ('Gangster Film', 1), ('Horror', 1), ('Creature Film', 1), ('Natural horror films', 1), ('Prison', 1), ('Sci-Fi Horror', 1), ('Bollywood', 1), ('Superhero', 1), ('Superhero movie', 1), ('Black-and-white', 1), ('Black comedy', 1), ('Action/Adventure', 1)]\n" ] ], [ [ "## Παράδοση Άσκησης\n\nΣτο zip file περιέχονται, εκτός από το παρόν notebook, ο κώδικας σε .py script καθώς και το αρχείο `stopwords.txt` που απαιτήθηκε κατά τη βελτιστοποίηση του tf-idf. Επίσης, παραδίδουμε και μια HTML μορφή του notebook καθώς πιθανότατα σε επόμενο τρέξιμο του clustering, τα νέα clusters που θα προκύψουν να μην ανταποκρίνονται σε αυτό που περιγράφουμε στην αναφορά.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb239e4d7a8f027b1f2ba78869fe89a979b3e537
53,891
ipynb
Jupyter Notebook
tensorflow/tensorflow_simple_linear_regression.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
3
2020-04-19T01:07:41.000Z
2021-02-05T02:29:50.000Z
tensorflow/tensorflow_simple_linear_regression.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
null
null
null
tensorflow/tensorflow_simple_linear_regression.ipynb
liaison/python
6930343918c8a21d871feee972529822d05ae9dc
[ "Apache-2.0" ]
null
null
null
168.937304
45,024
0.887792
[ [ [ "### Abstract\n\nThis is an example to show to use use the basic API of TensorFlow, to construct a linear regression model. \n\nThis notebook is an exercise adapted from [the Medium.com blog](https://medium.com/@saxenarohan97/intro-to-tensorflow-solving-a-simple-regression-problem-e87b42fd4845).\n\nNote that recent version of TensorFlow does have more advanced API such like LinearClassifier that provides the scikit-learn alike machine learning API.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np\nfrom sklearn.datasets import load_boston\nfrom sklearn.preprocessing import scale\nfrom matplotlib import pyplot as plt\n\n%matplotlib inline\nfrom matplotlib.pylab import rcParams\nrcParams['figure.figsize'] = 15,6", "_____no_output_____" ] ], [ [ "Split the data into training, validation and test sets.", "_____no_output_____" ] ], [ [ "# Retrieve the data\nbunch = load_boston()\nprint('total data shape:', bunch.data.shape)\n\ntotal_features = bunch.data[:, range(12)]\ntotal_prices = bunch.data[:, [12]]\n\nprint('features shape:', total_features.shape, 'targe shape:', total_prices.shape)\n\n# new in 0.18 version\n# total_features, total_prices = load_boston(True)\n\n# Keep 300 samples for training\ntrain_features = scale(total_features[:300])\ntrain_prices = total_prices[:300]\nprint('training dataset:', len(train_features))\nprint('feature example:', train_features[0:1])\nprint('mean of feature 0:', np.asarray(train_features[:, 0]).mean())\n\n# Keep 100 samples for validation\nvalid_features = scale(total_features[300:400])\nvalid_prices = total_prices[300:400]\nprint('validation dataset:', len(valid_features))\n\n# Keep remaining samples as test set\ntest_features = scale(total_features[400:])\ntest_prices = total_prices[400:]\nprint('test dataset:', len(test_features))", "total data shape: (506, 13)\nfeatures shape: (506, 12) targe shape: (506, 1)\ntraining dataset: 300\nfeature example: [[-0.64113113 0.10080399 -1.03067021 
-0.31448545 0.217757 0.21942717\n 0.08260981 -0.09559716 -2.15826599 -0.23254428 -1.00268807 0.42054571]]\nmean of feature 0: 2.36847578587e-17\nvalidation dataset: 100\ntest dataset: 106\n" ] ], [ [ "#### Linear Regression Model ", "_____no_output_____" ] ], [ [ "w = tf.Variable(tf.truncated_normal([12, 1], mean=0.0, stddev=1.0, dtype=tf.float64))\nb = tf.Variable(tf.zeros(1, dtype = tf.float64))", "_____no_output_____" ], [ "def calc(x, y):\n '''\n linear regression model that return (prediction, L2_error)\n '''\n # Returns predictions and error\n predictions = tf.add(b, tf.matmul(x, w))\n error = tf.reduce_mean(tf.square(y - predictions))\n return [ predictions, error ]", "_____no_output_____" ], [ "y, cost = calc(train_features, train_prices)\n\n# augment the model with the regularisation\nL1_regu_cost = tf.add(cost, tf.reduce_mean(tf.abs(w)))\nL2_regu_cost = tf.add(cost, tf.reduce_mean(tf.square(w)))", "_____no_output_____" ], [ "def train(cost, learning_rate=0.025, epochs=300):\n '''\n run the cost computation graph with gradient descent optimizer.\n '''\n errors = [[], []] \n\n init = tf.global_variables_initializer()\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth=True\n sess = tf.Session(config=config)\n\n with sess:\n sess.run(init)\n\n for i in range(epochs):\n\n sess.run(optimizer)\n\n errors[0].append(i+1)\n errors[1].append(sess.run(cost))\n\n # Get the parameters of the linear regression model.\n print('weights:\\n', sess.run(w))\n print('bias:', sess.run(b))\n \n valid_cost = calc(valid_features, valid_prices)[1]\n print('Validation error =', sess.run(valid_cost), '\\n')\n\n test_cost = calc(test_features, test_prices)[1]\n print('Test error =', sess.run(test_cost), '\\n')\n \n return errors", "_____no_output_____" ], [ "# with L1 regularisation, the testing error is slightly improved, i.e. 75 vs. 
76\n# similarly with L1 regularisation, the L2 regularisation improves the testing error to 75 as well.\nepochs = 500\n\nerrors_lr_005 = train(cost, learning_rate=0.005, epochs=epochs)\n\nerrors_lr_025 = train(cost, learning_rate=0.025, epochs=epochs)", "weights:\n [[ 0.01976278]\n [ 0.2216641 ]\n [ 0.33897141]\n [ 0.14712622]\n [ 1.49651714]\n [-3.30264682]\n [ 2.19215736]\n [ 0.75213107]\n [-0.2088241 ]\n [-0.35450866]\n [ 0.65472403]\n [ 0.07399816]]\nbias: [ 10.63383511]\nValidation error = 39.140050724 \n\nTest error = 76.3269062042 \n\nweights:\n [[ 0.40265458]\n [ 0.38716099]\n [ 0.40915654]\n [ 0.11570143]\n [ 0.7819646 ]\n [-3.46135321]\n [ 2.58540755]\n [ 0.64041114]\n [-0.13593196]\n [-0.43936893]\n [ 0.54024542]\n [ 0.12436986]]\nbias: [ 10.70416667]\nValidation error = 37.305764913 \n\nTest error = 75.9908063646 \n\n" ], [ "ax = plt.subplot(111)\n\nplt.plot(errors_lr_005[1], color='green', label='learning rate 0.005')\nplt.plot(errors_lr_025[1], color='red', label='learning rate 0.025')\n\n#ax = plt.plot(errors[0], errors[1], 'r--')\n\nplt.axis([0, epochs, 0, 200])\n\nplt.title('Evolution of L2 errors along each epoch')\nplt.xlabel('epoch')\nplt.ylabel('L2 error')\n_ = plt.legend(loc='best')\n\nplt.show()", "_____no_output_____" ] ], [ [ "The **higher** the learning rate, the **faster** that the model converges. But if the learning rate is too large, it could also prevent the model from convergence.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb23a0ab02b1c8211833e0118c88273e25cc4692
7,920
ipynb
Jupyter Notebook
chapter14/high_accuracy_MNIST.ipynb
sfefilatyev/tensorflow_excercises
7b1372259fd5e68f8584a3f4039bf4e44b45a458
[ "Apache-2.0" ]
null
null
null
chapter14/high_accuracy_MNIST.ipynb
sfefilatyev/tensorflow_excercises
7b1372259fd5e68f8584a3f4039bf4e44b45a458
[ "Apache-2.0" ]
null
null
null
chapter14/high_accuracy_MNIST.ipynb
sfefilatyev/tensorflow_excercises
7b1372259fd5e68f8584a3f4039bf4e44b45a458
[ "Apache-2.0" ]
null
null
null
35.675676
145
0.548359
[ [ [ "# High Accuracy CNN for MNIST", "_____no_output_____" ], [ "### Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST.", "_____no_output_____" ] ], [ [ "# Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST.", "_____no_output_____" ], [ "# he following model uses 2 convolutional layers, followed by 1 pooling layer, then dropout 25%, \n# then a dense layer, another dropout layer but with 50% dropout, and finally the output layer. \n# It reaches about 99.2% accuracy on the test set. This places this model roughly in the top 20% \n# in the MNIST Kaggle competition (if we ignore the models with an accuracy greater than 99.79% \n# which were most likely trained on the test set, as explained by Chris Deotte in this post). \n# Can you do better? To reach 99.5 to 99.7% accuracy on the test set, you need to add image augmentation, \n# batch norm, use a learning schedule such as 1-cycle, and possibly create an ensemble.", "_____no_output_____" ], [ "# Python ≥3.5 is required\nimport sys\nassert sys.version_info >= (3, 5)\n\n# Is this notebook running on Colab or Kaggle?\nIS_COLAB = \"google.colab\" in sys.modules\nIS_KAGGLE = \"kaggle_secrets\" in sys.modules\n\n# Scikit-Learn ≥0.20 is required\nimport sklearn\nassert sklearn.__version__ >= \"0.20\"\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\nif not tf.config.list_physical_devices('GPU'):\n print(\"No GPU was detected. 
CNNs can be very slow without a GPU.\")\n if IS_COLAB:\n print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n if IS_KAGGLE:\n print(\"Go to Settings > Accelerator and select GPU.\")\n\n# Common imports\nimport numpy as np\nimport os\n\n# to make this notebook's output stable across runs\nnp.random.seed(42)\ntf.random.set_seed(42)\n\n# To plot pretty figures\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nmpl.rc('axes', labelsize=14)\nmpl.rc('xtick', labelsize=12)\nmpl.rc('ytick', labelsize=12)\n\n# Where to save the figures\nPROJECT_ROOT_DIR = \".\"\nCHAPTER_ID = \"cnn\"\nIMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, \"images\", CHAPTER_ID)\nos.makedirs(IMAGES_PATH, exist_ok=True)\n\ndef save_fig(fig_id, tight_layout=True, fig_extension=\"png\", resolution=300):\n path = os.path.join(IMAGES_PATH, fig_id + \".\" + fig_extension)\n print(\"Saving figure\", fig_id)\n if tight_layout:\n plt.tight_layout()", "No GPU was detected. CNNs can be very slow without a GPU.\n" ], [ "(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\nX_train_full = X_train_full / 255.\nX_test = X_test / 255.\nX_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\ny_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]\n\nX_train = X_train[..., np.newaxis]\nX_valid = X_valid[..., np.newaxis]\nX_test = X_test[..., np.newaxis]", "Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n11493376/11490434 [==============================] - 2s 0us/step\n" ], [ "keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nmodel = keras.models.Sequential([\n keras.layers.Conv2D(32, kernel_size=3, padding=\"same\", activation=\"relu\"),\n keras.layers.Conv2D(64, kernel_size=3, padding=\"same\", activation=\"relu\"),\n keras.layers.MaxPool2D(),\n keras.layers.Flatten(),\n keras.layers.Dropout(0.25),\n keras.layers.Dense(128, activation=\"relu\"),\n 
keras.layers.Dropout(0.5),\n keras.layers.Dense(10, activation=\"softmax\")\n])\nmodel.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\",\n metrics=[\"accuracy\"])\n\nmodel.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))\nmodel.evaluate(X_test, y_test)", "Epoch 1/10\n1719/1719 [==============================] - 70s 40ms/step - loss: 0.3724 - accuracy: 0.8834 - val_loss: 0.0457 - val_accuracy: 0.9886\nEpoch 2/10\n1719/1719 [==============================] - 67s 39ms/step - loss: 0.0795 - accuracy: 0.9754 - val_loss: 0.0424 - val_accuracy: 0.9878\nEpoch 3/10\n1719/1719 [==============================] - 70s 41ms/step - loss: 0.0583 - accuracy: 0.9814 - val_loss: 0.0350 - val_accuracy: 0.9902\nEpoch 4/10\n1719/1719 [==============================] - 68s 40ms/step - loss: 0.0519 - accuracy: 0.9837 - val_loss: 0.0335 - val_accuracy: 0.9912\nEpoch 5/10\n1719/1719 [==============================] - 70s 41ms/step - loss: 0.0402 - accuracy: 0.9867 - val_loss: 0.0353 - val_accuracy: 0.9904\nEpoch 6/10\n1719/1719 [==============================] - 66s 38ms/step - loss: 0.0351 - accuracy: 0.9892 - val_loss: 0.0398 - val_accuracy: 0.9916\nEpoch 7/10\n1719/1719 [==============================] - 65s 38ms/step - loss: 0.0320 - accuracy: 0.9897 - val_loss: 0.0356 - val_accuracy: 0.9918\nEpoch 8/10\n1719/1719 [==============================] - 66s 38ms/step - loss: 0.0290 - accuracy: 0.9902 - val_loss: 0.0438 - val_accuracy: 0.9910\nEpoch 9/10\n1719/1719 [==============================] - 69s 40ms/step - loss: 0.0240 - accuracy: 0.9922 - val_loss: 0.0384 - val_accuracy: 0.9918\nEpoch 10/10\n1719/1719 [==============================] - 68s 40ms/step - loss: 0.0219 - accuracy: 0.9927 - val_loss: 0.0395 - val_accuracy: 0.9922\n313/313 [==============================] - 2s 7ms/step - loss: 0.0310 - accuracy: 0.9903\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb23a70bcc8b8ccc29be2062fcf6cc0b449cac73
268,479
ipynb
Jupyter Notebook
src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
motazsaad/UNet-1
fb189eff3f8dc19d3e0143e0e82e31c2e9f028b9
[ "MIT" ]
1
2019-10-03T10:23:05.000Z
2019-10-03T10:23:05.000Z
src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
VernicaJ/SemanticSegmentation
fb189eff3f8dc19d3e0143e0e82e31c2e9f028b9
[ "MIT" ]
null
null
null
src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
VernicaJ/SemanticSegmentation
fb189eff3f8dc19d3e0143e0e82e31c2e9f028b9
[ "MIT" ]
null
null
null
213.92749
112,696
0.893169
[ [ [ "import os\nimport json\nfrom train_unet import convert_labels\nimport matplotlib.pyplot as plt\n", "/home/renjie/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n" ], [ "data_path = os.path.join(os.getcwd(), '..', 'data', 'raw')\nimg_path = os.path.join(data_path, 'images')\nlabel_path = os.path.join(data_path, 'labels')\n\nlabel_1 = os.listdir(label_path)[0]\nimg_1 = os.listdir(img_path)[0]\nprint(label_1)\nprint(img_1)\nwith open(os.path.join(label_path, label_1)) as f:\n test_label = convert_labels(f)\n", "6000026.surfaces.txt\n4100246.jpg\n" ] ], [ [ "# Data Augmentation", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import misc, ndimage\nimport keras\nfrom keras import backend as K\nfrom keras.preprocessing.image import ImageDataGenerator\n# from preprocessing import ImageDataGenerator\n%matplotlib inline", "_____no_output_____" ], [ "gen = ImageDataGenerator(rotation_range=10,\n width_shift_range=0,\n height_shift_range=0,\n shear_range=0.15,\n zoom_range=0.1,\n channel_shift_range=10,\n horizontal_flip=True)", "_____no_output_____" ], [ "test_img= np.expand_dims(plt.imread(os.path.join(img_path,img_1)),0)\n\nplt.imshow(test_img[0])\nplt.show()\nprint(test_img.shape)\naug_iter = gen.flow(test_img)", "_____no_output_____" ], [ "plt.imshow(next(aug_iter)[0].astype(np.uint8))\nplt.show()", "_____no_output_____" ], [ "aug_images = [next(aug_iter)[0]]", "_____no_output_____" ] ], [ [ "# Convert to Parquet", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport pyarrow\nimport pyarrow.parquet as pq\nimport matplotlib.pyplot as plt\nfrom utils import convert_labels", "_____no_output_____" ], [ "data_path = 
os.path.join(os.getcwd(), '..', 'data', 'raw')\nimg_path = os.path.join(data_path, 'images')\nlabel_path = os.path.join(data_path, 'labels')", "_____no_output_____" ], [ "os.listdir(label_path)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb23b4738db1ecd6d8fc5d77271a184e427aa4c4
45,434
ipynb
Jupyter Notebook
code/phase0/2.1.Train_TF_Local_Script_Mode.ipynb
gonsoomoon-ml/SageMaker-Tensorflow-Step-By-Step
132589a2544bd5d99cb494d871195195d1dcec5c
[ "MIT" ]
1
2022-03-02T07:52:55.000Z
2022-03-02T07:52:55.000Z
code/phase0/2.1.Train_TF_Local_Script_Mode.ipynb
gonsoomoon-ml/SageMaker-Tensorflow-Step-By-Step
132589a2544bd5d99cb494d871195195d1dcec5c
[ "MIT" ]
null
null
null
code/phase0/2.1.Train_TF_Local_Script_Mode.ipynb
gonsoomoon-ml/SageMaker-Tensorflow-Step-By-Step
132589a2544bd5d99cb494d871195195d1dcec5c
[ "MIT" ]
1
2022-03-02T01:19:38.000Z
2022-03-02T01:19:38.000Z
57.877707
1,723
0.624774
[ [ [ "# [Module 2.1] 세이지 메이커 로컬 모드 및 스크립트 모드로 훈련\n\n본 워크샵의 모든 노트북은 **<font color=\"red\">conda_tensorflow2_p36</font>** 를 사용합니다.\n\n이 노트북은 아래와 같은 작업을 합니다.\n- 1. 기본 환경 세팅 \n- 2. 노트북에서 세이지 메이커 스크립트 모드 스타일로 코드 변경\n- 3. 세이지 메이커 로컬 모드로 훈련\n- 4. 세이지 메이커의 호스트 모드로 훈련\n- 5. 모델 아티펙트 경로 저장\n\n\n---", "_____no_output_____" ], [ "# 1. 기본 환경 세팅\n사용하는 패키지는 import 시점에 다시 재로딩 합니다.", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import sagemaker\n\nsagemaker_session = sagemaker.Session()\n\nbucket = sagemaker_session.default_bucket()\nprefix = \"sagemaker/DEMO-pytorch-cnn-cifar10\"\n\nrole = sagemaker.get_execution_role()\n", "_____no_output_____" ], [ "import tensorflow as tf\nprint(\"tensorflow version: \", tf.__version__)", "tensorflow version: 2.4.1\n" ], [ "%store -r train_dir\n%store -r validation_dir\n%store -r eval_dir\n%store -r data_dir", "_____no_output_____" ] ], [ [ "# 2. 노트북에서 세이지 메이커 스크립트 모드 스타일로 코드 변경\n\n- Keras 버전의 스크래치 코드에서 세이지 메이커의 코드 변경을 참고 하세요.\n - `1.2.Train_Keras_Local_Script_Mode.ipynb` 참고\n", "_____no_output_____" ] ], [ [ "# !pygmentize src/cifar10_tf2_sm.py", "_____no_output_____" ] ], [ [ "# 3. 세이지 메이커 로컬 모드로 훈련\n\n본격적으로 학습을 시작하기 전에 로컬 모드를 사용하여 디버깅을 먼저 수행합니다. 로컬 모드는 학습 인스턴스를 생성하는 과정이 없이 로컬 인스턴스로 컨테이너를 가져온 후 곧바로 학습을 수행하기 때문에 코드를 보다 신속히 검증할 수 있습니다.\n\nAmazon SageMaker Python SDK의 로컬 모드는 TensorFlow 또는 MXNet estimator서 단일 인자값을 변경하여 CPU (단일 및 다중 인스턴스) 및 GPU (단일 인스턴스) SageMaker 학습 작업을 에뮬레이션(enumlate)할 수 있습니다. \n\n로컬 모드 학습을 위해서는 docker-compose 또는 nvidia-docker-compose (GPU 인스턴스인 경우)의 설치가 필요합니다. 아래 코드 셀을 통해 본 노트북 환경에 docker-compose 또는 nvidia-docker-compose를 설치하고 구성합니다. 
\n \n로컬 모드의 학습을 통해 여러분의 코드가 현재 사용 중인 하드웨어를 적절히 활용하고 있는지 확인하기 위한 GPU 점유와 같은 지표(metric)를 쉽게 모니터링할 수 있습니다.", "_____no_output_____" ], [ "### 로컬 모드로 훈련 실행\n- 아래의 두 라인이 로컬모드로 훈련을 지시 합니다.\n```python\n instance_type=instance_type, # local_gpu or local 지정\n session = sagemaker.LocalSession(), # 로컬 세션을 사용합니다.\n```", "_____no_output_____" ], [ "#### 로컬의 GPU, CPU 여부로 instance_type 결정", "_____no_output_____" ] ], [ [ "import os\nimport subprocess\n\n\ninstance_type = \"local_gpu\" # GPU 사용을 가정 합니다. CPU 사용시에 'local' 로 정의 합니다.\n\nprint(\"Instance type = \" + instance_type)", "Instance type = local_gpu\n" ] ], [ [ "학습 작업을 시작하기 위해 `estimator.fit() ` 호출 시, Amazon ECS에서 Amazon SageMaker TensorFlow 컨테이너를 로컬 노트북 인스턴스로 다운로드합니다.\n\n`sagemaker.tensorflow` 클래스를 사용하여 SageMaker Python SDK의 Tensorflow Estimator 인스턴스를 생성합니다.\n인자값으로 하이퍼파라메터와 다양한 설정들을 변경할 수 있습니다.\n\n\n자세한 내용은 [documentation](https://sagemaker.readthedocs.io/en/stable/using_tf.html#training-with-tensorflow-estimator)을 확인하시기 바랍니다.", "_____no_output_____" ] ], [ [ "hyperparameters = {\n 'epochs' : 1,\n 'learning-rate' : 0.001,\n 'print-interval' : 100,\n 'train-batch-size': 256, \n 'eval-batch-size': 512, \n 'validation-batch-size': 512,\n }", "_____no_output_____" ], [ "from sagemaker.tensorflow import TensorFlow\nestimator = TensorFlow(base_job_name='cifar10',\n entry_point='cifar10_tf2_sm.py',\n source_dir='src',\n role=role,\n framework_version='2.4.1',\n py_version='py37',\n script_mode=True,\n hyperparameters= hyperparameters,\n train_instance_count=1, \n train_instance_type= instance_type)", "train_instance_type has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\ntrain_instance_count has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\ntrain_instance_type has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ], [ "%%time\nestimator.fit({'train': 
f'file://{train_dir}',\n 'validation': f'file://{validation_dir}',\n 'eval': f'file://{eval_dir}'})", "Creating 6syhdngngx-algo-1-cl7gh ... \nCreating 6syhdngngx-algo-1-cl7gh ... done\nAttaching to 6syhdngngx-algo-1-cl7gh\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:53.710766: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:53.710971: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:53.715651: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:53.754759: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:55,531 sagemaker-training-toolkit INFO Imported framework sagemaker_tensorflow_container.training\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:55,897 sagemaker-training-toolkit INFO Invoking user script\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Training Env:\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"additional_framework_parameters\": {},\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"channel_input_dirs\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"train\": \"/opt/ml/input/data/train\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"validation\": \"/opt/ml/input/data/validation\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"eval\": \"/opt/ml/input/data/eval\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 
},\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"current_host\": \"algo-1-cl7gh\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"framework_module\": \"sagemaker_tensorflow_container.training:main\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"hosts\": [\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"algo-1-cl7gh\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m ],\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"hyperparameters\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"epochs\": 1,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"learning-rate\": 0.001,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"print-interval\": 100,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"train-batch-size\": 256,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"eval-batch-size\": 512,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"validation-batch-size\": 512,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"model_dir\": \"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m },\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"input_config_dir\": \"/opt/ml/input/config\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"input_data_config\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"train\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"TrainingInputMode\": \"File\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m },\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"validation\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"TrainingInputMode\": \"File\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m },\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"eval\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"TrainingInputMode\": \"File\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m }\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m },\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"input_dir\": \"/opt/ml/input\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"is_master\": 
true,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"job_name\": \"cifar10-2021-10-11-11-21-50-789\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"log_level\": 20,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"master_hostname\": \"algo-1-cl7gh\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"model_dir\": \"/opt/ml/model\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"module_dir\": \"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/source/sourcedir.tar.gz\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"module_name\": \"cifar10_tf2_sm\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"network_interface_name\": \"eth0\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"num_cpus\": 8,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"num_gpus\": 1,\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"output_data_dir\": \"/opt/ml/output/data\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"output_dir\": \"/opt/ml/output\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"output_intermediate_dir\": \"/opt/ml/output/intermediate\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"resource_config\": {\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"current_host\": \"algo-1-cl7gh\",\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"hosts\": [\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"algo-1-cl7gh\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m ]\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m },\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \"user_entry_point\": \"cifar10_tf2_sm.py\"\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m }\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Environment variables:\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HOSTS=[\"algo-1-cl7gh\"]\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_NETWORK_INTERFACE_NAME=eth0\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 
SM_HPS={\"epochs\":1,\"eval-batch-size\":512,\"learning-rate\":0.001,\"model_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model\",\"print-interval\":100,\"train-batch-size\":256,\"validation-batch-size\":512}\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_USER_ENTRY_POINT=cifar10_tf2_sm.py\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_FRAMEWORK_PARAMS={}\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_RESOURCE_CONFIG={\"current_host\":\"algo-1-cl7gh\",\"hosts\":[\"algo-1-cl7gh\"]}\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_INPUT_DATA_CONFIG={\"eval\":{\"TrainingInputMode\":\"File\"},\"train\":{\"TrainingInputMode\":\"File\"},\"validation\":{\"TrainingInputMode\":\"File\"}}\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_OUTPUT_DATA_DIR=/opt/ml/output/data\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_CHANNELS=[\"eval\",\"train\",\"validation\"]\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_CURRENT_HOST=algo-1-cl7gh\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_MODULE_NAME=cifar10_tf2_sm\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_LOG_LEVEL=20\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_INPUT_DIR=/opt/ml/input\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_INPUT_CONFIG_DIR=/opt/ml/input/config\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_OUTPUT_DIR=/opt/ml/output\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_NUM_CPUS=8\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_NUM_GPUS=1\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_MODEL_DIR=/opt/ml/model\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_MODULE_DIR=s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/source/sourcedir.tar.gz\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 
SM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"eval\":\"/opt/ml/input/data/eval\",\"train\":\"/opt/ml/input/data/train\",\"validation\":\"/opt/ml/input/data/validation\"},\"current_host\":\"algo-1-cl7gh\",\"framework_module\":\"sagemaker_tensorflow_container.training:main\",\"hosts\":[\"algo-1-cl7gh\"],\"hyperparameters\":{\"epochs\":1,\"eval-batch-size\":512,\"learning-rate\":0.001,\"model_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model\",\"print-interval\":100,\"train-batch-size\":256,\"validation-batch-size\":512},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"eval\":{\"TrainingInputMode\":\"File\"},\"train\":{\"TrainingInputMode\":\"File\"},\"validation\":{\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"cifar10-2021-10-11-11-21-50-789\",\"log_level\":20,\"master_hostname\":\"algo-1-cl7gh\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/source/sourcedir.tar.gz\",\"module_name\":\"cifar10_tf2_sm\",\"network_interface_name\":\"eth0\",\"num_cpus\":8,\"num_gpus\":1,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1-cl7gh\",\"hosts\":[\"algo-1-cl7gh\"]},\"user_entry_point\":\"cifar10_tf2_sm.py\"}\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_USER_ARGS=[\"--epochs\",\"1\",\"--eval-batch-size\",\"512\",\"--learning-rate\",\"0.001\",\"--model_dir\",\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model\",\"--print-interval\",\"100\",\"--train-batch-size\",\"256\",\"--validation-batch-size\",\"512\"]\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 
SM_CHANNEL_TRAIN=/opt/ml/input/data/train\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_CHANNEL_VALIDATION=/opt/ml/input/data/validation\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_CHANNEL_EVAL=/opt/ml/input/data/eval\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_EPOCHS=1\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_LEARNING-RATE=0.001\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_PRINT-INTERVAL=100\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_TRAIN-BATCH-SIZE=256\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_EVAL-BATCH-SIZE=512\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_VALIDATION-BATCH-SIZE=512\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m SM_HP_MODEL_DIR=s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m PYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/local/lib/python37.zip:/usr/local/lib/python3.7:/usr/local/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/site-packages\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Invoking script with the following command:\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m /usr/local/bin/python3.7 cifar10_tf2_sm.py --epochs 1 --eval-batch-size 512 --learning-rate 0.001 --model_dir s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model --print-interval 100 --train-batch-size 256 --validation-batch-size 512\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m tensorflow version: 2.4.1\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m args: \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Namespace(epochs=1, eval='/opt/ml/input/data/eval', eval_batch_size=512, learning_rate=0.001, model_dir='s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-21-50-789/model', model_output_dir='/opt/ml/model', momentum=0.9, 
optimizer='adam', print_interval=100, train='/opt/ml/input/data/train', train_batch_size=256, validation='/opt/ml/input/data/validation', validation_batch_size=512, weight_decay=0.0002)\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Channel Name: train\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches loading TFRecord : 10000\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m buffer_size: 10000\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches in train: 39\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Channel Name: eval\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches loading TFRecord : 10000\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches in eval: 19\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Channel Name: validation\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches loading TFRecord : 10000\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m # of batches in validation: 19\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m [2021-10-11 11:22:05.203 6f19020ebdc1:37 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m [2021-10-11 11:22:05.237 6f19020ebdc1:37 INFO profiler_config_parser.py:102] Unable to find config at /opt/ml/input/config/profilerconfig.json. 
Profiler is disabled.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Step #0\tLoss: 60.272499\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Epoch 1, Test Loss: 2.293747663497925, Test Accuracy: 11.307565689086914\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m Training Finished.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:56.049740: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:56.049902: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:21:56.090474: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:22:09.289920: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m INFO:tensorflow:Assets written to: /opt/ml/model/1/assets\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m INFO:tensorflow:Assets written to: /opt/ml/model/1/assets\n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m \n\u001b[36m6syhdngngx-algo-1-cl7gh |\u001b[0m 2021-10-11 11:22:10,920 sagemaker-training-toolkit INFO Reporting training SUCCESS\n\u001b[36m6syhdngngx-algo-1-cl7gh exited with code 0\n\u001b[0mAborting on container exit...\n===== Job Complete =====\nCPU times: user 601 ms, sys: 58.5 ms, total: 659 ms\nWall time: 21.3 s\n" ] ], [ [ "# 4. 
세이지 메이커의 호스트 모드로 훈련", "_____no_output_____" ], [ "### 데이터 세트를 S3에 업로드\n", "_____no_output_____" ] ], [ [ "dataset_location = sagemaker_session.upload_data(path=data_dir, key_prefix='data/DEMO-cifar10')\ndisplay(dataset_location)", "_____no_output_____" ], [ "hyperparameters = {\n 'epochs' : 20,\n 'learning-rate' : 0.001, \n 'print-interval' : 100,\n 'train-batch-size': 256, \n 'eval-batch-size': 512, \n 'validation-batch-size': 512,\n }", "_____no_output_____" ], [ "from sagemaker.tensorflow import TensorFlow\n\ninstance_type='ml.p3.8xlarge'\n\nsm_estimator = TensorFlow(base_job_name='cifar10',\n entry_point='cifar10_tf2_sm.py',\n source_dir='src',\n role=role,\n framework_version='2.4.1',\n py_version='py37',\n script_mode=True,\n hyperparameters= hyperparameters,\n train_instance_count=1, \n train_instance_type= instance_type)\n\n", "train_instance_type has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\ntrain_instance_count has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\ntrain_instance_type has been renamed in sagemaker>=2.\nSee: https://sagemaker.readthedocs.io/en/stable/v2.html for details.\n" ] ], [ [ "## SageMaker Host Mode 로 훈련\n- `cifar10_estimator.fit(inputs, wait=False)`\n - 입력 데이터를 inputs로서 S3 의 경로를 제공합니다.\n - wait=False 로 지정해서 async 모드로 훈련을 실행합니다. 
\n - 실행 경과는 아래의 cifar10_estimator.logs() 에서 확인 합니다.", "_____no_output_____" ] ], [ [ "%%time\nsm_estimator.fit({'train':'{}/train'.format(dataset_location),\n 'validation':'{}/validation'.format(dataset_location),\n 'eval':'{}/eval'.format(dataset_location)}, wait=False)", "CPU times: user 267 ms, sys: 0 ns, total: 267 ms\nWall time: 662 ms\n" ], [ "sm_estimator.logs()", "2021-10-11 11:22:25 Starting - Starting the training job...\n2021-10-11 11:22:48 Starting - Launching requested ML instancesProfilerReport-1633951344: InProgress\n...\n2021-10-11 11:23:08 Starting - Insufficient capacity error from EC2 while launching instances, retrying!...........................\n2021-10-11 11:27:50 Starting - Preparing the instances for training.........\n2021-10-11 11:29:26 Downloading - Downloading input data\n2021-10-11 11:29:26 Training - Downloading the training image.................\u001b[34m2021-10-11 11:32:03.614371: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-10-11 11:32:03.627367: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped.\u001b[0m\n\u001b[34m2021-10-11 11:32:03.911369: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\u001b[0m\n\u001b[34m2021-10-11 11:32:04.152537: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\n2021-10-11 11:32:11 Training - Training image download completed. 
Training in progress.\u001b[34m2021-10-11 11:32:14,661 sagemaker-training-toolkit INFO Imported framework sagemaker_tensorflow_container.training\u001b[0m\n\u001b[34m2021-10-11 11:32:15,477 sagemaker-training-toolkit INFO Invoking user script\n\u001b[0m\n\u001b[34mTraining Env:\n\u001b[0m\n\u001b[34m{\n \"additional_framework_parameters\": {},\n \"channel_input_dirs\": {\n \"eval\": \"/opt/ml/input/data/eval\",\n \"validation\": \"/opt/ml/input/data/validation\",\n \"train\": \"/opt/ml/input/data/train\"\n },\n \"current_host\": \"algo-1\",\n \"framework_module\": \"sagemaker_tensorflow_container.training:main\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"hyperparameters\": {\n \"validation-batch-size\": 512,\n \"learning-rate\": 0.001,\n \"print-interval\": 100,\n \"model_dir\": \"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model\",\n \"train-batch-size\": 256,\n \"epochs\": 20,\n \"eval-batch-size\": 512\n },\n \"input_config_dir\": \"/opt/ml/input/config\",\n \"input_data_config\": {\n \"eval\": {\n \"TrainingInputMode\": \"File\",\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\"\n },\n \"validation\": {\n \"TrainingInputMode\": \"File\",\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\"\n },\n \"train\": {\n \"TrainingInputMode\": \"File\",\n \"S3DistributionType\": \"FullyReplicated\",\n \"RecordWrapperType\": \"None\"\n }\n },\n \"input_dir\": \"/opt/ml/input\",\n \"is_master\": true,\n \"job_name\": \"cifar10-2021-10-11-11-22-24-452\",\n \"log_level\": 20,\n \"master_hostname\": \"algo-1\",\n \"model_dir\": \"/opt/ml/model\",\n \"module_dir\": \"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/source/sourcedir.tar.gz\",\n \"module_name\": \"cifar10_tf2_sm\",\n \"network_interface_name\": \"eth0\",\n \"num_cpus\": 32,\n \"num_gpus\": 4,\n \"output_data_dir\": \"/opt/ml/output/data\",\n \"output_dir\": \"/opt/ml/output\",\n \"output_intermediate_dir\": 
\"/opt/ml/output/intermediate\",\n \"resource_config\": {\n \"current_host\": \"algo-1\",\n \"hosts\": [\n \"algo-1\"\n ],\n \"network_interface_name\": \"eth0\"\n },\n \"user_entry_point\": \"cifar10_tf2_sm.py\"\u001b[0m\n\u001b[34m}\n\u001b[0m\n\u001b[34mEnvironment variables:\n\u001b[0m\n\u001b[34mSM_HOSTS=[\"algo-1\"]\u001b[0m\n\u001b[34mSM_NETWORK_INTERFACE_NAME=eth0\u001b[0m\n\u001b[34mSM_HPS={\"epochs\":20,\"eval-batch-size\":512,\"learning-rate\":0.001,\"model_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model\",\"print-interval\":100,\"train-batch-size\":256,\"validation-batch-size\":512}\u001b[0m\n\u001b[34mSM_USER_ENTRY_POINT=cifar10_tf2_sm.py\u001b[0m\n\u001b[34mSM_FRAMEWORK_PARAMS={}\u001b[0m\n\u001b[34mSM_RESOURCE_CONFIG={\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"}\u001b[0m\n\u001b[34mSM_INPUT_DATA_CONFIG={\"eval\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"train\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"validation\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}}\u001b[0m\n\u001b[34mSM_OUTPUT_DATA_DIR=/opt/ml/output/data\u001b[0m\n\u001b[34mSM_CHANNELS=[\"eval\",\"train\",\"validation\"]\u001b[0m\n\u001b[34mSM_CURRENT_HOST=algo-1\u001b[0m\n\u001b[34mSM_MODULE_NAME=cifar10_tf2_sm\u001b[0m\n\u001b[34mSM_LOG_LEVEL=20\u001b[0m\n\u001b[34mSM_FRAMEWORK_MODULE=sagemaker_tensorflow_container.training:main\u001b[0m\n\u001b[34mSM_INPUT_DIR=/opt/ml/input\u001b[0m\n\u001b[34mSM_INPUT_CONFIG_DIR=/opt/ml/input/config\u001b[0m\n\u001b[34mSM_OUTPUT_DIR=/opt/ml/output\u001b[0m\n\u001b[34mSM_NUM_CPUS=32\u001b[0m\n\u001b[34mSM_NUM_GPUS=4\u001b[0m\n\u001b[34mSM_MODEL_DIR=/opt/ml/model\u001b[0m\n\u001b[34mSM_MODULE_DIR=s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/source/sourcedir.
tar.gz\u001b[0m\n\u001b[34mSM_TRAINING_ENV={\"additional_framework_parameters\":{},\"channel_input_dirs\":{\"eval\":\"/opt/ml/input/data/eval\",\"train\":\"/opt/ml/input/data/train\",\"validation\":\"/opt/ml/input/data/validation\"},\"current_host\":\"algo-1\",\"framework_module\":\"sagemaker_tensorflow_container.training:main\",\"hosts\":[\"algo-1\"],\"hyperparameters\":{\"epochs\":20,\"eval-batch-size\":512,\"learning-rate\":0.001,\"model_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model\",\"print-interval\":100,\"train-batch-size\":256,\"validation-batch-size\":512},\"input_config_dir\":\"/opt/ml/input/config\",\"input_data_config\":{\"eval\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"train\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"},\"validation\":{\"RecordWrapperType\":\"None\",\"S3DistributionType\":\"FullyReplicated\",\"TrainingInputMode\":\"File\"}},\"input_dir\":\"/opt/ml/input\",\"is_master\":true,\"job_name\":\"cifar10-2021-10-11-11-22-24-452\",\"log_level\":20,\"master_hostname\":\"algo-1\",\"model_dir\":\"/opt/ml/model\",\"module_dir\":\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/source/sourcedir.tar.gz\",\"module_name\":\"cifar10_tf2_sm\",\"network_interface_name\":\"eth0\",\"num_cpus\":32,\"num_gpus\":4,\"output_data_dir\":\"/opt/ml/output/data\",\"output_dir\":\"/opt/ml/output\",\"output_intermediate_dir\":\"/opt/ml/output/intermediate\",\"resource_config\":{\"current_host\":\"algo-1\",\"hosts\":[\"algo-1\"],\"network_interface_name\":\"eth0\"},\"user_entry_point\":\"cifar10_tf2_sm.py\"}\u001b[0m\n\u001b[34mSM_USER_ARGS=[\"--epochs\",\"20\",\"--eval-batch-size\",\"512\",\"--learning-rate\",\"0.001\",\"--model_dir\",\"s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model\",\"--print-interval\",\"100\",\"--train-batch-size\",\"256\",\"--valid
ation-batch-size\",\"512\"]\u001b[0m\n\u001b[34mSM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate\u001b[0m\n\u001b[34mSM_CHANNEL_EVAL=/opt/ml/input/data/eval\u001b[0m\n\u001b[34mSM_CHANNEL_VALIDATION=/opt/ml/input/data/validation\u001b[0m\n\u001b[34mSM_CHANNEL_TRAIN=/opt/ml/input/data/train\u001b[0m\n\u001b[34mSM_HP_VALIDATION-BATCH-SIZE=512\u001b[0m\n\u001b[34mSM_HP_LEARNING-RATE=0.001\u001b[0m\n\u001b[34mSM_HP_PRINT-INTERVAL=100\u001b[0m\n\u001b[34mSM_HP_MODEL_DIR=s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model\u001b[0m\n\u001b[34mSM_HP_TRAIN-BATCH-SIZE=256\u001b[0m\n\u001b[34mSM_HP_EPOCHS=20\u001b[0m\n\u001b[34mSM_HP_EVAL-BATCH-SIZE=512\u001b[0m\n\u001b[34mPYTHONPATH=/opt/ml/code:/usr/local/bin:/usr/local/lib/python37.zip:/usr/local/lib/python3.7:/usr/local/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/site-packages\n\u001b[0m\n\u001b[34mInvoking script with the following command:\n\u001b[0m\n\u001b[34m/usr/local/bin/python3.7 cifar10_tf2_sm.py --epochs 20 --eval-batch-size 512 --learning-rate 0.001 --model_dir s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model --print-interval 100 --train-batch-size 256 --validation-batch-size 512\n\n\u001b[0m\n\u001b[34mtensorflow version: 2.4.1\u001b[0m\n\u001b[34margs: \n Namespace(epochs=20, eval='/opt/ml/input/data/eval', eval_batch_size=512, learning_rate=0.001, model_dir='s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/model', model_output_dir='/opt/ml/model', momentum=0.9, optimizer='adam', print_interval=100, train='/opt/ml/input/data/train', train_batch_size=256, validation='/opt/ml/input/data/validation', validation_batch_size=512, weight_decay=0.0002)\n\u001b[0m\n\u001b[34mChannel Name: train\n\u001b[0m\n\u001b[34m# of batches loading TFRecord : 10000\u001b[0m\n\u001b[34mbuffer_size: 10000\u001b[0m\n\u001b[34m# of batches in train: 39\n\u001b[0m\n\u001b[34mChannel Name: eval\n\u001b[0m\n\u001b[34m# of batches loading TFRecord : 
10000\u001b[0m\n\u001b[34m# of batches in eval: 19\n\u001b[0m\n\u001b[34mChannel Name: validation\n\u001b[0m\n\u001b[34m# of batches loading TFRecord : 10000\u001b[0m\n\u001b[34m# of batches in validation: 19\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.367 ip-10-0-217-253.ec2.internal:82 INFO utils.py:27] RULE_JOB_STOP_SIGNAL_FILENAME: None\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.524 ip-10-0-217-253.ec2.internal:82 INFO profiler_config_parser.py:102] User has disabled profiler.\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.526 ip-10-0-217-253.ec2.internal:82 INFO json_config.py:91] Creating hook from json_config at /opt/ml/input/config/debughookconfig.json.\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.526 ip-10-0-217-253.ec2.internal:82 INFO hook.py:199] tensorboard_dir has not been set for the hook. SMDebug will not be exporting tensorboard summaries.\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.527 ip-10-0-217-253.ec2.internal:82 INFO hook.py:253] Saving to /opt/ml/output/tensors\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.527 ip-10-0-217-253.ec2.internal:82 INFO state_store.py:77] The checkpoint config file /opt/ml/input/config/checkpointconfig.json does not exist.\u001b[0m\n\u001b[34m[2021-10-11 11:32:30.527 ip-10-0-217-253.ec2.internal:82 INFO hook.py:413] Monitoring the collections: metrics, sm_metrics, losses\u001b[0m\n\u001b[34mStep #0#011Loss: 54.056965\u001b[0m\n\u001b[34mEpoch 1, Test Loss: 2.26065731048584, Test Accuracy: 14.597039222717285\u001b[0m\n\u001b[34mStep #0#011Loss: 2.259690\u001b[0m\n\u001b[34mEpoch 2, Test Loss: 2.19937801361084, Test Accuracy: 18.359375\u001b[0m\n\u001b[34mStep #0#011Loss: 2.182891\u001b[0m\n\u001b[34mEpoch 3, Test Loss: 2.1109817028045654, Test Accuracy: 23.273027420043945\u001b[0m\n\u001b[34mStep #0#011Loss: 2.147202\u001b[0m\n\u001b[34mEpoch 4, Test Loss: 1.8995871543884277, Test Accuracy: 30.345396041870117\u001b[0m\n\u001b[34mStep #0#011Loss: 1.824833\u001b[0m\n\u001b[34mEpoch 5, Test Loss: 1.8044137954711914, Test Accuracy: 
33.72738265991211\u001b[0m\n\u001b[34mStep #0#011Loss: 1.833963\u001b[0m\n\u001b[34mEpoch 6, Test Loss: 1.774757742881775, Test Accuracy: 35.99917984008789\u001b[0m\n\u001b[34mStep #0#011Loss: 1.647774\u001b[0m\n\u001b[34mEpoch 7, Test Loss: 1.7256040573120117, Test Accuracy: 37.67475128173828\u001b[0m\n\u001b[34mStep #0#011Loss: 1.783340\u001b[0m\n\u001b[34mEpoch 8, Test Loss: 1.6955578327178955, Test Accuracy: 38.558799743652344\u001b[0m\n\u001b[34mStep #0#011Loss: 1.576285\u001b[0m\n\u001b[34mEpoch 9, Test Loss: 1.6572834253311157, Test Accuracy: 40.00822448730469\u001b[0m\n\u001b[34mStep #0#011Loss: 1.645678\u001b[0m\n\u001b[34mEpoch 10, Test Loss: 1.670272707939148, Test Accuracy: 39.81291198730469\u001b[0m\n\u001b[34mStep #0#011Loss: 1.600603\u001b[0m\n\u001b[34mEpoch 11, Test Loss: 1.6322380304336548, Test Accuracy: 40.95394515991211\u001b[0m\n\u001b[34mStep #0#011Loss: 1.611182\u001b[0m\n\u001b[34mEpoch 12, Test Loss: 1.579511284828186, Test Accuracy: 43.68832015991211\u001b[0m\n\u001b[34mStep #0#011Loss: 1.509589\u001b[0m\n\u001b[34mEpoch 13, Test Loss: 1.5858755111694336, Test Accuracy: 42.93791198730469\u001b[0m\n\u001b[34mStep #0#011Loss: 1.572258\u001b[0m\n\u001b[34mEpoch 14, Test Loss: 1.5317524671554565, Test Accuracy: 44.7265625\u001b[0m\n\u001b[34mStep #0#011Loss: 1.436440\u001b[0m\n\u001b[34mEpoch 15, Test Loss: 1.586836576461792, Test Accuracy: 43.32853698730469\u001b[0m\n\u001b[34mStep #0#011Loss: 1.452503\u001b[0m\n\u001b[34mEpoch 16, Test Loss: 1.5158073902130127, Test Accuracy: 45.970394134521484\u001b[0m\n\u001b[34mStep #0#011Loss: 1.388567\u001b[0m\n\u001b[34mEpoch 17, Test Loss: 1.4940433502197266, Test Accuracy: 46.607730865478516\u001b[0m\n\u001b[34mStep #0#011Loss: 1.399346\u001b[0m\n\u001b[34mEpoch 18, Test Loss: 1.5035525560379028, Test Accuracy: 46.65912628173828\u001b[0m\n\u001b[34mStep #0#011Loss: 1.477137\u001b[0m\n\u001b[34mEpoch 19, Test Loss: 1.4569551944732666, Test Accuracy: 48.61225128173828\u001b[0m\n\u001b[34mStep 
#0#011Loss: 1.421552\u001b[0m\n\u001b[34mEpoch 20, Test Loss: 1.4467573165893555, Test Accuracy: 48.76644515991211\u001b[0m\n\u001b[34mTraining Finished.\u001b[0m\n\u001b[34m2021-10-11 11:32:15.626970: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-10-11 11:32:15.627125: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:105] SageMaker Profiler is not enabled. The timeline writer thread will not be started, future recorded events will be dropped.\u001b[0m\n\u001b[34m2021-10-11 11:32:15.669046: W tensorflow/core/profiler/internal/smprofiler_timeline.cc:460] Initializing the SageMaker Profiler.\u001b[0m\n\u001b[34m2021-10-11 11:33:01.658410: W tensorflow/python/util/util.cc:348] Sets are not currently considered sequences, but this may change in the future, so consider avoiding using them.\u001b[0m\n\u001b[34mINFO:tensorflow:Assets written to: /opt/ml/model/1/assets\u001b[0m\n\u001b[34mINFO:tensorflow:Assets written to: /opt/ml/model/1/assets\n\u001b[0m\n\u001b[34m2021-10-11 11:33:04,626 sagemaker-training-toolkit INFO Reporting training SUCCESS\u001b[0m\n\n2021-10-11 11:33:31 Uploading - Uploading generated training model\n2021-10-11 11:33:31 Completed - Training job completed\nProfilerReport-1633951344: NoIssuesFound\nTraining seconds: 258\nBillable seconds: 258\n" ] ], [ [ "# 5. 모델 아티펙트 저장\n- S3 에 저장된 모델 아티펙트를 저장하여 추론시 사용합니다.", "_____no_output_____" ] ], [ [ "tf2_script_artifact_path = sm_estimator.model_data\nprint(\"script_tf_artifact_path: \", tf2_script_artifact_path)\n\n%store tf2_script_artifact_path", "script_tf_artifact_path: s3://sagemaker-us-east-1-227612457811/cifar10-2021-10-11-11-22-24-452/output/model.tar.gz\nStored 'tf2_script_artifact_path' (str)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb23c05c80435ced42566104713a8f8df2fd7005
151,012
ipynb
Jupyter Notebook
intro_to_ai_ethics/04-ai-fairness.ipynb
drakessn/Kaggle-Courses
db9fb7a73743a22f35d65ec435d9b5dfd89fc5a7
[ "MIT" ]
1
2019-09-08T23:30:47.000Z
2019-09-08T23:30:47.000Z
intro_to_ai_ethics/04-ai-fairness.ipynb
drakessn/Kaggle-Courses
db9fb7a73743a22f35d65ec435d9b5dfd89fc5a7
[ "MIT" ]
null
null
null
intro_to_ai_ethics/04-ai-fairness.ipynb
drakessn/Kaggle-Courses
db9fb7a73743a22f35d65ec435d9b5dfd89fc5a7
[ "MIT" ]
null
null
null
157.304167
27,728
0.875387
[ [ [ "**This notebook is an exercise in the [AI Ethics](https://www.kaggle.com/learn/ai-ethics) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/ai-fairness).**\n\n---\n", "_____no_output_____" ], [ "In the tutorial, you learned about different ways of measuring fairness of a machine learning model. In this exercise, you'll train a few models to approve (or deny) credit card applications and analyze fairness. Don't worry if you're new to coding: this exercise assumes no programming knowledge.\n\n# Introduction\n\nWe work with a **synthetic** dataset of information submitted by credit card applicants. \n\nTo load and preview the data, run the next code cell. When the code finishes running, you should see a message saying the data was successfully loaded, along with a preview of the first five rows of the data.", "_____no_output_____" ] ], [ [ "# Set up feedback system\nfrom learntools.core import binder\nbinder.bind(globals())\nfrom learntools.ethics.ex4 import *\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n# Load the data, separate features from target\ndata = pd.read_csv(\"../input/synthetic-credit-card-approval/synthetic_credit_card_approval.csv\")\nX = data.drop([\"Target\"], axis=1)\ny = data[\"Target\"]\n\n# Break into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)\n\n# Preview the data\nprint(\"Data successfully loaded!\\n\")\nX_train.head()", "Data successfully loaded!\n\n" ] ], [ [ " The dataset contains, for each applicant:\n- income (in the `Income` column),\n- the number of children (in the `Num_Children` column),\n- whether the applicant owns a car (in the `Own_Car` column, the value is `1` if the applicant owns a car, and is else `0`), and\n- whether the applicant owns a home (in the `Own_Housing` column, the value is `1` if the applicant owns a home, and is else `0`)\n\nWhen evaluating fairness, 
we'll check how the model performs for users in different groups, as identified by the `Group` column: \n- The `Group` column breaks the users into two groups (where each group corresponds to either `0` or `1`). \n- For instance, you can think of the column as breaking the users into two different races, ethnicities, or gender groupings. If the column breaks users into different ethnicities, `0` could correspond to a non-Hispanic user, while `1` corresponds to a Hispanic user. \n\n\nRun the next code cell without changes to train a simple model to approve or deny individuals for a credit card. The output shows the performance of the model.", "_____no_output_____" ] ], [ [ "from sklearn import tree\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\nimport matplotlib.pyplot as plt\n\n# Train a model and make predictions\nmodel_baseline = tree.DecisionTreeClassifier(random_state=0, max_depth=3)\nmodel_baseline.fit(X_train, y_train)\npreds_baseline = model_baseline.predict(X_test)\n\n# Function to plot confusion matrix\ndef plot_confusion_matrix(estimator, X, y_true, y_pred, display_labels=[\"Deny\", \"Approve\"],\n include_values=True, xticks_rotation='horizontal', values_format='',\n normalize=None, cmap=plt.cm.Blues):\n cm = confusion_matrix(y_true, y_pred, normalize=normalize)\n disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels)\n return cm, disp.plot(include_values=include_values, cmap=cmap, xticks_rotation=xticks_rotation,\n values_format=values_format)\n\n# Function to evaluate the fairness of the model\ndef get_stats(X, y, model, group_one, preds):\n \n y_zero, preds_zero, X_zero = y[group_one==False], preds[group_one==False], X[group_one==False]\n y_one, preds_one, X_one = y[group_one], preds[group_one], X[group_one]\n \n print(\"Total approvals:\", preds.sum())\n print(\"Group A:\", preds_zero.sum(), \"({}% of approvals)\".format(round(preds_zero.sum()/sum(preds)*100, 2)))\n print(\"Group B:\", 
preds_one.sum(), \"({}% of approvals)\".format(round(preds_one.sum()/sum(preds)*100, 2)))\n \n print(\"\\nOverall accuracy: {}%\".format(round((preds==y).sum()/len(y)*100, 2)))\n print(\"Group A: {}%\".format(round((preds_zero==y_zero).sum()/len(y_zero)*100, 2)))\n print(\"Group B: {}%\".format(round((preds_one==y_one).sum()/len(y_one)*100, 2)))\n \n cm_zero, disp_zero = plot_confusion_matrix(model, X_zero, y_zero, preds_zero)\n disp_zero.ax_.set_title(\"Group A\")\n cm_one, disp_one = plot_confusion_matrix(model, X_one, y_one, preds_one)\n disp_one.ax_.set_title(\"Group B\")\n \n print(\"\\nSensitivity / True positive rate:\")\n print(\"Group A: {}%\".format(round(cm_zero[1,1] / cm_zero[1].sum()*100, 2)))\n print(\"Group B: {}%\".format(round(cm_one[1,1] / cm_one[1].sum()*100, 2)))\n \n# Evaluate the model \nget_stats(X_test, y_test, model_baseline, X_test[\"Group\"]==1, preds_baseline)", "Total approvals: 38246\nGroup A: 8028 (20.99% of approvals)\nGroup B: 30218 (79.01% of approvals)\n\nOverall accuracy: 94.79%\nGroup A: 94.56%\nGroup B: 95.02%\n\nSensitivity / True positive rate:\nGroup A: 77.23%\nGroup B: 98.03%\n" ] ], [ [ "The confusion matrices above show how the model performs on some test data. We also print additional information (calculated from the confusion matrices) to assess fairness of the model. For instance,\n- The model approved 38246 people for a credit card. Of these individuals, 8028 belonged to Group A, and 30218 belonged to Group B.\n- The model is 94.56% accurate for Group A, and 95.02% accurate for Group B. These percentages can be calculated directly from the confusion matrix; for instance, for Group A, the accuracy is (39723+7528)/(39723+500+2219+7528).\n- The true positive rate (TPR) for Group A is 77.23%, and the TPR for Group B is 98.03%. 
These percentages can be calculated directly from the confusion matrix; for instance, for Group A, the TPR is 7528/(7528+2219).\n\n# 1) Varieties of fairness\n\nConsider three different types of fairness covered in the tutorial:\n- **Demographic parity**: Which group has an unfair advantage, with more representation in the group of approved applicants? (Roughly 50% of applicants are from Group A, and 50% of applicants are from Group B.)\n- **Equal accuracy**: Which group has an unfair advantage, where applicants are more likely to be correctly classified? \n- **Equal opportunity**: Which group has an unfair advantage, with a higher true positive rate?", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to get credit!)\nq_1.check()", "_____no_output_____" ] ], [ [ "Run the next code cell without changes to visualize the model.", "_____no_output_____" ] ], [ [ "def visualize_model(model, feature_names, class_names=[\"Deny\", \"Approve\"], impurity=False):\n plot_list = tree.plot_tree(model, feature_names=feature_names, class_names=class_names, impurity=impurity)\n [process_plot_item(item) for item in plot_list]\n\ndef process_plot_item(item):\n split_string = item.get_text().split(\"\\n\")\n if split_string[0].startswith(\"samples\"):\n item.set_text(split_string[-1])\n else:\n item.set_text(split_string[0])\n\nplt.figure(figsize=(20, 6))\nplot_list = visualize_model(model_baseline, feature_names=X_train.columns)", "_____no_output_____" ] ], [ [ "The flowchart shows how the model makes decisions:\n- `Group <= 0.5` checks what group the applicant belongs to: if the applicant belongs to Group A, then `Group <= 0.5` is true.\n- Entries like `Income <= 80210.5` check the applicant's income.\n\nTo follow the flow chart, we start at the top and trace a path depending on the details of the applicant. If the condition is true at a split, then we move down and to the left branch. 
If it is false, then we move to the right branch.\n\nFor instance, consider an applicant in Group B, who has an income of 75k. Then, \n- We start at the top of the flow chart. the applicant has an income of 75k, so `Income <= 80210.5` is true, and we move to the left.\n- Next, we check the income again. Since `Income <= 71909.5` is false, we move to the right.\n- The last thing to check is what group the applicant belongs to. The applicant belongs to Group B, so `Group <= 0.5` is false, and we move to the right, where the model has decided to approve the applicant.\n\n# 2) Understand the baseline model\n\nBased on the visualization, how can you explain one source of unfairness in the model?\n\n**Hint**: Consider the example applicant, but change the group membership from Group B to Group A (leaving all other characteristics the same). Is this slightly different applicant approved or denied by the model?", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to get credit!)\nq_2.check()", "_____no_output_____" ] ], [ [ "Next, you decide to remove group membership from the training data and train a new model. 
Do you think this will make the model treat the groups more equally?\n\nRun the next code cell to see how this new **group unaware** model performs.", "_____no_output_____" ] ], [ [ "# Create new dataset with gender removed\nX_train_unaware = X_train.drop([\"Group\"],axis=1)\nX_test_unaware = X_test.drop([\"Group\"],axis=1)\n\n# Train new model on new dataset\nmodel_unaware = tree.DecisionTreeClassifier(random_state=0, max_depth=3)\nmodel_unaware.fit(X_train_unaware, y_train)\n\n# Evaluate the model\npreds_unaware = model_unaware.predict(X_test_unaware)\nget_stats(X_test_unaware, y_test, model_unaware, X_test[\"Group\"]==1, preds_unaware)", "Total approvals: 36670\nGroup A: 11624 (31.7% of approvals)\nGroup B: 25046 (68.3% of approvals)\n\nOverall accuracy: 92.66%\nGroup A: 93.61%\nGroup B: 91.72%\n\nSensitivity / True positive rate:\nGroup A: 93.24%\nGroup B: 86.21%\n" ] ], [ [ "# 3) Varieties of fairness, part 2\n\nHow does this model compare to the first model you trained, when you consider **demographic parity**, **equal accuracy**, and **equal opportunity**? Once you have an answer, run the next code cell.", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to get credit!)\nq_3.check()", "_____no_output_____" ] ], [ [ "You decide to train a third potential model, this time with the goal of having each group have even representation in the group of approved applicants. (This is an implementation of group thresholds, which you can optionally read more about [here](https://pair-code.github.io/what-if-tool/ai-fairness.html).) \n\nRun the next code cell without changes to evaluate this new model. 
", "_____no_output_____" ] ], [ [ "# Change the value of zero_threshold to hit the objective\nzero_threshold = 0.11\none_threshold = 0.99\n\n# Evaluate the model\ntest_probs = model_unaware.predict_proba(X_test_unaware)[:,1]\npreds_approval = (((test_probs>zero_threshold)*1)*[X_test[\"Group\"]==0] + ((test_probs>one_threshold)*1)*[X_test[\"Group\"]==1])[0]\nget_stats(X_test, y_test, model_unaware, X_test[\"Group\"]==1, preds_approval)", "Total approvals: 38241\nGroup A: 19869 (51.96% of approvals)\nGroup B: 18372 (48.04% of approvals)\n\nOverall accuracy: 79.38%\nGroup A: 79.74%\nGroup B: 79.02%\n\nSensitivity / True positive rate:\nGroup A: 100.0%\nGroup B: 63.64%\n" ] ], [ [ "# 4) Varieties of fairness, part 3\n\nHow does this final model compare to the previous models, when you consider **demographic parity**, **equal accuracy**, and **equal opportunity**?", "_____no_output_____" ] ], [ [ "# Check your answer (Run this code cell to get credit!)\nq_4.check()", "_____no_output_____" ] ], [ [ "This is only a short exercise to explore different types of fairness, and to illustrate the tradeoff that can occur when you optimize for one type of fairness over another. We have focused on model training here, but in practice, to really mitigate bias, or to make ML systems fair, we need to take a close look at every step in the process, from data collection to releasing a final product to users. \n\nFor instance, if you take a close look at the data, you'll notice that on average, individuals from Group B tend to have higher income than individuals from Group A, and are also more likely to own a home or a car. Knowing this will prove invaluable to deciding what fairness criterion you should use, and to inform ways to achieve fairness. 
(*For instance, it would likely be a bad aproach, if you did not remove the historical bias in the data and then train the model to get equal accuracy for each group.*)\n\nIn this course, we intentionally avoid taking an opinionated stance on how exactly to minimize bias and ensure fairness in specific projects. This is because the correct answers continue to evolve, since AI fairness is an active area of research. This lesson was a hands-on introduction to the topic, and you can continue your learning by reading blog posts from the [Partnership on AI](https://www.partnershiponai.org/research-lander/) or by following conferences like the [ACM Conference on Fairness, Accountability, and Transparency (ACM FAccT)](https://facctconference.org/).", "_____no_output_____" ], [ "# Keep going\n\nContinue to **[learn how to use model cards](https://www.kaggle.com/var0101/model-cards)** to make machine learning models transparent to large audiences.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb23c79644695156433d21b40c14577aad07abe4
253,505
ipynb
Jupyter Notebook
Notebook/lesson_19.ipynb
mehdihicham/bootcamp
35ccc92db231988d845fc79b45d4e1efc8f448e4
[ "CC-BY-4.0", "MIT" ]
null
null
null
Notebook/lesson_19.ipynb
mehdihicham/bootcamp
35ccc92db231988d845fc79b45d4e1efc8f448e4
[ "CC-BY-4.0", "MIT" ]
null
null
null
Notebook/lesson_19.ipynb
mehdihicham/bootcamp
35ccc92db231988d845fc79b45d4e1efc8f448e4
[ "CC-BY-4.0", "MIT" ]
null
null
null
130.605358
22,535
0.553662
[ [ [ "import pandas as pd\nimport bokeh.io\nimport bokeh.plotting\nbokeh.io.output_notebook()\n", "_____no_output_____" ], [ "df = pd.read_csv(\"data/gfmt_sleep.csv\",na_values = '*')\ndf['insomnia'] = df['sci']<16\ndf.head()", "_____no_output_____" ], [ "x = 'confidence when correct'\ny = 'confidence when incorrect'\n\np = bokeh.plotting.figure(\n frame_width = 400,\n frame_height = 300,\n x_axis_label = x,\n y_axis_label = y\n)", "_____no_output_____" ], [ "#create the glyphs as circle\np.circle(\n source = df,\n x = x , #name of the df column of interest\n y = y ,\n)", "_____no_output_____" ], [ "bokeh.io.show(p)", "_____no_output_____" ], [ "#distinguish insomniac and non insomniac\n# For convenience\nx = 'confidence when correct'\ny = 'confidence when incorrect'\n\n# Make figure\np = bokeh.plotting.figure(\n frame_width=400,\n frame_height=300,\n x_axis_label=x,\n y_axis_label=y,\n tooltips = [\n ('p-number', '@{participant number}'), #braces because there is a space\n ('gender', '@gender'),\n ('age', '@age'),\n ]\n)\n\n# Add glyphs\np.circle(\n source=df.loc[~df['insomnia'], :],\n x=x,\n y=y,\n legend_label='normal sleepers'\n)\n\np.circle(\n source=df.loc[df['insomnia'], :],\n x=x,\n y=y,\n color='orange',\n legend_label='insomniacs'\n)\n\n\np.legend.location = 'top_left'\np.legend.click_policy = \"hide\"\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "bokeh.io.save(\n p,\n filename = \"insomniac_confidence_correct.html\",\n title = 'Sample Bokeh plot' \n)", "/Users/mehdihicham/opt/anaconda3/lib/python3.8/site-packages/bokeh/io/saving.py:125: UserWarning: save() called but no resources were supplied and output_file(...) was never called, defaulting to resources.CDN\n warn(\"save() called but no resources were supplied and output_file(...) 
was never called, defaulting to resources.CDN\")\n" ] ], [ [ "## lesson 20 \n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nimport iqplot\n\nimport bokeh.plotting\nimport bokeh.io\n\nbokeh.io.output_notebook()", "_____no_output_____" ], [ "df = pd.read_csv('data/frog_tongue_adhesion.csv',comment = '#')\ndf.head()", "_____no_output_____" ], [ "p = iqplot.box(\n data = df,\n q = \"impact force (mN)\",\n cats = 'ID'\n) ", "_____no_output_____" ], [ "bokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.strip(\n data=df,\n q=\"impact force (mN)\",\n cats=\"ID\",\n)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.stripbox(\n data=df,\n q=\"impact force (mN)\",\n cats=\"ID\",\n)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.histogram(\n data=df,\n q=\"impact force (mN)\",\n cats=\"ID\",\n)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "# Generate normally distributed data\nrg = np.random.default_rng(3252)\nx = rg.normal(size=500)\n\n# Plot the histogram\np = iqplot.histogram(x, rug=False)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.ecdf(x)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.ecdf(\n data=df,\n q=\"impact force (mN)\",\n cats=\"ID\",\n)\n\nbokeh.io.show(p)", "_____no_output_____" ], [ "p = iqplot.ecdf(\n data=df,\n q=\"impact force (mN)\",\n cats=\"ID\",\n style='staircase',\n)\n\nbokeh.io.show(p)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb23e00e40054575f2016db2707c5698976ff72d
492,361
ipynb
Jupyter Notebook
src/analysis/python/scripts/indirect/.ipynb_checkpoints/parse_transcripts-checkpoint.ipynb
jm4474/FOMCTextAnalysis
0a039d9b197f487d8ba8c5d230b587c48cf865f6
[ "MIT" ]
6
2020-07-03T23:39:50.000Z
2022-03-30T07:55:23.000Z
src/analysis/python/scripts/indirect/.ipynb_checkpoints/parse_transcripts-checkpoint.ipynb
jm4474/FOMCTextAnalysis
0a039d9b197f487d8ba8c5d230b587c48cf865f6
[ "MIT" ]
null
null
null
src/analysis/python/scripts/indirect/.ipynb_checkpoints/parse_transcripts-checkpoint.ipynb
jm4474/FOMCTextAnalysis
0a039d9b197f487d8ba8c5d230b587c48cf865f6
[ "MIT" ]
12
2019-12-10T13:34:21.000Z
2022-01-24T16:39:15.000Z
168.616781
119,576
0.869433
[ [ [ "# Parsing Text and the LDA output", "_____no_output_____" ], [ "## a.1) Opening pdfs and extracting their text", "_____no_output_____" ], [ "Under the material for Lecture 3 I have added a folder called FOMC_pdf. This folder contains the transcripts of all the meetings that took place during the [Greenspan](https://en.wikipedia.org/wiki/Alan_Greenspan) era (August 11, 1987 to January 31st, 2006). \n\nWe are some lines of code to parse those pdfs. ", "_____no_output_____" ] ], [ [ "#load operating system module\n\nimport os", "_____no_output_____" ] ], [ [ "This module is used to conduct operating system like tasks (such as opening a file, or listing the contents of a directory).", "_____no_output_____" ] ], [ [ "#Define the base directory containing the FOMC statements\n\nbase_directory = \"../../../../collection/python/data/transcript_raw_text\"", "_____no_output_____" ], [ "#Return a list containing the name of the files in the directory\n\nraw_doc = os.listdir(base_directory)", "_____no_output_____" ], [ "#Sort the list in ascending order\n\nfilelist = sorted(raw_doc)[:-1]", "_____no_output_____" ], [ "filelist", "_____no_output_____" ] ], [ [ "To parse the text in the pdfs I will use the PyPDF2 module (but there are other ways to do it. See for example the Tika module). \n\nWarning: Depending on your Python configuration, you might not be able to use PyPDF2 directly. 
I downloaded Python using the Anaconda distribution and I needed to type directly on the terminal\n\nconda -forge pypdf2\n\nAfter doing this, I was able to upload the PyPDF2 module", "_____no_output_____" ], [ "## a.2) Organizing the information in a data frame", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "#load re to split the content of the pdfs by the occurrence of a pattern\n\nimport re", "_____no_output_____" ], [ "#Creates a data frame containing the date|s of the FOMC meetings\n\ndate = pd.Series(data=filelist).apply(lambda x: x[0:10])", "_____no_output_____" ], [ "print(date)", "0 1976-03-29\n1 1976-04-20\n2 1976-05-18\n3 1976-06-22\n4 1976-07-19\n5 1976-08-17\n6 1976-09-21\n7 1976-10-19\n8 1976-11-08\n9 1976-11-16\n10 1976-12-20\n11 1977-01-17\n12 1977-02-15\n13 1977-03-15\n14 1977-04-19\n15 1977-05-17\n16 1977-05-27\n17 1977-06-21\n18 1977-07-19\n19 1977-08-16\n20 1977-09-20\n21 1977-10-17\n22 1977-11-15\n23 1977-12-19\n24 1978-01-05\n25 1978-01-17\n26 1978-02-28\n27 1978-03-10\n28 1978-03-21\n29 1978-04-18\n ... \n382 2009-02-07\n383 2009-03-17\n384 2009-04-28\n385 2009-06-03\n386 2009-06-23\n387 2009-08-11\n388 2009-09-22\n389 2009-11-03\n390 2009-12-15\n391 2010-01-26\n392 2010-03-16\n393 2010-04-27\n394 2010-05-09\n395 2010-06-22\n396 2010-08-10\n397 2010-09-21\n398 2010-10-15\n399 2010-11-02\n400 2010-12-14\n401 2011-01-25\n402 2011-03-15\n403 2011-04-26\n404 2011-06-21\n405 2011-08-01\n406 2011-08-09\n407 2011-09-20\n408 2011-11-01\n409 2011-11-28\n410 2011-12-13\n411 2013-10-16\nLength: 412, dtype: object\n" ], [ "documents = []", "_____no_output_____" ], [ "for doc in filelist:\n with open(\"{}/{}\".format(base_directory,doc),\"r\") as f:\n documents += f.read()", "_____no_output_____" ], [ "for i in range(len(document)):\n\n interjections = re.split('MR. |MS. 
|CHAIRMAN |VICE CHAIRMAN ', document[i]) \n #Split the doc by interjections\n \n temp_df = pd.DataFrame(columns=['Date','Speaker','content'],index=range(len(interjections))) \n #Temporary data frame\n \n for j in range(len(interjections)):\n \n \n interjection = interjections[j].replace('\\n',' ') \n #Replace page break (\\n) with space\n \n temp_df['Date'].loc[j] = date[i]\n \n temp_df['Speaker'].loc[j] = interjection.split('.')[0]\n \n temp_df['content'].loc[j] = ''.join(interjection.split('.')[1:])\n \n parsed_text = pd.concat([parsed_text,temp_df],ignore_index=True) \n ", "_____no_output_____" ], [ "parsed_text", "_____no_output_____" ] ], [ [ "We will focus only on Greenspan's interjections.", "_____no_output_____" ] ], [ [ "Greenspan_text = parsed_text.loc[parsed_text['Speaker'] == 'GREENSPAN']", "_____no_output_____" ], [ "Greenspan_text.index = range(sum(parsed_text['Speaker'] == 'GREENSPAN'))", "_____no_output_____" ], [ "Greenspan_text", "_____no_output_____" ] ], [ [ "## a.3) Bag of Words", "_____no_output_____" ] ], [ [ "Greenspan_corpus = list(Greenspan_text['content'])", "_____no_output_____" ], [ "len(Greenspan_corpus)", "_____no_output_____" ], [ "Greenspan_corpus[0]", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import CountVectorizer", "_____no_output_____" ], [ "vectorizer = CountVectorizer()", "_____no_output_____" ], [ "term_doc_matrix = vectorizer.fit_transform(Greenspan_corpus[0:1]).todense()", "_____no_output_____" ], [ "vectorizer.get_feature_names()", "_____no_output_____" ], [ "term_doc_matrix", "_____no_output_____" ] ], [ [ "## a.4) Cloud of words", "_____no_output_____" ], [ "If you got the Anaconda distribution of Python go the terminal and type\n\nconda install -c conda-forge wordcloud", "_____no_output_____" ], [ "Here is a good tutorial on how to generate words of clouds:\n\nhttps://www.datacamp.com/community/tutorials/wordcloud-python", "_____no_output_____" ] ], [ [ "from wordcloud import 
WordCloud\n\nwordcloud = WordCloud(background_color='white', font_step = 3, stopwords='None', relative_scaling=1).generate(Greenspan_text['content'].loc[0])\n", "_____no_output_____" ], [ "Greenspan_text['content'].loc[0]", "_____no_output_____" ], [ "Greenspan_text.content[0]", "_____no_output_____" ] ], [ [ "Display the generated image", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "plt.imshow(wordcloud, interpolation='bilinear')\n\nplt.axis(\"off\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "Let's do it for the whole Greenspan interjections", "_____no_output_____" ] ], [ [ "text_aux = \" \".join(interjection for interjection in Greenspan_text.content)", "_____no_output_____" ], [ "len(text_aux)", "_____no_output_____" ], [ "wordcloudG = WordCloud(background_color='white', font_step = 3, relative_scaling=1).generate(text_aux)", "_____no_output_____" ], [ "plt.imshow(wordcloudG, interpolation='bilinear')\n\nplt.axis(\"off\")\n\nplt.show()", "_____no_output_____" ] ], [ [ "## a.5) LDA", "_____no_output_____" ], [ "### Tokenize", "_____no_output_____" ] ], [ [ "import nltk", "_____no_output_____" ], [ "from nltk.tokenize import RegexpTokenizer\n\ntokenizer = RegexpTokenizer(r'\\w+')", "_____no_output_____" ], [ "tokens_example = tokenizer.tokenize(Greenspan_text['content'].loc[0])", "_____no_output_____" ], [ "Greenspan_text['content'].loc[0]", "_____no_output_____" ], [ "tokens_example", "_____no_output_____" ] ], [ [ "### Remove Stop Words", "_____no_output_____" ] ], [ [ "from nltk.corpus import stopwords\n\nnltk.download('stopwords')\n\nstopwords = stopwords.words('english')", "[nltk_data] Downloading package stopwords to /Users/jmo11/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n" ], [ "len(stopwords)", "_____no_output_____" ], [ "stopped_tokens = [i for i in tokens_example if not i in stopwords]", "_____no_output_____" ], [ "stopped_tokens", "_____no_output_____" ] ], [ [ "### Stems", 
"_____no_output_____" ] ], [ [ "from nltk.stem.porter import PorterStemmer\n\n# Create p_stemmer of class PorterStemmer\np_stemmer = PorterStemmer()", "_____no_output_____" ], [ "p_stemmer.*?", "_____no_output_____" ], [ "texts_G = [p_stemmer.stem(i) for i in stopped_tokens]", "_____no_output_____" ], [ "texts_G", "_____no_output_____" ] ], [ [ "### Loop to Tokenize, Remove Stop Words, Stem", "_____no_output_____" ] ], [ [ "texts = []", "_____no_output_____" ], [ "for i in range(0,len(Greenspan_text['content'])):\n\n tokens = tokenizer.tokenize(Greenspan_text['content'].loc[i])\n \n stopped_tokens = [j for j in tokens if not j in stopwords]\n \n texts.append([p_stemmer.stem(j) for j in stopped_tokens])\n ", "_____no_output_____" ], [ "Greenspan_text['content'].loc[1]", "_____no_output_____" ], [ "texts", "_____no_output_____" ], [ "len(Greenspan_text['content'])", "_____no_output_____" ] ], [ [ "### LDA", "_____no_output_____" ], [ "conda install -c anaconda gensim", "_____no_output_____" ] ], [ [ "import gensim", "_____no_output_____" ], [ "from gensim import corpora, models\n\ndictionary = corpora.Dictionary(texts)", "_____no_output_____" ], [ "dictionary.*?", "_____no_output_____" ], [ "corpus = [dictionary.doc2bow(text) for text in texts]", "_____no_output_____" ], [ "corpus[2]", "_____no_output_____" ], [ "ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=2, id2word = dictionary, passes=20)", "_____no_output_____" ], [ "for t in range(2):\n\n plt.figure()\n \n plt.imshow(WordCloud(background_color='white').fit_words(dict(ldamodel.show_topic(t,200))),interpolation='bilinear')\n \n plt.axis('off')\n \n plt.show", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb23e1d000f64897a172dbefa4999cfab59dcc10
25,810
ipynb
Jupyter Notebook
numpy-random1.ipynb
pocathain08/numpy-ran
591832bdace6c2b60778f23ed2478d0ea1a26ace
[ "MIT" ]
null
null
null
numpy-random1.ipynb
pocathain08/numpy-ran
591832bdace6c2b60778f23ed2478d0ea1a26ace
[ "MIT" ]
null
null
null
numpy-random1.ipynb
pocathain08/numpy-ran
591832bdace6c2b60778f23ed2478d0ea1a26ace
[ "MIT" ]
null
null
null
64.525
5,580
0.800387
[ [ [ "# The numpy.random package", "_____no_output_____" ], [ "- Ref : https://docs.scipy.org/doc/numpy-1.15.1/reference/routines.random.html", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ], [ " np.random.rand(3,2)", "_____no_output_____" ] ], [ [ "- Creates an array of random numbers that contains 3 other arrays that look like floats\n- [0, 1) included 0 but not 1 in this distribution.\n- Generates random numbers using a bar chart of cts variables ", "_____no_output_____" ] ], [ [ "np.random.rand(4,2)", "_____no_output_____" ], [ "np.random.rand(3,10)", "_____no_output_____" ], [ "np.random.rand(4)", "_____no_output_____" ], [ "np.random.rand()", "_____no_output_____" ], [ "np.random.rand(3, 2, 4)", "_____no_output_____" ], [ "y = np.random.rand(10000)", "_____no_output_____" ], [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nplt.hist(y)", "_____no_output_____" ] ], [ [ "- From above plot, this function is almost as likely to select any number in the interval [0, 1) as the other. ", "_____no_output_____" ] ], [ [ "import numpy as np\nz = np.random.uniform(0,1000,10000)", "_____no_output_____" ] ], [ [ "- 0 is the lower boundary, 1000 is the upper bound. \n- The upperbound is not included in the interval. \n- The interval size is 10000. ", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nplt.hist(z)", "_____no_output_____" ] ], [ [ "- Roughtly the same as the rand function", "_____no_output_____" ], [ "# The Normal Distribution", "_____no_output_____" ] ], [ [ "import numpy as np\nw = np.random.normal(0, 0.1, 1000)", "_____no_output_____" ] ], [ [ "- Note that the numbers generated by w will be written in exponiatials \n- 0 is the average of the distribution. 0.1 is the spread of the distribution. 
", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\nplt.hist(w)", "_____no_output_____" ] ], [ [ "# Random Generator\n- Ref https://docs.scipy.org/doc/numpy-1.15.1/reference/routines.random.html\n- Computers need a mechanism to generate a random number. \n- The numbers generated look random but are pusedo random. The key piece of information to predict the number is based on the seed, in Random generator. \n- EG, pi doesn't have any peroidic patterns! The seed is the position from where you started. Using the pi expansion, pick a number in pi, it becomes possible to predict the random number. In some applications, where you need to test code, you can set the seed. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb23e8aa31114f2465539d5d39ee97ff5b62c64e
22,492
ipynb
Jupyter Notebook
day03_Linear_classification/03_intro_to_pytorch.ipynb
neychev/harbour_ml2020
429d3a7b28d3e145d92659aeaed3481fbec70b57
[ "MIT" ]
2
2020-03-05T09:59:25.000Z
2022-03-18T05:37:54.000Z
day03_Linear_classification/03_intro_to_pytorch.ipynb
neychev/harbour_ml2020
429d3a7b28d3e145d92659aeaed3481fbec70b57
[ "MIT" ]
null
null
null
day03_Linear_classification/03_intro_to_pytorch.ipynb
neychev/harbour_ml2020
429d3a7b28d3e145d92659aeaed3481fbec70b57
[ "MIT" ]
3
2020-02-17T08:02:58.000Z
2020-03-03T10:56:55.000Z
29.830239
410
0.535924
[ [ [ "## 03 Intro to PyTorch", "_____no_output_____" ], [ "*special thanks to YSDA team for provided materials*", "_____no_output_____" ], [ "What comes today:\n- Introduction to PyTorch\n- Automatic gradient computation\n- Logistic regression (it's a neural network, actually ;) )", "_____no_output_____" ], [ "![img](https://pytorch.org/tutorials/_static/pytorch-logo-dark.svg)\n\n__This notebook__ will teach you to use pytorch low-level core. You can install it [here](http://pytorch.org/).\n\n__Pytorch feels__ differently than other frameworks (like tensorflow/theano) on almost every level. TensorFlow makes your code live in two \"worlds\" simultaneously: symbolic graphs and actual tensors. First you declare a symbolic \"recipe\" of how to get from inputs to outputs, then feed it with actual minibatches of data. In pytorch, __there's only one world__: all tensors have a numeric value.\n\nYou compute outputs on the fly without pre-declaring anything. The code looks exactly as in pure numpy with one exception: pytorch computes gradients for you. And can run stuff on GPU. And has a number of pre-implemented building blocks for your neural nets. 
[And a few more things.](https://medium.com/towards-data-science/pytorch-vs-tensorflow-spotting-the-difference-25c75777377b)\n\nLet's dive into it!", "_____no_output_____" ] ], [ [ "# !wget hhttps://raw.githubusercontent.com/neychev/harbour_ml2020/master/day03_Linear_classification/notmnist.py", "_____no_output_____" ], [ "import numpy as np\nimport torch\nprint(torch.__version__)", "1.4.0\n" ], [ "# numpy world\n\nx = np.arange(16).reshape(4,4)\n\nprint(\"X :\\n%s\\n\" % x)\nprint(\"X.shape : %s\\n\" % (x.shape,))\nprint(\"add 5 :\\n%s\\n\" % (x + 5))\nprint(\"X*X^T :\\n%s\\n\" % np.dot(x,x.T))\nprint(\"mean over cols :\\n%s\\n\" % (x.mean(axis=-1)))\nprint(\"cumsum of cols :\\n%s\\n\" % (np.cumsum(x,axis=0)))", "X :\n[[ 0 1 2 3]\n [ 4 5 6 7]\n [ 8 9 10 11]\n [12 13 14 15]]\n\nX.shape : (4, 4)\n\nadd 5 :\n[[ 5 6 7 8]\n [ 9 10 11 12]\n [13 14 15 16]\n [17 18 19 20]]\n\nX*X^T :\n[[ 14 38 62 86]\n [ 38 126 214 302]\n [ 62 214 366 518]\n [ 86 302 518 734]]\n\nmean over cols :\n[ 1.5 5.5 9.5 13.5]\n\ncumsum of cols :\n[[ 0 1 2 3]\n [ 4 6 8 10]\n [12 15 18 21]\n [24 28 32 36]]\n\n" ], [ "# pytorch world\n\nx = np.arange(16).reshape(4,4)\n\nx = torch.tensor(x, dtype=torch.float32) #or torch.arange(0,16).view(4,4)\n\nprint (\"X :\\n%s\" % x)\nprint(\"X.shape : %s\\n\" % (x.shape,))\nprint (\"add 5 :\\n%s\" % (x + 5))\nprint (\"X*X^T :\\n%s\" % torch.matmul(x,x.transpose(1,0))) #short: x.mm(x.t())\nprint (\"mean over cols :\\n%s\" % torch.mean(x,dim=-1))\nprint (\"cumsum of cols :\\n%s\" % torch.cumsum(x,dim=0))", "X :\ntensor([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\nX.shape : torch.Size([4, 4])\n\nadd 5 :\ntensor([[ 5., 6., 7., 8.],\n [ 9., 10., 11., 12.],\n [13., 14., 15., 16.],\n [17., 18., 19., 20.]])\nX*X^T :\ntensor([[ 14., 38., 62., 86.],\n [ 38., 126., 214., 302.],\n [ 62., 214., 366., 518.],\n [ 86., 302., 518., 734.]])\nmean over cols :\ntensor([ 1.5000, 5.5000, 9.5000, 13.5000])\ncumsum of cols :\ntensor([[ 0., 
1., 2., 3.],\n [ 4., 6., 8., 10.],\n [12., 15., 18., 21.],\n [24., 28., 32., 36.]])\n" ] ], [ [ "#### NumPy and Pytorch\n\nAs you can notice, pytorch allows you to hack stuff much the same way you did with numpy. This means that you can _see the numeric value of any tensor at any moment of time_. Debugging such code can be done with by printing tensors or using any debug tool you want (e.g. [gdb](https://wiki.python.org/moin/DebuggingWithGdb)).\n\nYou could also notice the a few new method names and a different API. So no, there's no compatibility with numpy [yet](https://github.com/pytorch/pytorch/issues/2228) and yes, you'll have to memorize all the names again. Get excited!\n\n![img](http://i0.kym-cdn.com/entries/icons/original/000/017/886/download.jpg)\n\nFor example, \n* If something takes a list/tuple of axes in numpy, you can expect it to take *args in pytorch\n * `x.reshape([1,2,8]) -> x.view(1,2,8)`\n* You should swap _axis_ for _dim_ in operations like mean or cumsum\n * `x.sum(axis=-1) -> x.sum(dim=-1)`\n* most mathematical operations are the same, but types an shaping is different\n * `x.astype('int64') -> x.type(torch.LongTensor)`\n\nTo help you acclimatize, there's a [table](https://github.com/torch/torch7/wiki/Torch-for-Numpy-users) covering most new things. There's also a neat [documentation page](http://pytorch.org/docs/master/).\n\nFinally, if you're stuck with a technical problem, we recommend searching [pytorch forumns](https://discuss.pytorch.org/). Or just googling, which usually works just as efficiently. \n\nIf you feel like you almost give up, remember two things: __GPU__ and __free gradients__. Besides you can always jump back to numpy with x.numpy()", "_____no_output_____" ], [ "### Warmup: trigonometric knotwork\n_inspired by [this post](https://www.quora.com/What-are-the-most-interesting-equation-plots)_\n\nThere are some simple mathematical functions with cool plots. 
For one, consider this:\n\n$$ x(t) = t - 1.5 * cos( 15 t) $$\n$$ y(t) = t - 1.5 * sin( 16 t) $$\n", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n\nt = torch.linspace(-10, 10, steps = 10000)\n\n# compute x(t) and y(t) as defined above\nx = <your_code_here>\ny = <your_code_here>\n\nplt.plot(x.numpy(), y.numpy())", "_____no_output_____" ] ], [ [ "if you're done early, try adjusting the formula and seing how it affects the function", "_____no_output_____" ], [ "```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n", "_____no_output_____" ], [ "## Automatic gradients\n\nAny self-respecting DL framework must do your backprop for you. Torch handles this with the `autograd` module.\n\nThe general pipeline looks like this:\n* When creating a tensor, you mark it as `requires_grad`:\n * __```torch.zeros(5, requires_grad=True)```__\n * torch.tensor(np.arange(5), dtype=torch.float32, requires_grad=True)\n* Define some differentiable `loss = arbitrary_function(a)`\n* Call `loss.backward()`\n* Gradients are now available as ```a.grads```\n\n__Here's an example:__ let's fit a linear regression on Boston house prices", "_____no_output_____" ] ], [ [ "from sklearn.datasets import load_boston\nboston = load_boston()\nplt.scatter(boston.data[:, -1], boston.target)", "_____no_output_____" ], [ "from torch.autograd import Variable\nw = torch.zeros(1, requires_grad=True)\nb = torch.zeros(1, requires_grad=True)\n\nx = torch.tensor(boston.data[:,-1] / 10, dtype=torch.float32)\ny = torch.tensor(boston.target, dtype=torch.float32)", "_____no_output_____" ], [ "y_pred = w * x + b\nloss = torch.mean( (y_pred - y)**2 )\n\n# propagete gradients\nloss.backward()", "_____no_output_____" ] ], [ [ "The gradients are now stored in `.grad` of those variables that require them.", "_____no_output_____" ] ], [ [ "print(\"dL/dw = {}\\n\".format(w.grad))\nprint(\"dL/db = {}\\n\".format(b.grad))", 
"_____no_output_____" ] ], [ [ "If you compute gradient from multiple losses, the gradients will add up at variables, therefore it's useful to __zero the gradients__ between iteratons.", "_____no_output_____" ] ], [ [ "from IPython.display import clear_output\n\nfor i in range(100):\n\n y_pred = w * x + b\n loss = torch.mean( (y_pred - y)**2 )\n loss.backward()\n\n w.data -= 0.05 * w.grad.data\n b.data -= 0.05 * b.grad.data\n \n #zero gradients\n w.grad.data.zero_()\n b.grad.data.zero_()\n \n # the rest of code is just bells and whistles\n if (i+1)%5==0:\n clear_output(True)\n plt.scatter(x.data.numpy(), y.data.numpy())\n plt.scatter(x.data.numpy(), y_pred.data.numpy(), color='orange', linewidth=5)\n plt.show()\n\n print(\"loss = \", loss.data.numpy())\n if loss.data.numpy() < 0.5:\n print(\"Done!\")\n break", "_____no_output_____" ] ], [ [ "__Quest__: try implementing and writing some nonlinear regression. You can try quadratic features or some trigonometry, or a simple neural network. The only difference is that now you have more variables and a more complicated `y_pred`. ", "_____no_output_____" ], [ "**Remember!**\n![img](https://media.giphy.com/media/3o751UMCYtSrRAFRFC/giphy.gif)\n\nWhen dealing with more complex stuff like neural network, it's best if you use tensors the way samurai uses his sword. \n", "_____no_output_____" ], [ "# High-level pytorch\n\nSo far we've been dealing with low-level torch API. While it's absolutely vital for any custom losses or layers, building large neura nets in it is a bit clumsy.\n\nLuckily, there's also a high-level torch interface with a pre-defined layers, activations and training algorithms. 
\n\nWe'll cover them as we go through a simple image recognition problem: classifying letters into __\"A\"__ vs __\"B\"__.\n", "_____no_output_____" ] ], [ [ "from notmnist import load_notmnist\nX_train, y_train, X_test, y_test = load_notmnist(letters='AB')\nX_train, X_test = X_train.reshape([-1, 784]), X_test.reshape([-1, 784])\n\nprint(\"Train size = %i, test_size = %i\"%(len(X_train),len(X_test)))", "_____no_output_____" ], [ "for i in [0,1]:\n plt.subplot(1, 2, i + 1)\n plt.imshow(X_train[i].reshape([28,28]))\n plt.title(str(y_train[i]))", "_____no_output_____" ] ], [ [ "Let's start with layers. The main abstraction here is __`torch.nn.Module`__", "_____no_output_____" ] ], [ [ "from torch import nn\nimport torch.nn.functional as F\n\nprint(nn.Module.__doc__)", "_____no_output_____" ] ], [ [ "There's a vast library of popular layers and architectures already built for ya'.\n\nThis is a binary classification problem, so we'll train a __Logistic Regression with sigmoid__.\n$$P(y_i | X_i) = \\sigma(W \\cdot X_i + b) ={ 1 \\over {1+e^{- [W \\cdot X_i + b]}} }$$\n", "_____no_output_____" ] ], [ [ "# create a network that stacks layers on top of each other\nmodel = nn.Sequential()\n\n# add first \"dense\" layer with 784 input units and 1 output unit. \nmodel.add_module('l1', nn.Linear(784, 1))\n\n# add softmax activation for probabilities. 
Normalize over axis 1\n# note: layer names must be unique\nmodel.add_module('l2', nn.Sigmoid())", "_____no_output_____" ], [ "print(\"Weight shapes:\", [w.shape for w in model.parameters()])", "_____no_output_____" ], [ "# create dummy data with 3 samples and 784 features\nx = torch.tensor(X_train[:3], dtype=torch.float32)\ny = torch.tensor(y_train[:3], dtype=torch.float32)\n\n# compute outputs given inputs, both are variables\ny_predicted = model(x)[:, 0]\n\ny_predicted # display what we've got", "_____no_output_____" ] ], [ [ "Let's now define a loss function for our model.\n\nThe natural choice is to use binary crossentropy (aka logloss, negative llh):\n$$ L = {1 \\over N} \\underset{X_i,y_i} \\sum - [ y_i \\cdot log P(y_i | X_i) + (1-y_i) \\cdot log (1-P(y_i | X_i)) ]$$\n\n", "_____no_output_____" ] ], [ [ "crossentropy = ### YOUR CODE\n\nloss = ### YOUR CODE\n\nassert tuple(crossentropy.size()) == (3,), \"Crossentropy must be a vector with element per sample\"\nassert tuple(loss.size()) == (1,), \"Loss must be scalar. Did you forget the mean/sum?\"\nassert loss.data.numpy()[0] > 0, \"Crossentropy must non-negative, zero only for perfect prediction\"\nassert loss.data.numpy()[0] <= np.log(3), \"Loss is too large even for untrained model. Please double-check it.\"", "_____no_output_____" ] ], [ [ "__Note:__ you can also find many such functions in `torch.nn.functional`, just type __`F.<tab>`__.", "_____no_output_____" ], [ "__Torch optimizers__\n\nWhen we trained Linear Regression above, we had to manually .zero_() gradients on both our variables. 
Imagine that code for a 50-layer network.\n\nAgain, to keep it from getting dirty, there's `torch.optim` module with pre-implemented algorithms:", "_____no_output_____" ] ], [ [ "opt = torch.optim.RMSprop(model.parameters(), lr=0.01)\n\n# here's how it's used:\nloss.backward() # add new gradients\nopt.step() # change weights\nopt.zero_grad() # clear gradients", "_____no_output_____" ], [ "# dispose of old variables to avoid bugs later\ndel x, y, y_predicted, loss, y_pred", "_____no_output_____" ] ], [ [ "### Putting it all together", "_____no_output_____" ] ], [ [ "# create network again just in case\nmodel = nn.Sequential()\nmodel.add_module('first', nn.Linear(784, 1))\nmodel.add_module('second', nn.Sigmoid())\n\nopt = torch.optim.Adam(model.parameters(), lr=1e-3)", "_____no_output_____" ], [ "history = []\n\nfor i in range(100):\n \n # sample 256 random images\n ix = np.random.randint(0, len(X_train), 256)\n x_batch = torch.tensor(X_train[ix], dtype=torch.float32)\n y_batch = torch.tensor(y_train[ix], dtype=torch.float32)\n \n # predict probabilities\n y_predicted = ### YOUR CODE\n \n assert y_predicted.dim() == 1, \"did you forget to select first column with [:, 0]\"\n \n # compute loss, just like before\n loss = ### YOUR CODE\n \n # compute gradients\n ### YOUR CODE\n \n # Adam step\n ### YOUR CODE\n \n # clear gradients\n ### YOUR CODE\n \n history.append(loss.data.numpy())\n \n if i % 10 == 0:\n print(\"step #%i | mean loss = %.3f\" % (i, np.mean(history[-10:])))", "_____no_output_____" ] ], [ [ "__Debugging tips:__\n* make sure your model predicts probabilities correctly. Just print them and see what's inside.\n* don't forget _minus_ sign in the loss function! It's a mistake 99% ppl do at some point.\n* make sure you zero-out gradients after each step. 
Srsly:)\n* In general, pytorch's error messages are quite helpful, read 'em before you google 'em.\n* if you see nan/inf, print what happens at each iteration to find our where exactly it occurs.\n * If loss goes down and then turns nan midway through, try smaller learning rate. (Our current loss formula is unstable).\n", "_____no_output_____" ], [ "### Evaluation\n\nLet's see how our model performs on test data", "_____no_output_____" ] ], [ [ "# use your model to predict classes (0 or 1) for all test samples\npredicted_y_test = ### YOUR CODE\npredicted_y_test = np.array(predicted_y_test > 0.5)\n \nassert isinstance(predicted_y_test, np.ndarray), \"please return np array, not %s\" % type(predicted_y_test)\nassert predicted_y_test.shape == y_test.shape, \"please predict one class for each test sample\"\nassert np.in1d(predicted_y_test, y_test).all(), \"please predict class indexes\"\n\naccuracy = np.mean(predicted_y_test == y_test)\n\nprint(\"Test accuracy: %.5f\" % accuracy)\nassert accuracy > 0.95, \"try training longer\"\n\nprint('Great job!')", "_____no_output_____" ] ], [ [ "```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n\n```\n### More about pytorch:\n* Using torch on GPU and multi-GPU - [link](http://pytorch.org/docs/master/notes/cuda.html)\n* More tutorials on pytorch - [link](http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html)\n* Pytorch examples - a repo that implements many cool DL models in pytorch - [link](https://github.com/pytorch/examples)\n* Practical pytorch - a repo that implements some... other cool DL models... yes, in pytorch - [link](https://github.com/spro/practical-pytorch)\n* And some more - [link](https://www.reddit.com/r/pytorch/comments/6z0yeo/pytorch_and_pytorch_tricks_for_kaggle/)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb23ef7eafa2eac3a97382b6efaa2dd3442a44d2
1,327
ipynb
Jupyter Notebook
comments.ipynb
cpignedoli/aiidalab-empa-surfaces
1a8a250ace1a2ae48de28a06c31abc1aa10c9d3d
[ "MIT" ]
2
2021-05-27T00:58:50.000Z
2021-09-13T22:57:18.000Z
comments.ipynb
nanotech-empa/aiidalab-empa-surfaces
d74ac695bc8abe6eb7b2535bd08ac694be8d0606
[ "MIT" ]
6
2020-05-26T08:24:49.000Z
2022-01-28T08:44:20.000Z
comments.ipynb
nanotech-empa/aiidalab-empa-surfaces
d74ac695bc8abe6eb7b2535bd08ac694be8d0606
[ "MIT" ]
3
2017-11-16T09:06:16.000Z
2018-03-10T01:11:53.000Z
19.231884
84
0.546345
[ [ [ "%aiida\nimport ipywidgets as ipw\nfrom apps.surfaces.widgets.comments import CommentsWidget \nimport urllib.parse as urlparse", "_____no_output_____" ], [ "pk = urlparse.parse_qs(urlparse.urlsplit(jupyter_notebook_url).query)['pk'][0]", "_____no_output_____" ], [ "cc=CommentsWidget(workchain=pk)", "_____no_output_____" ], [ "display(cc)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
cb23fd8bbbcd2f87ac7be123abd3f2e13e3244d9
56,767
ipynb
Jupyter Notebook
notebooks/book1/15/attention_torch.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
notebooks/book1/15/attention_torch.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
notebooks/book1/15/attention_torch.ipynb
karm-patel/pyprobml
af8230a0bc0d01bb0f779582d87e5856d25e6211
[ "MIT" ]
null
null
null
38.356081
333
0.488999
[ [ [ "Please find jax implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/15/attention_jax.ipynb", "_____no_output_____" ], [ "<a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/attention_torch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Basics of differentiable (soft) attention\n\nWe show how to implement soft attention.\nBased on sec 10.3 of http://d2l.ai/chapter_attention-mechanisms/attention-scoring-functions.html.\n\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom IPython import display\n\ntry:\n import torch\nexcept ModuleNotFoundError:\n %pip install torch\n import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils import data\n\nimport random\nimport os\nimport time\n\nnp.random.seed(seed=1)\ntorch.manual_seed(1)\n!mkdir figures # for saving plots", "_____no_output_____" ] ], [ [ "# Masked soft attention", "_____no_output_____" ] ], [ [ "def sequence_mask(X, valid_len, value=0):\n \"\"\"Mask irrelevant entries in sequences.\"\"\"\n maxlen = X.size(1)\n mask = torch.arange((maxlen), dtype=torch.float32, device=X.device)[None, :] < valid_len[:, None]\n X[~mask] = value\n return X\n\n\ndef masked_softmax(X, valid_lens):\n \"\"\"Perform softmax operation by masking elements on the last axis.\"\"\"\n # `X`: 3D tensor, `valid_lens`: 1D or 2D tensor\n if valid_lens is None:\n return nn.functional.softmax(X, dim=-1)\n else:\n shape = X.shape\n if valid_lens.dim() == 1:\n valid_lens = torch.repeat_interleave(valid_lens, shape[1])\n else:\n valid_lens = valid_lens.reshape(-1)\n # On the last axis, replace masked elements with a very large negative\n # value, whose exponentiation outputs 0\n X = sequence_mask(X.reshape(-1, shape[-1]), valid_lens, 
value=-1e6)\n return nn.functional.softmax(X.reshape(shape), dim=-1)", "_____no_output_____" ] ], [ [ "Example. Batch size 2, feature size 2, sequence length 4.\nThe valid lengths are 2,3. So the output has size (2,2,4),\nbut the length dimension is full of 0s in the invalid locations.", "_____no_output_____" ] ], [ [ "Y = masked_softmax(torch.rand(2, 2, 4), torch.tensor([2, 3]))\nprint(Y)", "tensor([[[0.6174, 0.3826, 0.0000, 0.0000],\n [0.3164, 0.6836, 0.0000, 0.0000]],\n\n [[0.3391, 0.2975, 0.3634, 0.0000],\n [0.4018, 0.2755, 0.3227, 0.0000]]])\n" ] ], [ [ "Example. Batch size 2, feature size 2, sequence length 4.\nThe valid lengths are (1,3) for batch 1, and (2,4) for batch 2.", "_____no_output_____" ] ], [ [ "Y = masked_softmax(torch.rand(2, 2, 4), torch.tensor([[1, 3], [2, 4]]))\nprint(Y)", "tensor([[[1.0000, 0.0000, 0.0000, 0.0000],\n [0.3335, 0.2970, 0.3695, 0.0000]],\n\n [[0.4541, 0.5459, 0.0000, 0.0000],\n [0.1296, 0.2880, 0.2429, 0.3395]]])\n" ] ], [ [ "# Additive attention\n\n$$\n\\alpha(q,k) = w_v^T \\tanh(W_q q + w_k k)\n$$\n\n", "_____no_output_____" ] ], [ [ "class AdditiveAttention(nn.Module):\n def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):\n super(AdditiveAttention, self).__init__(**kwargs)\n self.W_k = nn.Linear(key_size, num_hiddens, bias=False)\n self.W_q = nn.Linear(query_size, num_hiddens, bias=False)\n self.w_v = nn.Linear(num_hiddens, 1, bias=False)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, queries, keys, values, valid_lens):\n queries, keys = self.W_q(queries), self.W_k(keys)\n # After dimension expansion, shape of `queries`: (`batch_size`, no. of\n # queries, 1, `num_hiddens`) and shape of `keys`: (`batch_size`, 1,\n # no. of key-value pairs, `num_hiddens`). Sum them up with\n # broadcasting\n features = queries.unsqueeze(2) + keys.unsqueeze(1)\n features = torch.tanh(features)\n # There is only one output of `self.w_v`, so we remove the last\n # one-dimensional entry from the shape. 
Shape of `scores`:\n # (`batch_size`, no. of queries, no. of key-value pairs)\n scores = self.w_v(features).squeeze(-1)\n self.attention_weights = masked_softmax(scores, valid_lens)\n # Shape of `values`: (`batch_size`, no. of key-value pairs, value\n # dimension)\n return torch.bmm(self.dropout(self.attention_weights), values)", "_____no_output_____" ], [ "# batch size 2. 1 query of dim 20, 10 keys of dim 2.\nqueries, keys = torch.normal(0, 1, (2, 1, 20)), torch.ones((2, 10, 2))\n# 10 values of dim 4 in each of the 2 batches.\nvalues = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(2, 1, 1)\nprint(values.shape)\nvalid_lens = torch.tensor([2, 6])\n\nattention = AdditiveAttention(key_size=2, query_size=20, num_hiddens=8, dropout=0.1)\nattention.eval()\nA = attention(queries, keys, values, valid_lens)\nprint(A.shape)\nprint(A)", "torch.Size([2, 10, 4])\ntorch.Size([2, 1, 4])\ntensor([[[ 2.0000, 3.0000, 4.0000, 5.0000]],\n\n [[10.0000, 11.0000, 12.0000, 13.0000]]], grad_fn=<BmmBackward0>)\n" ] ], [ [ "The heatmap is uniform across the keys, since the keys are all 1s.\nHowever, the support is truncated to the valid length.", "_____no_output_____" ] ], [ [ "def show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(2.5, 2.5), cmap=\"Reds\"):\n display.set_matplotlib_formats(\"svg\")\n num_rows, num_cols = matrices.shape[0], matrices.shape[1]\n fig, axes = plt.subplots(num_rows, num_cols, figsize=figsize, sharex=True, sharey=True, squeeze=False)\n for i, (row_axes, row_matrices) in enumerate(zip(axes, matrices)):\n for j, (ax, matrix) in enumerate(zip(row_axes, row_matrices)):\n pcm = ax.imshow(matrix.detach(), cmap=cmap)\n if i == num_rows - 1:\n ax.set_xlabel(xlabel)\n if j == 0:\n ax.set_ylabel(ylabel)\n if titles:\n ax.set_title(titles[j])\n fig.colorbar(pcm, ax=axes, shrink=0.6)", "_____no_output_____" ], [ "show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)), xlabel=\"Keys\", ylabel=\"Queries\")", "_____no_output_____" ] ], [ [ 
"# Dot-product attention\n\n\n$$\nA = \\text{softmax}(Q K^T/\\sqrt{d}) V\n$$\n", "_____no_output_____" ] ], [ [ "class DotProductAttention(nn.Module):\n \"\"\"Scaled dot product attention.\"\"\"\n\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n # Shape of `queries`: (`batch_size`, no. of queries, `d`)\n # Shape of `keys`: (`batch_size`, no. of key-value pairs, `d`)\n # Shape of `values`: (`batch_size`, no. of key-value pairs, value\n # dimension)\n # Shape of `valid_lens`: (`batch_size`,) or (`batch_size`, no. of queries)\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1]\n # Set `transpose_b=True` to swap the last two dimensions of `keys`\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n return torch.bmm(self.dropout(self.attention_weights), values)", "_____no_output_____" ], [ "# batch size 2. 1 query of dim 2, 10 keys of dim 2.\nqueries = torch.normal(0, 1, (2, 1, 2))\nattention = DotProductAttention(dropout=0.5)\nattention.eval()\nattention(queries, keys, values, valid_lens)", "_____no_output_____" ], [ "show_heatmaps(attention.attention_weights.reshape((1, 1, 2, 10)), xlabel=\"Keys\", ylabel=\"Queries\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb23ffa6bc9c365f5f34f58169cb6661012ffc53
13,286
ipynb
Jupyter Notebook
lecture4/cnn.ipynb
ika-si/lecture_pytorch
06196d2b1a5837210254fafc07a836529e6150f6
[ "MIT" ]
35
2020-05-04T11:43:49.000Z
2021-12-08T14:14:30.000Z
lecture4/cnn.ipynb
ika-si/lecture_pytorch
06196d2b1a5837210254fafc07a836529e6150f6
[ "MIT" ]
2
2021-06-08T21:44:08.000Z
2021-09-08T02:07:55.000Z
lecture4/cnn.ipynb
ika-si/lecture_pytorch
06196d2b1a5837210254fafc07a836529e6150f6
[ "MIT" ]
16
2020-06-05T15:44:15.000Z
2022-03-02T12:47:49.000Z
32.804938
235
0.483893
[ [ [ "<a href=\"https://colab.research.google.com/github/yukinaga/lecture_pytorch/blob/master/lecture4/cnn.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# CNNの実装\nPyTorchを使って、畳み込みニューラルネットワーク(CNN)を実装します。 \nCNN自体はCNNの層を追加するのみで実装可能なのですが、今回はデータ拡張とドロップアウトの実装も行います。\n", "_____no_output_____" ], [ "## CIFAR-10\ntorchvision.datasetsを使い、CIFAR-10を読み込みます。 \nCIFARは、約6万枚の画像にラベルをつけたたデータセットです。 \n以下のコードでは、CIFAR-10を読み込み、ランダムな25枚の画像を表示します。", "_____no_output_____" ] ], [ [ "from torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncifar10_data = CIFAR10(root=\"./data\",\n train=False,download=True,\n transform=transforms.ToTensor())\ncifar10_classes = np.array([\"airplane\", \"automobile\", \"bird\", \"cat\", \"deer\",\n \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"])\nprint(\"データの数:\", len(cifar10_data))\n\nn_image = 25 # 表示する画像の数\ncifar10_loader = DataLoader(cifar10_data, batch_size=n_image, shuffle=True)\ndataiter = iter(cifar10_loader) # イテレータ\nimages, labels = dataiter.next() # 最初のバッチを取り出す\n\nplt.figure(figsize=(10,10)) # 画像の表示サイズ\nfor i in range(n_image):\n plt.subplot(5,5,i+1)\n plt.imshow(np.transpose(images[i], (1, 2, 0))) # チャンネルを一番後ろに\n label = cifar10_classes[labels[i]]\n plt.title(label)\n plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に\n\nplt.show()", "_____no_output_____" ] ], [ [ "## データ拡張\ntorchvision.transformsを使ってデータ拡張を行います。 \n今回は、cifar-10の画像に-30〜30°の回転、および0.8〜1.2倍のリサイズを行います。 \nこれらの処理は、バッチを取り出す際に元の画像に対してランダムに加えられます。 \n", "_____no_output_____" ] ], [ [ "from torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntransform = 
transforms.Compose([transforms.RandomAffine([-30, 30], scale=(0.8, 1.2)), # 回転とリサイズ\n transforms.ToTensor()])\ncifar10_data = CIFAR10(root=\"./data\",\n train=False,download=True,\n transform=transform)\ncifar10_classes = np.array([\"airplane\", \"automobile\", \"bird\", \"cat\", \"deer\",\n \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"])\nprint(\"データの数:\", len(cifar10_data))\n\nn_image = 25 # 表示する画像の数\ncifar10_loader = DataLoader(cifar10_data, batch_size=n_image, shuffle=True)\ndataiter = iter(cifar10_loader) # イテレータ\nimages, labels = dataiter.next() # 最初のバッチを取り出す\n\nplt.figure(figsize=(10,10)) # 画像の表示サイズ\nfor i in range(n_image):\n plt.subplot(5,5,i+1)\n plt.imshow(np.transpose(images[i], (1, 2, 0))) # チャンネルを一番後ろに\n label = cifar10_classes[labels[i]]\n plt.title(label)\n plt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に\n\nplt.show()", "_____no_output_____" ] ], [ [ "## データの前処理\nここからCNNを実装します。 \nデータ拡張として、回転とリサイズ、および左右反転を行います。 \nまた、学習が効率的になるように入力の平均値を0、標準偏差を1にします(標準化)。 \nDataLoaderは、訓練データ、テストデータそれぞれで設定しますが、テストデータにはミニバッチ法を適用しないのでバッチサイズは元データのサンプル数にします。", "_____no_output_____" ] ], [ [ "from torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\naffine = transforms.RandomAffine([-15, 15], scale=(0.8, 1.2)) # 回転とリサイズ\nflip = transforms.RandomHorizontalFlip(p=0.5) # 左右反転\nnormalize = transforms.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) # 平均値を0、標準偏差を1に\nto_tensor = transforms.ToTensor()\n\ntransform_train = transforms.Compose([affine, flip, to_tensor, normalize])\ntransform_test = transforms.Compose([to_tensor, normalize])\ncifar10_train = CIFAR10(\"./data\", train=True, download=True, transform=transform_train)\ncifar10_test = CIFAR10(\"./data\", train=False, download=True, transform=transform_test)\n\n# DataLoaderの設定\nbatch_size = 64\ntrain_loader = DataLoader(cifar10_train, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(cifar10_test, 
batch_size=len(cifar10_test), shuffle=False)", "_____no_output_____" ] ], [ [ "## モデルの構築\n`nn.Module`モジュールを継承したクラスとして、モデルを構築します。 \n今回は、過学習を抑制するためにドロップアウトを導入します。 ", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 6, 5) # 畳み込み層:(入力チャンネル数, フィルタ数、フィルタサイズ)\n self.pool = nn.MaxPool2d(2, 2) # プーリング層:(領域のサイズ, ストライド)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16*5*5, 256) # 全結合層\n self.dropout = nn.Dropout(p=0.5) # ドロップアウト:(p=ドロップアウト率)\n self.fc2 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16*5*5)\n x = F.relu(self.fc1(x))\n x = self.dropout(x)\n x = self.fc2(x)\n return x\n\nnet = Net()\nnet.cuda() # GPU対応\nprint(net)", "_____no_output_____" ] ], [ [ "## 学習\nモデルを訓練します。 \nDataLoaderを使い、ミニバッチを取り出して訓練および評価を行います。 \n今回は、評価時にミニバッチ法は使わず、テストデータ全体を使って一度に誤差を計算します。 \n学習には時間がかかりますので、編集→ノートブックの設定のハードウェアアクセラレーターでGPUを選択しましょう。\n", "_____no_output_____" ] ], [ [ "from torch import optim\n\n# 交差エントロピー誤差関数\nloss_fnc = nn.CrossEntropyLoss()\n\n# 最適化アルゴリズム\noptimizer = optim.Adam(net.parameters())\n\n# 損失のログ\nrecord_loss_train = []\nrecord_loss_test = []\n\n# 学習\nx_test, t_test = iter(test_loader).next()\nx_test, t_test = x_test.cuda(), t_test.cuda()\nfor i in range(20): # 20エポック学習\n net.train() # 訓練モード\n loss_train = 0\n for j, (x, t) in enumerate(train_loader): # ミニバッチ(x, t)を取り出す\n x, t = x.cuda(), t.cuda() # GPU対応\n y = net(x)\n loss = loss_fnc(y, t)\n loss_train += loss.item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_train /= j+1\n record_loss_train.append(loss_train)\n\n net.eval() # 評価モード\n y_test = net(x_test)\n loss_test = loss_fnc(y_test, t_test).item()\n record_loss_test.append(loss_test)\n\n if i%1 == 0:\n print(\"Epoch:\", i, \"Loss_Train:\", loss_train, \"Loss_Test:\", loss_test)", "_____no_output_____" ] ], [ 
[ "## 誤差の推移\n訓練データ、テストデータで誤差の推移をグラフ表示します。 ", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n\nplt.plot(range(len(record_loss_train)), record_loss_train, label=\"Train\")\nplt.plot(range(len(record_loss_test)), record_loss_test, label=\"Test\")\nplt.legend()\n\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Error\")\nplt.show()", "_____no_output_____" ] ], [ [ "## 正解率\nモデルの性能を把握するため、テストデータ使い正解率を測定します。 ", "_____no_output_____" ] ], [ [ "correct = 0\ntotal = 0\nnet.eval() # 評価モード\nfor i, (x, t) in enumerate(test_loader):\n x, t = x.cuda(), t.cuda() # GPU対応\n y = net(x)\n correct += (y.argmax(1) == t).sum().item()\n total += len(x)\nprint(\"正解率:\", str(correct/total*100) + \"%\")", "_____no_output_____" ] ], [ [ "## 訓練済みのモデルを使った予測\n訓練済みのモデルを使ってみましょう。 \n画像を入力し、モデルが機能していることを確かめます。", "_____no_output_____" ] ], [ [ "cifar10_loader = DataLoader(cifar10_test, batch_size=1, shuffle=True)\ndataiter = iter(cifar10_loader)\nimages, labels = dataiter.next() # サンプルを1つだけ取り出す\n\nplt.imshow(np.transpose(images[0], (1, 2, 0))) # チャンネルを一番後ろに\nplt.tick_params(labelbottom=False, labelleft=False, bottom=False, left=False) # ラベルとメモリを非表示に\nplt.show()\n\nnet.eval() # 評価モード\nx, t = images.cuda(), labels.cuda() # GPU対応\ny = net(x)\nprint(\"正解:\", cifar10_classes[labels[0]],\n \"予測結果:\", cifar10_classes[y.argmax().item()])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb240fc19e04175f09eeb3701da3633cf416563b
594
ipynb
Jupyter Notebook
research/Untitled.ipynb
herminjjc/machineLearningPython
8a46700ad15346c12df16a0c8674cf9bd331d97f
[ "MIT" ]
null
null
null
research/Untitled.ipynb
herminjjc/machineLearningPython
8a46700ad15346c12df16a0c8674cf9bd331d97f
[ "MIT" ]
null
null
null
research/Untitled.ipynb
herminjjc/machineLearningPython
8a46700ad15346c12df16a0c8674cf9bd331d97f
[ "MIT" ]
null
null
null
16.5
34
0.518519
[ [ [ "pip install ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb241fcc0fa85ee555e4e7466342c5293af6e7d9
23,915
ipynb
Jupyter Notebook
src/gan-mnist/MNIST_GAN.ipynb
akoken/deep-learning
681e846a763f8820410ac770ce76e81a871e7149
[ "MIT" ]
null
null
null
src/gan-mnist/MNIST_GAN.ipynb
akoken/deep-learning
681e846a763f8820410ac770ce76e81a871e7149
[ "MIT" ]
null
null
null
src/gan-mnist/MNIST_GAN.ipynb
akoken/deep-learning
681e846a763f8820410ac770ce76e81a871e7149
[ "MIT" ]
null
null
null
36.344985
420
0.571064
[ [ [ "# Generative Adversarial Network\n\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\n\nGANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\n* [Pix2Pix](https://affinelayer.com/pixsrv/) \n* [CycleGAN & Pix2Pix in PyTorch, Jun-Yan Zhu](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)\n* [A list of generative models](https://github.com/wiseodd/generative-models)\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes \"fake\" data to pass to the discriminator. The discriminator also sees real training data and predicts if the data it's received is real or fake. \n> * The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real, training data. \n* The discriminator is a classifier that is trained to figure out which data is real and which is fake. \n\nWhat ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.\n\n<img src='assets/gan_pipeline.png' width=70% />\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector that the generator uses to construct its fake images. This is often called a **latent vector** and that vector space is called **latent space**. As the generator trains, it figures out how to map latent vectors to recognizable images that can fool the discriminator.\n\nIf you're interested in generating only new images, you can throw out the discriminator after training. 
In this notebook, I'll show you how to define and train these adversarial networks in PyTorch and generate new images!", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 64\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# get the training datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\n\n# prepare data loader\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)", "_____no_output_____" ] ], [ [ "### Visualize the data", "_____no_output_____" ] ], [ [ "# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# get one image from the batch\nimg = np.squeeze(images[0])\n\nfig = plt.figure(figsize = (3,3)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')", "_____no_output_____" ] ], [ [ "---\n# Define the Model\n\nA GAN is comprised of two adversarial networks, a discriminator and a generator.", "_____no_output_____" ], [ "## Discriminator\n\nThe discriminator network is going to be a pretty typical linear classifier. To make this network a universal function approximator, we'll need at least one hidden layer, and these hidden layers should have one key attribute:\n> All hidden layers will have a [Leaky ReLu](https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU) activation function applied to their outputs.\n\n<img src='assets/gan_network.png' width=70% />\n\n#### Leaky ReLu\n\nWe should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. 
A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\n\n<img src='assets/leaky_relu.png' width=40% />\n\n#### Sigmoid Output\n\nWe'll also take the approach of using a more numerically stable loss function on the outputs. Recall that we want the discriminator to output a value 0-1 indicating whether an image is _real or fake_. \n> We will ultimately use [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss), which combines a `sigmoid` activation function **and** and binary cross entropy loss in one function. \n\nSo, our final output layer should not have any activation function applied to it.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass Discriminator(nn.Module):\n\n def __init__(self, input_size, hidden_dim, output_size):\n super(Discriminator, self).__init__()\n \n self.fc1 = nn.Linear(input_size, hidden_dim*4)\n self.fc2 = nn.Linear(hidden_dim*4, hidden_dim*2)\n self.fc3 = nn.Linear(hidden_dim*2, hidden_dim)\n self.fc4 = nn.Linear(hidden_dim, output_size)\n \n self.dropout = nn.Dropout(0.3)\n \n def forward(self, x):\n # flatten image\n x = x.view(-1, 28*28)\n\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = self.dropout(x)\n\n x = self.fc4(x)\n\n return x\n", "_____no_output_____" ] ], [ [ "## Generator\n\nThe generator network will be almost exactly the same as the discriminator network, except that we're applying a [tanh activation function](https://pytorch.org/docs/stable/nn.html#tanh) to our output layer.\n\n#### tanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output, which scales the output to be between -1 and 1, instead of 0 and 1. 
\n\n<img src='assets/tanh_fn.png' width=40% />\n\nRecall that we also want these outputs to be comparable to the *real* input pixel values, which are read in as normalized values between 0 and 1. \n> So, we'll also have to **scale our real input images to have pixel values between -1 and 1** when we train the discriminator. \n\nI'll do this in the training loop, later on.", "_____no_output_____" ] ], [ [ "class Generator(nn.Module):\n\n def __init__(self, input_size, hidden_dim, output_size):\n super(Generator, self).__init__()\n \n # define all layers\n self.fc1 = nn.Linear(input_size, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, hidden_dim*2)\n self.fc3 = nn.Linear(hidden_dim*2, hidden_dim*4)\n self.fc4 = nn.Linear(hidden_dim*4, output_size)\n \n self.dropout = nn.Dropout(0.3) \n\n def forward(self, x):\n # pass x through all layers\n x = F.leaky_relu(self.fc1(x), 0.2) # (input, negative_slope=0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = self.dropout(x)\n\n # final layer with tanh applied\n x = F.tanh(self.fc4(x)) \n \n return x", "_____no_output_____" ] ], [ [ "## Model hyperparameters", "_____no_output_____" ] ], [ [ "# Discriminator hyperparams\n\n# Size of input image to discriminator (28*28)\ninput_size = 784\n# Size of discriminator output (real or fake)\nd_output_size = 1\n# Size of *last* hidden layer in the discriminator\nd_hidden_size = 32\n\n# Generator hyperparams\n\n# Size of latent vector to give to generator\nz_size = 100\n# Size of discriminator output (generated image)\ng_output_size = 784\n# Size of *first* hidden layer in the generator\ng_hidden_size = 32", "_____no_output_____" ] ], [ [ "## Build complete network\n\nNow we're instantiating the discriminator and generator from the classes defined above. 
Make sure you've passed in the correct input arguments.", "_____no_output_____" ] ], [ [ "# instantiate discriminator and generator\nD = Discriminator(input_size, d_hidden_size, d_output_size)\nG = Generator(z_size, g_hidden_size, g_output_size)\n\n# check that they are as you expect\nprint(D)\nprint()\nprint(G)", "_____no_output_____" ] ], [ [ "---\n## Discriminator and Generator Losses\n\nNow we need to calculate the losses. \n\n### Discriminator Losses\n\n> * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`. \n* Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\n\n<img src='assets/gan_pipeline.png' width=70% />\n\nThe losses will by binary cross entropy loss with logits, which we can get with [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss). This combines a `sigmoid` activation function **and** and binary cross entropy loss in one function.\n\nFor the real images, we want `D(real_images) = 1`. That is, we want the discriminator to classify the the real images with a label = 1, indicating that these are real. To help the discriminator generalize better, the labels are **reduced a bit from 1.0 to 0.9**. For this, we'll use the parameter `smooth`; if True, then we should smooth our labels. In PyTorch, this looks like `labels = torch.ones(size) * 0.9`\n\nThe discriminator loss for the fake data is similar. We want `D(fake_images) = 0`, where the fake images are the _generator output_, `fake_images = G(z)`. \n\n### Generator Loss\n\nThe generator loss will look similar only with flipped labels. The generator's goal is to get `D(fake_images) = 1`. 
In this case, the labels are **flipped** to represent that the generator is trying to fool the discriminator into thinking that the images it generates (fakes) are real!", "_____no_output_____" ] ], [ [ "# Calculate losses\ndef real_loss(D_out, smooth=False):\n batch_size = D_out.size(0)\n # label smoothing\n if smooth:\n # smooth, real labels = 0.9\n labels = torch.ones(batch_size)*0.9\n else:\n labels = torch.ones(batch_size) # real labels = 1\n \n # numerically stable loss\n criterion = nn.BCEWithLogitsLoss()\n # calculate loss\n loss = criterion(D_out.squeeze(), labels)\n return loss\n\ndef fake_loss(D_out):\n batch_size = D_out.size(0)\n labels = torch.zeros(batch_size) # fake labels = 0\n criterion = nn.BCEWithLogitsLoss()\n # calculate loss\n loss = criterion(D_out.squeeze(), labels)\n return loss# Calculate losses", "_____no_output_____" ] ], [ [ "## Optimizers\n\nWe want to update the generator and discriminator variables separately. So, we'll define two separate Adam optimizers.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\n# learning rate for optimizers\nlr = 0.002\n\n# Create optimizers for the discriminator and generator\nd_optimizer = optim.Adam(D.parameters(), lr)\ng_optimizer = optim.Adam(G.parameters(), lr)", "_____no_output_____" ] ], [ [ "---\n## Training\n\nTraining will involve alternating between training the discriminator and the generator. We'll use our functions `real_loss` and `fake_loss` to help us calculate the discriminator losses in all of the following cases.\n\n### Discriminator training\n1. Compute the discriminator loss on real, training images \n2. Generate fake images\n3. Compute the discriminator loss on fake, generated images \n4. Add up real and fake loss\n5. Perform backpropagation + an optimization step to update the discriminator's weights\n\n### Generator training\n1. Generate fake images\n2. Compute the discriminator loss on fake images, using **flipped** labels!\n3. 
Perform backpropagation + an optimization step to update the generator's weights\n\n#### Saving Samples\n\nAs we train, we'll also print out some loss statistics and save some generated \"fake\" samples.", "_____no_output_____" ] ], [ [ "import pickle as pkl\n\n# training hyperparams\nnum_epochs = 100\n\n# keep track of loss and generated, \"fake\" samples\nsamples = []\nlosses = []\n\nprint_every = 400\n\n# Get some fixed data for sampling. These are images that are held\n# constant throughout training, and allow us to inspect the model's performance\nsample_size=16\nfixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))\nfixed_z = torch.from_numpy(fixed_z).float()\n\n# train the network\nD.train()\nG.train()\nfor epoch in range(num_epochs):\n \n for batch_i, (real_images, _) in enumerate(train_loader):\n \n batch_size = real_images.size(0)\n \n ## Important rescaling step ## \n real_images = real_images*2 - 1 # rescale input images from [0,1) to [-1, 1)\n \n # ============================================\n # TRAIN THE DISCRIMINATOR\n # ============================================\n \n d_optimizer.zero_grad()\n \n # 1. Train with real images\n\n # Compute the discriminator losses on real images \n # smooth the real labels\n D_real = D(real_images)\n d_real_loss = real_loss(D_real, smooth=True)\n \n # 2. Train with fake images\n \n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n fake_images = G(z)\n \n # Compute the discriminator losses on fake images \n D_fake = D(fake_images)\n d_fake_loss = fake_loss(D_fake)\n \n # add up loss and perform backprop\n d_loss = d_real_loss + d_fake_loss\n d_loss.backward()\n d_optimizer.step()\n \n \n # =========================================\n # TRAIN THE GENERATOR\n # =========================================\n g_optimizer.zero_grad()\n \n # 1. 
Train with fake images and flipped labels\n \n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n fake_images = G(z)\n \n # Compute the discriminator losses on fake images \n # using flipped labels!\n D_fake = D(fake_images)\n g_loss = real_loss(D_fake) # use real loss to flip labels\n \n # perform backprop\n g_loss.backward()\n g_optimizer.step()\n\n # Print some loss stats\n if batch_i % print_every == 0:\n # print discriminator and generator loss\n print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format(\n epoch+1, num_epochs, d_loss.item(), g_loss.item()))\n\n \n ## AFTER EACH EPOCH##\n # append discriminator loss and generator loss\n losses.append((d_loss.item(), g_loss.item()))\n \n # generate and save sample, fake images\n G.eval() # eval mode for generating samples\n samples_z = G(fixed_z)\n samples.append(samples_z)\n G.train() # back to train mode\n\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)import pickle as pkl", "_____no_output_____" ] ], [ [ "## Training loss\n\nHere we'll plot the training losses for the generator and discriminator, recorded after each epoch.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "_____no_output_____" ] ], [ [ "## Generator samples from training\n\nHere we can view samples of images from the generator. 
First we'll look at the images we saved during training.", "_____no_output_____" ] ], [ [ "# helper function for viewing a list of passed in sample images\ndef view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n img = img.detach()\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')", "_____no_output_____" ], [ "# Load samples from generator, taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "_____no_output_____" ] ], [ [ "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_____no_output_____" ] ], [ [ "# -1 indicates final epoch's samples (the last in the list)\nview_samples(-1, samples)", "_____no_output_____" ] ], [ [ "Below I'm showing the generated images as the network was training, every 10 epochs.", "_____no_output_____" ] ], [ [ "rows = 10 # split epochs into 10, so 100/10 = every 10 epochs\ncols = 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n img = img.detach()\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)", "_____no_output_____" ] ], [ [ "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.", "_____no_output_____" ], [ "## Sampling from the generator\n\nWe can also get completely new images from the generator by using the checkpoint we saved after training. 
**We just need to pass in a new latent vector $z$ and we'll get new samples**!", "_____no_output_____" ] ], [ [ "# randomly generated, new latent vectors\nsample_size=16\nrand_z = np.random.uniform(-1, 1, size=(sample_size, z_size))\nrand_z = torch.from_numpy(rand_z).float()\n\nG.eval() # eval mode\n# generated samples\nrand_images = G(rand_z)\n\n# 0 indicates the first set of samples in the passed in list\n# and we only have one batch of samples, here\nview_samples(0, [rand_images])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb2423408df593994073bd7dd78e0a8369a5cba7
958,073
ipynb
Jupyter Notebook
Machine Learning Challenge 3 (Prediction Problem) - Round 1/EDA & Prediction.ipynb
rajat5ranjan/Yes-Bank-Datathon
f4ed235aff2faec08cb9d08913614aa7b6d9322b
[ "Apache-2.0" ]
5
2019-02-16T19:31:49.000Z
2019-05-04T12:51:16.000Z
Machine Learning Challenge 3 (Prediction Problem) - Round 1/EDA & Prediction.ipynb
rajat5ranjan/Yes-Bank-Datathon
f4ed235aff2faec08cb9d08913614aa7b6d9322b
[ "Apache-2.0" ]
null
null
null
Machine Learning Challenge 3 (Prediction Problem) - Round 1/EDA & Prediction.ipynb
rajat5ranjan/Yes-Bank-Datathon
f4ed235aff2faec08cb9d08913614aa7b6d9322b
[ "Apache-2.0" ]
2
2019-02-16T19:30:11.000Z
2019-09-18T07:21:34.000Z
348.010534
298,180
0.898615
[ [ [ "![title](yesbank_feature_banner.png)", "_____no_output_____" ], [ "# YES BANK DATATHON\n\n## Machine Learning Challenge Round 3 - EDA\n\n### Data Description \nThe data given is of credit records of individuals with certain attributes.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport seaborn as sns\n%matplotlib inline\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "train=pd.read_csv('Yes_Bank_Train.csv')\ntest=pd.read_csv('Yes_Bank_Test_int.csv')\ntrain.info()\n\nsub=pd.read_csv('sample_clusters.csv')", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 21 columns):\nserial number 800 non-null int64\naccount_info 800 non-null object\nduration_month 800 non-null int64\ncredit_history 800 non-null object\npurpose 800 non-null object\ncredit_amount 800 non-null int64\nsavings_account 800 non-null object\nemployment_st 800 non-null object\npoi 800 non-null int64\npersonal_status 800 non-null object\ngurantors 800 non-null object\nresident_since 800 non-null int64\nproperty_type 800 non-null object\nage 800 non-null int64\ninstallment_type 800 non-null object\nhousing_type 800 non-null object\ncredits_no 800 non-null int64\njob_type 800 non-null object\nliables 800 non-null int64\ntelephone 800 non-null object\nforeigner 800 non-null object\ndtypes: int64(8), object(13)\nmemory usage: 131.3+ KB\n" ], [ "train.head()", "_____no_output_____" ], [ "train.describe(include='all').T", "_____no_output_____" ], [ "test.describe(include='all').T", "_____no_output_____" ] ], [ [ "Most of them are Categorical value, lets have a view at the features and what they signify", "_____no_output_____" ], [ "a. **serial number** : unique identification key\n\nb. **account_info :** Categorized details of existing accounts of the individuals. 
The balance of money in account provided is stated by this variable\n\n* A11 signifies 0 (excluding 0) or lesser amount credited to current checking account. (Amounts are in units of certain currency)\n* A12 signifies greater than 0 (including 0) and lesser than 200 (excluding 200) units of currency\n* A13 signifies amount greater than 200 (including 200) being recorded in the account\n* A14 signifies no account details provided*\n\nc. **duration_month** : Duration in months for which the credit is existing\n\nd. **credit_history** : This categorical variable signifies the credit history of the individual who has taken the loan\n\n\n* A30 signifies that no previous loans has been taken or all loans taken have been payed back.\n* A31 signifies that all loans from the current bank has been payed off. Loan information of other banks are not available.\n* A32 signifies loan exists but till now regular installments have been payed back in full amount.\n* A33 signifies that significant delays have been seen in repayment of loan installments.\n* A34 signifies other loans exist at the same bank. Irregular behaviour in repayment.*\n\n\ne. **purpose**: This variable signifies why the loan was taken\n\n\n* A40 signifies that the loan is taken to buy a new car\n* A41 signifies that the loan was taken to buy a old car \n* A42 signifies that the loan is taken to buy furniture or equipment\n* A43 signifies that the loan is taken to buy radio or TV\n* A44 signifies that the loan is taken to buy domestic appliances\n* A45 signifies that the loan is taken for repairing purposes\n* A46 signifies that the loan is taken for education\n* A47 signifies that the loan is taken for vacation\n* A48 signifies that the loan is taken for re skilling\n* A49 signifies that the loan is taken for business and establishment\n* A410 signifies other purposes*\n\n\nf. 
**credit_amount**: The numerical variable signifies the amount credited to the individual (in units of a certain currency)(**TARGET**)\n\ng. **savings_account**: This variable signifies details of the amount present in savings account of the individual:\n\n* A61 signifies that less than 100 units (excluding 100) of currency is present\n* A62 signifies that greater than 100 units (including 100) and less than 500 (excluding 500) units of currency is present\n* A63 signifies that greater than 500 (including 500) and less than 1000 (excluding 1000) units of currency is present.\n* A64 signifies that greater than 1000 (including 1000) units of currency is present.\n* A65 signifies that no savings account details is present on record*\n\n\nh. **employment_s**: Catergorical variable that signifies the employment status of everyone who has been alloted loans\n\n* A71 signifies that the individual is unemployed\n* A72 signifies that the individual has been employed for less than a year\n* A73 signifies that the individual has been employed for more than a year but less than four years\n* A74 signifies that the individual has been employed more than four years but less than seven years\n* A75 signifies that the individual has been employed for more than seven years*\n\n\ni. **poi**: This numerical variable signifies what percentage of disposable income is spent on loan interest amount.\n\nj. ***personal_status**: This categorical variable signifies the personal status of the individual\n\n* A91 signifies that the individual is a separated or divorced male\n* A92 signifies female individuals who are separated or divorced\n* A93 signifies unmarried males\n* A94 signifies married or widowed males\n* A95 signifies single females*\n\n\nk. 
**gurantors**: Categorical variable which signifies if any other individual is involved with an individual loan case\n\n* A101 signifies that only a single individual is involved in the loan application\n* A102 signifies that one or more co-applicant is present in the loan application\n* A103 signifies that guarantor are present.*\n\n\nl. **resident_since**: Numerical variable that signifies for how many years the applicant has been a resident\n\nm. **property_type**: This qualitative variable defines the property holding information of the individual\n\n* A121 signifies that the individual holds real estate property\n* A122 signifies that the individual holds a building society savings agreement or life insurance\n* A123 signifies that the individual holds cars or other properties\n* A124 signifies that property information is not available*\n\n\nn. **age**: Numerical variable that signifies age in number of years\n\no. **installment_type**: This variable signifies other installment types taken\n\n* A141 signifies installment to bank\n* A142 signifies installment to outlets or stores\n* A143 signifies that no information is present*\n\n\np. **housing_type**: This is a categorical variable that signifies which type of housing does a applicant have.\n\n* A151 signifies that the housing is on rent\n* A152 signifies that the housing is owned by the applicant\n* A153 signifies that no loan amount is present on the housing and there is no expense for the housing) *\n\n\nq. **credits_no**: Numerical variable for number of credits taken by the person\n\nr. **job_type**: Signifies the employment status of the person\n\n* A171 signifies that the individual is unemployed or unskilled and is a non-resident\n* A172 signifies that the individual is unskilled but is a resident\n* A173 signifies that the individual is a skilled employee or official\n* A174 signifies that the individual is involved in management or is self-employed or a highly qualified employee or officer*\n\ns. 
**liables**: Signifies number of persons dependent on the applicant\n\nt. **telephone**: Signifies if the individual has a telephone or not\n\n* A191 signifies that no telephonic records are present\n* A192 signifies that a telephone is registered with the customer’s name*\n\n\nu. **foreigner**: Signifies if the individual is a foreigner or not (considering the country of residence of the bank)\n\n* A201 signifies that the individual is a foreigner\n* A202 signifies that the individual is a resident*", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(12,9))\nsns.countplot(train.account_info)", "_____no_output_____" ], [ "plt.figure(figsize=(12,9))\nsns.pairplot(train.drop(['serial number','liables'],axis=1),hue='account_info')", "_____no_output_____" ], [ "plt.figure(figsize=(12,9))\nsns.pairplot(train.drop(['serial number','liables'],axis=1),hue='credit_history')", "_____no_output_____" ], [ "plt.figure(figsize=(12,9))\nsns.jointplot(train.age,train.credit_amount,kind='hex')", "C:\\Users\\Acer\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. 
In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\nC:\\Users\\Acer\\Anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\nC:\\Users\\Acer\\Anaconda3\\lib\\site-packages\\matplotlib\\axes\\_axes.py:6462: UserWarning: The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n warnings.warn(\"The 'normed' kwarg is deprecated, and has been \"\n" ], [ "\n# sns.jointplot(train.poi,train.credit_amount,kind='scatter')\nsns.lmplot('duration_month','credit_amount',train,hue='credit_history')", "C:\\Users\\Acer\\Anaconda3\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. 
In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n" ] ], [ [ "### One Hot Encoded", "_____no_output_____" ] ], [ [ "dftrain=pd.get_dummies(train,drop_first=True)\ndftrain.head()", "_____no_output_____" ] ], [ [ "**Correlation**", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(15,10))\nsns.heatmap(dftrain.corr())", "_____no_output_____" ], [ "plt.figure(figsize=(12,9))\nplt.scatter(dftrain['serial number'],dftrain.credit_amount)", "_____no_output_____" ] ], [ [ "**Lets have a try at train test split to see where we are at**", "_____no_output_____" ] ], [ [ "X,y=dftrain.drop(['serial number','credit_amount'],axis=1),dftrain.credit_amount", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1994)", "_____no_output_____" ] ], [ [ "![title](yesbank_feature_banner.png)", "_____no_output_____" ], [ "\n\n# YES BANK DATATHON\n\n## Machine Learning Challenge Round 3 - Prediction", "_____no_output_____" ], [ "### Ensemble", "_____no_output_____" ], [ "Taking **RMSE** as the Eval Metric", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.metrics import mean_squared_error\n\nrf=RandomForestRegressor(n_estimators=420)\nrf.fit(X_train,y_train)\np=rf.predict(X_test)\nprint(np.sqrt(mean_squared_error(y_test,p)))", "1819.5808199916296\n" ] ], [ [ "Feature Importances", "_____no_output_____" ] ], [ [ "col=pd.DataFrame({'col':X.columns,'imp':rf.feature_importances_}).sort_values('imp',ascending=False)\ncol", "_____no_output_____" ] ], [ [ "Taking top 45 values", "_____no_output_____" ] ], [ [ "main_col=col.col.values[:45]", "_____no_output_____" ] ], [ [ "Taking and Encoding Test Data as well", 
"_____no_output_____" ] ], [ [ "dftest=pd.get_dummies(test,drop_first=True)\ndftest.head()", "_____no_output_____" ] ], [ [ "Using **Kfold**", "_____no_output_____" ] ], [ [ "X1=X[main_col]\ndftest1=dftest[main_col]\nfrom sklearn.metrics import mean_squared_error\nerr=[]\npdd=[]\nfrom sklearn.model_selection import KFold\nfold=KFold(n_splits=4,shuffle=True)\nfor train_index, test_index in fold.split(X1):\n X_train, X_test = X1.iloc[train_index], X1.iloc[test_index]\n y_train, y_test = y[train_index], y[test_index]\n rf=RandomForestRegressor(n_estimators=420,max_features=10)\n rf.fit(X_train,y_train)\n err.append(np.sqrt(mean_squared_error(y_test,rf.predict(X_test))))\n p=rf.predict(dftest1)\n pdd.append(p)\n", "_____no_output_____" ], [ "np.mean(err,axis=0)", "_____no_output_____" ], [ "pdd_mean=np.mean(pdd,axis=0)\npdd_mean", "_____no_output_____" ] ], [ [ "### Neural Network", "_____no_output_____" ] ], [ [ "from sklearn.neural_network import MLPClassifier, MLPRegressor\nmlp=MLPRegressor(hidden_layer_sizes=(120,30,), activation=\"relu\", max_iter=500, random_state=8,solver='adam')\nmlp.fit(X_train,y_train)\np=mlp.predict(X_test)\nprint(np.sqrt(mean_squared_error(y_test,p)))", "1680.675862689182\n" ], [ "mlp.fit(X,y)\npred=mlp.predict(dftest.drop('serial number',axis=1))", "C:\\Users\\Acer\\Anaconda3\\lib\\site-packages\\sklearn\\neural_network\\multilayer_perceptron.py:564: ConvergenceWarning: Stochastic Optimizer: Maximum iterations (500) reached and the optimization hasn't converged yet.\n % self.max_iter, ConvergenceWarning)\n" ], [ "pred", "_____no_output_____" ] ], [ [ "Taking Avg", "_____no_output_____" ] ], [ [ "main_p=(pdd_mean+pred)/2", "_____no_output_____" ], [ "sub=pd.DataFrame({'serial number':test['serial number'],'credit_amount':main_p})\nsub.head()", "_____no_output_____" ], [ "sub.to_csv('stack_main.csv',index=False)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]